Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

tests: add return type hints to tests #183

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 8 additions & 8 deletions tests/functional/test_cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,15 +18,15 @@


@pytest.fixture
def runner(monkeypatch) -> testing.CliRunner:
    """Return a Click ``CliRunner`` with project-config discovery disabled.

    Patching ``config.find_project_config`` to return ``None`` keeps the
    test run from picking up interrogate's own pyproject.toml, which would
    otherwise change the CLI's default options.
    """
    # don't let the tests accidentally bring in the project's own
    # pyproject.toml
    monkeypatch.setattr(config, "find_project_config", lambda x: None)
    return testing.CliRunner()


def test_run_no_paths(runner, monkeypatch, tmpdir):
def test_run_no_paths(runner, monkeypatch, tmpdir) -> None:
"""Assume current working directory if no paths are given."""
monkeypatch.setattr(os, "getcwd", lambda: SAMPLE_DIR)

Expand Down Expand Up @@ -75,7 +75,7 @@ def test_run_no_paths(runner, monkeypatch, tmpdir):
(["-f", "40"], 51.4, 0),
),
)
def test_run_shortflags(flags, exp_result, exp_exit_code, runner):
def test_run_shortflags(flags, exp_result, exp_exit_code, runner) -> None:
"""Test CLI with single short flags"""
cli_inputs = flags + [SAMPLE_DIR]
result = runner.invoke(cli.main, cli_inputs)
Expand Down Expand Up @@ -107,7 +107,7 @@ def test_run_shortflags(flags, exp_result, exp_exit_code, runner):
(["--style", "google"], 54.1, 1),
),
)
def test_run_longflags(flags, exp_result, exp_exit_code, runner):
def test_run_longflags(flags, exp_result, exp_exit_code, runner) -> None:
"""Test CLI with single long flags"""
cli_inputs = flags + [SAMPLE_DIR]
result = runner.invoke(cli.main, cli_inputs)
Expand All @@ -125,7 +125,7 @@ def test_run_longflags(flags, exp_result, exp_exit_code, runner):
(["-m", "-f", "45"], 51.4, 0),
),
)
def test_run_multiple_flags(flags, exp_result, exp_exit_code, runner):
def test_run_multiple_flags(flags, exp_result, exp_exit_code, runner) -> None:
"""Test CLI with a hodge-podge of flags"""
cli_inputs = flags + [SAMPLE_DIR]
result = runner.invoke(cli.main, cli_inputs)
Expand All @@ -136,7 +136,7 @@ def test_run_multiple_flags(flags, exp_result, exp_exit_code, runner):


@pytest.mark.parametrize("quiet", (True, False))
def test_generate_badge(quiet, runner, tmp_path):
def test_generate_badge(quiet, runner, tmp_path) -> None:
"""Test expected SVG output when creating a status badge."""
expected_output_path = os.path.join(FIXTURES, "expected_badge.svg")
with open(expected_output_path) as f:
Expand Down Expand Up @@ -171,7 +171,7 @@ def test_generate_badge(quiet, runner, tmp_path):
assert expected_output == actual_output


def test_incompatible_options_badge_format(runner):
def test_incompatible_options_badge_format(runner) -> None:
"""Raise an error when mutually exclusive options are used together."""
result = runner.invoke(cli.main, ["--badge-format", "svg"])
assert 2 == result.exit_code
Expand All @@ -182,7 +182,7 @@ def test_incompatible_options_badge_format(runner):
assert exp_error in result.output


def test_incompatible_options_badge_style(runner):
def test_incompatible_options_badge_style(runner) -> None:
"""Raise an error when mutually exclusive options are used together."""
result = runner.invoke(cli.main, ["--badge-style", "plastic"])
assert 2 == result.exit_code
Expand Down
22 changes: 11 additions & 11 deletions tests/functional/test_coverage.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@


@pytest.fixture(autouse=True)
def patch_term_width(monkeypatch) -> None:
    """Pin the output formatter's terminal width to 80 columns.

    Autouse so every test in this module compares CLI output against
    fixtures rendered at a fixed width, independent of the real terminal.
    """
    monkeypatch.setattr(coverage.utils.OutputFormatter, "TERMINAL_WIDTH", 80)

Expand Down Expand Up @@ -76,7 +76,7 @@ def patch_term_width(monkeypatch):
),
),
)
def test_coverage_simple(paths, conf, exp_results, mocker):
def test_coverage_simple(paths, conf, exp_results, mocker) -> None:
"""Happy path - get expected results given a file or directory"""
conf = config.InterrogateConfig(**conf)
interrogate_coverage = coverage.InterrogateCoverage(paths=paths, conf=conf)
Expand All @@ -89,7 +89,7 @@ def test_coverage_simple(paths, conf, exp_results, mocker):
assert exp_results[3] == f"{results.perc_covered:.1f}"


def test_coverage_errors(capsys):
def test_coverage_errors(capsys) -> None:
"""Exit when no Python files are found."""
path = os.path.join(SAMPLE_DIR, "ignoreme.txt")
interrogate_coverage = coverage.InterrogateCoverage(paths=[path])
Expand Down Expand Up @@ -120,7 +120,7 @@ def test_coverage_errors(capsys):
(2, "expected_detailed.txt"),
),
)
def test_print_results(level, exp_fixture_file, capsys, monkeypatch):
def test_print_results(level, exp_fixture_file, capsys, monkeypatch) -> None:
"""Output of test results differ by verbosity."""
interrogate_config = config.InterrogateConfig(docstring_style="google")
interrogate_coverage = coverage.InterrogateCoverage(
Expand Down Expand Up @@ -152,7 +152,7 @@ def test_print_results(level, exp_fixture_file, capsys, monkeypatch):
)
def test_print_results_omit_covered(
level, exp_fixture_file, capsys, monkeypatch
):
) -> None:
"""Output of results differ by verbosity, omitting fully covered files."""
interrogate_config = config.InterrogateConfig(
omit_covered_files=True, docstring_style="google"
Expand All @@ -176,7 +176,7 @@ def test_print_results_omit_covered(


@pytest.mark.parametrize("level", (1, 2))
def test_print_results_omit_none(level, capsys, monkeypatch):
def test_print_results_omit_none(level, capsys, monkeypatch) -> None:
"""Output of test results by verbosity, no fully covered files."""
interrogate_config = config.InterrogateConfig(omit_covered_files=True)
interrogate_coverage = coverage.InterrogateCoverage(
Expand All @@ -192,7 +192,7 @@ def test_print_results_omit_none(level, capsys, monkeypatch):
assert "omitted due to complete coverage" not in captured.out


def test_print_results_omit_all_summary(capsys, monkeypatch):
def test_print_results_omit_all_summary(capsys, monkeypatch) -> None:
"""Output of test results for summary view, omitting all covered files."""
interrogate_config = config.InterrogateConfig(
omit_covered_files=True, docstring_style="google"
Expand All @@ -216,7 +216,7 @@ def test_print_results_omit_all_summary(capsys, monkeypatch):
assert expected_out in captured.out


def test_print_results_omit_all_detailed(capsys, monkeypatch):
def test_print_results_omit_all_detailed(capsys, monkeypatch) -> None:
"""Show no detail view when all files are omitted from skipping covered"""
interrogate_config = config.InterrogateConfig(
omit_covered_files=True, docstring_style="google"
Expand Down Expand Up @@ -246,7 +246,7 @@ def test_print_results_omit_all_detailed(capsys, monkeypatch):
)
def test_print_results_ignore_module(
ignore_module, level, exp_fixture_file, capsys, monkeypatch
):
) -> None:
"""Do not print module info if ignore_module is True."""
conf = {"ignore_module": ignore_module, "docstring_style": "google"}
conf = config.InterrogateConfig(**conf)
Expand All @@ -269,7 +269,7 @@ def test_print_results_ignore_module(
assert expected_out in captured.out


def test_print_results_single_file(capsys, monkeypatch):
def test_print_results_single_file(capsys, monkeypatch) -> None:
"""Results for a single file should still list the filename."""
single_file = os.path.join(SAMPLE_DIR, "full.py")
conf = {"docstring_style": "google"}
Expand Down Expand Up @@ -319,7 +319,7 @@ def test_print_results_single_file(capsys, monkeypatch):
)
def test_pass_when_fail_under_exact(
fail_under, perc_covered, exp_ret, monkeypatch
):
) -> None:
"""Pass if actual coverage is exactly the `--fail-under` value.
See issue `#114 <https://github.com/econchick/interrogate/issues/114>`_.
"""
Expand Down
16 changes: 8 additions & 8 deletions tests/unit/test_badge_gen.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
)
def test_save_badge(
out_format, out_file, exp_called_with, mocker, monkeypatch
):
) -> None:
"""Badge is saved in the expected location."""
mock_cairosvg = mocker.Mock()
monkeypatch.setattr(badge_gen, "cairosvg", mock_cairosvg)
Expand All @@ -47,7 +47,7 @@ def test_save_badge(


@pytest.mark.skipif(not IS_WINDOWS, reason="windows-only tests")
def test_save_badge_windows(mocker):
def test_save_badge_windows(mocker) -> None:
"""Badge is saved in the expected location."""
mock_open = mocker.mock_open()
m = mocker.patch("interrogate.badge_gen.open", mock_open)
Expand All @@ -59,7 +59,7 @@ def test_save_badge_windows(mocker):
m.assert_called_once_with(output, "w")


def test_save_badge_no_cairo(monkeypatch):
def test_save_badge_no_cairo(monkeypatch) -> None:
"""PNG can't be generated without extra dependencies installed."""
monkeypatch.setattr("interrogate.badge_gen.cairosvg", None)
badge_contents = "<svg>foo</svg>"
Expand All @@ -70,7 +70,7 @@ def test_save_badge_no_cairo(monkeypatch):
)


def test_get_badge():
def test_get_badge() -> None:
"""SVG badge is templated as expected."""
actual = badge_gen.get_badge(99.9, "#4c1")
actual = actual.replace("\n", "").replace("\r", "")
Expand All @@ -93,14 +93,14 @@ def test_get_badge():
("99.png", None, None, True),
),
)
def test_should_generate(fixture, color, result, expected) -> None:
    """A badge should only be (re)generated when the existing one is stale."""
    badge_path = os.path.join(FIXTURES, "default-style", fixture)
    verdict = badge_gen.should_generate_badge(badge_path, color, result)
    assert verdict is expected
assert actual is expected


def test_should_generate_xml_error(mocker, monkeypatch):
def test_should_generate_xml_error(mocker, monkeypatch) -> None:
"""Return True if parsing svg returns an error."""
mock_minidom_parse = mocker.Mock()
mock_minidom_parse.side_effect = Exception("fuuuu")
Expand All @@ -122,7 +122,7 @@ def test_should_generate_xml_error(mocker, monkeypatch):
(-1, "#9f9f9f"),
),
)
def test_get_color(result, expected) -> None:
    """Each coverage result maps to its expected badge color."""
    actual = badge_gen.get_color(result)
    assert actual == expected
assert expected == badge_gen.get_color(result)

Expand Down Expand Up @@ -166,7 +166,7 @@ def test_create(
style,
monkeypatch,
tmpdir,
):
) -> None:
"""Status badges are created according to interrogation results."""
monkeypatch.setattr(badge_gen.os.path, "isdir", lambda x: is_dir)
output = tmpdir.mkdir("output")
Expand Down
18 changes: 9 additions & 9 deletions tests/unit/test_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@
(("/usr/src/app", "/usr/src/test"), None, "/"),
),
)
def test_find_project_root(srcs, patch_func, expected, monkeypatch):
def test_find_project_root(srcs, patch_func, expected, monkeypatch) -> None:
"""Return expected directory of project root."""
with monkeypatch.context() as mp:
expected = pathlib.Path(expected)
Expand All @@ -51,7 +51,7 @@ def test_find_project_root(srcs, patch_func, expected, monkeypatch):
(False, None),
),
)
def test_find_project_config(is_file, expected, mocker, monkeypatch):
def test_find_project_config(is_file, expected, mocker, monkeypatch) -> None:
"""Return absolute path if pyproject.toml or setup.cfg is detected."""
with monkeypatch.context() as mp:
mp.setattr(config.pathlib.Path, "is_file", lambda x: is_file)
Expand All @@ -61,7 +61,7 @@ def test_find_project_config(is_file, expected, mocker, monkeypatch):
assert expected == actual


def test_parse_pyproject_toml(tmpdir):
def test_parse_pyproject_toml(tmpdir) -> None:
"""Return expected config data from a pyproject.toml file."""
toml_data = (
"[tool.foo]\n"
Expand Down Expand Up @@ -97,12 +97,12 @@ def test_parse_pyproject_toml(tmpdir):
),
),
)
def test_sanitize_list_values(value, exp_value) -> None:
    """String-encoded list values are coerced into real lists."""
    sanitized = config.sanitize_list_values(value)
    assert sanitized == exp_value
assert exp_value == config.sanitize_list_values(value)


def test_parse_setup_cfg(tmpdir):
def test_parse_setup_cfg(tmpdir) -> None:
"""Return expected config data from a setup.cfg file."""
cfg_data = (
"[tool:foo]\n"
Expand All @@ -127,7 +127,7 @@ def test_parse_setup_cfg(tmpdir):
assert expected == actual


def test_parse_setup_cfg_raises(tmpdir):
def test_parse_setup_cfg_raises(tmpdir) -> None:
"""Return nothing if no interrogate section was found."""
cfg_data = "[tool.foo]\n" 'foo = "bar"\n'

Expand All @@ -138,7 +138,7 @@ def test_parse_setup_cfg_raises(tmpdir):
assert actual is None


def test_read_config_file_none(mocker, monkeypatch):
def test_read_config_file_none(mocker, monkeypatch) -> None:
"""Return nothing if no pyproject.toml or setup.cfg is found."""
monkeypatch.setattr(config, "find_project_config", lambda x: None)
ctx = mocker.Mock()
Expand Down Expand Up @@ -199,7 +199,7 @@ def test_read_config_file_none(mocker, monkeypatch):
)
def test_read_config_file(
value, ret_config, default_map, exp_ret, exp_defaults, mocker, monkeypatch
):
) -> None:
"""Parse config from a given pyproject.toml or setup.cfg file."""
monkeypatch.setattr(config, "find_project_config", lambda x: value)
monkeypatch.setattr(config, "parse_pyproject_toml", lambda x: ret_config)
Expand All @@ -211,7 +211,7 @@ def test_read_config_file(
assert exp_defaults == ctx.default_map


def test_read_config_file_raises(mocker, monkeypatch):
def test_read_config_file_raises(mocker, monkeypatch) -> None:
"""Handle exceptions while reading pyproject.toml/setup.cfg, if any."""
toml_error = tomllib.TOMLDecodeError("toml error")
os_error = OSError("os error")
Expand Down
Loading
Loading