Commit c03d079

Apply pchs.
1 parent d5f6590 commit c03d079

5 files changed: +40 -40 lines changed

src/datajudge/constraints/column.py

+4 -4

@@ -37,7 +37,7 @@ def compare(
             filter(lambda c: c not in column_names_factual, column_names_target)
         )
         assertion_message = (
-            f"{self.ref} doesn't have column(s) " f"{', '.join(excluded_columns)}."
+            f"{self.ref} doesn't have column(s) {', '.join(excluded_columns)}."
         )
         result = len(excluded_columns) == 0
         return result, assertion_message
@@ -51,7 +51,7 @@ def compare(
             filter(lambda c: c not in column_names_target, column_names_factual)
         )
         assertion_message = (
-            f"{self.ref2} doesn't have column(s) " f"{', '.join(missing_columns)}. "
+            f"{self.ref2} doesn't have column(s) {', '.join(missing_columns)}. "
         )
         result = len(missing_columns) == 0
         return result, assertion_message
@@ -65,7 +65,7 @@ def compare(
             filter(lambda c: c not in column_names_factual, column_names_target)
         )
         assertion_message = (
-            f"{self.ref} doesn't have column(s) " f"{', '.join(missing_columns)}."
+            f"{self.ref} doesn't have column(s) {', '.join(missing_columns)}."
         )
         result = len(missing_columns) == 0
         return result, assertion_message
@@ -109,7 +109,7 @@ def retrieve(
 
     def compare(self, column_type_factual, column_type_target) -> Tuple[bool, str]:
         assertion_message = (
-            f"{self.ref} is {column_type_factual} " f"instead of {column_type_target}."
+            f"{self.ref} is {column_type_factual} instead of {column_type_target}."
         )
 
         if isinstance(column_type_target, sa.types.TypeEngine):
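All four hunks above make the same change: two adjacent f-string literals, left over from earlier line wrapping, are merged into a single literal. Python concatenates adjacent string literals at compile time, so the change is purely cosmetic. A minimal sketch of the equivalence, using illustrative stand-in values rather than real datajudge objects:

    # ref stands in for self.ref, cols for ', '.join(excluded_columns)
    ref = "public.my_table"
    cols = "id, created_at"

    # Adjacent string literals are concatenated at compile time ...
    old = f"{ref} doesn't have column(s) " f"{cols}."
    # ... so the merged single literal produces the identical string.
    new = f"{ref} doesn't have column(s) {cols}."
    assert old == new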

src/datajudge/constraints/miscs.py

+2 -2

@@ -38,15 +38,15 @@ def compare(
             iter(primary_keys_factual.difference(primary_keys_target))
         )
         assertion_message = (
-            f"{self.ref} incorrectly includes " f"{example_key} as primary key."
+            f"{self.ref} incorrectly includes {example_key} as primary key."
         )
         result = False
         if len(primary_keys_target.difference(primary_keys_factual)) > 0:
             example_key = next(
                 iter(primary_keys_target.difference(primary_keys_factual))
             )
             assertion_message = (
-                f"{self.ref} doesn't include " f"{example_key} as primary key."
+                f"{self.ref} doesn't include {example_key} as primary key."
             )
             result = False
         return result, assertion_message

src/datajudge/constraints/varchar.py

+1 -1

@@ -139,7 +139,7 @@ def test(self, engine: sa.engine.Engine) -> TestResult:
         )
 
         counterexample_string = (
-            ("Some counterexamples consist of the following: " f"{counterexamples}. ")
+            (f"Some counterexamples consist of the following: {counterexamples}. ")
             if counterexamples and len(counterexamples) > 0
             else ""
         )

tests/integration/test_integration.py

+27 -27

@@ -371,9 +371,9 @@ def test_uniques_equality_within_with_outputcheck(engine, unique_table1, data):
     )
     test_result = req[0].test(engine)
     assert operation(test_result.outcome), test_result.failure_message
-    assert test_result.failure_message.endswith(
-        failure_message_suffix
-    ), test_result.failure_message
+    assert test_result.failure_message.endswith(failure_message_suffix), (
+        test_result.failure_message
+    )
 
 
 @pytest.mark.parametrize(
@@ -479,9 +479,9 @@ def test_uniques_equality_between_with_outputcheck(
     )
     test_result = req[0].test(engine)
     assert operation(test_result.outcome), test_result.failure_message
-    assert test_result.failure_message.endswith(
-        failure_message_suffix
-    ), test_result.failure_message
+    assert test_result.failure_message.endswith(failure_message_suffix), (
+        test_result.failure_message
+    )
 
 
 @pytest.mark.parametrize(
@@ -617,9 +617,9 @@ def test_uniques_superset_within_with_outputcheck(engine, unique_table1, data):
     )
     test_result = req[0].test(engine)
     assert operation(test_result.outcome), test_result.failure_message
-    assert test_result.failure_message.endswith(
-        failure_message_suffix
-    ), test_result.failure_message
+    assert test_result.failure_message.endswith(failure_message_suffix), (
+        test_result.failure_message
+    )
 
 
 @pytest.mark.parametrize(
@@ -667,9 +667,9 @@ def test_uniques_superset_between_with_outputcheck(
     )
     test_result = req[0].test(engine)
     assert operation(test_result.outcome), test_result.failure_message
-    assert test_result.failure_message.endswith(
-        failure_message_suffix
-    ), test_result.failure_message
+    assert test_result.failure_message.endswith(failure_message_suffix), (
+        test_result.failure_message
+    )
 
 
 @pytest.mark.parametrize(
@@ -991,9 +991,9 @@ def test_uniques_subset_within_complex_with_outputcheck(engine, unique_table1, d
     print(test_result)
     print(test_result.failure_message)
     assert operation(test_result.outcome), test_result.failure_message
-    assert test_result.failure_message.endswith(
-        failure_message_suffix
-    ), test_result.failure_message
+    assert test_result.failure_message.endswith(failure_message_suffix), (
+        test_result.failure_message
+    )
 
 
 @pytest.mark.parametrize(
@@ -1056,9 +1056,9 @@ def test_uniques_subset_within_complex_with_outputcheck_extralong(
     print(test_result)
     print(test_result.failure_message)
     assert operation(test_result.outcome), test_result.failure_message
-    assert test_result.failure_message.endswith(
-        failure_message_suffix
-    ), test_result.failure_message
+    assert test_result.failure_message.endswith(failure_message_suffix), (
+        test_result.failure_message
+    )
 
 
 @pytest.mark.parametrize(
@@ -1243,9 +1243,9 @@ def test_uniques_subset_between_with_outputcheck(
     )
     test_result = req[0].test(engine)
     assert operation(test_result.outcome), test_result.failure_message
-    assert test_result.failure_message.endswith(
-        failure_message_suffix
-    ), test_result.failure_message
+    assert test_result.failure_message.endswith(failure_message_suffix), (
+        test_result.failure_message
+    )
 
 
 @pytest.mark.parametrize(
@@ -1400,9 +1400,9 @@ def test_functional_dependency_within_multi_key_with_outputcheck(
 
     test_result = req[0].test(engine)
     assert operation(test_result.outcome)
-    assert test_result.failure_message.endswith(
-        failure_message_suffix
-    ), test_result.failure_message
+    assert test_result.failure_message.endswith(failure_message_suffix), (
+        test_result.failure_message
+    )
 
 
 def _flatten_and_filter(data):
@@ -2820,9 +2820,9 @@ def test_uniqueness_within_infer_pk(engine, data, mix_table2_pk):
     req.add_uniqueness_constraint(columns=selection_columns, infer_pk_columns=True)
     test_result = req[0].test(engine)
     # additional test: the PK columns are inferred during test time, i.e. we can check here if they were inferred correctly
-    assert (
-        req[0].ref.columns == target_columns
-    ), f"Incorrect columns were retrieved from table. {req[0].ref.columns} != {target_columns}"
+    assert req[0].ref.columns == target_columns, (
+        f"Incorrect columns were retrieved from table. {req[0].ref.columns} != {target_columns}"
+    )
     assert operation(test_result.outcome), test_result.failure_message
 
 
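Every hunk in this file applies the same re-wrap: the asserted condition now stays on one line and only the failure message is parenthesized onto the following lines. Both layouts are semantically identical statements of the form "assert condition, message"; the parentheses wrap the message alone, so no tuple is created. A small before/after sketch with made-up names, not taken from the test suite:

    failure_message = "unexpected suffix: mismatch"
    failure_message_suffix = "mismatch"

    # old wrapping: the condition itself is split across lines
    assert failure_message.endswith(
        failure_message_suffix
    ), failure_message

    # new wrapping: condition on one line, message hugged in parentheses
    assert failure_message.endswith(failure_message_suffix), (
        failure_message
    )

The pitfall this style avoids is worth naming: "assert (condition, message)" with parentheses around both parts would always pass, because a non-empty tuple is truthy. Keeping the parentheses strictly around the message sidesteps that trap.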

tests/integration/test_stats.py

+6 -6

@@ -66,11 +66,11 @@ def test_ks_2sample_calculate_statistic(engine, random_normal_table, configurati
         engine, ref, ref2
     )
 
-    assert (
-        abs(d_statistic - expected_d) <= 1e-10
-    ), f"The test statistic does not match: {expected_d} vs {d_statistic}"
+    assert abs(d_statistic - expected_d) <= 1e-10, (
+        f"The test statistic does not match: {expected_d} vs {d_statistic}"
+    )
 
     # 1e-05 should cover common p_values; if scipy is installed, a very accurate p_value is automatically calculated
-    assert (
-        abs(p_value - expected_p) <= 1e-05
-    ), f"The approx. p-value does not match: {expected_p} vs {p_value}"
+    assert abs(p_value - expected_p) <= 1e-05, (
+        f"The approx. p-value does not match: {expected_p} vs {p_value}"
+    )
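As an aside, the manual tolerance checks here could equally be written with math.isclose from the standard library; with rel_tol=0.0 it reduces to exactly the abs(...) <= tol comparison the test uses. A hedged sketch with placeholder values, not taken from the test data:

    import math

    d_statistic, expected_d = 0.20000000001, 0.2  # placeholder values

    # With rel_tol=0.0, isclose computes abs(a - b) <= abs_tol,
    # matching the hand-written comparison in the diff above.
    assert math.isclose(d_statistic, expected_d, rel_tol=0.0, abs_tol=1e-10), (
        f"The test statistic does not match: {expected_d} vs {d_statistic}"
    )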
