
Commit a339ebf

lint
1 parent b9dab98 commit a339ebf

3 files changed: +16 -15 lines


pyproject.toml

+3 -3

@@ -62,14 +62,14 @@ module = [
 ]
 
 [tool.ruff]
-select = [
+lint.select = [
     "E", # pycodestyle errors
     "W", # pycodestyle warnings
     "F", # pyflakes
     "I", # isort
     "B", # flake8-bugbear
 ]
-ignore = [
+lint.ignore = [
     "E501", # line too long, handled by black
     "B008", # do not perform function calls in argument defaults
     "B905", # requires python >= 3.10
@@ -81,7 +81,7 @@ exclude = [
     "tests"
 ]
 
-[tool.ruff.per-file-ignores]
+[tool.ruff.lint.per-file-ignores]
 "__init__.py" = [
     "F401", # MODULE IMPORTED BUT UNUSED
 ]

src/lobster/transforms/_structure.py

+2 -3

@@ -1,8 +1,8 @@
 import torch
 
+
 def trim_or_pad(tensor: torch.Tensor, pad_to: int, pad_idx: int = 0):
-    """Trim or pad a tensor with shape (L, ...) to a given length.
-    """
+    """Trim or pad a tensor with shape (L, ...) to a given length."""
     L = tensor.shape[0]
     if L >= pad_to:
         # trim, assuming first dimension is the dim to trim
@@ -16,4 +16,3 @@ def trim_or_pad(tensor: torch.Tensor, pad_to: int, pad_idx: int = 0):
         )
         tensor = torch.concat((tensor, padding), dim=0)
     return tensor
-
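Note: trim_or_pad either truncates or right-pads a tensor along its first dimension. A minimal usage sketch; the expected shapes below are inferred from the signature and docstring, since the full padding branch is not shown in this diff:

import torch

from lobster.transforms import trim_or_pad

x = torch.arange(6).reshape(3, 2)             # shape (3, 2)
trimmed = trim_or_pad(x, pad_to=2)            # first dim trimmed -> shape (2, 2)
padded = trim_or_pad(x, pad_to=5, pad_idx=0)  # first dim padded with pad_idx -> shape (5, 2)
print(trimmed.shape, padded.shape)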

src/lobster/transforms/_structure_featurizer.py

+11 -9

@@ -1,6 +1,6 @@
 import typing as T
-from pathlib import Path
 import warnings
+from pathlib import Path
 
 import numpy as np
 import torch
@@ -9,10 +9,10 @@
     OFProtein,
     atom37_to_frames,
     get_backbone_frames,
+    make_atom14_masks,
+    make_atom14_positions,
     make_pdb_features,
     protein_from_pdb_string,
-    make_atom14_masks,
-    make_atom14_positions
 )
 from lobster.transforms import trim_or_pad
 
@@ -47,7 +47,9 @@ def _openfold_features_from_pdb(
 
         return protein_features
 
-    def _process_structure_features(self, features: T.Dict[str, np.ndarray], seq_len: T.Optional[int] = None):
+    def _process_structure_features(
+        self, features: T.Dict[str, np.ndarray], seq_len: T.Optional[int] = None
+    ):
         """Process feature dtypes and pad to max length for a single sequence."""
         features_requiring_padding = [
             "aatype",
@@ -69,7 +71,7 @@ def _process_structure_features(self, features: T.Dict[str, np.ndarray], seq_len
                 features[k] = torch.from_numpy(v)
 
             # Trim or pad to a fixed length for all per-specific features
-            if (k in features_requiring_padding) and (not seq_len is None):
+            if (k in features_requiring_padding) and (seq_len is not None):
                 features[k] = trim_or_pad(features[k], seq_len)
 
         # 'seq_length' is a tensor with shape equal to the aatype array length,
@@ -83,8 +85,8 @@ def _process_structure_features(self, features: T.Dict[str, np.ndarray], seq_len
         features["mask"] = mask.long()
 
         # Make sure input sequence string is also trimmed
-        if not seq_len is None:
-            features['sequence'] = features['sequence'][:seq_len]
+        if seq_len is not None:
+            features["sequence"] = features["sequence"][:seq_len]
 
         features["aatype"] = features["aatype"].argmax(dim=-1)
         return features
@@ -93,11 +95,11 @@ def __call__(self, pdb_str: str, seq_len: int, pdb_id: T.Optional[str] = None):
         with warnings.catch_warnings():
             warnings.simplefilter("ignore")
             features = self._openfold_features_from_pdb(pdb_str, pdb_id)
-
+
         features = self._process_structure_features(features, seq_len)
         features = atom37_to_frames(features)
         features = get_backbone_frames(features)
         features = make_atom14_masks(features)
         features = make_atom14_positions(features)
 
-        return features
+        return features
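Note: for context on the pipeline above, __call__ parses the PDB string into OpenFold features, normalizes dtypes and pads/trims per-residue features to seq_len, then derives backbone frames and atom14 features. A minimal usage sketch, assuming the class defined in this file is exported as StructureFeaturizer with a no-argument constructor (neither the class name nor the constructor is visible in this diff):

from pathlib import Path

from lobster.transforms import StructureFeaturizer  # assumed import path and class name

pdb_str = Path("example.pdb").read_text()  # any local PDB file
featurizer = StructureFeaturizer()         # assumed no-arg constructor

# All per-residue features are trimmed or padded to seq_len
features = featurizer(pdb_str, seq_len=128, pdb_id="example")

print(features["aatype"].shape)  # per-residue amino-acid indices, length 128
print(features["mask"].sum())    # count of non-padding positions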
