Skip to content

Commit

Permalink
Reduce lint errors
Browse files Browse the repository at this point in the history
  • Loading branch information
terryyin committed Jan 31, 2025
1 parent 0d7dffc commit c21e296
Show file tree
Hide file tree
Showing 6 changed files with 43 additions and 26 deletions.
4 changes: 3 additions & 1 deletion lizard.py
Original file line number Diff line number Diff line change
Expand Up @@ -572,7 +572,9 @@ def analyze_source_code(self, filename, code):
for _ in reader(tokens, reader):
pass
except RecursionError as e:
sys.stderr.write("[skip] fail to process '%s' with RecursionError - %s\n" % (filename, e))
sys.stderr.write(
"[skip] fail to process '%s' with RecursionError - %s\n" %
(filename, e))
return context.fileinfo


Expand Down
2 changes: 1 addition & 1 deletion lizard_languages/clike.py
Original file line number Diff line number Diff line change
Expand Up @@ -182,7 +182,7 @@ def _state_operator_next(self, token):
self.context.add_to_function_name(' ' + token)

def _state_name_with_space(self, token):
self._state = self._state_operator\
self._state = self._state_operator \
if token == 'operator' else self._state_function
self.context.add_to_function_name(token)

Expand Down
18 changes: 12 additions & 6 deletions lizard_languages/code_reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,11 +117,13 @@ def create_token(match):
def _generate_tokens(source, add, flags=0):
# DO NOT put any sub groups in the regex. Good for performance
_until_end = r"(?:\\\n|[^\n])*"
combined_symbols = ["<<=", ">>=", "||", "&&", "===", "!==",
"==", "!=", "<=", ">=", "->", "=>",
"++", "--", '+=', '-=',
"+", "-", '*', '/',
'*=', '/=', '^=', '&=', '|=', "..."]
combined_symbols = [
"<<=", ">>=", "||", "&&", "===", "!==",
"==", "!=", "<=", ">=", "->", "=>",
"++", "--", '+=', '-=',
"+", "-", '*', '/',
'*=', '/=', '^=', '&=', '|=', "..."
]
token_pattern = re.compile(
r"(?:" +
r"\/\*.*?\*\/" +
Expand Down Expand Up @@ -171,8 +173,12 @@ def _generate_tokens(source, add, flags=0):
pattern = re.compile(r'\(\?[aiLmsux]+\)')
re_flags = ''.join(opt[2:-1] for opt in pattern.findall(addition))
flags = reduce(or_, (flag_dict[flag] for flag in re_flags), 0)
cleaned_addition = pattern.sub('', addition)

return _generate_tokens(source_code, pattern.sub('', addition), flags=flags)
return _generate_tokens(
source_code,
cleaned_addition,
flags=flags)

def __call__(self, tokens, reader):
self.context = reader.context
Expand Down
16 changes: 10 additions & 6 deletions lizard_languages/erlang.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,12 @@ def get_comment_from_token(token):

@staticmethod
def generate_tokens(source_code, addition='', token_class=None):
    """Tokenize Erlang source code using the Pygments lexer.

    Whitespace tokens are filtered out, and only the token text (index 1
    of each Pygments ``(token_type, text)`` pair) is yielded.  *addition*
    and *token_class* are accepted for interface compatibility with the
    base reader but are not used here.

    The captured diff duplicated the pre-commit one-liner next to the
    post-commit version; this is the reconstructed post-commit code.
    """
    lexer = lexers.get_lexer_by_name('erlang')
    tokens = lex(source_code, lexer=lexer)
    return map(
        lambda x: x[1],
        filter(lambda x: x[0] != py_token.Whitespace, tokens)
    )


class ErlangStates(CodeStateMachine):
Expand Down Expand Up @@ -77,8 +81,8 @@ def _state_end_of_params(self, token):
if token == '-':
self.punctuated = True
elif token == '>' and self.punctuated:
if len(self.context.stacked_functions) <= 1 or \
self.context.current_function.name == 'fun':
if (len(self.context.stacked_functions) <= 1 or
self.context.current_function.name == 'fun'):
self.next(self._state_func_first_line, token)
else:
self.func_match_failed(token)
Expand All @@ -93,8 +97,8 @@ def callback():

def _state_nested_end(self, token):
if token == '.' or token == ',':
if len(self.context.stacked_functions) > 1 \
and self.context.stacked_functions[-1].name == 'fun':
if (len(self.context.stacked_functions) > 1 and
self.context.stacked_functions[-1].name == 'fun'):
self.statemachine_return()
return

Expand Down
15 changes: 8 additions & 7 deletions lizard_languages/fortran.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
from .code_reader import CodeStateMachine, CodeReader



class FortranCommentsMixin:
@staticmethod
def get_comment_from_token(token):
Expand All @@ -26,8 +27,8 @@ class FortranReader(CodeReader, FortranCommentsMixin):
'if', 'do', '.and.', '.or.', 'case'
}
_blocks = [
'PROGRAM', 'MODULE', 'SUBMODULE', 'SUBROUTINE', 'FUNCTION', 'TYPE', 'INTERFACE', 'BLOCK',
'IF', 'DO', 'FORALL', 'WHERE', 'SELECT', 'ASSOCIATE'
'PROGRAM', 'MODULE', 'SUBMODULE', 'SUBROUTINE', 'FUNCTION', 'TYPE',
'INTERFACE', 'BLOCK', 'IF', 'DO', 'FORALL', 'WHERE', 'SELECT', 'ASSOCIATE'
]

def __init__(self, context):
Expand All @@ -52,7 +53,8 @@ def generate_tokens(source_code, addition='', token_class=None):
r'MODULE\s+PROCEDURE|'
+ block_endings + addition
)
return CodeReader.generate_tokens(source_code, addition=addition, token_class=token_class)
return CodeReader.generate_tokens(
source_code, addition=addition, token_class=token_class)

def preprocess(self, tokens):
macro_depth = 0
Expand Down Expand Up @@ -80,7 +82,8 @@ def preprocess(self, tokens):


class FortranStates(CodeStateMachine):
_ends = re.compile('|'.join(r'END\s*{0}'.format(_) for _ in FortranReader._blocks), re.I)
_ends = re.compile(
'|'.join(r'END\s*{0}'.format(_) for _ in FortranReader._blocks), re.I)

# Define token groups to eliminate duplication
IGNORE_NEXT_TOKENS = {'%', '::', 'SAVE', 'DATA'}
Expand Down Expand Up @@ -253,9 +256,7 @@ def _if_then(self, token):
self.reset_state(token)

def _module_or_procedure(self, token):
token_upper = token.upper()
if token_upper == 'PROCEDURE':
if token.upper() == 'PROCEDURE':
self._state = self._procedure
else:
self._state = self._module
self._module(token)
14 changes: 9 additions & 5 deletions lizard_languages/python.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,10 @@ class PythonReader(CodeReader, ScriptLanguageMixIn):

ext = ['py']
language_names = ['python']
_conditions = set(['if', 'for', 'while', 'and', 'or',
'elif', 'except', 'finally'])
_conditions = set([
'if', 'for', 'while', 'and', 'or',
'elif', 'except', 'finally'
])

def __init__(self, context):
super(PythonReader, self).__init__(context)
Expand All @@ -39,8 +41,9 @@ def __init__(self, context):
@staticmethod
def generate_tokens(source_code, addition='', token_class=None):
    """Tokenize Python source via the common script-language tokenizer.

    Extends the shared token pattern with alternatives matching
    triple-quoted string literals (both single- and double-quote forms)
    so docstrings and multiline strings become single tokens.  *addition*
    is accepted for interface compatibility but is not forwarded here —
    presumably intentional; verify against the base class contract.

    The captured diff duplicated the pre-commit argument lines next to
    the post-commit layout; this is the reconstructed post-commit code.
    """
    return ScriptLanguageMixIn.generate_common_tokens(
        source_code,
        r"|\'\'\'.*?\'\'\'" + r'|\"\"\".*?\"\"\"',
        token_class)

def preprocess(self, tokens):
indents = PythonIndents(self.context)
Expand All @@ -54,7 +57,8 @@ def preprocess(self, tokens):
else:
if not token.startswith('#'):
current_function = self.context.current_function
if current_function.name == '*global*' or current_function.long_name.endswith(')'):
if (current_function.name == '*global*' or
current_function.long_name.endswith(')')):
indents.set_nesting(current_leading_spaces, token)
reading_leading_space = False
else:
Expand Down

0 comments on commit c21e296

Please sign in to comment.