Compare commits


5 Commits

Author  SHA1        Message                                                  Date
Zanie   35cc48a64c  Add stubs for type params and type aliases               2023-07-17 19:06:16 -05:00
Zanie   0d4f1d86ad  Format                                                   2023-07-17 18:06:24 -05:00
Zanie   834910947e  Update parser pin in fuzzer; fix lockfiles               2023-07-17 18:04:48 -05:00
Zanie   e34cfeb475  WIP: Add support for TypeAlias and TypeParam             2023-07-17 17:52:59 -05:00
Zanie   bfaa1f9530  Bump RustPython-Parser to include PEP-695 (126652b684)   2023-07-17 17:52:06 -05:00
661 changed files with 18015 additions and 28496 deletions

View File

@@ -2,14 +2,6 @@ name: Benchmark
on:
pull_request:
paths:
- 'Cargo.toml'
- 'Cargo.lock'
- 'rust-toolchain'
- 'crates/**'
- '!crates/ruff_dev'
- '!crates/ruff_shrinking'
workflow_dispatch:
concurrency:

View File

@@ -19,39 +19,6 @@ env:
PYTHON_VERSION: "3.11" # to build abi3 wheels
jobs:
determine_changes:
name: "Determine changes"
runs-on: ubuntu-latest
outputs:
linter: ${{ steps.changed.outputs.linter_any_changed }}
formatter: ${{ steps.changed.outputs.formatter_any_changed }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: tj-actions/changed-files@v37
id: changed
with:
files_yaml: |
linter:
- Cargo.toml
- Cargo.lock
- crates/**
- "!crates/ruff_python_formatter/**"
- "!crates/ruff_formatter/**"
- "!crates/ruff_dev/**"
- "!crates/ruff_shrinking/**"
formatter:
- Cargo.toml
- Cargo.lock
- crates/ruff_python_formatter/**
- crates/ruff_formatter/**
- crates/ruff_python_trivia/**
- crates/ruff_python_ast/**
cargo-fmt:
name: "cargo fmt"
runs-on: ubuntu-latest
@@ -86,12 +53,10 @@ jobs:
- uses: actions/checkout@v3
- name: "Install Rust toolchain"
run: rustup show
- name: "Install cargo insta"
uses: taiki-e/install-action@v2
with:
tool: cargo-insta
- run: pip install black[d]==23.1.0
- uses: Swatinem/rust-cache@v2
# cargo insta 1.30.0 fails for some reason (https://github.com/mitsuhiko/insta/issues/392)
- run: cargo install cargo-insta@=1.29.0
- run: pip install black[d]==23.1.0
- name: "Run tests (Ubuntu)"
if: ${{ matrix.os == 'ubuntu-latest' }}
run: cargo insta test --all --all-features --unreferenced reject
@@ -170,11 +135,9 @@ jobs:
ecosystem:
name: "ecosystem"
runs-on: ubuntu-latest
needs:
- cargo-test
- determine_changes
needs: cargo-test
# Only runs on pull requests, since that is the only way we can find the base version for comparison.
if: github.event_name == 'pull_request' && needs.determine_changes.outputs.linter == 'true'
if: github.event_name == 'pull_request'
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
@@ -319,23 +282,16 @@ jobs:
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: mkdocs build --strict -f mkdocs.generated.yml
check-formatter-ecosystem:
name: "Formatter ecosystem and progress checks"
check-formatter-stability:
name: "Check formatter stability"
runs-on: ubuntu-latest
needs: determine_changes
if: needs.determine_changes.outputs.formatter == 'true'
steps:
- uses: actions/checkout@v3
- name: "Install Rust toolchain"
run: rustup show
- name: "Cache rust"
uses: Swatinem/rust-cache@v2
- name: "Formatter progress"
run: scripts/formatter_progress.sh
- name: "Github step summary"
run: grep "similarity index" target/progress_projects_report.txt | sort > $GITHUB_STEP_SUMMARY
# CPython is not black formatted, so we run only the stability check
- name: "Clone CPython 3.10"
run: git clone --branch 3.10 --depth 1 https://github.com/python/cpython.git crates/ruff/resources/test/cpython
- name: "Check CPython stability"
- name: "Check stability"
run: cargo run --bin ruff_dev -- format-dev --stability-check crates/ruff/resources/test/cpython

View File

@@ -71,7 +71,7 @@ pipx install pre-commit # or `pip install pre-commit` if you have a virtualenv
### Development
After cloning the repository, run Ruff locally from the repository root with:
After cloning the repository, run Ruff locally with:
```shell
cargo run -p ruff_cli -- check /path/to/file.py --no-cache
@@ -133,8 +133,8 @@ At time of writing, the repository includes the following crates:
refer to?"
- `crates/ruff_python_stdlib`: library crate containing Python-specific standard library data, e.g.
the names of all built-in exceptions and which standard library types are immutable.
- `crates/ruff_python_trivia`: library crate containing Python-specific trivia utilities (e.g.,
for analyzing indentation, newlines, etc.).
- `crates/ruff_python_whitespace`: library crate containing Python-specific whitespace analysis
logic (indentation and newlines).
- `crates/ruff_rustpython`: library crate containing `RustPython`-specific utilities.
- `crates/ruff_textwrap`: library crate to indent and dedent Python source code.
- `crates/ruff_wasm`: library crate for exposing Ruff as a WebAssembly module. Powers the
@@ -156,13 +156,10 @@ At a high level, the steps involved in adding a new lint rule are as follows:
(e.g., `pub(crate) fn assert_false`) based on whatever inputs are required for the rule (e.g.,
an `ast::StmtAssert` node).
1. Define the logic for invoking the diagnostic in `crates/ruff/src/checkers/ast/analyze` (for
AST-based rules), `crates/ruff/src/checkers/tokens.rs` (for token-based rules),
`crates/ruff/src/checkers/physical_lines.rs` (for text-based rules),
`crates/ruff/src/checkers/filesystem.rs` (for filesystem-based rules), etc. For AST-based rules,
you'll likely want to modify `analyze/statement.rs` (if your rule is based on analyzing
statements, like imports) or `analyze/expression.rs` (if your rule is based on analyzing
expressions, like function calls).
1. Define the logic for triggering the violation in `crates/ruff/src/checkers/ast/mod.rs` (for
AST-based checks), `crates/ruff/src/checkers/tokens.rs` (for token-based checks),
`crates/ruff/src/checkers/lines.rs` (for text-based checks), or
`crates/ruff/src/checkers/filesystem.rs` (for filesystem-based checks).
1. Map the violation struct to a rule code in `crates/ruff/src/codes.rs` (e.g., `B011`).
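
For orientation, a minimal sketch of what the steps above produce is shown below. It is not part of this changeset: the rule name (`AssertFalse`, code `B011`) and the exact macro and trait details are illustrative assumptions based on the conventions described above and the dispatch patterns visible elsewhere in this diff.

```rust
// Hypothetical sketch only: names and macro/trait details are assumptions,
// not code from this changeset.
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use rustpython_parser::ast::{self, Ranged};

use crate::checkers::ast::Checker;

/// The violation struct: one per rule, later mapped to a rule code
/// (e.g. `B011`) in `crates/ruff/src/codes.rs`.
#[violation]
pub struct AssertFalse;

impl Violation for AssertFalse {
    #[derive_message_formats]
    fn message(&self) -> String {
        format!("Do not `assert False`; raise `AssertionError` instead")
    }
}

/// The rule function: takes the checker plus whatever inputs the rule needs
/// (here, an `ast::StmtAssert` node).
pub(crate) fn assert_false(checker: &mut Checker, stmt: &ast::StmtAssert) {
    // A real rule would first inspect the node (e.g. check that `stmt.test`
    // is a falsy constant) before reporting.
    checker
        .diagnostics
        .push(Diagnostic::new(AssertFalse, stmt.range()));
}

// Invocation from `crates/ruff/src/checkers/ast/analyze/statement.rs`,
// following the dispatch pattern used throughout the checkers:
//
//     if checker.enabled(Rule::AssertFalse) {
//         flake8_bugbear::rules::assert_false(checker, assert_stmt);
//     }
```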

432
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -21,11 +21,12 @@ filetime = { version = "0.2.20" }
glob = { version = "0.3.1" }
globset = { version = "0.4.10" }
ignore = { version = "0.4.20" }
insta = { version = "1.31.0", feature = ["filters", "glob"] }
insta = { version = "1.30.0" }
is-macro = { version = "0.2.2" }
itertools = { version = "0.10.5" }
log = { version = "0.4.17" }
memchr = "2.5.0"
nohash-hasher = { version = "0.2.0" }
num-bigint = { version = "0.4.3" }
num-traits = { version = "0.2.15" }
once_cell = { version = "1.17.1" }
@@ -51,11 +52,14 @@ wsl = { version = "0.1.0" }
# v1.0.1
libcst = { git = "https://github.com/Instagram/LibCST.git", rev = "3cacca1a1029f05707e50703b49fe3dd860aa839", default-features = false }
ruff_text_size = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "4d03b9b5b212fc869e4cfda151414438186a7779" }
rustpython-ast = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "4d03b9b5b212fc869e4cfda151414438186a7779" , default-features = false, features = ["num-bigint"]}
rustpython-format = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "4d03b9b5b212fc869e4cfda151414438186a7779", default-features = false, features = ["num-bigint"] }
rustpython-literal = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "4d03b9b5b212fc869e4cfda151414438186a7779", default-features = false }
rustpython-parser = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "4d03b9b5b212fc869e4cfda151414438186a7779" , default-features = false, features = ["full-lexer", "num-bigint"] }
# Please tag the RustPython version every time you update its revision here and in fuzz/Cargo.toml
# Tagging the version ensures that older ruff versions continue to build from source even when we rebase our RustPython fork.
# Note: As of tag v0.0.8 we are cherry-picking commits instead of rebasing so the tag is not necessary
ruff_text_size = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "126652b684910c29a7bcc32293d4ca0f81454e34" }
rustpython-ast = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "126652b684910c29a7bcc32293d4ca0f81454e34" , default-features = false, features = ["num-bigint"]}
rustpython-format = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "126652b684910c29a7bcc32293d4ca0f81454e34", default-features = false, features = ["num-bigint"] }
rustpython-literal = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "126652b684910c29a7bcc32293d4ca0f81454e34", default-features = false }
rustpython-parser = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "126652b684910c29a7bcc32293d4ca0f81454e34" , default-features = false, features = ["full-lexer", "num-bigint"] }
[profile.release]
lto = "fat"

View File

@@ -140,7 +140,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com) hook:
```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.0.280
rev: v0.0.278
hooks:
- id: ruff
```
@@ -397,7 +397,7 @@ Ruff is used by a number of major open-source projects and companies, including:
- [PyTorch](https://github.com/pytorch/pytorch)
- [Pydantic](https://github.com/pydantic/pydantic)
- [Pylint](https://github.com/PyCQA/pylint)
- [Reflex](https://github.com/reflex-dev/reflex)
- [Pynecone](https://github.com/pynecone-io/pynecone)
- [Robyn](https://github.com/sansyrox/robyn)
- Scale AI ([Launch SDK](https://github.com/scaleapi/launch-python-client))
- Snowflake ([SnowCLI](https://github.com/Snowflake-Labs/snowcli))

View File

@@ -1,6 +1,6 @@
[package]
name = "flake8-to-ruff"
version = "0.0.280"
version = "0.0.278"
description = """
Convert Flake8 configuration files to Ruff configuration files.
"""

View File

@@ -1,6 +1,6 @@
[package]
name = "ruff"
version = "0.0.280"
version = "0.0.278"
publish = false
authors = { workspace = true }
edition = { workspace = true }
@@ -19,7 +19,7 @@ ruff_cache = { path = "../ruff_cache" }
ruff_diagnostics = { path = "../ruff_diagnostics", features = ["serde"] }
ruff_index = { path = "../ruff_index" }
ruff_macros = { path = "../ruff_macros" }
ruff_python_trivia = { path = "../ruff_python_trivia" }
ruff_python_whitespace = { path = "../ruff_python_whitespace" }
ruff_python_ast = { path = "../ruff_python_ast", features = ["serde"] }
ruff_python_semantic = { path = "../ruff_python_semantic" }
ruff_python_stdlib = { path = "../ruff_python_stdlib" }
@@ -45,6 +45,7 @@ libcst = { workspace = true }
log = { workspace = true }
memchr = { workspace = true }
natord = { version = "1.0.9" }
nohash-hasher = { workspace = true }
num-bigint = { workspace = true }
num-traits = { workspace = true }
once_cell = { workspace = true }
@@ -54,6 +55,7 @@ path-absolutize = { workspace = true, features = [
] }
pathdiff = { version = "0.2.1" }
pep440_rs = { version = "0.3.1", features = ["serde"] }
phf = { version = "0.11", features = ["macros"] }
pyproject-toml = { version = "0.6.0" }
quick-junit = { version = "0.3.2" }
regex = { workspace = true }
@@ -84,7 +86,6 @@ pretty_assertions = "1.3.0"
test-case = { workspace = true }
# Disable colored output in tests
colored = { workspace = true, features = ["no-color"] }
tempfile = "3.6.0"
[features]
default = []

View File

@@ -1,6 +1,6 @@
"""
Should emit:
B002 - on lines 18, 19, and 24
B002 - on lines 15 and 20
"""
@@ -8,17 +8,13 @@ def this_is_all_fine(n):
x = n + 1
y = 1 + n
z = +x + y
a = n - 1
b = 1 - n
c = -a - b
return +z, -c
return +z
def this_is_buggy(n):
x = ++n
y = --n
return x, y
return x
def this_is_buggy_too(n):
return ++n, --n
return ++n

View File

@@ -97,10 +97,3 @@ def f():
# variable name).
for line_ in range(self.header_lines):
fp.readline()
# Regression test: visitor didn't walk the elif test
for key, value in current_crawler_tags.items():
if key:
pass
elif wanted_tag_value != value:
pass

View File

@@ -17,37 +17,3 @@ from typing import TypedDict
class MyClass(TypedDict):
id: int
from threading import Event
class CustomEvent(Event):
def set(self) -> None:
...
def str(self) -> None:
...
from logging import Filter, LogRecord
class CustomFilter(Filter):
def filter(self, record: LogRecord) -> bool:
...
def str(self) -> None:
...
from typing_extensions import override
class MyClass:
@override
def str(self):
pass
def int(self):
pass

View File

@@ -1,2 +0,0 @@
#!/usr/bin/env python

View File

@@ -50,22 +50,3 @@ _ = """a""" "b"
_ = 'a' "b"
_ = rf"a" rf"b"
# Single-line explicit concatenation should be ignored.
_ = "abc" + "def" + "ghi"
_ = foo + "abc" + "def"
_ = "abc" + foo + "def"
_ = "abc" + "def" + foo
_ = foo + bar + "abc"
_ = "abc" + foo + bar
_ = foo + "abc" + bar
_ = (
a + f"abc" +
"def"
)
_ = (
f"abc" +
"def" + a
)

View File

@@ -1,10 +1,5 @@
import logging
from distutils import log
from logging_setup import logger
logging.warn("Hello World!")
log.warn("Hello world!") # This shouldn't be considered as a logger candidate
logger.warn("Hello world!")
logging . warn("Hello World!")

View File

@@ -1,14 +0,0 @@
var: int
a = var # OK
b = c = int # OK
a.b = int # OK
d, e = int, str # OK
f, g, h = int, str, TypeVar("T") # OK
i: TypeAlias = int | str # OK
j: TypeAlias = int # OK

View File

@@ -1,14 +0,0 @@
var: int
a = var # OK
b = c = int # PYI017
a.b = int # PYI017
d, e = int, str # PYI017
f, g, h = int, str, TypeVar("T") # PYI017
i: TypeAlias = int | str # OK
j: TypeAlias = int # OK

View File

@@ -1,19 +0,0 @@
import typing
from typing import TypeAlias, Literal, Any
NewAny = Any
OptionalStr = typing.Optional[str]
Foo = Literal["foo"]
IntOrStr = int | str
AliasNone = None
NewAny: typing.TypeAlias = Any
OptionalStr: TypeAlias = typing.Optional[str]
Foo: typing.TypeAlias = Literal["foo"]
IntOrStr: TypeAlias = int | str
IntOrFloat: Foo = int | float
AliasNone: typing.TypeAlias = None
# these are ok
VarAlias = str
AliasFoo = Foo

View File

@@ -1,18 +0,0 @@
from typing import Literal, Any
NewAny = Any
OptionalStr = typing.Optional[str]
Foo = Literal["foo"]
IntOrStr = int | str
AliasNone = None
NewAny: typing.TypeAlias = Any
OptionalStr: TypeAlias = typing.Optional[str]
Foo: typing.TypeAlias = Literal["foo"]
IntOrStr: TypeAlias = int | str
IntOrFloat: Foo = int | float
AliasNone: typing.TypeAlias = None
# these are ok
VarAlias = str
AliasFoo = Foo

View File

@@ -1,12 +0,0 @@
__all__ = ["A", "B", "C"]
# Errors
__all__.append("D")
__all__.extend(["E", "Foo"])
__all__.remove("A")
# OK
__all__ += ["D"]
foo = ["Hello"]
foo.append("World")
foo.bar.append("World")

View File

@@ -1,12 +0,0 @@
__all__ = ["A", "B", "C"]
# Errors
__all__.append("D")
__all__.extend(["E", "Foo"])
__all__.remove("A")
# OK
__all__ += ["D"]
foo = ["Hello"]
foo.append("World")
foo.bar.append("World")

View File

@@ -100,14 +100,6 @@ if node.module0123456789:
):
print("Bad module!")
# SIM102
# Regression test for https://github.com/apache/airflow/blob/145b16caaa43f0c42bffd97344df916c602cddde/airflow/configuration.py#L1161
if a:
if b:
if c:
print("if")
elif d:
print("elif")
# OK
if a:

View File

@@ -14,12 +14,6 @@ try:
except (ValueError, OSError):
pass
# SIM105
try:
foo()
except (ValueError, OSError) as e:
pass
# SIM105
try:
foo()
@@ -100,13 +94,3 @@ def with_comment():
foo()
except (ValueError, OSError):
pass # Trailing comment.
try:
print()
except ("not", "an", "exception"):
pass
try:
print()
except "not an exception":
pass

View File

@@ -23,7 +23,7 @@ elif a:
else:
b = 2
# SIM108
# OK (false negative)
if True:
pass
else:

View File

@@ -94,23 +94,3 @@ if result.eofs == "F":
errors = 1
else:
errors = 1
if a:
# Ignore branches with diverging comments because it means we're repeating
# the bodies because we have different reasons for each branch
x = 1
elif c:
x = 1
def foo():
a = True
b = False
if a > b: # end-of-line
return 3
elif a == b:
return 3
elif a < b: # end-of-line
return 4
elif b is None:
return 4

View File

@@ -84,15 +84,3 @@ elif func_name == "remove":
return "D"
elif func_name == "move":
return "MV"
# OK
def no_return_in_else(platform):
if platform == "linux":
return "auditwheel repair -w {dest_dir} {wheel}"
elif platform == "macos":
return "delocate-wheel --require-archs {delocate_archs} -w {dest_dir} -v {wheel}"
elif platform == "windows":
return ""
else:
msg = f"Unknown platform: {platform!r}"
raise ValueError(msg)

View File

@@ -1,19 +1,11 @@
key in obj.keys() # SIM118
key not in obj.keys() # SIM118
foo["bar"] in obj.keys() # SIM118
foo["bar"] not in obj.keys() # SIM118
foo['bar'] in obj.keys() # SIM118
foo['bar'] not in obj.keys() # SIM118
foo() in obj.keys() # SIM118
foo() not in obj.keys() # SIM118
for key in obj.keys(): # SIM118
pass

View File

@@ -38,15 +38,6 @@ if key in a_dict:
else:
vars[idx] = "defaultß9💣26789ß9💣26789ß9💣26789ß9💣26789ß9💣26789"
# SIM401
if foo():
pass
else:
if key in a_dict:
vars[idx] = a_dict[key]
else:
vars[idx] = "default"
###
# Negative cases
###
@@ -114,3 +105,12 @@ elif key in a_dict:
vars[idx] = a_dict[key]
else:
vars[idx] = "default"
# OK (false negative for nested else)
if foo():
pass
else:
if key in a_dict:
vars[idx] = a_dict[key]
else:
vars[idx] = "default"

View File

@@ -1,14 +0,0 @@
from pathlib import Path, PurePath
from pathlib import Path as pth
# match
_ = Path(".")
_ = pth(".")
_ = PurePath(".")
# no match
_ = Path()
print(".")
Path("file.txt")
Path(".", "folder")
PurePath(".", "folder")

View File

@@ -1,14 +0,0 @@
import os.path
from pathlib import Path
from os.path import getsize
os.path.getsize("filename")
os.path.getsize(b"filename")
os.path.getsize(Path("filename"))
os.path.getsize(__file__)
getsize("filename")
getsize(b"filename")
getsize(Path("filename"))
getsize(__file__)

View File

@@ -1,12 +0,0 @@
import os.path
from pathlib import Path
from os.path import getatime
os.path.getatime("filename")
os.path.getatime(b"filename")
os.path.getatime(Path("filename"))
getatime("filename")
getatime(b"filename")
getatime(Path("filename"))

View File

@@ -1,13 +0,0 @@
import os.path
from pathlib import Path
from os.path import getmtime
os.path.getmtime("filename")
os.path.getmtime(b"filename")
os.path.getmtime(Path("filename"))
getmtime("filename")
getmtime(b"filename")
getmtime(Path("filename"))

View File

@@ -1,12 +0,0 @@
import os.path
from pathlib import Path
from os.path import getctime
os.path.getctime("filename")
os.path.getctime(b"filename")
os.path.getctime(Path("filename"))
getctime("filename")
getctime(b"filename")
getctime(Path("filename"))

View File

@@ -1,20 +0,0 @@
import os
from os import sep
file_name = "foo/bar"
# PTH206
"foo/bar/".split(os.sep)
"foo/bar/".split(sep=os.sep)
"foo/bar/".split(os.sep)[-1]
"foo/bar/".split(os.sep)[-2]
"foo/bar/".split(os.sep)[-2:]
"fizz/buzz".split(sep)
"fizz/buzz".split(sep)[-1]
os.path.splitext("path/to/hello_world.py")[0].split(os.sep)[-1]
file_name.split(os.sep)
(os.path.abspath(file_name)).split(os.sep)
# OK
"foo/bar/".split("/")

View File

@@ -2,7 +2,6 @@ import os
import os.path
p = "/foo"
q = "bar"
a = os.path.abspath(p)
aa = os.chmod(p)
@@ -22,9 +21,7 @@ bbbbb = os.path.islink(p)
os.readlink(p)
os.stat(p)
os.path.isabs(p)
os.path.join(p, q)
os.sep.join([p, q])
os.sep.join((p, q))
os.path.join(p)
os.path.basename(p)
os.path.dirname(p)
os.path.samefile(p)

View File

@@ -2,7 +2,6 @@ import os as foo
import os.path as foo_p
p = "/foo"
q = "bar"
a = foo_p.abspath(p)
aa = foo.chmod(p)
@@ -22,9 +21,7 @@ bbbbb = foo_p.islink(p)
foo.readlink(p)
foo.stat(p)
foo_p.isabs(p)
foo_p.join(p, q)
foo.sep.join([p, q])
foo.sep.join((p, q))
foo_p.join(p)
foo_p.basename(p)
foo_p.dirname(p)
foo_p.samefile(p)

View File

@@ -1,10 +1,9 @@
from os import chmod, mkdir, makedirs, rename, replace, rmdir, sep
from os import chmod, mkdir, makedirs, rename, replace, rmdir
from os import remove, unlink, getcwd, readlink, stat
from os.path import abspath, exists, expanduser, isdir, isfile, islink
from os.path import isabs, join, basename, dirname, samefile, splitext
p = "/foo"
q = "bar"
a = abspath(p)
aa = chmod(p)
@@ -24,9 +23,7 @@ bbbbb = islink(p)
readlink(p)
stat(p)
isabs(p)
join(p, q)
sep.join((p, q))
sep.join([p, q])
join(p)
basename(p)
dirname(p)
samefile(p)

View File

@@ -1,4 +1,4 @@
from os import chmod as xchmod, mkdir as xmkdir, sep as s
from os import chmod as xchmod, mkdir as xmkdir
from os import makedirs as xmakedirs, rename as xrename, replace as xreplace
from os import rmdir as xrmdir, remove as xremove, unlink as xunlink
from os import getcwd as xgetcwd, readlink as xreadlink, stat as xstat
@@ -9,7 +9,6 @@ from os.path import join as xjoin, basename as xbasename, dirname as xdirname
from os.path import samefile as xsamefile, splitext as xsplitext
p = "/foo"
q = "bar"
a = xabspath(p)
aa = xchmod(p)
@@ -29,9 +28,7 @@ bbbbb = xislink(p)
xreadlink(p)
xstat(p)
xisabs(p)
xjoin(p, q)
s.join((p, q))
s.join([p, q])
xjoin(p)
xbasename(p)
xdirname(p)
xsamefile(p)

View File

@@ -1,7 +0,0 @@
if "sdist" in cmds:
_sdist = cmds["sdist"]
elif "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from setuptools.command.sdist import sdist as _sdist
from distutils.command.sdist import sdist as _sdist

View File

@@ -1,7 +0,0 @@
match 1:
case 1:
import sys
import os
case 2:
import collections
import abc

View File

@@ -41,5 +41,3 @@ regex = '\w' # noqa
regex = '''
\w
''' # noqa
regex = '\\\_'

View File

@@ -23,5 +23,3 @@ a = []
'%s %s' % (*a,)
k = {}
'%(k)s' % {**k}
'%s' % [1, 2, 3]
'%s' % {1, 2, 3}

View File

@@ -1,11 +1,6 @@
if (1, 2):
pass
if (3, 4):
pass
elif foo:
pass
for _ in range(5):
if True:
pass

View File

@@ -1,15 +0,0 @@
# Regression test for branch detection from
# https://github.com/pypa/build/blob/5800521541e5e749d4429617420d1ef8cdb40b46/src/build/_importlib.py
import sys
if sys.version_info < (3, 8):
import importlib_metadata as metadata
elif sys.version_info < (3, 9, 10) or (3, 10, 0) <= sys.version_info < (3, 10, 2):
try:
import importlib_metadata as metadata
except ModuleNotFoundError:
from importlib import metadata
else:
from importlib import metadata
__all__ = ["metadata"]

View File

@@ -1,6 +0,0 @@
class Class:
def func(self):
pass
def func(self):
pass

View File

@@ -1,11 +0,0 @@
"""Test case: `Literal` with `__future__` annotations."""
from __future__ import annotations
from typing import Literal, Final
from typing_extensions import assert_type
CONSTANT: Final = "ns"
assert_type(CONSTANT, Literal["ns"])

View File

@@ -25,41 +25,3 @@ def dec(x):
def f():
dec = 1
return dec
class Class:
def f(self):
print(my_var)
my_var = 1
class Class:
my_var = 0
def f(self):
print(my_var)
my_var = 1
import sys
def main():
print(sys.argv)
try:
3 / 0
except ZeroDivisionError:
import sys
sys.exit(1)
import sys
def main():
print(sys.argv)
for sys in range(5):
pass

View File

@@ -1,7 +1,6 @@
x = 1 # type: ignore
x = 1 # type:ignore
x = 1 # type: ignore[attr-defined] # type: ignore
x = 1 # type: ignoreme # type: ignore
x = 1
x = 1 # type ignore

View File

@@ -47,17 +47,3 @@ def not_ok1():
pass
else:
pass
# Regression test for https://github.com/apache/airflow/blob/f1e1cdcc3b2826e68ba133f350300b5065bbca33/airflow/models/dag.py#L1737
def not_ok2():
if True:
print(1)
elif True:
print(2)
else:
if True:
print(3)
else:
print(4)

View File

@@ -1,41 +0,0 @@
foo = 1
bar = 2
baz = 3
# Errors.
foo = foo
bar = bar
foo, bar = foo, bar
bar, foo = bar, foo
(foo, bar) = (foo, bar)
(bar, foo) = (bar, foo)
foo, (bar, baz) = foo, (bar, baz)
bar, (foo, baz) = bar, (foo, baz)
(foo, bar), baz = (foo, bar), baz
(foo, (bar, baz)) = (foo, (bar, baz))
foo, bar = foo, 1
bar, foo = bar, 1
(foo, bar) = (foo, 1)
(bar, foo) = (bar, 1)
foo, (bar, baz) = foo, (bar, 1)
bar, (foo, baz) = bar, (foo, 1)
(foo, bar), baz = (foo, bar), 1
(foo, (bar, baz)) = (foo, (bar, 1))
foo: int = foo
bar: int = bar
# Non-errors.
foo = bar
bar = foo
foo, bar = bar, foo
foo, bar = bar, foo
(foo, bar) = (bar, foo)
foo, bar = bar, 1
bar, foo = foo, 1
foo: int = bar
bar: int = 1
class Foo:
foo = foo
bar = bar

View File

@@ -1,18 +0,0 @@
import subprocess
def foo():
pass
# Errors.
subprocess.Popen(preexec_fn=foo)
subprocess.Popen(["ls"], preexec_fn=foo)
subprocess.Popen(preexec_fn=lambda: print("Hello, world!"))
subprocess.Popen(["ls"], preexec_fn=lambda: print("Hello, world!"))
# Non-errors.
subprocess.Popen()
subprocess.Popen(["ls"])
subprocess.Popen(preexec_fn=None) # None is the default.
subprocess.Popen(["ls"], preexec_fn=None) # None is the default.

View File

@@ -15,22 +15,7 @@ bytes("foo", **a)
bytes(b"foo"
b"bar")
bytes("foo")
bytes(1)
f"{f'{str()}'}"
int(1.0)
int("1")
int(b"11")
int(10, base=2)
int("10", base=2)
int("10", 2)
float("1.0")
float(b"1.0")
bool(1)
bool(0)
bool("foo")
bool("")
bool(b"")
bool(1.0)
# These become string or byte literals
str()
@@ -42,10 +27,3 @@ bytes(b"foo")
bytes(b"""
foo""")
f"{str()}"
int()
int(1)
float()
float(1.0)
bool()
bool(True)
bool(False)

View File

@@ -62,16 +62,6 @@ print("foo {} ".format(x))
1111111111111111111111111111111111111111111111111111111111111111111111111,
)
"""
{}
""".format(1)
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = """
{}
""".format(
111111
)
###
# Non-errors
###
@@ -109,37 +99,6 @@ r'"\N{snowman} {}".format(a)'
11111111111111111111111111111111111111111111111111111111111111111111111111,
)
"""
{}
{}
{}
""".format(
1,
2,
111111111111111111111111111111111111111111111111111111111111111111111111111111111111111,
)
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = """{}
""".format(
111111
)
"{}".format(
[
1,
2,
3,
]
)
"{a}".format(
a=[
1,
2,
3,
]
)
async def c():
return "{}".format(await 3)

View File

@@ -7,20 +7,20 @@ if True:
if True:
if foo:
print()
pass
elif sys.version_info < (3, 3):
cmd = [sys.executable, "-m", "test.regrtest"]
if True:
if foo:
print()
pass
elif sys.version_info < (3, 3):
cmd = [sys.executable, "-m", "test.regrtest"]
elif foo:
cmd = [sys.executable, "-m", "test", "-j0"]
if foo:
print()
pass
elif sys.version_info < (3, 3):
cmd = [sys.executable, "-m", "test.regrtest"]
@@ -28,7 +28,7 @@ if True:
cmd = [sys.executable, "-m", "test.regrtest"]
if foo:
print()
pass
elif sys.version_info < (3, 3):
cmd = [sys.executable, "-m", "test.regrtest"]
else:

View File

@@ -34,23 +34,11 @@ list(x)[::]
[i for i in x][::2]
[i for i in x][::]
# RUF015 (doesn't mirror the underlying list)
# OK (doesn't mirror the underlying list)
[i + 1 for i in x][0]
[i for i in x if i > 5][0]
[(i, i + 1) for i in x][0]
# RUF015 (multiple generators)
# OK (multiple generators)
y = range(10)
[i + j for i in x for j in y][0]
# RUF015
list(range(10))[0]
list(x.y)[0]
list(x["y"])[0]
# RUF015 (multi-line)
revision_heads_map_ast = [
a
for a in revision_heads_map_ast_obj.body
if isinstance(a, ast.Assign) and a.targets[0].id == "REVISION_HEADS_MAP"
][0]

View File

@@ -230,15 +230,6 @@ def incorrect_multi_conditional(arg1, arg2):
raise Exception("...") # should be typeerror
def multiple_is_instance_checks(some_arg):
if isinstance(some_arg, str):
pass
elif isinstance(some_arg, int):
pass
else:
raise Exception("...") # should be typeerror
class MyCustomTypeValidation(Exception):
pass
@@ -305,17 +296,6 @@ def multiple_ifs(some_args):
pass
def else_body(obj):
if isinstance(obj, datetime.timedelta):
return "TimeDelta"
elif isinstance(obj, relativedelta.relativedelta):
return "RelativeDelta"
elif isinstance(obj, CronExpression):
return "CronExpression"
else:
raise Exception(f"Unknown object type: {obj.__class__.__name__}")
def early_return():
if isinstance(this, some_type):
if x in this:

View File

@@ -7,7 +7,7 @@ use rustpython_parser::{lexer, Mode};
use ruff_diagnostics::Edit;
use ruff_python_ast::helpers;
use ruff_python_ast::source_code::{Indexer, Locator, Stylist};
use ruff_python_trivia::{is_python_whitespace, NewlineWithTrailingNewline, PythonWhitespace};
use ruff_python_whitespace::{is_python_whitespace, NewlineWithTrailingNewline, PythonWhitespace};
use crate::autofix::codemods;
@@ -190,24 +190,12 @@ fn is_lone_child(child: &Stmt, parent: &Stmt) -> bool {
}
Stmt::For(ast::StmtFor { body, orelse, .. })
| Stmt::AsyncFor(ast::StmtAsyncFor { body, orelse, .. })
| Stmt::While(ast::StmtWhile { body, orelse, .. }) => {
| Stmt::While(ast::StmtWhile { body, orelse, .. })
| Stmt::If(ast::StmtIf { body, orelse, .. }) => {
if is_only(body, child) || is_only(orelse, child) {
return true;
}
}
Stmt::If(ast::StmtIf {
body,
elif_else_clauses,
..
}) => {
if is_only(body, child)
|| elif_else_clauses
.iter()
.any(|ast::ElifElseClause { body, .. }| is_only(body, child))
{
return true;
}
}
Stmt::Try(ast::StmtTry {
body,
handlers,

View File

@@ -1,8 +1,9 @@
use std::collections::BTreeSet;
use itertools::Itertools;
use nohash_hasher::IntSet;
use ruff_text_size::{TextLen, TextRange, TextSize};
use rustc_hash::{FxHashMap, FxHashSet};
use rustc_hash::FxHashMap;
use ruff_diagnostics::{Diagnostic, Edit, Fix, IsolationLevel};
use ruff_python_ast::source_code::Locator;
@@ -46,7 +47,7 @@ fn apply_fixes<'a>(
let mut output = String::with_capacity(locator.len());
let mut last_pos: Option<TextSize> = None;
let mut applied: BTreeSet<&Edit> = BTreeSet::default();
let mut isolated: FxHashSet<u32> = FxHashSet::default();
let mut isolated: IntSet<u32> = IntSet::default();
let mut fixed = FxHashMap::default();
let mut source_map = SourceMap::default();

View File

@@ -1,27 +0,0 @@
use rustpython_parser::ast::{Arg, Ranged};
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::{flake8_builtins, pep8_naming, pycodestyle};
/// Run lint rules over an [`Arg`] syntax node.
pub(crate) fn argument(arg: &Arg, checker: &mut Checker) {
if checker.enabled(Rule::AmbiguousVariableName) {
if let Some(diagnostic) = pycodestyle::rules::ambiguous_variable_name(&arg.arg, arg.range())
{
checker.diagnostics.push(diagnostic);
}
}
if checker.enabled(Rule::InvalidArgumentName) {
if let Some(diagnostic) = pep8_naming::rules::invalid_argument_name(
&arg.arg,
arg,
&checker.settings.pep8_naming.ignore_names,
) {
checker.diagnostics.push(diagnostic);
}
}
if checker.enabled(Rule::BuiltinArgumentShadowing) {
flake8_builtins::rules::builtin_argument_shadowing(checker, arg);
}
}

View File

@@ -1,26 +0,0 @@
use rustpython_parser::ast::Arguments;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::{flake8_bugbear, flake8_pyi, ruff};
/// Run lint rules over an [`Arguments`] syntax node.
pub(crate) fn arguments(arguments: &Arguments, checker: &mut Checker) {
if checker.enabled(Rule::MutableArgumentDefault) {
flake8_bugbear::rules::mutable_argument_default(checker, arguments);
}
if checker.enabled(Rule::FunctionCallInDefaultArgument) {
flake8_bugbear::rules::function_call_in_argument_default(checker, arguments);
}
if checker.settings.rules.enabled(Rule::ImplicitOptional) {
ruff::rules::implicit_optional(checker, arguments);
}
if checker.is_stub {
if checker.enabled(Rule::TypedArgumentDefaultInStub) {
flake8_pyi::rules::typed_argument_simple_defaults(checker, arguments);
}
if checker.enabled(Rule::ArgumentDefaultInStub) {
flake8_pyi::rules::argument_simple_defaults(checker, arguments);
}
}
}

View File

@@ -1,68 +0,0 @@
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::{flake8_import_conventions, flake8_pyi, pyflakes, pylint};
use ruff_diagnostics::{Diagnostic, Fix};
/// Run lint rules over the [`Binding`]s.
pub(crate) fn bindings(checker: &mut Checker) {
if !checker.any_enabled(&[
Rule::InvalidAllFormat,
Rule::InvalidAllObject,
Rule::UnaliasedCollectionsAbcSetImport,
Rule::UnconventionalImportAlias,
Rule::UnusedVariable,
]) {
return;
}
for binding in checker.semantic.bindings.iter() {
if checker.enabled(Rule::UnusedVariable) {
if binding.kind.is_bound_exception() && !binding.is_used() {
let mut diagnostic = Diagnostic::new(
pyflakes::rules::UnusedVariable {
name: binding.name(checker.locator).to_string(),
},
binding.range,
);
if checker.patch(Rule::UnusedVariable) {
diagnostic.try_set_fix(|| {
pyflakes::fixes::remove_exception_handler_assignment(
binding,
checker.locator,
)
.map(Fix::automatic)
});
}
checker.diagnostics.push(diagnostic);
}
}
if checker.enabled(Rule::InvalidAllFormat) {
if let Some(diagnostic) = pylint::rules::invalid_all_format(binding) {
checker.diagnostics.push(diagnostic);
}
}
if checker.enabled(Rule::InvalidAllObject) {
if let Some(diagnostic) = pylint::rules::invalid_all_object(binding) {
checker.diagnostics.push(diagnostic);
}
}
if checker.enabled(Rule::UnconventionalImportAlias) {
if let Some(diagnostic) = flake8_import_conventions::rules::unconventional_import_alias(
checker,
binding,
&checker.settings.flake8_import_conventions.aliases,
) {
checker.diagnostics.push(diagnostic);
}
}
if checker.is_stub {
if checker.enabled(Rule::UnaliasedCollectionsAbcSetImport) {
if let Some(diagnostic) =
flake8_pyi::rules::unaliased_collections_abc_set_import(checker, binding)
{
checker.diagnostics.push(diagnostic);
}
}
}
}
}

View File

@@ -1,16 +0,0 @@
use rustpython_parser::ast::Comprehension;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::flake8_simplify;
/// Run lint rules over a [`Comprehension`] syntax node.
pub(crate) fn comprehension(comprehension: &Comprehension, checker: &mut Checker) {
if checker.enabled(Rule::InDictKeys) {
flake8_simplify::rules::key_in_dict_for(
checker,
&comprehension.target,
&comprehension.iter,
);
}
}

View File

@@ -1,32 +0,0 @@
use rustpython_parser::ast::{self, Stmt};
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::{flake8_bugbear, perflint};
/// Run lint rules over all deferred for-loops in the [`SemanticModel`].
pub(crate) fn deferred_for_loops(checker: &mut Checker) {
while !checker.deferred.for_loops.is_empty() {
let for_loops = std::mem::take(&mut checker.deferred.for_loops);
for snapshot in for_loops {
checker.semantic.restore(snapshot);
if let Stmt::For(ast::StmtFor {
target, iter, body, ..
})
| Stmt::AsyncFor(ast::StmtAsyncFor {
target, iter, body, ..
}) = &checker.semantic.stmt()
{
if checker.enabled(Rule::UnusedLoopControlVariable) {
flake8_bugbear::rules::unused_loop_control_variable(checker, target, body);
}
if checker.enabled(Rule::IncorrectDictIterator) {
perflint::rules::incorrect_dict_iterator(checker, target, iter);
}
} else {
unreachable!("Expected Expr::For | Expr::AsyncFor");
}
}
}
}

View File

@@ -1,287 +0,0 @@
use ruff_diagnostics::Diagnostic;
use ruff_python_ast::cast;
use ruff_python_semantic::analyze::{branch_detection, visibility};
use ruff_python_semantic::{Binding, BindingKind, ScopeKind};
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::{flake8_type_checking, flake8_unused_arguments, pyflakes, pylint};
/// Run lint rules over all deferred scopes in the [`SemanticModel`].
pub(crate) fn deferred_scopes(checker: &mut Checker) {
if !checker.any_enabled(&[
Rule::GlobalVariableNotAssigned,
Rule::ImportShadowedByLoopVar,
Rule::RedefinedWhileUnused,
Rule::RuntimeImportInTypeCheckingBlock,
Rule::TypingOnlyFirstPartyImport,
Rule::TypingOnlyStandardLibraryImport,
Rule::TypingOnlyThirdPartyImport,
Rule::UndefinedLocal,
Rule::UnusedAnnotation,
Rule::UnusedClassMethodArgument,
Rule::UnusedFunctionArgument,
Rule::UnusedImport,
Rule::UnusedLambdaArgument,
Rule::UnusedMethodArgument,
Rule::UnusedStaticMethodArgument,
Rule::UnusedVariable,
]) {
return;
}
// Identify any valid runtime imports. If a module is imported at runtime, and
// used at runtime, then by default, we avoid flagging any other
// imports from that module as typing-only.
let enforce_typing_imports = !checker.is_stub
&& checker.any_enabled(&[
Rule::RuntimeImportInTypeCheckingBlock,
Rule::TypingOnlyFirstPartyImport,
Rule::TypingOnlyStandardLibraryImport,
Rule::TypingOnlyThirdPartyImport,
]);
let runtime_imports: Vec<Vec<&Binding>> = if enforce_typing_imports {
checker
.semantic
.scopes
.iter()
.map(|scope| {
scope
.binding_ids()
.map(|binding_id| checker.semantic.binding(binding_id))
.filter(|binding| {
flake8_type_checking::helpers::is_valid_runtime_import(
binding,
&checker.semantic,
)
})
.collect()
})
.collect::<Vec<_>>()
} else {
vec![]
};
let mut diagnostics: Vec<Diagnostic> = vec![];
for scope_id in checker.deferred.scopes.iter().rev().copied() {
let scope = &checker.semantic.scopes[scope_id];
if checker.enabled(Rule::UndefinedLocal) {
pyflakes::rules::undefined_local(checker, scope_id, scope, &mut diagnostics);
}
if checker.enabled(Rule::GlobalVariableNotAssigned) {
for (name, binding_id) in scope.bindings() {
let binding = checker.semantic.binding(binding_id);
if binding.kind.is_global() {
diagnostics.push(Diagnostic::new(
pylint::rules::GlobalVariableNotAssigned {
name: (*name).to_string(),
},
binding.range,
));
}
}
}
if checker.enabled(Rule::ImportShadowedByLoopVar) {
for (name, binding_id) in scope.bindings() {
for shadow in checker.semantic.shadowed_bindings(scope_id, binding_id) {
// If the shadowing binding isn't a loop variable, abort.
let binding = &checker.semantic.bindings[shadow.binding_id()];
if !binding.kind.is_loop_var() {
continue;
}
// If the shadowed binding isn't an import, abort.
let shadowed = &checker.semantic.bindings[shadow.shadowed_id()];
if !matches!(
shadowed.kind,
BindingKind::Import(..)
| BindingKind::FromImport(..)
| BindingKind::SubmoduleImport(..)
| BindingKind::FutureImport
) {
continue;
}
// If the bindings are in different forks, abort.
if shadowed.source.map_or(true, |left| {
binding.source.map_or(true, |right| {
branch_detection::different_forks(left, right, &checker.semantic.stmts)
})
}) {
continue;
}
#[allow(deprecated)]
let line = checker.locator.compute_line_index(shadowed.range.start());
checker.diagnostics.push(Diagnostic::new(
pyflakes::rules::ImportShadowedByLoopVar {
name: name.to_string(),
line,
},
binding.range,
));
}
}
}
if checker.enabled(Rule::RedefinedWhileUnused) {
for (name, binding_id) in scope.bindings() {
for shadow in checker.semantic.shadowed_bindings(scope_id, binding_id) {
// If the shadowing binding is a loop variable, abort, to avoid overlap
// with F402.
let binding = &checker.semantic.bindings[shadow.binding_id()];
if binding.kind.is_loop_var() {
continue;
}
// If the shadowed binding is used, abort.
let shadowed = &checker.semantic.bindings[shadow.shadowed_id()];
if shadowed.is_used() {
continue;
}
// If the shadowing binding isn't considered a "redefinition" of the
// shadowed binding, abort.
if !binding.redefines(shadowed) {
continue;
}
if shadow.same_scope() {
// If the symbol is a dummy variable, abort, unless the shadowed
// binding is an import.
if !matches!(
shadowed.kind,
BindingKind::Import(..)
| BindingKind::FromImport(..)
| BindingKind::SubmoduleImport(..)
| BindingKind::FutureImport
) && checker.settings.dummy_variable_rgx.is_match(name)
{
continue;
}
// If this is an overloaded function, abort.
if shadowed.kind.is_function_definition()
&& visibility::is_overload(
cast::decorator_list(
checker.semantic.stmts[shadowed.source.unwrap()],
),
&checker.semantic,
)
{
continue;
}
} else {
// Only enforce cross-scope shadowing for imports.
if !matches!(
shadowed.kind,
BindingKind::Import(..)
| BindingKind::FromImport(..)
| BindingKind::SubmoduleImport(..)
| BindingKind::FutureImport
) {
continue;
}
}
// If the bindings are in different forks, abort.
if shadowed.source.map_or(true, |left| {
binding.source.map_or(true, |right| {
branch_detection::different_forks(left, right, &checker.semantic.stmts)
})
}) {
continue;
}
#[allow(deprecated)]
let line = checker.locator.compute_line_index(shadowed.range.start());
let mut diagnostic = Diagnostic::new(
pyflakes::rules::RedefinedWhileUnused {
name: (*name).to_string(),
line,
},
binding.range,
);
if let Some(range) = binding.parent_range(&checker.semantic) {
diagnostic.set_parent(range.start());
}
diagnostics.push(diagnostic);
}
}
}
if matches!(
scope.kind,
ScopeKind::Function(_) | ScopeKind::AsyncFunction(_) | ScopeKind::Lambda(_)
) {
if checker.enabled(Rule::UnusedVariable) {
pyflakes::rules::unused_variable(checker, scope, &mut diagnostics);
}
if checker.enabled(Rule::UnusedAnnotation) {
pyflakes::rules::unused_annotation(checker, scope, &mut diagnostics);
}
if !checker.is_stub {
if checker.any_enabled(&[
Rule::UnusedClassMethodArgument,
Rule::UnusedFunctionArgument,
Rule::UnusedLambdaArgument,
Rule::UnusedMethodArgument,
Rule::UnusedStaticMethodArgument,
]) {
flake8_unused_arguments::rules::unused_arguments(
checker,
scope,
&mut diagnostics,
);
}
}
}
if matches!(
scope.kind,
ScopeKind::Function(_) | ScopeKind::AsyncFunction(_) | ScopeKind::Module
) {
if enforce_typing_imports {
let runtime_imports: Vec<&Binding> = checker
.semantic
.scopes
.ancestor_ids(scope_id)
.flat_map(|scope_id| runtime_imports[scope_id.as_usize()].iter())
.copied()
.collect();
if checker.enabled(Rule::RuntimeImportInTypeCheckingBlock) {
flake8_type_checking::rules::runtime_import_in_type_checking_block(
checker,
scope,
&mut diagnostics,
);
}
if checker.any_enabled(&[
Rule::TypingOnlyFirstPartyImport,
Rule::TypingOnlyStandardLibraryImport,
Rule::TypingOnlyThirdPartyImport,
]) {
flake8_type_checking::rules::typing_only_runtime_import(
checker,
scope,
&runtime_imports,
&mut diagnostics,
);
}
}
if checker.enabled(Rule::UnusedImport) {
pyflakes::rules::unused_import(checker, scope, &mut diagnostics);
}
}
}
checker.diagnostics.extend(diagnostics);
}

View File

@@ -1,291 +0,0 @@
use ruff_python_ast::str::raw_contents_range;
use ruff_text_size::TextRange;
use rustpython_parser::ast::Ranged;
use ruff_python_semantic::{BindingKind, ContextualizedDefinition, Export};
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::docstrings::Docstring;
use crate::fs::relativize_path;
use crate::rules::{flake8_annotations, flake8_pyi, pydocstyle};
use crate::{docstrings, warn_user};
/// Run lint rules over all [`Definition`] nodes in the [`SemanticModel`].
///
/// This phase is expected to run after the AST has been traversed in its entirety; as such,
/// it is expected that all [`Definition`] nodes have been visited by that point, and that this
/// method will not recurse into any other nodes.
pub(crate) fn definitions(checker: &mut Checker) {
let enforce_annotations = checker.any_enabled(&[
Rule::AnyType,
Rule::MissingReturnTypeClassMethod,
Rule::MissingReturnTypePrivateFunction,
Rule::MissingReturnTypeSpecialMethod,
Rule::MissingReturnTypeStaticMethod,
Rule::MissingReturnTypeUndocumentedPublicFunction,
Rule::MissingTypeArgs,
Rule::MissingTypeCls,
Rule::MissingTypeFunctionArgument,
Rule::MissingTypeKwargs,
Rule::MissingTypeSelf,
]);
let enforce_stubs = checker.is_stub
&& checker.any_enabled(&[Rule::DocstringInStub, Rule::IterMethodReturnIterable]);
let enforce_docstrings = checker.any_enabled(&[
Rule::BlankLineAfterLastSection,
Rule::BlankLineAfterSummary,
Rule::BlankLineBeforeClass,
Rule::BlankLinesBetweenHeaderAndContent,
Rule::CapitalizeSectionName,
Rule::DashedUnderlineAfterSection,
Rule::DocstringStartsWithThis,
Rule::EmptyDocstring,
Rule::EmptyDocstringSection,
Rule::EndsInPeriod,
Rule::EndsInPunctuation,
Rule::EscapeSequenceInDocstring,
Rule::FirstLineCapitalized,
Rule::FitsOnOneLine,
Rule::IndentWithSpaces,
Rule::MultiLineSummaryFirstLine,
Rule::MultiLineSummarySecondLine,
Rule::NewLineAfterLastParagraph,
Rule::NewLineAfterSectionName,
Rule::NoBlankLineAfterFunction,
Rule::NoBlankLineAfterSection,
Rule::NoBlankLineBeforeFunction,
Rule::NoBlankLineBeforeSection,
Rule::NoSignature,
Rule::NonImperativeMood,
Rule::OneBlankLineAfterClass,
Rule::OneBlankLineBeforeClass,
Rule::OverIndentation,
Rule::OverloadWithDocstring,
Rule::SectionNameEndsInColon,
Rule::SectionNotOverIndented,
Rule::SectionUnderlineAfterName,
Rule::SectionUnderlineMatchesSectionLength,
Rule::SectionUnderlineNotOverIndented,
Rule::SurroundingWhitespace,
Rule::TripleSingleQuotes,
Rule::UnderIndentation,
Rule::UndocumentedMagicMethod,
Rule::UndocumentedParam,
Rule::UndocumentedPublicClass,
Rule::UndocumentedPublicFunction,
Rule::UndocumentedPublicInit,
Rule::UndocumentedPublicMethod,
Rule::UndocumentedPublicModule,
Rule::UndocumentedPublicNestedClass,
Rule::UndocumentedPublicPackage,
]);
if !enforce_annotations && !enforce_docstrings && !enforce_stubs {
return;
}
// Compute visibility of all definitions.
let exports: Option<Vec<&str>> = {
checker
.semantic
.global_scope()
.get_all("__all__")
.map(|binding_id| &checker.semantic.bindings[binding_id])
.filter_map(|binding| match &binding.kind {
BindingKind::Export(Export { names }) => Some(names.iter().copied()),
_ => None,
})
.fold(None, |acc, names| {
Some(acc.into_iter().flatten().chain(names).collect())
})
};
let definitions = std::mem::take(&mut checker.semantic.definitions);
let mut overloaded_name: Option<String> = None;
for ContextualizedDefinition {
definition,
visibility,
} in definitions.resolve(exports.as_deref()).iter()
{
let docstring = docstrings::extraction::extract_docstring(definition);
// flake8-annotations
if enforce_annotations {
// TODO(charlie): This should be even stricter, in that an overload
// implementation should come immediately after the overloaded
// interfaces, without any AST nodes in between. Right now, we
// only error when traversing definition boundaries (functions,
// classes, etc.).
if !overloaded_name.map_or(false, |overloaded_name| {
flake8_annotations::helpers::is_overload_impl(
definition,
&overloaded_name,
&checker.semantic,
)
}) {
checker
.diagnostics
.extend(flake8_annotations::rules::definition(
checker,
definition,
*visibility,
));
}
overloaded_name =
flake8_annotations::helpers::overloaded_name(definition, &checker.semantic);
}
// flake8-pyi
if enforce_stubs {
if checker.enabled(Rule::DocstringInStub) {
flake8_pyi::rules::docstring_in_stubs(checker, docstring);
}
if checker.enabled(Rule::IterMethodReturnIterable) {
flake8_pyi::rules::iter_method_return_iterable(checker, definition);
}
}
// pydocstyle
if enforce_docstrings {
if pydocstyle::helpers::should_ignore_definition(
definition,
&checker.settings.pydocstyle.ignore_decorators,
&checker.semantic,
) {
continue;
}
// Extract a `Docstring` from a `Definition`.
let Some(expr) = docstring else {
pydocstyle::rules::not_missing(checker, definition, *visibility);
continue;
};
let contents = checker.locator.slice(expr.range());
let indentation = checker.locator.slice(TextRange::new(
checker.locator.line_start(expr.start()),
expr.start(),
));
if pydocstyle::helpers::should_ignore_docstring(contents) {
#[allow(deprecated)]
let location = checker.locator.compute_source_location(expr.start());
warn_user!(
"Docstring at {}:{}:{} contains implicit string concatenation; ignoring...",
relativize_path(checker.path),
location.row,
location.column
);
continue;
}
// SAFETY: Safe for docstrings that pass `should_ignore_docstring`.
let body_range = raw_contents_range(contents).unwrap();
let docstring = Docstring {
definition,
expr,
contents,
body_range,
indentation,
};
if !pydocstyle::rules::not_empty(checker, &docstring) {
continue;
}
if checker.enabled(Rule::FitsOnOneLine) {
pydocstyle::rules::one_liner(checker, &docstring);
}
if checker.any_enabled(&[
Rule::NoBlankLineAfterFunction,
Rule::NoBlankLineBeforeFunction,
]) {
pydocstyle::rules::blank_before_after_function(checker, &docstring);
}
if checker.any_enabled(&[
Rule::BlankLineBeforeClass,
Rule::OneBlankLineAfterClass,
Rule::OneBlankLineBeforeClass,
]) {
pydocstyle::rules::blank_before_after_class(checker, &docstring);
}
if checker.enabled(Rule::BlankLineAfterSummary) {
pydocstyle::rules::blank_after_summary(checker, &docstring);
}
if checker.any_enabled(&[
Rule::IndentWithSpaces,
Rule::OverIndentation,
Rule::UnderIndentation,
]) {
pydocstyle::rules::indent(checker, &docstring);
}
if checker.enabled(Rule::NewLineAfterLastParagraph) {
pydocstyle::rules::newline_after_last_paragraph(checker, &docstring);
}
if checker.enabled(Rule::SurroundingWhitespace) {
pydocstyle::rules::no_surrounding_whitespace(checker, &docstring);
}
if checker.any_enabled(&[
Rule::MultiLineSummaryFirstLine,
Rule::MultiLineSummarySecondLine,
]) {
pydocstyle::rules::multi_line_summary_start(checker, &docstring);
}
if checker.enabled(Rule::TripleSingleQuotes) {
pydocstyle::rules::triple_quotes(checker, &docstring);
}
if checker.enabled(Rule::EscapeSequenceInDocstring) {
pydocstyle::rules::backslashes(checker, &docstring);
}
if checker.enabled(Rule::EndsInPeriod) {
pydocstyle::rules::ends_with_period(checker, &docstring);
}
if checker.enabled(Rule::NonImperativeMood) {
pydocstyle::rules::non_imperative_mood(
checker,
&docstring,
&checker.settings.pydocstyle.property_decorators,
);
}
if checker.enabled(Rule::NoSignature) {
pydocstyle::rules::no_signature(checker, &docstring);
}
if checker.enabled(Rule::FirstLineCapitalized) {
pydocstyle::rules::capitalized(checker, &docstring);
}
if checker.enabled(Rule::DocstringStartsWithThis) {
pydocstyle::rules::starts_with_this(checker, &docstring);
}
if checker.enabled(Rule::EndsInPunctuation) {
pydocstyle::rules::ends_with_punctuation(checker, &docstring);
}
if checker.enabled(Rule::OverloadWithDocstring) {
pydocstyle::rules::if_needed(checker, &docstring);
}
if checker.any_enabled(&[
Rule::BlankLineAfterLastSection,
Rule::BlankLinesBetweenHeaderAndContent,
Rule::CapitalizeSectionName,
Rule::DashedUnderlineAfterSection,
Rule::EmptyDocstringSection,
Rule::MultiLineSummaryFirstLine,
Rule::NewLineAfterSectionName,
Rule::NoBlankLineAfterSection,
Rule::NoBlankLineBeforeSection,
Rule::SectionNameEndsInColon,
Rule::SectionNotOverIndented,
Rule::SectionUnderlineAfterName,
Rule::SectionUnderlineMatchesSectionLength,
Rule::SectionUnderlineNotOverIndented,
Rule::UndocumentedParam,
]) {
pydocstyle::rules::sections(
checker,
&docstring,
checker.settings.pydocstyle.convention.as_ref(),
);
}
}
}
}

View File

@@ -1,88 +0,0 @@
use rustpython_parser::ast::{self, ExceptHandler, Ranged};
use crate::checkers::ast::Checker;
use crate::registry::Rule;
use crate::rules::{
flake8_bandit, flake8_blind_except, flake8_bugbear, flake8_builtins, pycodestyle, pylint,
tryceratops,
};
/// Run lint rules over an [`ExceptHandler`] syntax node.
pub(crate) fn except_handler(except_handler: &ExceptHandler, checker: &mut Checker) {
match except_handler {
ExceptHandler::ExceptHandler(ast::ExceptHandlerExceptHandler {
type_,
name,
body,
range: _,
}) => {
if checker.enabled(Rule::BareExcept) {
if let Some(diagnostic) = pycodestyle::rules::bare_except(
type_.as_deref(),
body,
except_handler,
checker.locator,
) {
checker.diagnostics.push(diagnostic);
}
}
if checker.enabled(Rule::RaiseWithoutFromInsideExcept) {
flake8_bugbear::rules::raise_without_from_inside_except(
checker,
name.as_deref(),
body,
);
}
if checker.enabled(Rule::BlindExcept) {
flake8_blind_except::rules::blind_except(
checker,
type_.as_deref(),
name.as_deref(),
body,
);
}
if checker.enabled(Rule::TryExceptPass) {
flake8_bandit::rules::try_except_pass(
checker,
except_handler,
type_.as_deref(),
body,
checker.settings.flake8_bandit.check_typed_exception,
);
}
if checker.enabled(Rule::TryExceptContinue) {
flake8_bandit::rules::try_except_continue(
checker,
except_handler,
type_.as_deref(),
body,
checker.settings.flake8_bandit.check_typed_exception,
);
}
if checker.enabled(Rule::ExceptWithEmptyTuple) {
flake8_bugbear::rules::except_with_empty_tuple(checker, except_handler);
}
if checker.enabled(Rule::ExceptWithNonExceptionClasses) {
flake8_bugbear::rules::except_with_non_exception_classes(checker, except_handler);
}
if checker.enabled(Rule::ReraiseNoCause) {
tryceratops::rules::reraise_no_cause(checker, body);
}
if checker.enabled(Rule::BinaryOpException) {
pylint::rules::binary_op_exception(checker, except_handler);
}
if let Some(name) = name {
if checker.enabled(Rule::AmbiguousVariableName) {
if let Some(diagnostic) =
pycodestyle::rules::ambiguous_variable_name(name.as_str(), name.range())
{
checker.diagnostics.push(diagnostic);
}
}
if checker.enabled(Rule::BuiltinVariableShadowing) {
flake8_builtins::rules::builtin_variable_shadowing(checker, name, name.range());
}
}
}
}
}

File diff suppressed because it is too large

View File

@@ -1,27 +0,0 @@
pub(super) use argument::argument;
pub(super) use arguments::arguments;
pub(super) use bindings::bindings;
pub(super) use comprehension::comprehension;
pub(super) use deferred_for_loops::deferred_for_loops;
pub(super) use deferred_scopes::deferred_scopes;
pub(super) use definitions::definitions;
pub(super) use except_handler::except_handler;
pub(super) use expression::expression;
pub(super) use module::module;
pub(super) use statement::statement;
pub(super) use suite::suite;
pub(super) use unresolved_references::unresolved_references;
mod argument;
mod arguments;
mod bindings;
mod comprehension;
mod deferred_for_loops;
mod deferred_scopes;
mod definitions;
mod except_handler;
mod expression;
mod module;
mod statement;
mod suite;
mod unresolved_references;

View File

@@ -1,12 +0,0 @@
use rustpython_parser::ast::Suite;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::flake8_bugbear;
/// Run lint rules over a module.
pub(crate) fn module(suite: &Suite, checker: &mut Checker) {
if checker.enabled(Rule::FStringDocstring) {
flake8_bugbear::rules::f_string_docstring(checker, suite);
}
}

File diff suppressed because it is too large

View File

@@ -1,12 +0,0 @@
use rustpython_parser::ast::Stmt;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::flake8_pie;
/// Run lint rules over a suite of [`Stmt`] syntax nodes.
pub(crate) fn suite(suite: &[Stmt], checker: &mut Checker) {
if checker.enabled(Rule::UnnecessaryPass) {
flake8_pie::rules::no_unnecessary_pass(checker, suite);
}
}

View File

@@ -1,47 +0,0 @@
use ruff_diagnostics::Diagnostic;
use ruff_python_semantic::Exceptions;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::pyflakes;
/// Run lint rules over all [`UnresolvedReference`] entities in the [`SemanticModel`].
pub(crate) fn unresolved_references(checker: &mut Checker) {
if !checker.any_enabled(&[Rule::UndefinedLocalWithImportStarUsage, Rule::UndefinedName]) {
return;
}
for reference in checker.semantic.unresolved_references() {
if reference.is_wildcard_import() {
if checker.enabled(Rule::UndefinedLocalWithImportStarUsage) {
checker.diagnostics.push(Diagnostic::new(
pyflakes::rules::UndefinedLocalWithImportStarUsage {
name: reference.name(checker.locator).to_string(),
},
reference.range(),
));
}
} else {
if checker.enabled(Rule::UndefinedName) {
// Avoid flagging if `NameError` is handled.
if reference.exceptions().contains(Exceptions::NAME_ERROR) {
continue;
}
// Allow __path__.
if checker.path.ends_with("__init__.py") {
if reference.name(checker.locator) == "__path__" {
continue;
}
}
checker.diagnostics.push(Diagnostic::new(
pyflakes::rules::UndefinedName {
name: reference.name(checker.locator).to_string(),
},
reference.range(),
));
}
}
}
}

View File

@@ -14,4 +14,5 @@ pub(crate) struct Deferred<'a> {
pub(crate) functions: Vec<Snapshot>,
pub(crate) lambdas: Vec<(&'a Expr, Snapshot)>,
pub(crate) for_loops: Vec<Snapshot>,
pub(crate) assignments: Vec<Snapshot>,
}

File diff suppressed because it is too large

View File

@@ -1,7 +1,5 @@
//! `NoQA` enforcement and validation.
use std::path::Path;
use itertools::Itertools;
use ruff_text_size::{TextLen, TextRange, TextSize};
use rustpython_parser::ast::Ranged;
@@ -18,7 +16,6 @@ use crate::settings::Settings;
pub(crate) fn check_noqa(
diagnostics: &mut Vec<Diagnostic>,
path: &Path,
locator: &Locator,
comment_ranges: &[TextRange],
noqa_line_for: &NoqaMapping,
@@ -26,10 +23,10 @@ pub(crate) fn check_noqa(
settings: &Settings,
) -> Vec<usize> {
// Identify any codes that are globally exempted (within the current file).
let exemption = FileExemption::try_extract(locator.contents(), comment_ranges, path, locator);
let exemption = FileExemption::try_extract(locator.contents(), comment_ranges, locator);
// Extract all `noqa` directives.
let mut noqa_directives = NoqaDirectives::from_commented_ranges(comment_ranges, path, locator);
let mut noqa_directives = NoqaDirectives::from_commented_ranges(comment_ranges, locator);
// Indices of diagnostics that were ignored by a `noqa` directive.
let mut ignored_diagnostics = vec![];

View File

@@ -1,21 +1,29 @@
//! Lint rules based on checking physical lines.
use std::path::Path;
use ruff_text_size::TextSize;
use ruff_diagnostics::Diagnostic;
use ruff_python_ast::source_code::{Indexer, Locator, Stylist};
use ruff_python_trivia::UniversalNewlines;
use ruff_python_whitespace::UniversalNewlines;
use crate::comments::shebang::ShebangDirective;
use crate::registry::Rule;
use crate::rules::flake8_copyright::rules::missing_copyright_notice;
use crate::rules::flake8_executable::rules::{
shebang_missing, shebang_newline, shebang_not_executable, shebang_python, shebang_whitespace,
};
use crate::rules::pycodestyle::rules::{
doc_line_too_long, line_too_long, mixed_spaces_and_tabs, no_newline_at_end_of_file,
tab_indentation, trailing_whitespace,
};
use crate::rules::pygrep_hooks::rules::{blanket_noqa, blanket_type_ignore};
use crate::rules::pylint;
use crate::rules::pyupgrade::rules::unnecessary_coding_comment;
use crate::settings::Settings;
pub(crate) fn check_physical_lines(
path: &Path,
locator: &Locator,
stylist: &Stylist,
indexer: &Indexer,
@@ -23,7 +31,15 @@ pub(crate) fn check_physical_lines(
settings: &Settings,
) -> Vec<Diagnostic> {
let mut diagnostics: Vec<Diagnostic> = vec![];
let mut has_any_shebang = false;
let enforce_blanket_noqa = settings.rules.enabled(Rule::BlanketNOQA);
let enforce_shebang_not_executable = settings.rules.enabled(Rule::ShebangNotExecutable);
let enforce_shebang_missing = settings.rules.enabled(Rule::ShebangMissingExecutableFile);
let enforce_shebang_whitespace = settings.rules.enabled(Rule::ShebangLeadingWhitespace);
let enforce_shebang_newline = settings.rules.enabled(Rule::ShebangNotFirstLine);
let enforce_shebang_python = settings.rules.enabled(Rule::ShebangMissingPython);
let enforce_blanket_type_ignore = settings.rules.enabled(Rule::BlanketTypeIgnore);
let enforce_doc_line_too_long = settings.rules.enabled(Rule::DocLineTooLong);
let enforce_line_too_long = settings.rules.enabled(Rule::LineTooLong);
let enforce_no_newline_at_end_of_file = settings.rules.enabled(Rule::MissingNewlineAtEndOfFile);
@@ -37,6 +53,7 @@ pub(crate) fn check_physical_lines(
let enforce_copyright_notice = settings.rules.enabled(Rule::MissingCopyrightNotice);
let fix_unnecessary_coding_comment = settings.rules.should_fix(Rule::UTF8EncodingDeclaration);
let fix_shebang_whitespace = settings.rules.should_fix(Rule::ShebangLeadingWhitespace);
let mut commented_lines_iter = indexer.comment_ranges().iter().peekable();
let mut doc_lines_iter = doc_lines.iter().peekable();
@@ -55,6 +72,51 @@ pub(crate) fn check_physical_lines(
}
}
}
if enforce_blanket_type_ignore {
blanket_type_ignore(&mut diagnostics, &line);
}
if enforce_blanket_noqa {
blanket_noqa(&mut diagnostics, &line);
}
if enforce_shebang_missing
|| enforce_shebang_not_executable
|| enforce_shebang_whitespace
|| enforce_shebang_newline
|| enforce_shebang_python
{
if let Some(shebang) = ShebangDirective::try_extract(&line) {
has_any_shebang = true;
if enforce_shebang_not_executable {
if let Some(diagnostic) =
shebang_not_executable(path, line.range(), &shebang)
{
diagnostics.push(diagnostic);
}
}
if enforce_shebang_whitespace {
if let Some(diagnostic) =
shebang_whitespace(line.range(), &shebang, fix_shebang_whitespace)
{
diagnostics.push(diagnostic);
}
}
if enforce_shebang_newline {
if let Some(diagnostic) =
shebang_newline(line.range(), &shebang, index == 0)
{
diagnostics.push(diagnostic);
}
}
if enforce_shebang_python {
if let Some(diagnostic) = shebang_python(line.range(), &shebang) {
diagnostics.push(diagnostic);
}
}
}
}
}
while doc_lines_iter
@@ -107,6 +169,12 @@ pub(crate) fn check_physical_lines(
}
}
if enforce_shebang_missing && !has_any_shebang {
if let Some(diagnostic) = shebang_missing(path) {
diagnostics.push(diagnostic);
}
}
if enforce_copyright_notice {
if let Some(diagnostic) = missing_copyright_notice(locator, settings) {
diagnostics.push(diagnostic);
@@ -118,6 +186,8 @@ pub(crate) fn check_physical_lines(
#[cfg(test)]
mod tests {
use std::path::Path;
use rustpython_parser::lexer::lex;
use rustpython_parser::Mode;
@@ -139,6 +209,7 @@ mod tests {
let check_with_max_line_length = |line_length: LineLength| {
check_physical_lines(
Path::new("foo.py"),
&locator,
&stylist,
&indexer,
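
The reworked `check_physical_lines` above folds the shebang rules into the per-line pass: each line is checked for a directive, a flag records whether any line carried one, and the "shebang missing" diagnostic is only emitted after the whole file has been scanned. A simplified, self-contained sketch of that shape (the `Diag` enum and the `is_executable_script` flag are illustrative stand-ins, not ruff's types):

#[derive(Debug)]
enum Diag {
    ShebangNotFirstLine,
    ShebangMissingPython,
    ShebangMissing,
}

fn parse_shebang(line: &str) -> Option<&str> {
    line.trim_start().strip_prefix("#!")
}

fn check_lines(lines: &[&str], is_executable_script: bool) -> Vec<Diag> {
    let mut diagnostics = Vec::new();
    let mut has_any_shebang = false;

    for (index, line) in lines.iter().copied().enumerate() {
        if let Some(command) = parse_shebang(line) {
            has_any_shebang = true;
            if index != 0 {
                diagnostics.push(Diag::ShebangNotFirstLine);
            }
            if !command.contains("python") {
                diagnostics.push(Diag::ShebangMissingPython);
            }
        }
    }

    // Only reported once the whole file has been scanned.
    if is_executable_script && !has_any_shebang {
        diagnostics.push(Diag::ShebangMissing);
    }
    diagnostics
}

fn main() {
    let diags = check_lines(&["import os", "#!/usr/bin/env sh"], true);
    println!("{diags:?}"); // [ShebangNotFirstLine, ShebangMissingPython]
}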

View File

@@ -1,7 +1,5 @@
//! Lint rules based on token traversal.
use std::path::Path;
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
@@ -13,37 +11,83 @@ use crate::lex::docstring_detection::StateMachine;
use crate::registry::{AsRule, Rule};
use crate::rules::ruff::rules::Context;
use crate::rules::{
eradicate, flake8_commas, flake8_executable, flake8_fixme, flake8_implicit_str_concat,
flake8_pyi, flake8_quotes, flake8_todos, pycodestyle, pygrep_hooks, pylint, pyupgrade, ruff,
eradicate, flake8_commas, flake8_fixme, flake8_implicit_str_concat, flake8_pyi, flake8_quotes,
flake8_todos, pycodestyle, pylint, pyupgrade, ruff,
};
use crate::settings::Settings;
pub(crate) fn check_tokens(
tokens: &[LexResult],
path: &Path,
locator: &Locator,
indexer: &Indexer,
tokens: &[LexResult],
settings: &Settings,
is_stub: bool,
) -> Vec<Diagnostic> {
let mut diagnostics: Vec<Diagnostic> = vec![];
if settings.rules.enabled(Rule::BlanketNOQA) {
pygrep_hooks::rules::blanket_noqa(&mut diagnostics, indexer, locator);
}
if settings.rules.enabled(Rule::BlanketTypeIgnore) {
pygrep_hooks::rules::blanket_type_ignore(&mut diagnostics, indexer, locator);
}
if settings.rules.any_enabled(&[
let enforce_ambiguous_unicode_character = settings.rules.any_enabled(&[
Rule::AmbiguousUnicodeCharacterString,
Rule::AmbiguousUnicodeCharacterDocstring,
Rule::AmbiguousUnicodeCharacterComment,
]) {
]);
let enforce_invalid_string_character = settings.rules.any_enabled(&[
Rule::InvalidCharacterBackspace,
Rule::InvalidCharacterSub,
Rule::InvalidCharacterEsc,
Rule::InvalidCharacterNul,
Rule::InvalidCharacterZeroWidthSpace,
]);
let enforce_quotes = settings.rules.any_enabled(&[
Rule::BadQuotesInlineString,
Rule::BadQuotesMultilineString,
Rule::BadQuotesDocstring,
Rule::AvoidableEscapedQuote,
]);
let enforce_commented_out_code = settings.rules.enabled(Rule::CommentedOutCode);
let enforce_compound_statements = settings.rules.any_enabled(&[
Rule::MultipleStatementsOnOneLineColon,
Rule::MultipleStatementsOnOneLineSemicolon,
Rule::UselessSemicolon,
]);
let enforce_invalid_escape_sequence = settings.rules.enabled(Rule::InvalidEscapeSequence);
let enforce_implicit_string_concatenation = settings.rules.any_enabled(&[
Rule::SingleLineImplicitStringConcatenation,
Rule::MultiLineImplicitStringConcatenation,
]);
let enforce_trailing_comma = settings.rules.any_enabled(&[
Rule::MissingTrailingComma,
Rule::TrailingCommaOnBareTuple,
Rule::ProhibitedTrailingComma,
]);
let enforce_extraneous_parenthesis = settings.rules.enabled(Rule::ExtraneousParentheses);
let enforce_type_comment_in_stub = settings.rules.enabled(Rule::TypeCommentInStub);
// Combine flake8_todos and flake8_fixme so that we can reuse detected [`TodoDirective`]s.
let enforce_todos = settings.rules.any_enabled(&[
Rule::InvalidTodoTag,
Rule::MissingTodoAuthor,
Rule::MissingTodoLink,
Rule::MissingTodoColon,
Rule::MissingTodoDescription,
Rule::InvalidTodoCapitalization,
Rule::MissingSpaceAfterTodoColon,
Rule::LineContainsFixme,
Rule::LineContainsXxx,
Rule::LineContainsTodo,
Rule::LineContainsHack,
]);
// RUF001, RUF002, RUF003
if enforce_ambiguous_unicode_character {
let mut state_machine = StateMachine::default();
for &(ref tok, range) in tokens.iter().flatten() {
let is_docstring = state_machine.consume(tok);
let is_docstring = if enforce_ambiguous_unicode_character {
state_machine.consume(tok)
} else {
false
};
if matches!(tok, Tok::String { .. } | Tok::Comment(_)) {
ruff::rules::ambiguous_unicode_character(
&mut diagnostics,
@@ -64,11 +108,13 @@ pub(crate) fn check_tokens(
}
}
if settings.rules.enabled(Rule::CommentedOutCode) {
// ERA001
if enforce_commented_out_code {
eradicate::rules::commented_out_code(&mut diagnostics, locator, indexer, settings);
}
if settings.rules.enabled(Rule::InvalidEscapeSequence) {
// W605
if enforce_invalid_escape_sequence {
for (tok, range) in tokens.iter().flatten() {
if tok.is_string() {
pycodestyle::rules::invalid_escape_sequence(
@@ -80,14 +126,8 @@ pub(crate) fn check_tokens(
}
}
}
if settings.rules.any_enabled(&[
Rule::InvalidCharacterBackspace,
Rule::InvalidCharacterSub,
Rule::InvalidCharacterEsc,
Rule::InvalidCharacterNul,
Rule::InvalidCharacterZeroWidthSpace,
]) {
// PLE2510, PLE2512, PLE2513
if enforce_invalid_string_character {
for (tok, range) in tokens.iter().flatten() {
if tok.is_string() {
pylint::rules::invalid_string_characters(&mut diagnostics, *range, locator);
@@ -95,11 +135,8 @@ pub(crate) fn check_tokens(
}
}
if settings.rules.any_enabled(&[
Rule::MultipleStatementsOnOneLineColon,
Rule::MultipleStatementsOnOneLineSemicolon,
Rule::UselessSemicolon,
]) {
// E701, E702, E703
if enforce_compound_statements {
pycodestyle::rules::compound_statements(
&mut diagnostics,
tokens,
@@ -109,19 +146,13 @@ pub(crate) fn check_tokens(
);
}
if settings.rules.any_enabled(&[
Rule::BadQuotesInlineString,
Rule::BadQuotesMultilineString,
Rule::BadQuotesDocstring,
Rule::AvoidableEscapedQuote,
]) {
// Q001, Q002, Q003
if enforce_quotes {
flake8_quotes::rules::from_tokens(&mut diagnostics, tokens, locator, settings);
}
if settings.rules.any_enabled(&[
Rule::MultiLineImplicitStringConcatenation,
Rule::SingleLineImplicitStringConcatenation,
]) {
// ISC001, ISC002
if enforce_implicit_string_concatenation {
flake8_implicit_str_concat::rules::implicit(
&mut diagnostics,
tokens,
@@ -130,49 +161,24 @@ pub(crate) fn check_tokens(
);
}
if settings.rules.enabled(Rule::ExplicitStringConcatenation) {
flake8_implicit_str_concat::rules::explicit(&mut diagnostics, tokens);
}
if settings.rules.any_enabled(&[
Rule::MissingTrailingComma,
Rule::TrailingCommaOnBareTuple,
Rule::ProhibitedTrailingComma,
]) {
// COM812, COM818, COM819
if enforce_trailing_comma {
flake8_commas::rules::trailing_commas(&mut diagnostics, tokens, locator, settings);
}
if settings.rules.enabled(Rule::ExtraneousParentheses) {
// UP034
if enforce_extraneous_parenthesis {
pyupgrade::rules::extraneous_parentheses(&mut diagnostics, tokens, locator, settings);
}
if is_stub && settings.rules.enabled(Rule::TypeCommentInStub) {
// PYI033
if enforce_type_comment_in_stub && is_stub {
flake8_pyi::rules::type_comment_in_stub(&mut diagnostics, locator, indexer);
}
if settings.rules.any_enabled(&[
Rule::ShebangNotExecutable,
Rule::ShebangMissingExecutableFile,
Rule::ShebangLeadingWhitespace,
Rule::ShebangNotFirstLine,
Rule::ShebangMissingPython,
]) {
flake8_executable::rules::from_tokens(tokens, path, locator, settings, &mut diagnostics);
}
if settings.rules.any_enabled(&[
Rule::InvalidTodoTag,
Rule::MissingTodoAuthor,
Rule::MissingTodoLink,
Rule::MissingTodoColon,
Rule::MissingTodoDescription,
Rule::InvalidTodoCapitalization,
Rule::MissingSpaceAfterTodoColon,
Rule::LineContainsFixme,
Rule::LineContainsXxx,
Rule::LineContainsTodo,
Rule::LineContainsHack,
]) {
// TD001, TD002, TD003, TD004, TD005, TD006, TD007
// T001, T002, T003, T004
if enforce_todos {
let todo_comments: Vec<TodoComment> = indexer
.comment_ranges()
.iter()
@@ -182,7 +188,9 @@ pub(crate) fn check_tokens(
TodoComment::from_comment(comment, *comment_range, i)
})
.collect();
flake8_todos::rules::todos(&mut diagnostics, &todo_comments, locator, indexer, settings);
flake8_fixme::rules::todos(&mut diagnostics, &todo_comments);
}
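
The refactor above pre-computes the `enforce_*` booleans once, via the rule table's `enabled`/`any_enabled` lookups, instead of re-querying the settings inside the token loop. A toy sketch of that pattern with simplified stand-ins for `RuleTable` and `Rule` (not ruff's actual types):

use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
enum Rule {
    CommentedOutCode,
    InvalidEscapeSequence,
    BadQuotesInlineString,
    AvoidableEscapedQuote,
}

struct RuleTable(HashSet<Rule>);

impl RuleTable {
    fn enabled(&self, rule: Rule) -> bool {
        self.0.contains(&rule)
    }
    fn any_enabled(&self, rules: &[Rule]) -> bool {
        rules.iter().any(|rule| self.enabled(*rule))
    }
}

fn check_tokens(tokens: &[&str], rules: &RuleTable) -> Vec<String> {
    // Hoisted once, outside the hot loop.
    let enforce_commented_out_code = rules.enabled(Rule::CommentedOutCode);
    let enforce_quotes = rules.any_enabled(&[
        Rule::BadQuotesInlineString,
        Rule::AvoidableEscapedQuote,
    ]);

    let mut diagnostics = Vec::new();
    for token in tokens {
        if enforce_commented_out_code && token.starts_with("# x =") {
            diagnostics.push(format!("commented-out code: {token}"));
        }
        if enforce_quotes && token.contains('\'') {
            diagnostics.push(format!("bad quotes: {token}"));
        }
    }
    diagnostics
}

fn main() {
    let rules = RuleTable([Rule::CommentedOutCode].into_iter().collect());
    println!("{:?}", check_tokens(&["# x = 1", "'s'"], &rules));
}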

View File

@@ -172,10 +172,10 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Pylint, "C0131") => (RuleGroup::Unspecified, rules::pylint::rules::TypeBivariance),
(Pylint, "C0132") => (RuleGroup::Unspecified, rules::pylint::rules::TypeParamNameMismatch),
(Pylint, "C0205") => (RuleGroup::Unspecified, rules::pylint::rules::SingleStringSlots),
(Pylint, "C0208") => (RuleGroup::Unspecified, rules::pylint::rules::IterationOverSet),
(Pylint, "C0414") => (RuleGroup::Unspecified, rules::pylint::rules::UselessImportAlias),
(Pylint, "C1901") => (RuleGroup::Nursery, rules::pylint::rules::CompareToEmptyString),
(Pylint, "C3002") => (RuleGroup::Unspecified, rules::pylint::rules::UnnecessaryDirectLambdaCall),
(Pylint, "C0208") => (RuleGroup::Unspecified, rules::pylint::rules::IterationOverSet),
(Pylint, "E0100") => (RuleGroup::Unspecified, rules::pylint::rules::YieldInInit),
(Pylint, "E0101") => (RuleGroup::Unspecified, rules::pylint::rules::ReturnInInit),
(Pylint, "E0116") => (RuleGroup::Unspecified, rules::pylint::rules::ContinueInFinally),
@@ -214,7 +214,6 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Pylint, "R2004") => (RuleGroup::Unspecified, rules::pylint::rules::MagicValueComparison),
(Pylint, "R5501") => (RuleGroup::Unspecified, rules::pylint::rules::CollapsibleElseIf),
(Pylint, "W0120") => (RuleGroup::Unspecified, rules::pylint::rules::UselessElseOnLoop),
(Pylint, "W0127") => (RuleGroup::Unspecified, rules::pylint::rules::SelfAssigningVariable),
(Pylint, "W0129") => (RuleGroup::Unspecified, rules::pylint::rules::AssertOnStringLiteral),
(Pylint, "W0131") => (RuleGroup::Unspecified, rules::pylint::rules::NamedExprWithoutContext),
(Pylint, "W0406") => (RuleGroup::Unspecified, rules::pylint::rules::ImportSelf),
@@ -222,7 +221,6 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Pylint, "W0603") => (RuleGroup::Unspecified, rules::pylint::rules::GlobalStatement),
(Pylint, "W0711") => (RuleGroup::Unspecified, rules::pylint::rules::BinaryOpException),
(Pylint, "W1508") => (RuleGroup::Unspecified, rules::pylint::rules::InvalidEnvvarDefault),
(Pylint, "W1509") => (RuleGroup::Unspecified, rules::pylint::rules::SubprocessPopenPreexecFn),
(Pylint, "W2901") => (RuleGroup::Unspecified, rules::pylint::rules::RedefinedLoopName),
(Pylint, "W3301") => (RuleGroup::Unspecified, rules::pylint::rules::NestedMinMax),
@@ -237,7 +235,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Flake8Builtins, "003") => (RuleGroup::Unspecified, rules::flake8_builtins::rules::BuiltinAttributeShadowing),
// flake8-bugbear
(Flake8Bugbear, "002") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::UnaryPrefixIncrementDecrement),
(Flake8Bugbear, "002") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::UnaryPrefixIncrement),
(Flake8Bugbear, "003") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::AssignmentToOsEnviron),
(Flake8Bugbear, "004") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::UnreliableCallableCheck),
(Flake8Bugbear, "005") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::StripWithMultiCharacters),
@@ -631,12 +629,10 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Flake8Pyi, "014") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::ArgumentDefaultInStub),
(Flake8Pyi, "015") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::AssignmentDefaultInStub),
(Flake8Pyi, "016") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::DuplicateUnionMember),
(Flake8Pyi, "017") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::ComplexAssignmentInStub),
(Flake8Pyi, "020") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::QuotedAnnotationInStub),
(Flake8Pyi, "021") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::DocstringInStub),
(Flake8Pyi, "024") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::CollectionsNamedTuple),
(Flake8Pyi, "025") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnaliasedCollectionsAbcSetImport),
(Flake8Pyi, "026") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::TypeAliasWithoutAnnotation),
(Flake8Pyi, "029") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::StrOrReprDefinedInStub),
(Flake8Pyi, "030") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnnecessaryLiteralUnion),
(Flake8Pyi, "032") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::AnyEqNeAnnotation),
@@ -654,7 +650,6 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Flake8Pyi, "052") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnannotatedAssignmentInStub),
(Flake8Pyi, "054") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::NumericLiteralTooLong),
(Flake8Pyi, "053") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::StringOrBytesTooLong),
(Flake8Pyi, "056") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnsupportedMethodCallOnAll),
// flake8-pytest-style
(Flake8PytestStyle, "001") => (RuleGroup::Unspecified, rules::flake8_pytest_style::rules::PytestFixtureIncorrectParenthesesStyle),
@@ -752,13 +747,6 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Flake8UsePathlib, "122") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::OsPathSplitext),
(Flake8UsePathlib, "123") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::BuiltinOpen),
(Flake8UsePathlib, "124") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::violations::PyPath),
(Flake8UsePathlib, "201") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::rules::PathConstructorCurrentDirectory),
(Flake8UsePathlib, "202") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::rules::OsPathGetsize),
(Flake8UsePathlib, "202") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::rules::OsPathGetsize),
(Flake8UsePathlib, "203") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::rules::OsPathGetatime),
(Flake8UsePathlib, "204") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::rules::OsPathGetmtime),
(Flake8UsePathlib, "205") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::rules::OsPathGetctime),
(Flake8UsePathlib, "206") => (RuleGroup::Unspecified, rules::flake8_use_pathlib::rules::OsSepSplit),
// flake8-logging-format
(Flake8LoggingFormat, "001") => (RuleGroup::Unspecified, rules::flake8_logging_format::violations::LoggingStringFormat),
@@ -794,7 +782,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Ruff, "011") => (RuleGroup::Unspecified, rules::ruff::rules::StaticKeyDictComprehension),
(Ruff, "012") => (RuleGroup::Unspecified, rules::ruff::rules::MutableClassDefault),
(Ruff, "013") => (RuleGroup::Unspecified, rules::ruff::rules::ImplicitOptional),
#[cfg(feature = "unreachable-code")] // When removing this feature gate, also update rules_selector.rs
#[cfg(feature = "unreachable-code")]
(Ruff, "014") => (RuleGroup::Nursery, rules::ruff::rules::UnreachableCode),
(Ruff, "015") => (RuleGroup::Unspecified, rules::ruff::rules::UnnecessaryIterableAllocationForFirstElement),
(Ruff, "016") => (RuleGroup::Unspecified, rules::ruff::rules::InvalidIndexType),

View File

@@ -1,10 +1,15 @@
use std::ops::Deref;
use ruff_python_trivia::Cursor;
use ruff_python_whitespace::{is_python_whitespace, Cursor};
use ruff_text_size::{TextLen, TextSize};
/// A shebang directive (e.g., `#!/usr/bin/env python3`).
#[derive(Debug, PartialEq, Eq)]
pub(crate) struct ShebangDirective<'a>(&'a str);
pub(crate) struct ShebangDirective<'a> {
/// The offset of the directive contents (e.g., `/usr/bin/env python3`) from the start of the
/// line.
pub(crate) offset: TextSize,
/// The contents of the directive (e.g., `"/usr/bin/env python3"`).
pub(crate) contents: &'a str,
}
impl<'a> ShebangDirective<'a> {
/// Parse a shebang directive from a line, or return `None` if the line does not contain a
@@ -12,6 +17,9 @@ impl<'a> ShebangDirective<'a> {
pub(crate) fn try_extract(line: &'a str) -> Option<Self> {
let mut cursor = Cursor::new(line);
// Trim whitespace.
cursor.eat_while(is_python_whitespace);
// Trim the `#!` prefix.
if !cursor.eat_char('#') {
return None;
@@ -20,15 +28,10 @@ impl<'a> ShebangDirective<'a> {
return None;
}
Some(Self(cursor.chars().as_str()))
}
}
impl Deref for ShebangDirective<'_> {
type Target = str;
fn deref(&self) -> &Self::Target {
self.0
Some(Self {
offset: line.text_len() - cursor.text_len(),
contents: cursor.chars().as_str(),
})
}
}
@@ -56,12 +59,6 @@ mod tests {
assert_debug_snapshot!(ShebangDirective::try_extract(source));
}
#[test]
fn shebang_match_trailing_comment() {
let source = "#!/usr/bin/env python # trailing comment";
assert_debug_snapshot!(ShebangDirective::try_extract(source));
}
#[test]
fn shebang_leading_space() {
let source = " #!/usr/bin/env python";
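
The new `ShebangDirective` above records both the directive contents and their offset from the start of the line (leading whitespace plus the `#!` prefix), which is what the updated snapshots below reflect. A self-contained sketch of the same extraction using plain `usize` offsets in place of `ruff_text_size::TextSize` and `str` trimming in place of the `Cursor`:

#[derive(Debug, PartialEq, Eq)]
struct Shebang<'a> {
    /// Offset of the contents (e.g., `/usr/bin/env python3`) from line start.
    offset: usize,
    /// The directive contents.
    contents: &'a str,
}

fn try_extract(line: &str) -> Option<Shebang<'_>> {
    // Trim leading whitespace, keeping track of how much was consumed.
    let trimmed = line.trim_start();
    let leading = line.len() - trimmed.len();

    // Trim the `#!` prefix.
    let contents = trimmed.strip_prefix("#!")?;

    Some(Shebang {
        offset: leading + "#!".len(),
        contents,
    })
}

fn main() {
    assert_eq!(
        try_extract("  #!/usr/bin/env python"),
        Some(Shebang { offset: 4, contents: "/usr/bin/env python" })
    );
    assert_eq!(try_extract("print('hello')"), None);
}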

View File

@@ -2,4 +2,9 @@
source: crates/ruff/src/comments/shebang.rs
expression: "ShebangDirective::try_extract(source)"
---
None
Some(
ShebangDirective {
offset: 4,
contents: "/usr/bin/env python",
},
)

View File

@@ -3,7 +3,8 @@ source: crates/ruff/src/comments/shebang.rs
expression: "ShebangDirective::try_extract(source)"
---
Some(
ShebangDirective(
"/usr/bin/env python",
),
ShebangDirective {
offset: 2,
contents: "/usr/bin/env python",
},
)

View File

@@ -1,9 +0,0 @@
---
source: crates/ruff/src/comments/shebang.rs
expression: "ShebangDirective::try_extract(source)"
---
Some(
ShebangDirective(
"/usr/bin/env python # trailing comment",
),
)

View File

@@ -71,12 +71,12 @@ pub fn extract_directives(
indexer: &Indexer,
) -> Directives {
Directives {
noqa_line_for: if flags.intersects(Flags::NOQA) {
noqa_line_for: if flags.contains(Flags::NOQA) {
extract_noqa_line_for(lxr, locator, indexer)
} else {
NoqaMapping::default()
},
isort: if flags.intersects(Flags::ISORT) {
isort: if flags.contains(Flags::ISORT) {
extract_isort_directives(lxr, locator)
} else {
IsortDirectives::default()
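
The `intersects` → `contains` swap here (and in the `TextEmitter` further down) is about `bitflags` semantics: `intersects` is true when *any* of the given bits are set, while `contains` requires *all* of them. For a single-bit flag such as `Flags::NOQA` the two agree, so the change reads as a clarification rather than a behavior change. A small demonstration using the `bitflags` crate:

use bitflags::bitflags;

bitflags! {
    struct Flags: u8 {
        const NOQA = 0b01;
        const ISORT = 0b10;
    }
}

fn main() {
    let flags = Flags::NOQA;

    // Identical for a single-bit query...
    assert!(flags.contains(Flags::NOQA));
    assert!(flags.intersects(Flags::NOQA));

    // ...but different for a multi-bit query.
    assert!(flags.intersects(Flags::NOQA | Flags::ISORT)); // any bit set
    assert!(!flags.contains(Flags::NOQA | Flags::ISORT));  // all bits required
}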

View File

@@ -10,7 +10,7 @@ use rustpython_parser::Tok;
use ruff_python_ast::source_code::Locator;
use ruff_python_ast::statement_visitor::{walk_stmt, StatementVisitor};
use ruff_python_trivia::UniversalNewlineIterator;
use ruff_python_whitespace::UniversalNewlineIterator;
/// Extract doc lines (standalone comments) from a token sequence.
pub(crate) fn doc_lines_from_tokens(lxr: &[LexResult]) -> DocLines {

View File

@@ -5,7 +5,7 @@ use ruff_python_ast::docstrings::{leading_space, leading_words};
use ruff_text_size::{TextLen, TextRange, TextSize};
use strum_macros::EnumIter;
use ruff_python_trivia::{Line, UniversalNewlineIterator, UniversalNewlines};
use ruff_python_whitespace::{Line, UniversalNewlineIterator, UniversalNewlines};
use crate::docstrings::styles::SectionStyle;
use crate::docstrings::{Docstring, DocstringBody};

View File

@@ -8,7 +8,7 @@ use rustpython_parser::{lexer, Mode, Tok};
use ruff_diagnostics::Edit;
use ruff_python_ast::helpers::is_docstring_stmt;
use ruff_python_ast::source_code::{Locator, Stylist};
use ruff_python_trivia::{PythonWhitespace, UniversalNewlineIterator};
use ruff_python_whitespace::{PythonWhitespace, UniversalNewlineIterator};
use ruff_textwrap::indent;
#[derive(Debug, Clone, PartialEq, Eq)]
@@ -305,7 +305,7 @@ mod tests {
use rustpython_parser::Parse;
use ruff_python_ast::source_code::{Locator, Stylist};
use ruff_python_trivia::LineEnding;
use ruff_python_whitespace::LineEnding;
use super::Insertion;

View File

@@ -10,7 +10,7 @@ use serde::Serialize;
use serde_json::error::Category;
use ruff_diagnostics::Diagnostic;
use ruff_python_trivia::{NewlineWithTrailingNewline, UniversalNewlineIterator};
use ruff_python_whitespace::{NewlineWithTrailingNewline, UniversalNewlineIterator};
use ruff_text_size::{TextRange, TextSize};
use crate::autofix::source_map::{SourceMap, SourceMarker};

View File

@@ -100,9 +100,7 @@ pub fn check_path(
.any(|rule_code| rule_code.lint_source().is_tokens())
{
let is_stub = is_python_stub_file(path);
diagnostics.extend(check_tokens(
&tokens, path, locator, indexer, settings, is_stub,
));
diagnostics.extend(check_tokens(locator, indexer, &tokens, settings, is_stub));
}
// Run the filesystem-based rules.
@@ -195,7 +193,7 @@ pub fn check_path(
.any(|rule_code| rule_code.lint_source().is_physical_lines())
{
diagnostics.extend(check_physical_lines(
locator, stylist, indexer, &doc_lines, settings,
path, locator, stylist, indexer, &doc_lines, settings,
));
}
@@ -216,7 +214,6 @@ pub fn check_path(
{
let ignored = check_noqa(
&mut diagnostics,
path,
locator,
indexer.comment_ranges(),
&directives.noqa_line_for,
@@ -323,7 +320,6 @@ pub fn lint_only(
package: Option<&Path>,
settings: &Settings,
noqa: flags::Noqa,
source_kind: Option<&SourceKind>,
) -> LinterResult<(Vec<Message>, Option<ImportMap>)> {
// Tokenize once.
let tokens: Vec<LexResult> = ruff_rustpython::tokenize(contents);
@@ -356,7 +352,7 @@ pub fn lint_only(
&directives,
settings,
noqa,
source_kind,
None,
);
result.map(|(diagnostics, imports)| {

View File

@@ -109,11 +109,11 @@ impl Emitter for TextEmitter {
sep = ":".cyan(),
code_and_body = RuleCodeAndBody {
message,
show_fix_status: self.flags.intersects(EmitterFlags::SHOW_FIX_STATUS)
show_fix_status: self.flags.contains(EmitterFlags::SHOW_FIX_STATUS)
}
)?;
if self.flags.intersects(EmitterFlags::SHOW_SOURCE) {
if self.flags.contains(EmitterFlags::SHOW_SOURCE) {
writeln!(
writer,
"{}",
@@ -124,7 +124,7 @@ impl Emitter for TextEmitter {
)?;
}
if self.flags.intersects(EmitterFlags::SHOW_FIX_DIFF) {
if self.flags.contains(EmitterFlags::SHOW_FIX_DIFF) {
if let Some(diff) = Diff::from_message(message) {
writeln!(writer, "{diff}")?;
}

View File

@@ -13,10 +13,9 @@ use rustpython_parser::ast::Ranged;
use ruff_diagnostics::Diagnostic;
use ruff_python_ast::source_code::Locator;
use ruff_python_trivia::LineEnding;
use ruff_python_whitespace::LineEnding;
use crate::codes::NoqaCode;
use crate::fs::relativize_path;
use crate::registry::{AsRule, Rule, RuleSet};
use crate::rule_redirects::get_redirect_target;
@@ -68,8 +67,12 @@ impl<'a> Directive<'a> {
// If the next character is `:`, then it's a list of codes. Otherwise, it's a directive
// to ignore all rules.
let directive = match text[noqa_literal_end..].chars().next() {
Some(':') => {
return Ok(Some(
if text[noqa_literal_end..]
.chars()
.next()
.map_or(false, |c| c == ':')
{
// E.g., `# noqa: F401, F841`.
let mut codes_start = noqa_literal_end;
@@ -116,9 +119,8 @@ impl<'a> Directive<'a> {
range: range.add(offset),
codes,
})
}
None | Some('#') => {
// E.g., `# noqa` or `# noqa# ignore`.
} else {
// E.g., `# noqa`.
let range = TextRange::new(
TextSize::try_from(comment_start).unwrap(),
TextSize::try_from(noqa_literal_end).unwrap(),
@@ -126,21 +128,8 @@ impl<'a> Directive<'a> {
Self::All(All {
range: range.add(offset),
})
}
Some(c) if c.is_whitespace() => {
// E.g., `# noqa # ignore`.
let range = TextRange::new(
TextSize::try_from(comment_start).unwrap(),
TextSize::try_from(noqa_literal_end).unwrap(),
);
Self::All(All {
range: range.add(offset),
})
}
_ => return Err(ParseError::InvalidSuffix),
};
return Ok(Some(directive));
},
));
}
Ok(None)
@@ -236,7 +225,6 @@ impl FileExemption {
pub(crate) fn try_extract(
contents: &str,
comment_ranges: &[TextRange],
path: &Path,
locator: &Locator,
) -> Option<Self> {
let mut exempt_codes: Vec<NoqaCode> = vec![];
@@ -246,8 +234,7 @@ impl FileExemption {
Err(err) => {
#[allow(deprecated)]
let line = locator.compute_line_index(range.start());
let path_display = relativize_path(path);
warn!("Invalid `# ruff: noqa` directive at {path_display}:{line}: {err}");
warn!("Invalid `# noqa` directive on line {line}: {err}");
}
Ok(Some(ParsedFileExemption::All)) => {
return Some(Self::All);
@@ -260,8 +247,7 @@ impl FileExemption {
} else {
#[allow(deprecated)]
let line = locator.compute_line_index(range.start());
let path_display = relativize_path(path);
warn!("Invalid code provided to `# ruff: noqa` at {path_display}:{line}: {code}");
warn!("Invalid code provided to `# ruff: noqa` on line {line}: {code}");
None
}
}));
@@ -451,7 +437,6 @@ pub(crate) fn add_noqa(
line_ending: LineEnding,
) -> Result<usize> {
let (count, output) = add_noqa_inner(
path,
diagnostics,
locator,
commented_lines,
@@ -463,7 +448,6 @@ pub(crate) fn add_noqa(
}
fn add_noqa_inner(
path: &Path,
diagnostics: &[Diagnostic],
locator: &Locator,
commented_ranges: &[TextRange],
@@ -476,8 +460,8 @@ fn add_noqa_inner(
// Whether the file is exempted from all checks.
// Codes that are globally exempted (within the current file).
let exemption = FileExemption::try_extract(locator.contents(), commented_ranges, path, locator);
let directives = NoqaDirectives::from_commented_ranges(commented_ranges, path, locator);
let exemption = FileExemption::try_extract(locator.contents(), commented_ranges, locator);
let directives = NoqaDirectives::from_commented_ranges(commented_ranges, locator);
// Mark any non-ignored diagnostics.
for diagnostic in diagnostics {
@@ -641,7 +625,6 @@ pub(crate) struct NoqaDirectives<'a> {
impl<'a> NoqaDirectives<'a> {
pub(crate) fn from_commented_ranges(
comment_ranges: &[TextRange],
path: &Path,
locator: &'a Locator<'a>,
) -> Self {
let mut directives = Vec::new();
@@ -651,8 +634,7 @@ impl<'a> NoqaDirectives<'a> {
Err(err) => {
#[allow(deprecated)]
let line = locator.compute_line_index(range.start());
let path_display = relativize_path(path);
warn!("Invalid `# noqa` directive on {path_display}:{line}: {err}");
warn!("Invalid `# noqa` directive on line {line}: {err}");
}
Ok(Some(directive)) => {
// noqa comments are guaranteed to be single line.
@@ -776,14 +758,12 @@ impl FromIterator<TextRange> for NoqaMapping {
#[cfg(test)]
mod tests {
use std::path::Path;
use insta::assert_debug_snapshot;
use ruff_text_size::{TextRange, TextSize};
use ruff_diagnostics::Diagnostic;
use ruff_python_ast::source_code::Locator;
use ruff_python_trivia::LineEnding;
use ruff_python_whitespace::LineEnding;
use crate::noqa::{add_noqa_inner, Directive, NoqaMapping, ParsedFileExemption};
use crate::rules::pycodestyle::rules::AmbiguousVariableName;
@@ -915,12 +895,6 @@ mod tests {
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
}
#[test]
fn noqa_invalid_suffix() {
let source = "# noqa[F401]";
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
}
#[test]
fn flake8_exemption_all() {
let source = "# flake8: noqa";
@@ -972,12 +946,9 @@ mod tests {
#[test]
fn modification() {
let path = Path::new("/tmp/foo.txt");
let contents = "x = 1";
let noqa_line_for = NoqaMapping::default();
let (count, output) = add_noqa_inner(
path,
&[],
&Locator::new(contents),
&[],
@@ -997,7 +968,6 @@ mod tests {
let contents = "x = 1";
let noqa_line_for = NoqaMapping::default();
let (count, output) = add_noqa_inner(
path,
&diagnostics,
&Locator::new(contents),
&[],
@@ -1022,7 +992,6 @@ mod tests {
let contents = "x = 1 # noqa: E741\n";
let noqa_line_for = NoqaMapping::default();
let (count, output) = add_noqa_inner(
path,
&diagnostics,
&Locator::new(contents),
&[TextRange::new(TextSize::from(7), TextSize::from(19))],
@@ -1047,7 +1016,6 @@ mod tests {
let contents = "x = 1 # noqa";
let noqa_line_for = NoqaMapping::default();
let (count, output) = add_noqa_inner(
path,
&diagnostics,
&Locator::new(contents),
&[TextRange::new(TextSize::from(7), TextSize::from(13))],
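
The simplified `Directive::try_extract` above hinges on a single check: once the `noqa` literal is found, a following `:` means a code list, and anything else is a blanket directive (the old whitespace, `#`, and invalid-suffix cases collapse into the latter, which is why the `noqa_invalid_suffix` test is dropped). A standalone sketch of that branch, without ruff's `TextRange` bookkeeping or case-insensitive matching:

#[derive(Debug, PartialEq, Eq)]
enum Directive<'a> {
    All,
    Codes(Vec<&'a str>),
}

fn try_extract(comment: &str) -> Option<Directive<'_>> {
    // Locate the `noqa` literal (case-insensitive in ruff; simplified here).
    let start = comment.find("noqa")?;
    let rest = &comment[start + "noqa".len()..];

    // If the next character is `:`, it's a list of codes. Otherwise, it's a
    // directive to ignore all rules.
    if let Some(codes) = rest.strip_prefix(':') {
        let codes: Vec<&str> = codes
            .split(',')
            .map(str::trim)
            .filter(|code| !code.is_empty())
            .collect();
        Some(Directive::Codes(codes))
    } else {
        Some(Directive::All)
    }
}

fn main() {
    assert_eq!(try_extract("# noqa"), Some(Directive::All));
    assert_eq!(
        try_extract("# noqa: F401, F841"),
        Some(Directive::Codes(vec!["F401", "F841"]))
    );
    assert_eq!(try_extract("# just a comment"), None);
}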

View File

@@ -1,5 +1,4 @@
use colored::Colorize;
use log::warn;
use anyhow::Result;
use pyproject_toml::{BuildSystem, Project};
use ruff_text_size::{TextRange, TextSize};
use serde::{Deserialize, Serialize};
@@ -23,38 +22,34 @@ struct PyProjectToml {
project: Option<Project>,
}
pub fn lint_pyproject_toml(source_file: SourceFile, settings: &Settings) -> Vec<Message> {
let Some(err) = toml::from_str::<PyProjectToml>(source_file.source_text()).err() else {
return Vec::default();
pub fn lint_pyproject_toml(source_file: SourceFile, settings: &Settings) -> Result<Vec<Message>> {
let mut messages = vec![];
let err = match toml::from_str::<PyProjectToml>(source_file.source_text()) {
Ok(_) => return Ok(messages),
Err(err) => err,
};
let mut messages = Vec::new();
let range = match err.span() {
// This is bad but sometimes toml and/or serde just don't give us spans
// TODO(konstin,micha): https://github.com/astral-sh/ruff/issues/4571
None => TextRange::default(),
Some(range) => {
let Ok(end) = TextSize::try_from(range.end) else {
let message = format!(
"{} is larger than 4GB, but ruff assumes all files to be smaller",
source_file.name(),
);
if settings.rules.enabled(Rule::IOError) {
let diagnostic = Diagnostic::new(IOError { message }, TextRange::default());
let diagnostic = Diagnostic::new(
IOError {
message: "pyproject.toml is larger than 4GB".to_string(),
},
TextRange::default(),
);
messages.push(Message::from_diagnostic(
diagnostic,
source_file,
TextSize::default(),
));
} else {
warn!(
"{}{}{} {message}",
"Failed to lint ".bold(),
source_file.name().bold(),
":".bold()
);
}
return messages;
return Ok(messages);
};
TextRange::new(
// start <= end, so if end < 4GB follows start < 4GB
@@ -74,5 +69,5 @@ pub fn lint_pyproject_toml(source_file: SourceFile, settings: &Settings) -> Vec<
));
}
messages
Ok(messages)
}
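
The reshaped `lint_pyproject_toml` above parses the file and, on failure, turns the parser's optional byte span into a diagnostic range; the branch that reports files whose span would overflow ruff's 4 GB `TextSize` is omitted here. A reduced sketch of the same flow using plain strings for messages; it assumes a recent `toml` crate where `de::Error::span()` is available and `serde` with the derive feature:

use serde::Deserialize;

#[derive(Deserialize)]
struct PyProjectToml {
    #[allow(dead_code)]
    project: Option<toml::Value>,
}

fn lint_pyproject_toml(source: &str) -> Vec<String> {
    let err = match toml::from_str::<PyProjectToml>(source) {
        Ok(_) => return Vec::new(),
        Err(err) => err,
    };

    // toml/serde don't always report a span; fall back to an empty range.
    let range = err.span().unwrap_or(0..0);
    vec![format!(
        "invalid pyproject.toml at bytes {}..{}: {err}",
        range.start, range.end
    )]
}

fn main() {
    println!("{:?}", lint_pyproject_toml("[project\nname = \"demo\""));
}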

View File

@@ -238,16 +238,23 @@ impl Rule {
match self {
Rule::InvalidPyprojectToml => LintSource::PyprojectToml,
Rule::UnusedNOQA => LintSource::Noqa,
Rule::BidirectionalUnicode
| Rule::BlankLineWithWhitespace
Rule::BlanketNOQA
| Rule::BlanketTypeIgnore
| Rule::DocLineTooLong
| Rule::LineTooLong
| Rule::MissingCopyrightNotice
| Rule::MissingNewlineAtEndOfFile
| Rule::MixedSpacesAndTabs
| Rule::TabIndentation
| Rule::MissingNewlineAtEndOfFile
| Rule::UTF8EncodingDeclaration
| Rule::ShebangMissingExecutableFile
| Rule::ShebangNotExecutable
| Rule::ShebangNotFirstLine
| Rule::BidirectionalUnicode
| Rule::ShebangMissingPython
| Rule::ShebangLeadingWhitespace
| Rule::TrailingWhitespace
| Rule::UTF8EncodingDeclaration => LintSource::PhysicalLines,
| Rule::TabIndentation
| Rule::MissingCopyrightNotice
| Rule::BlankLineWithWhitespace => LintSource::PhysicalLines,
Rule::AmbiguousUnicodeCharacterComment
| Rule::AmbiguousUnicodeCharacterDocstring
| Rule::AmbiguousUnicodeCharacterString
@@ -255,42 +262,34 @@ impl Rule {
| Rule::BadQuotesDocstring
| Rule::BadQuotesInlineString
| Rule::BadQuotesMultilineString
| Rule::BlanketNOQA
| Rule::BlanketTypeIgnore
| Rule::CommentedOutCode
| Rule::ExplicitStringConcatenation
| Rule::ExtraneousParentheses
| Rule::MultiLineImplicitStringConcatenation
| Rule::InvalidCharacterBackspace
| Rule::InvalidCharacterSub
| Rule::InvalidCharacterEsc
| Rule::InvalidCharacterNul
| Rule::InvalidCharacterSub
| Rule::InvalidCharacterZeroWidthSpace
| Rule::ExtraneousParentheses
| Rule::InvalidEscapeSequence
| Rule::InvalidTodoCapitalization
| Rule::SingleLineImplicitStringConcatenation
| Rule::MissingTrailingComma
| Rule::TrailingCommaOnBareTuple
| Rule::MultipleStatementsOnOneLineColon
| Rule::UselessSemicolon
| Rule::MultipleStatementsOnOneLineSemicolon
| Rule::ProhibitedTrailingComma
| Rule::TypeCommentInStub
| Rule::InvalidTodoTag
| Rule::MissingTodoAuthor
| Rule::MissingTodoLink
| Rule::MissingTodoColon
| Rule::MissingTodoDescription
| Rule::InvalidTodoCapitalization
| Rule::MissingSpaceAfterTodoColon
| Rule::LineContainsFixme
| Rule::LineContainsHack
| Rule::LineContainsTodo
| Rule::LineContainsXxx
| Rule::MissingSpaceAfterTodoColon
| Rule::MissingTodoAuthor
| Rule::MissingTodoColon
| Rule::MissingTodoDescription
| Rule::MissingTodoLink
| Rule::MissingTrailingComma
| Rule::MultiLineImplicitStringConcatenation
| Rule::MultipleStatementsOnOneLineColon
| Rule::MultipleStatementsOnOneLineSemicolon
| Rule::ProhibitedTrailingComma
| Rule::ShebangLeadingWhitespace
| Rule::ShebangMissingExecutableFile
| Rule::ShebangMissingPython
| Rule::ShebangNotExecutable
| Rule::ShebangNotFirstLine
| Rule::SingleLineImplicitStringConcatenation
| Rule::TrailingCommaOnBareTuple
| Rule::TypeCommentInStub
| Rule::UselessSemicolon => LintSource::Tokens,
| Rule::LineContainsXxx => LintSource::Tokens,
Rule::IOError => LintSource::Io,
Rule::UnsortedImports | Rule::MissingRequiredImport => LintSource::Imports,
Rule::ImplicitNamespacePackage | Rule::InvalidModuleName => LintSource::Filesystem,
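
The reordered `lint_source` arms above are a categorization: every rule names the single pass that can emit it, and the driver only runs a pass when at least one of its rules is enabled. A compact stand-in sketch of that dispatch (the enums are trimmed to a handful of variants and are not ruff's real types):

#[derive(Clone, Copy, PartialEq, Eq)]
enum LintSource {
    PhysicalLines,
    Tokens,
    Filesystem,
}

#[allow(dead_code)]
#[derive(Clone, Copy)]
enum Rule {
    LineTooLong,
    ShebangMissingPython,
    CommentedOutCode,
    InvalidModuleName,
}

impl Rule {
    fn lint_source(self) -> LintSource {
        match self {
            Rule::LineTooLong | Rule::ShebangMissingPython => LintSource::PhysicalLines,
            Rule::CommentedOutCode => LintSource::Tokens,
            Rule::InvalidModuleName => LintSource::Filesystem,
        }
    }
}

fn main() {
    let enabled = [Rule::LineTooLong, Rule::CommentedOutCode];
    // Only run a pass if at least one of its rules is enabled.
    let run_physical_lines = enabled
        .iter()
        .any(|rule| rule.lint_source() == LintSource::PhysicalLines);
    println!("run physical-lines pass: {run_physical_lines}");
}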

View File

@@ -245,7 +245,6 @@ impl Renamer {
| BindingKind::NamedExprAssignment
| BindingKind::UnpackedAssignment
| BindingKind::Assignment
| BindingKind::BoundException
| BindingKind::LoopVar
| BindingKind::Global
| BindingKind::Nonlocal(_)

View File

@@ -262,7 +262,6 @@ pub fn python_files_in_path(
builder.add(path);
}
builder.standard_filters(pyproject_config.settings.lib.respect_gitignore);
builder.require_git(false);
builder.hidden(false);
let walker = builder.build_parallel();
@@ -331,12 +330,9 @@ pub fn python_files_in_path(
}
if result.as_ref().map_or(true, |entry| {
// Ignore directories
if entry.file_type().map_or(true, |ft| ft.is_dir()) {
false
} else if entry.depth() == 0 {
if entry.depth() == 0 {
// Accept all files that are passed-in directly.
true
entry.file_type().map_or(false, |ft| ft.is_file())
} else {
// Otherwise, check if the file is included.
let path = entry.path();
@@ -431,22 +427,18 @@ fn is_file_excluded(
#[cfg(test)]
mod tests {
use std::fs::{create_dir, File};
use std::path::Path;
use anyhow::Result;
use globset::GlobSet;
use itertools::Itertools;
use path_absolutize::Absolutize;
use tempfile::TempDir;
use crate::resolver::{
is_file_excluded, match_exclusion, python_files_in_path, resolve_settings_with_processor,
NoOpProcessor, PyprojectConfig, PyprojectDiscoveryStrategy, Relativity, Resolver,
is_file_excluded, match_exclusion, resolve_settings_with_processor, NoOpProcessor,
PyprojectConfig, PyprojectDiscoveryStrategy, Relativity, Resolver,
};
use crate::settings::pyproject::find_settings_toml;
use crate::settings::types::FilePattern;
use crate::settings::AllSettings;
use crate::test::test_resource_path;
fn make_exclusion(file_pattern: FilePattern) -> GlobSet {
@@ -610,43 +602,4 @@ mod tests {
));
Ok(())
}
#[test]
fn find_python_files() -> Result<()> {
// Initialize the filesystem:
// root
// ├── file1.py
// ├── dir1.py
// │ └── file2.py
// └── dir2.py
let tmp_dir = TempDir::new()?;
let root = tmp_dir.path();
let file1 = root.join("file1.py");
let dir1 = root.join("dir1.py");
let file2 = dir1.join("file2.py");
let dir2 = root.join("dir2.py");
File::create(&file1)?;
create_dir(dir1)?;
File::create(&file2)?;
create_dir(dir2)?;
let (paths, _) = python_files_in_path(
&[root.to_path_buf()],
&PyprojectConfig::new(
PyprojectDiscoveryStrategy::Fixed,
AllSettings::default(),
None,
),
&NoOpProcessor,
)?;
let paths = paths
.iter()
.flatten()
.map(ignore::DirEntry::path)
.sorted()
.collect::<Vec<_>>();
assert_eq!(paths, &[file2, file1]);
Ok(())
}
}
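
`python_files_in_path` above drives the `ignore` crate's `WalkBuilder`: gitignore-style filtering is toggled through `standard_filters`, hidden files are kept, and directory entries are filtered out of the results. A stripped-down, serial sketch of that configuration (ruff uses `build_parallel()` plus its own include/exclude rules on top):

use ignore::WalkBuilder;
use std::path::PathBuf;

fn python_files(root: &str, respect_gitignore: bool) -> Vec<PathBuf> {
    let mut builder = WalkBuilder::new(root);
    // Gitignore-style filtering is a single switch; hidden files stay visible.
    builder.standard_filters(respect_gitignore);
    builder.hidden(false);

    builder
        .build()
        .filter_map(Result::ok)
        // Directories are traversed but not emitted as results.
        .filter(|entry| entry.file_type().map_or(false, |ft| ft.is_file()))
        .map(|entry| entry.path().to_path_buf())
        .filter(|path| path.extension().map_or(false, |ext| ext == "py"))
        .collect()
}

fn main() {
    for path in python_files(".", true) {
        println!("{}", path.display());
    }
}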

Some files were not shown because too many files have changed in this diff