Compare commits

2 commits: v0.0.264...charlie/ca

| Author | SHA1 | Date |
|---|---|---|
|  | 2446cd49fa |  |
|  | 5f5e71e81d |  |
.editorconfig

@@ -12,6 +12,3 @@ indent_size = 2

[*.{rs,py}]
indent_size = 4

[*.snap]
trim_trailing_whitespace = false
27 .github/workflows/ci.yaml vendored
@@ -121,6 +121,15 @@ jobs:
      - run: cargo check
      - run: cargo fmt --all --check

  typos:
    name: "spell check"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: crate-ci/typos@master
        with:
          files: .

  ecosystem:
    name: "ecosystem"
    runs-on: ubuntu-latest

@@ -221,21 +230,3 @@ jobs:
          exit_code=${PIPESTATUS[0]}
          echo '```' >> $GITHUB_STEP_SUMMARY
          exit $exit_code

  docs:
    name: "mkdocs"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
      - name: "Install Rust toolchain"
        run: rustup show
      - uses: Swatinem/rust-cache@v2
      - name: "Install dependencies"
        run: pip install -r docs/requirements.txt
      - name: "Update README File"
        run: python scripts/transform_readme.py --target mkdocs
      - name: "Generate docs"
        run: python scripts/generate_mkdocs.py
      - name: "Build docs"
        run: mkdocs build --strict
1 .gitignore vendored
@@ -3,7 +3,6 @@
crates/ruff/resources/test/cpython
mkdocs.yml
.overrides
github_search.jsonl

###
# Rust.gitignore
.pre-commit-config.yaml

@@ -23,11 +23,6 @@ repos:
          - MD033 # no-inline-html
          - --

  - repo: https://github.com/crate-ci/typos
    rev: v1.14.8
    hooks:
      - id: typos

  - repo: local
    hooks:
      - id: cargo-fmt
BREAKING_CHANGES.md

@@ -1,40 +1,5 @@
# Breaking Changes

## 0.0.260

### Fixes are now represented as a list of edits ([#3709](https://github.com/charliermarsh/ruff/pull/3709))

Previously, Ruff represented each fix as a single edit, which prohibited Ruff from automatically
fixing violations that required multiple edits across a file. As such, Ruff now represents each
fix as a list of edits.

This primarily affects the JSON API. Ruff's JSON representation used to represent the `fix` field as
a single edit, like so:

```json
{
  "message": "Remove unused import: `sys`",
  "content": "",
  "location": {"row": 1, "column": 0},
  "end_location": {"row": 2, "column": 0}
}
```

The updated representation instead includes a list of edits:

```json
{
  "message": "Remove unused import: `sys`",
  "edits": [
    {
      "content": "",
      "location": {"row": 1, "column": 0},
      "end_location": {"row": 2, "column": 0}
    }
  ]
}
```
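For downstream consumers of the JSON API, the change is mechanical: a single inline edit becomes an `edits` array. As a minimal, hypothetical sketch of how a consumer might deserialize the new shape with serde (only the field names come from the example above; the struct names and the `main` driver are illustrative, not Ruff's own types):

```rust
use serde::Deserialize;

// Mirrors the JSON shape shown above. Field names are taken from the
// example; struct names are invented for this sketch.
#[derive(Debug, Deserialize)]
struct Location {
    row: u32,
    column: u32,
}

#[derive(Debug, Deserialize)]
struct Edit {
    content: String,
    location: Location,
    end_location: Location,
}

#[derive(Debug, Deserialize)]
struct Diagnostic {
    message: String,
    edits: Vec<Edit>, // previously a single inline edit
}

fn main() -> Result<(), serde_json::Error> {
    let raw = r#"{
        "message": "Remove unused import: `sys`",
        "edits": [
            {
                "content": "",
                "location": {"row": 1, "column": 0},
                "end_location": {"row": 2, "column": 0}
            }
        ]
    }"#;
    // Parse the new representation; a consumer applies each edit in turn.
    let diagnostic: Diagnostic = serde_json::from_str(raw)?;
    assert_eq!(diagnostic.edits.len(), 1);
    println!("{}: {} edit(s)", diagnostic.message, diagnostic.edits.len());
    Ok(())
}
```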
## 0.0.246

### `multiple-statements-on-one-line-def` (`E704`) was removed ([#2773](https://github.com/charliermarsh/ruff/pull/2773))
CONTRIBUTING.md

@@ -116,7 +116,8 @@ At a high level, the steps involved in adding a new lint rule are as follows:

To define the violation, start by creating a dedicated file for your rule under the appropriate
rule linter (e.g., `crates/ruff/src/rules/flake8_bugbear/rules/abstract_base_class.rs`). That file should
contain a struct defined via `#[violation]`, along with a function that creates the violation
based on any required inputs.
based on any required inputs. (Many of the existing examples live in `crates/ruff/src/violations.rs`,
but we're looking to place new rules in their own files.)

To trigger the violation, you'll likely want to augment the logic in `crates/ruff/src/checkers/ast.rs`,
which defines the Python AST visitor, responsible for iterating over the abstract syntax tree and
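For illustration, a hypothetical rule file following the convention above might look like the sketch below. The rule, struct name, and message are invented, and the exact macro and trait signatures may differ from the crate's current API; treat this as the general shape of a `#[violation]` definition rather than Ruff's actual source:

```rust
use ruff_diagnostics::Violation;
use ruff_macros::{derive_message_formats, violation};

// Hypothetical rule: flags an abstract base class that defines no
// abstract methods. The struct carries whatever inputs the message needs.
#[violation]
pub struct AbstractBaseClassWithoutAbstractMethod {
    pub name: String,
}

impl Violation for AbstractBaseClassWithoutAbstractMethod {
    // Formats the user-facing diagnostic message from the rule's inputs.
    #[derive_message_formats]
    fn message(&self) -> String {
        let AbstractBaseClassWithoutAbstractMethod { name } = self;
        format!("`{name}` is an abstract base class, but it has no abstract methods")
    }
}
```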
@@ -214,20 +215,6 @@ them to [PyPI](https://pypi.org/project/ruff/).

Ruff follows the [semver](https://semver.org/) versioning standard. However, as pre-1.0 software,
even patch releases may contain [non-backwards-compatible changes](https://semver.org/#spec-item-4).

## Ecosystem CI

GitHub Actions will run your changes against a number of real-world projects from GitHub and
report on any diagnostic differences. You can also run those checks locally via:

```shell
python scripts/check_ecosystem.py path/to/your/ruff path/to/older/ruff
```

You can also run the Ecosystem CI check in a Docker container across a larger set of projects by
downloading the [`known-github-tomls.jsonl`](https://github.com/akx/ruff-usage-aggregate/blob/master/data/known-github-tomls.jsonl)
as `github_search.jsonl` and following the instructions in [scripts/Dockerfile.ecosystem](https://github.com/charliermarsh/ruff/blob/main/scripts/Dockerfile.ecosystem).
Note that this check will take a while to run.

## Benchmarks

First, clone [CPython](https://github.com/python/cpython). It's a large and diverse Python codebase,
932 Cargo.lock generated
File diff suppressed because it is too large.
16 Cargo.toml
@@ -3,7 +3,7 @@ members = ["crates/*"]

[workspace.package]
edition = "2021"
rust-version = "1.69"
rust-version = "1.67"
homepage = "https://beta.ruff.rs/docs/"
documentation = "https://beta.ruff.rs/docs/"
repository = "https://github.com/charliermarsh/ruff"

@@ -11,7 +11,7 @@ authors = ["Charlie Marsh <charlie.r.marsh@gmail.com>"]

[workspace.dependencies]
anyhow = { version = "1.0.69" }
bitflags = { version = "2.1.0" }
bitflags = { version = "1.3.2" }
chrono = { version = "0.4.23", default-features = false, features = ["clock"] }
clap = { version = "4.1.8", features = ["derive"] }
colored = { version = "2.0.0" }

@@ -24,25 +24,25 @@ is-macro = { version = "0.2.2" }
itertools = { version = "0.10.5" }
libcst = { git = "https://github.com/charliermarsh/LibCST", rev = "80e4c1399f95e5beb532fdd1e209ad2dbb470438" }
log = { version = "0.4.17" }
nohash-hasher = { version = "0.2.0" }
once_cell = { version = "1.17.1" }
path-absolutize = { version = "3.0.14" }
proc-macro2 = { version = "1.0.51" }
quote = { version = "1.0.23" }
regex = { version = "1.7.1" }
ruff_text_size = { git = "https://github.com/charliermarsh/RustPython.git", rev = "c3147d2c1524ebd0e90cf1c2938d770314fd5a5a" }
rustc-hash = { version = "1.1.0" }
rustpython-common = { git = "https://github.com/charliermarsh/RustPython.git", rev = "c3147d2c1524ebd0e90cf1c2938d770314fd5a5a" }
rustpython-parser = { git = "https://github.com/charliermarsh/RustPython.git", rev = "c3147d2c1524ebd0e90cf1c2938d770314fd5a5a" }
rustpython-common = { git = "https://github.com/RustPython/RustPython.git", rev = "c15f670f2c30cfae6b41a1874893590148c74bc4" }
rustpython-parser = { features = [
    "lalrpop",
    "serde",
], git = "https://github.com/RustPython/RustPython.git", rev = "c15f670f2c30cfae6b41a1874893590148c74bc4" }
schemars = { version = "0.8.12" }
serde = { version = "1.0.152", features = ["derive"] }
serde_json = { version = "1.0.93", features = ["preserve_order"] }
shellexpand = { version = "3.0.0" }
similar = { version = "2.2.1" }
smallvec = { version = "1.10.0" }
strum = { version = "0.24.1", features = ["strum_macros"] }
strum_macros = { version = "0.24.3" }
syn = { version = "2.0.15" }
syn = { version = "1.0.109" }
test-case = { version = "3.0.0" }
textwrap = { version = "0.16.0" }
toml = { version = "0.7.2" }
118 README.md
@@ -47,16 +47,16 @@ all while executing tens or hundreds of times faster than any individual tool.

Ruff is extremely actively developed and used in major open-source projects like:

- [Apache Airflow](https://github.com/apache/airflow)
- [pandas](https://github.com/pandas-dev/pandas)
- [FastAPI](https://github.com/tiangolo/fastapi)
- [Hugging Face](https://github.com/huggingface/transformers)
- [Pandas](https://github.com/pandas-dev/pandas)
- [Transformers (Hugging Face)](https://github.com/huggingface/transformers)
- [Apache Airflow](https://github.com/apache/airflow)
- [SciPy](https://github.com/scipy/scipy)

...and many more.

Ruff is backed by [Astral](https://astral.sh). Read the [launch post](https://astral.sh/blog/announcing-astral-the-company-behind-ruff),
or the original [project announcement](https://notes.crmarsh.com/python-tooling-could-be-much-much-faster).
Read the [launch blog post](https://notes.crmarsh.com/python-tooling-could-be-much-much-faster) or
the most recent [project update](https://notes.crmarsh.com/ruff-the-first-200-releases).

## Testimonials

@@ -137,7 +137,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com) hook:

```yaml
- repo: https://github.com/charliermarsh/ruff-pre-commit
  # Ruff version.
  rev: 'v0.0.264'
  rev: 'v0.0.260'
  hooks:
    - id: ruff
```

@@ -145,20 +145,6 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com) hook:

Ruff can also be used as a [VS Code extension](https://github.com/charliermarsh/ruff-vscode) or
alongside any other editor through the [Ruff LSP](https://github.com/charliermarsh/ruff-lsp).

Ruff can also be used as a [GitHub Action](https://github.com/features/actions) via
[`ruff-action`](https://github.com/chartboost/ruff-action):

```yaml
name: Ruff
on: [ push, pull_request ]
jobs:
  ruff:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: chartboost/ruff-action@v1
```

### Configuration

Ruff can be configured through a `pyproject.toml`, `ruff.toml`, or `.ruff.toml` file (see:

@@ -332,68 +318,52 @@ Ruff is released under the MIT license.

## Who's Using Ruff?

Ruff is used by a number of major open-source projects and companies, including:
Ruff is used in a number of major open-source projects, including:

- Amazon ([AWS SAM](https://github.com/aws/serverless-application-model))
- [Apache Airflow](https://github.com/apache/airflow)
- AstraZeneca ([Magnus](https://github.com/AstraZeneca/magnus-core))
- Benchling ([Refac](https://github.com/benchling/refac))
- [Babel](https://github.com/python-babel/babel)
- [Bokeh](https://github.com/bokeh/bokeh)
- [Cryptography (PyCA)](https://github.com/pyca/cryptography)
- [Dagger](https://github.com/dagger/dagger)
- [Dagster](https://github.com/dagster-io/dagster)
- [DVC](https://github.com/iterative/dvc)
- [pandas](https://github.com/pandas-dev/pandas)
- [FastAPI](https://github.com/tiangolo/fastapi)
- [Gradio](https://github.com/gradio-app/gradio)
- [Great Expectations](https://github.com/great-expectations/great_expectations)
- Hugging Face ([Transformers](https://github.com/huggingface/transformers), [Datasets](https://github.com/huggingface/datasets), [Diffusers](https://github.com/huggingface/diffusers))
- [Hatch](https://github.com/pypa/hatch)
- [Home Assistant](https://github.com/home-assistant/core)
- [Ibis](https://github.com/ibis-project/ibis)
- [Jupyter](https://github.com/jupyter-server/jupyter_server)
- [LangChain](https://github.com/hwchase17/langchain)
- [LlamaIndex](https://github.com/jerryjliu/llama_index)
- Matrix ([Synapse](https://github.com/matrix-org/synapse))
- Meltano ([Meltano CLI](https://github.com/meltano/meltano), [Singer SDK](https://github.com/meltano/sdk))
- Modern Treasury ([Python SDK](https://github.com/Modern-Treasury/modern-treasury-python-sdk))
- Mozilla ([Firefox](https://github.com/mozilla/gecko-dev))
- [MegaLinter](https://github.com/oxsecurity/megalinter)
- Microsoft ([Semantic Kernel](https://github.com/microsoft/semantic-kernel), [ONNX Runtime](https://github.com/microsoft/onnxruntime))
- Netflix ([Dispatch](https://github.com/Netflix/dispatch))
- [Neon](https://github.com/neondatabase/neon)
- [ONNX](https://github.com/onnx/onnx)
- [OpenBB](https://github.com/OpenBB-finance/OpenBBTerminal)
- [PDM](https://github.com/pdm-project/pdm)
- [PaddlePaddle](https://github.com/PaddlePaddle/Paddle)
- [Pandas](https://github.com/pandas-dev/pandas)
- [Poetry](https://github.com/python-poetry/poetry)
- [Polars](https://github.com/pola-rs/polars)
- [PostHog](https://github.com/PostHog/posthog)
- Prefect ([Python SDK](https://github.com/PrefectHQ/prefect), [Marvin](https://github.com/PrefectHQ/marvin))
- [Pydantic](https://github.com/pydantic/pydantic)
- [PyInstaller](https://github.com/pyinstaller/pyinstaller)
- [Pylint](https://github.com/PyCQA/pylint)
- [Pynecone](https://github.com/pynecone-io/pynecone)
- [Robyn](https://github.com/sansyrox/robyn)
- Scale AI ([Launch SDK](https://github.com/scaleapi/launch-python-client))
- Snowflake ([SnowCLI](https://github.com/Snowflake-Labs/snowcli))
- [Saleor](https://github.com/saleor/saleor)
- [Transformers (Hugging Face)](https://github.com/huggingface/transformers)
- [Diffusers (Hugging Face)](https://github.com/huggingface/diffusers)
- [Apache Airflow](https://github.com/apache/airflow)
- [SciPy](https://github.com/scipy/scipy)
- [Sphinx](https://github.com/sphinx-doc/sphinx)
- [Stable Baselines3](https://github.com/DLR-RM/stable-baselines3)
- [Starlite](https://github.com/starlite-api/starlite)
- [The Algorithms](https://github.com/TheAlgorithms/Python)
- [Vega-Altair](https://github.com/altair-viz/altair)
- WordPress ([Openverse](https://github.com/WordPress/openverse))
- [ZenML](https://github.com/zenml-io/zenml)
- [Zulip](https://github.com/zulip/zulip)
- [build (PyPA)](https://github.com/pypa/build)
- [Bokeh](https://github.com/bokeh/bokeh)
- [Pydantic](https://github.com/pydantic/pydantic)
- [PostHog](https://github.com/PostHog/posthog)
- [Dagster](https://github.com/dagster-io/dagster)
- [Dagger](https://github.com/dagger/dagger)
- [Sphinx](https://github.com/sphinx-doc/sphinx)
- [Hatch](https://github.com/pypa/hatch)
- [PDM](https://github.com/pdm-project/pdm)
- [Jupyter](https://github.com/jupyter-server/jupyter_server)
- [Great Expectations](https://github.com/great-expectations/great_expectations)
- [ONNX](https://github.com/onnx/onnx)
- [Polars](https://github.com/pola-rs/polars)
- [Ibis](https://github.com/ibis-project/ibis)
- [Synapse (Matrix)](https://github.com/matrix-org/synapse)
- [SnowCLI (Snowflake)](https://github.com/Snowflake-Labs/snowcli)
- [Dispatch (Netflix)](https://github.com/Netflix/dispatch)
- [Saleor](https://github.com/saleor/saleor)
- [Pynecone](https://github.com/pynecone-io/pynecone)
- [OpenBB](https://github.com/OpenBB-finance/OpenBBTerminal)
- [Home Assistant](https://github.com/home-assistant/core)
- [Pylint](https://github.com/PyCQA/pylint)
- [Cryptography (PyCA)](https://github.com/pyca/cryptography)
- [cibuildwheel (PyPA)](https://github.com/pypa/cibuildwheel)
- [delta-rs](https://github.com/delta-io/delta-rs)
- [build (PyPA)](https://github.com/pypa/build)
- [Babel](https://github.com/python-babel/babel)
- [featuretools](https://github.com/alteryx/featuretools)
- [meson-python](https://github.com/mesonbuild/meson-python)
- [ZenML](https://github.com/zenml-io/zenml)
- [delta-rs](https://github.com/delta-io/delta-rs)
- [Starlite](https://github.com/starlite-api/starlite)
- [telemetry-airflow (Mozilla)](https://github.com/mozilla/telemetry-airflow)
- [Stable Baselines3](https://github.com/DLR-RM/stable-baselines3)
- [PaddlePaddle](https://github.com/PaddlePaddle/Paddle)
- [nox](https://github.com/wntrblm/nox)
- [Neon](https://github.com/neondatabase/neon)
- [The Algorithms](https://github.com/TheAlgorithms/Python)
- [Openverse](https://github.com/WordPress/openverse)

## License
_typos.toml

@@ -5,6 +5,3 @@ extend-exclude = ["snapshots", "black"]
trivias = "trivias"
hel = "hel"
whos = "whos"
spawnve = "spawnve"
ned = "ned"
poit = "poit"
crates/flake8_to_ruff/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "flake8-to-ruff"
version = "0.0.264"
version = "0.0.260"
edition = { workspace = true }
rust-version = { workspace = true }
crates/ruff/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "ruff"
version = "0.0.264"
version = "0.0.260"
authors.workspace = true
edition.workspace = true
rust-version.workspace = true

@@ -17,19 +17,16 @@ name = "ruff"
ruff_cache = { path = "../ruff_cache" }
ruff_diagnostics = { path = "../ruff_diagnostics", features = ["serde"] }
ruff_macros = { path = "../ruff_macros" }
ruff_python_ast = { path = "../ruff_python_ast", features = ["serde"] }
ruff_python_semantic = { path = "../ruff_python_semantic" }
ruff_python_ast = { path = "../ruff_python_ast" }
ruff_python_stdlib = { path = "../ruff_python_stdlib" }
ruff_rustpython = { path = "../ruff_rustpython" }
ruff_text_size = { workspace = true }

annotate-snippets = { version = "0.9.1", features = ["color"] }
anyhow = { workspace = true }
bitflags = { workspace = true }
chrono = { workspace = true }
clap = { workspace = true, features = ["derive", "string"], optional = true }
colored = { workspace = true }
dirs = { version = "5.0.0" }
dirs = { version = "4.0.0" }
fern = { version = "0.6.1" }
glob = { workspace = true }
globset = { workspace = true }

@@ -40,7 +37,7 @@ itertools = { workspace = true }
libcst = { workspace = true }
log = { workspace = true }
natord = { version = "1.0.9" }
nohash-hasher = { workspace = true }
nohash-hasher = { version = "0.2.0" }
num-bigint = { version = "0.4.3" }
num-traits = { version = "0.2.15" }
once_cell = { workspace = true }

@@ -50,7 +47,6 @@ path-absolutize = { workspace = true, features = [
] }
pathdiff = { version = "0.2.1" }
pep440_rs = { version = "0.3.1", features = ["serde"] }
quick-junit = { version = "0.3.2" }
regex = { workspace = true }
result-like = { version = "0.4.6" }
rustc-hash = { workspace = true }

@@ -60,9 +56,8 @@ schemars = { workspace = true }
semver = { version = "1.0.16" }
serde = { workspace = true }
serde_json = { workspace = true }
similar = { workspace = true, features = ["inline"] }
shellexpand = { workspace = true }
smallvec = { workspace = true }
smallvec = { version = "1.10.0" }
strum = { workspace = true }
strum_macros = { workspace = true }
textwrap = { workspace = true }

@@ -72,11 +67,9 @@ typed-arena = { version = "2.0.2" }
unicode-width = { version = "0.1.10" }

[dev-dependencies]
insta = { workspace = true }
insta = { workspace = true, features = ["yaml", "redactions"] }
pretty_assertions = "1.3.0"
test-case = { workspace = true }
# Disable colored output in tests
colored = { workspace = true, features = ["no-color"] }

[features]
default = []
@@ -9,7 +9,6 @@ def foo(x, y, z):
    print(x, y, z)

    # This is a real comment.
    # # This is a (nested) comment.
    #return True
    return False
@@ -1,13 +1,11 @@
assert True  # S101

# Error
assert True


def fn():
    x = 1
    assert x == 1  # S101
    assert x == 2  # S101

    # Error
    assert x == 1


from typing import TYPE_CHECKING

if TYPE_CHECKING:
    assert True  # OK
    # Error
    assert x == 2
@@ -1,20 +0,0 @@
from subprocess import Popen, call, check_call, check_output, run

# Check different Popen wrappers are checked.
Popen("true", shell=True)
call("true", shell=True)
check_call("true", shell=True)
check_output("true", shell=True)
run("true", shell=True)

# Check values that truthy values are treated as true.
Popen("true", shell=1)
Popen("true", shell=[1])
Popen("true", shell={1: 1})
Popen("true", shell=(1,))

# Check command argument looks unsafe.
var_string = "true"
Popen(var_string, shell=True)
Popen([var_string], shell=True)
Popen([var_string, ""], shell=True)
@@ -1,20 +0,0 @@
from subprocess import Popen, call, check_call, check_output, run

# Different Popen wrappers are checked.
Popen("true", shell=False)
call("true", shell=False)
check_call("true", shell=False)
check_output("true", shell=False)
run("true", shell=False)

# Values that falsey values are treated as false.
Popen("true", shell=0)
Popen("true", shell=[])
Popen("true", shell={})
Popen("true", shell=None)

# Unknown values are treated as falsey.
Popen("true", shell=True if True else False)

# No value is also caught.
Popen("true")
@@ -1,5 +0,0 @@
def foo(shell):
    pass


foo(shell=True)
@@ -1,25 +0,0 @@
import os

import commands
import popen2

# Check all shell functions.
os.system("true")
os.popen("true")
os.popen2("true")
os.popen3("true")
os.popen4("true")
popen2.popen2("true")
popen2.popen3("true")
popen2.popen4("true")
popen2.Popen3("true")
popen2.Popen4("true")
commands.getoutput("true")
commands.getstatusoutput("true")


# Check command argument looks unsafe.
var_string = "true"
os.system(var_string)
os.system([var_string])
os.system([var_string, ""])
@@ -1,20 +0,0 @@
import os

# Check all shell functions.
os.execl("true")
os.execle("true")
os.execlp("true")
os.execlpe("true")
os.execv("true")
os.execve("true")
os.execvp("true")
os.execvpe("true")
os.spawnl("true")
os.spawnle("true")
os.spawnlp("true")
os.spawnlpe("true")
os.spawnv("true")
os.spawnve("true")
os.spawnvp("true")
os.spawnvpe("true")
os.startfile("true")
@@ -1,44 +0,0 @@
import os

# Check all functions.
subprocess.Popen("true")
subprocess.call("true")
subprocess.check_call("true")
subprocess.check_output("true")
subprocess.run("true")
os.system("true")
os.popen("true")
os.popen2("true")
os.popen3("true")
os.popen4("true")
popen2.popen2("true")
popen2.popen3("true")
popen2.popen4("true")
popen2.Popen3("true")
popen2.Popen4("true")
commands.getoutput("true")
commands.getstatusoutput("true")
os.execl("true")
os.execle("true")
os.execlp("true")
os.execlpe("true")
os.execv("true")
os.execve("true")
os.execvp("true")
os.execvpe("true")
os.spawnl("true")
os.spawnle("true")
os.spawnlp("true")
os.spawnlpe("true")
os.spawnv("true")
os.spawnve("true")
os.spawnvp("true")
os.spawnvpe("true")
os.startfile("true")

# Check it does not fail for full paths.
os.system("/bin/ls")
os.system("./bin/ls")
os.system(["/bin/ls"])
os.system(["/bin/ls", "/tmp"])
os.system(r"C:\\bin\ls")
@@ -1,10 +1,9 @@
"""
Should emit:
B017 - on lines 23 and 41
B017 - on lines 20
"""
import asyncio
import unittest
import pytest

CONSTANT = True
@@ -35,14 +34,3 @@ class Foobar(unittest.TestCase):
    def raises_with_absolute_reference(self):
        with self.assertRaises(asyncio.CancelledError):
            Foo()


def test_pytest_raises():
    with pytest.raises(Exception):
        raise ValueError("Hello")

    with pytest.raises(Exception, "hello"):
        raise ValueError("This is fine")

    with pytest.raises(Exception, match="hello"):
        raise ValueError("This is also fine")
@@ -172,14 +172,3 @@ def iter_f(names):

    if False:
        return [lambda: i for i in range(3)]  # error


for val in range(3):
    def make_func(val=val):
        def tmp():
            return print(val)

        return tmp

    funcs.append(make_func())
@@ -28,11 +28,6 @@ except (ValueError, *(RuntimeError, (KeyError, TypeError))):  # error
    pass


try:
    pass
except (*a, *(RuntimeError, (KeyError, TypeError))):  # error
    pass

try:
    pass
except (ValueError, *(RuntimeError, TypeError)):  # ok

@@ -43,36 +38,10 @@ try:
except (ValueError, *[RuntimeError, *(TypeError,)]):  # ok
    pass


try:
    pass
except (*a, *b):  # ok
    pass


try:
    pass
except (*a, *(RuntimeError, TypeError)):  # ok
    pass


try:
    pass
except (*a, *(b, c)):  # ok
    pass


try:
    pass
except (*a, *(*b, *c)):  # ok
    pass


def what_to_catch():
    return ...


try:
    pass
except what_to_catch():  # ok
    pass
    pass
@@ -78,87 +78,12 @@ for _section, section_items in itertools.groupby(items, key=lambda p: p[1]):
    for shopper in shoppers:
        collect_shop_items(shopper, section_items)  # B031

for _section, section_items in itertools.groupby(items, key=lambda p: p[1]):
    _ = [collect_shop_items(shopper, section_items) for shopper in shoppers]  # B031

for _section, section_items in itertools.groupby(items, key=lambda p: p[1]):
    # The variable is overridden, skip checking.
    _ = [_ for section_items in range(3)]
    _ = [collect_shop_items(shopper, section_items) for shopper in shoppers]

for _section, section_items in itertools.groupby(items, key=lambda p: p[1]):
    _ = [item for item in section_items]

for _section, section_items in itertools.groupby(items, key=lambda p: p[1]):
    # The iterator is being used for the second time.
    _ = [(item1, item2) for item1 in section_items for item2 in section_items]  # B031

for _section, section_items in itertools.groupby(items, key=lambda p: p[1]):
    if _section == "greens":
        collect_shop_items(shopper, section_items)
    else:
        collect_shop_items(shopper, section_items)
    collect_shop_items(shopper, section_items)  # B031

for _section, section_items in itertools.groupby(items, key=lambda p: p[1]):
    # Mutually exclusive branches shouldn't trigger the warning
    if _section == "greens":
        collect_shop_items(shopper, section_items)
        if _section == "greens":
            collect_shop_items(shopper, section_items)  # B031
        elif _section == "frozen items":
            collect_shop_items(shopper, section_items)  # B031
        else:
            collect_shop_items(shopper, section_items)  # B031
        collect_shop_items(shopper, section_items)  # B031
    elif _section == "frozen items":
        # Mix `match` and `if` statements
        match shopper:
            case "Jane":
                collect_shop_items(shopper, section_items)
                if _section == "fourth":
                    collect_shop_items(shopper, section_items)  # B031
            case _:
                collect_shop_items(shopper, section_items)
    else:
        collect_shop_items(shopper, section_items)
    # Now, it should detect
    collect_shop_items(shopper, section_items)  # B031

for _section, section_items in itertools.groupby(items, key=lambda p: p[1]):
    # Mutually exclusive branches shouldn't trigger the warning
    match _section:
        case "greens":
            collect_shop_items(shopper, section_items)
            match shopper:
                case "Jane":
                    collect_shop_items(shopper, section_items)  # B031
                case _:
                    collect_shop_items(shopper, section_items)  # B031
        case "frozen items":
            collect_shop_items(shopper, section_items)
            collect_shop_items(shopper, section_items)  # B031
        case _:
            collect_shop_items(shopper, section_items)
    # Now, it should detect
    collect_shop_items(shopper, section_items)  # B031

for group in groupby(items, key=lambda p: p[1]):
    # This is bad, but not detected currently
    collect_shop_items("Jane", group[1])
    collect_shop_items("Joe", group[1])


# https://github.com/charliermarsh/ruff/issues/4050
for _section, section_items in itertools.groupby(items, key=lambda p: p[1]):
    if _section == "greens":
        for item in section_items:
            collect_shop_items(shopper, item)
    elif _section == "frozen items":
        _ = [item for item in section_items]
    else:
        collect_shop_items(shopper, section_items)

# Make sure we ignore - but don't fail on more complicated invocations
for _key, (_value1, _value2) in groupby(
    [("a", (1, 2)), ("b", (3, 4)), ("a", (5, 6))], key=lambda p: p[1]
@@ -7,14 +7,11 @@ set(set(x))
set(list(x))
set(tuple(x))
set(sorted(x))
set(sorted(x, key=lambda y: y))
set(reversed(x))
sorted(list(x))
sorted(tuple(x))
sorted(sorted(x))
sorted(sorted(x, key=lambda y: y))
sorted(reversed(x))
sorted(list(x), key=lambda y: y)
tuple(
    list(
        [x, 3, "hell"\
@@ -1,10 +0,0 @@
dict({})
dict({'a': 1})
dict({'x': 1 for x in range(10)})
dict(
    {'x': 1 for x in range(10)}
)

dict({}, a=1)
dict({x: 1 for x in range(1)}, a=1)
@@ -21,36 +21,3 @@ def f_c():
def f_ok():
    msg = "hello"
    raise RuntimeError(msg)


def f_unfixable():
    msg = "hello"
    raise RuntimeError("This is an example exception")


def f_msg_in_nested_scope():
    def nested():
        msg = "hello"

    raise RuntimeError("This is an example exception")


def f_msg_in_parent_scope():
    msg = "hello"

    def nested():
        raise RuntimeError("This is an example exception")


def f_fix_indentation_check(foo):
    if foo:
        raise RuntimeError("This is an example exception")
    else:
        if foo == "foo":
            raise RuntimeError(f"This is an exception: {foo}")
    raise RuntimeError("This is an exception: {}".format(foo))


# Report these, but don't fix them
if foo: raise RuntimeError("This is an example exception")
if foo: x = 1; raise RuntimeError("This is an example exception")
@@ -1,16 +0,0 @@
import typing as t  # banned
import typing as ty  # banned

import numpy as nmp  # banned
import numpy as npy  # banned
import tensorflow.keras.backend as K  # banned
import torch.nn.functional as F  # banned
from tensorflow.keras import backend as K  # banned
from torch.nn import functional as F  # banned

from typing import Any  # ok

import numpy as np  # ok
import tensorflow as tf  # ok
import torch.nn as nn  # ok
from tensorflow.keras import backend  # ok
@@ -1,10 +0,0 @@
from logging.config import BaseConfigurator  # banned
from typing import Any, Dict  # banned
from typing import *  # banned

from pandas import DataFrame  # banned
from pandas import *  # banned

import logging.config  # ok
import typing  # ok
import pandas  # ok
@@ -1,5 +1,3 @@
import logging
from distutils import log

logging.warn("Hello World!")
log.warn("Hello world!")  # This shouldn't be considered as a logger candidate
@@ -1,3 +1,4 @@
# PIE802
any([x.id for x in bar])
all([x.id for x in bar])
any(  # first comment

@@ -14,6 +15,5 @@ all(x.id for x in bar)
any(x.id for x in bar)
all((x.id for x in bar))


async def f() -> bool:
    return all([await use_greeting(greeting) for greeting in await greetings()])
@@ -11,7 +11,3 @@ _T = TypeVar("_T")  # OK
_TTuple = TypeVarTuple("_TTuple")  # OK

_P = ParamSpec("_P")  # OK


def f():
    T = TypeVar("T")  # OK
@@ -11,6 +11,3 @@ _T = TypeVar("_T")  # OK
_TTuple = TypeVarTuple("_TTuple")  # OK

_P = ParamSpec("_P")  # OK

def f():
    T = TypeVar("T")  # OK
@@ -46,48 +46,3 @@ field229: dict[int, int] = {1: 2, **{3: 4}}  # Y015 Only simple default values a
field23 = "foo" + "bar"  # Y015 Only simple default values are allowed for assignments
field24 = b"foo" + b"bar"  # Y015 Only simple default values are allowed for assignments
field25 = 5 * 5  # Y015 Only simple default values are allowed for assignments

# We shouldn't emit Y015 within functions
def f():
    field26: list[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]


# We shouldn't emit Y015 for __slots__ or __match_args__
class Class1:
    __slots__ = (
        '_one',
        '_two',
        '_three',
        '_four',
        '_five',
        '_six',
        '_seven',
        '_eight',
        '_nine',
        '_ten',
        '_eleven',
    )

    __match_args__ = (
        'one',
        'two',
        'three',
        'four',
        'five',
        'six',
        'seven',
        'eight',
        'nine',
        'ten',
        'eleven',
    )

# We shouldn't emit Y015 for __all__
__all__ = ["Class1"]

# Ignore the following for PYI015
field26 = typing.Sequence[int]
field27 = list[str]
field28 = builtins.str
field29 = str
field30 = str | bytes | None
@@ -1,6 +1,6 @@
import builtins
import typing
from typing import TypeAlias, Final, NewType, TypeVar, TypeVarTuple, ParamSpec
from typing import TypeAlias, Final

# We shouldn't emit Y015 for simple default values
field1: int

@@ -26,10 +26,6 @@ field9 = None  # Y026 Use typing_extensions.TypeAlias for type aliases, e.g. "fi
Field95: TypeAlias = None
Field96: TypeAlias = int | None
Field97: TypeAlias = None | typing.SupportsInt | builtins.str | float | bool
Field98 = NewType('MyInt', int)
Field99 = TypeVar('Field99')
Field100 = TypeVarTuple('Field100')
Field101 = ParamSpec('Field101')
field19 = [1, 2, 3]  # Y052 Need type annotation for "field19"
field191: list[int] = [1, 2, 3]
field20 = (1, 2, 3)  # Y052 Need type annotation for "field20"

@@ -53,48 +49,3 @@ field229: dict[int, int] = {1: 2, **{3: 4}}  # Y015 Only simple default values a
field23 = "foo" + "bar"  # Y015 Only simple default values are allowed for assignments
field24 = b"foo" + b"bar"  # Y015 Only simple default values are allowed for assignments
field25 = 5 * 5  # Y015 Only simple default values are allowed for assignments

# We shouldn't emit Y015 within functions
def f():
    field26: list[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]


# We shouldn't emit Y015 for __slots__ or __match_args__
class Class1:
    __slots__ = (
        '_one',
        '_two',
        '_three',
        '_four',
        '_five',
        '_six',
        '_seven',
        '_eight',
        '_nine',
        '_ten',
        '_eleven',
    )

    __match_args__ = (
        'one',
        'two',
        'three',
        'four',
        'five',
        'six',
        'seven',
        'eight',
        'nine',
        'ten',
        'eleven',
    )

# We shouldn't emit Y015 for __all__
__all__ = ["Class1"]

# Ignore the following for PYI015
field26 = typing.Sequence[int]
field27 = list[str]
field28 = builtins.str
field29 = str
field30 = str | bytes | None
@@ -1,35 +0,0 @@
# Shouldn't affect non-union field types.
field1: str

# Should emit for duplicate field types.
field2: str | str  # PYI016: Duplicate union member `str`


# Should emit for union types in arguments.
def func1(arg1: int | int):  # PYI016: Duplicate union member `int`
    print(arg1)


# Should emit for unions in return types.
def func2() -> str | str:  # PYI016: Duplicate union member `str`
    return "my string"


# Should emit in longer unions, even if not directly adjacent.
field3: str | str | int  # PYI016: Duplicate union member `str`
field4: int | int | str  # PYI016: Duplicate union member `int`
field5: str | int | str  # PYI016: Duplicate union member `str`
field6: int | bool | str | int  # PYI016: Duplicate union member `int`

# Shouldn't emit for non-type unions.
field7 = str | str

# Should emit for strangely-bracketed unions.
field8: int | (str | int)  # PYI016: Duplicate union member `int`

# Should handle user brackets when fixing.
field9: int | (int | str)  # PYI016: Duplicate union member `int`
field10: (str | int) | str  # PYI016: Duplicate union member `str`

# Should emit for nested unions.
field11: dict[int | int, str]
@@ -1,32 +0,0 @@
# Shouldn't affect non-union field types.
field1: str

# Should emit for duplicate field types.
field2: str | str  # PYI016: Duplicate union member `str`

# Should emit for union types in arguments.
def func1(arg1: int | int):  # PYI016: Duplicate union member `int`
    print(arg1)

# Should emit for unions in return types.
def func2() -> str | str:  # PYI016: Duplicate union member `str`
    return "my string"

# Should emit in longer unions, even if not directly adjacent.
field3: str | str | int  # PYI016: Duplicate union member `str`
field4: int | int | str  # PYI016: Duplicate union member `int`
field5: str | int | str  # PYI016: Duplicate union member `str`
field6: int | bool | str | int  # PYI016: Duplicate union member `int`

# Shouldn't emit for non-type unions.
field7 = str | str

# Should emit for strangely-bracketed unions.
field8: int | (str | int)  # PYI016: Duplicate union member `int`

# Should handle user brackets when fixing.
field9: int | (int | str)  # PYI016: Duplicate union member `int`
field10: (str | int) | str  # PYI016: Duplicate union member `str`

# Should emit for nested unions.
field11: dict[int | int, str]
@@ -49,18 +49,3 @@ def test_list_expressions(param1, param2):
@pytest.mark.parametrize([some_expr, "param2"], [1, 2, 3])
def test_list_mixed_expr_literal(param1, param2):
    ...


@pytest.mark.parametrize(("param1, " "param2, " "param3"), [(1, 2, 3), (4, 5, 6)])
def test_implicit_str_concat_with_parens(param1, param2, param3):
    ...


@pytest.mark.parametrize("param1, " "param2, " "param3", [(1, 2, 3), (4, 5, 6)])
def test_implicit_str_concat_no_parens(param1, param2, param3):
    ...


@pytest.mark.parametrize((("param1, " "param2, " "param3")), [(1, 2, 3), (4, 5, 6)])
def test_implicit_str_concat_with_multi_parens(param1, param2, param3):
    ...
@@ -3,7 +3,7 @@
###
def x():
    a = 1
    return a  # RET504
    return a  # error


# Can be refactored false positives

@@ -211,10 +211,10 @@ def nonlocal_assignment():
def decorator() -> Flask:
    app = Flask(__name__)

    @app.route("/hello")
    @app.route('/hello')
    def hello() -> str:
        """Hello endpoint."""
        return "Hello, World!"
        return 'Hello, World!'

    return app

@@ -222,13 +222,12 @@ def decorator() -> Flask:
def default():
    y = 1

    def f(x=y) -> X:
    def f(x = y) -> X:
        return x

    return y


# Multiple assignment
def get_queryset(option_1, option_2):
    queryset: Any = None
    queryset = queryset.filter(a=1)

@@ -247,28 +246,4 @@ def get_queryset():

def get_queryset():
    queryset = Model.filter(a=1)
    return queryset  # RET504


# Function arguments
def str_to_bool(val):
    if isinstance(val, bool):
        return val
    val = val.strip().lower()
    if val in ("1", "true", "yes"):
        return True

    return False


def str_to_bool(val):
    if isinstance(val, bool):
        return val
    val = 1
    return val  # RET504


def str_to_bool(val):
    if isinstance(val, bool):
        return some_obj
    return val
    return queryset  # error
@@ -59,15 +59,3 @@ def bar():
        return foo()
    except ValueError:
        pass


def with_ellipsis():
    try:
        foo()
    except ValueError:
        ...


def with_ellipsis_and_return():
    try:
        return foo()
    except ValueError:
        ...
@@ -9,17 +9,6 @@ os.environ.get('foo', 'bar')

os.getenv('foo')

env = os.environ.get('foo')

env = os.environ['foo']

if env := os.environ.get('foo'):
    pass

if env := os.environ['foo']:
    pass


# Good
os.environ['FOO']

@@ -28,13 +17,3 @@ os.environ.get('FOO')
os.environ.get('FOO', 'bar')

os.getenv('FOO')

env = os.getenv('FOO')

if env := os.getenv('FOO'):
    pass

env = os.environ['FOO']

if env := os.environ['FOO']:
    pass
@@ -42,113 +42,3 @@ if False and f() and a and g() and b:  # OK

if a and False and f() and b and g():  # OK
    pass


a or "" or True  # SIM222

a or "foo" or True or "bar"  # SIM222

a or 0 or True  # SIM222

a or 1 or True or 2  # SIM222

a or 0.0 or True  # SIM222

a or 0.1 or True or 0.2  # SIM222

a or [] or True  # SIM222

a or list([]) or True  # SIM222

a or [1] or True or [2]  # SIM222

a or list([1]) or True or list([2])  # SIM222

a or {} or True  # SIM222

a or dict() or True  # SIM222

a or {1: 1} or True or {2: 2}  # SIM222

a or dict({1: 1}) or True or dict({2: 2})  # SIM222

a or set() or True  # SIM222

a or set(set()) or True  # SIM222

a or {1} or True or {2}  # SIM222

a or set({1}) or True or set({2})  # SIM222

a or () or True  # SIM222

a or tuple(()) or True  # SIM222

a or (1,) or True or (2,)  # SIM222

a or tuple((1,)) or True or tuple((2,))  # SIM222

a or frozenset() or True  # SIM222

a or frozenset(frozenset()) or True  # SIM222

a or frozenset({1}) or True or frozenset({2})  # SIM222

a or frozenset(frozenset({1})) or True or frozenset(frozenset({2}))  # SIM222


# Inside test `a` is simplified.

bool(a or [1] or True or [2])  # SIM222

assert a or [1] or True or [2]  # SIM222

if (a or [1] or True or [2]) and (a or [1] or True or [2]):  # SIM222
    pass

0 if a or [1] or True or [2] else 1  # SIM222

while a or [1] or True or [2]:  # SIM222
    pass

[
    0
    for a in range(10)
    for b in range(10)
    if a or [1] or True or [2]  # SIM222
    if b or [1] or True or [2]  # SIM222
]

{
    0
    for a in range(10)
    for b in range(10)
    if a or [1] or True or [2]  # SIM222
    if b or [1] or True or [2]  # SIM222
}

{
    0: 0
    for a in range(10)
    for b in range(10)
    if a or [1] or True or [2]  # SIM222
    if b or [1] or True or [2]  # SIM222
}

(
    0
    for a in range(10)
    for b in range(10)
    if a or [1] or True or [2]  # SIM222
    if b or [1] or True or [2]  # SIM222
)

# Outside test `a` is not simplified.

a or [1] or True or [2]  # SIM222

if (a or [1] or True or [2]) == (a or [1]):  # SIM222
    pass

if f(a or [1] or True or [2]):  # SIM222
    pass
@@ -37,113 +37,3 @@ if True or f() or a or g() or b:  # OK

if a or True or f() or b or g():  # OK
    pass


a and "" and False  # SIM223

a and "foo" and False and "bar"  # SIM223

a and 0 and False  # SIM223

a and 1 and False and 2  # SIM223

a and 0.0 and False  # SIM223

a and 0.1 and False and 0.2  # SIM223

a and [] and False  # SIM223

a and list([]) and False  # SIM223

a and [1] and False and [2]  # SIM223

a and list([1]) and False and list([2])  # SIM223

a and {} and False  # SIM223

a and dict() and False  # SIM223

a and {1: 1} and False and {2: 2}  # SIM223

a and dict({1: 1}) and False and dict({2: 2})  # SIM223

a and set() and False  # SIM223

a and set(set()) and False  # SIM223

a and {1} and False and {2}  # SIM223

a and set({1}) and False and set({2})  # SIM223

a and () and False  # SIM222

a and tuple(()) and False  # SIM222

a and (1,) and False and (2,)  # SIM222

a and tuple((1,)) and False and tuple((2,))  # SIM222

a and frozenset() and False  # SIM222

a and frozenset(frozenset()) and False  # SIM222

a and frozenset({1}) and False and frozenset({2})  # SIM222

a and frozenset(frozenset({1})) and False and frozenset(frozenset({2}))  # SIM222


# Inside test `a` is simplified.

bool(a and [] and False and [])  # SIM223

assert a and [] and False and []  # SIM223

if (a and [] and False and []) or (a and [] and False and []):  # SIM223
    pass

0 if a and [] and False and [] else 1  # SIM222

while a and [] and False and []:  # SIM223
    pass

[
    0
    for a in range(10)
    for b in range(10)
    if a and [] and False and []  # SIM223
    if b and [] and False and []  # SIM223
]

{
    0
    for a in range(10)
    for b in range(10)
    if a and [] and False and []  # SIM223
    if b and [] and False and []  # SIM223
}

{
    0: 0
    for a in range(10)
    for b in range(10)
    if a and [] and False and []  # SIM223
    if b and [] and False and []  # SIM223
}

(
    0
    for a in range(10)
    for b in range(10)
    if a and [] and False and []  # SIM223
    if b and [] and False and []  # SIM223
)

# Outside test `a` is not simplified.

a and [] and False and []  # SIM223

if (a and [] and False and []) == (a and []):  # SIM223
    pass

if f(a and [] and False and []):  # SIM223
    pass
@@ -1,27 +0,0 @@
# SIM910
{}.get(key, None)

# SIM910
{}.get("key", None)

# OK
{}.get(key)

# OK
{}.get("key")

# OK
{}.get(key, False)

# OK
{}.get("key", False)

# SIM910
if a := {}.get(key, None):
    pass

# SIM910
a = {}.get(key, None)

# SIM910
({}).get(key, None)
@@ -31,6 +31,3 @@ typing.TypedDict.anything()
# import aliases are resolved
import typing as totally_not_typing
totally_not_typing.TypedDict

# relative imports are respected
from .typing import TypedDict
11 crates/ruff/resources/test/fixtures/flake8_tidy_imports/TID251_false_positives.py vendored Normal file
@@ -0,0 +1,11 @@
# module members cannot be imported with that syntax
import typing.TypedDict

# we don't track reassignments
import typing, other
typing = other
typing.TypedDict()

# yet another false positive
def foo(typing):
    typing.TypedDict()
@@ -1,6 +1,3 @@
from __future__ import annotations


def f():
    # Even in strict mode, this shouldn't raise an error, since `pkg` is used at runtime,
    # and implicitly imports `pkg.bar`.
@@ -22,6 +22,3 @@ from bar import (
    a,  # comment 7
    b,  # comment 8
)

# comment 9
from baz import *  # comment 10
@@ -1,4 +0,0 @@
from mypackage.subpackage import (  # long comment that seems to be a problem
    a_long_variable_name_that_causes_problems,
    items,
)
@@ -1,3 +0,0 @@
"""Hello, world!"""

x = 1
@@ -1,7 +0,0 @@
from __future__ import annotations
import os
import sys
import pytz
import django.settings
from library import foo
from . import local
@@ -39,11 +39,3 @@ class Test(unittest.TestCase):

    def testTest(self):
        assert True


from typing import override


@override
def BAD_FUNC():
    pass
@@ -13,11 +13,3 @@ class C:
    myObj2 = namedtuple("MyObj2", ["a", "b"])
    Employee = NamedTuple('Employee', [('name', str), ('id', int)])
    Point2D = TypedDict('Point2D', {'in': int, 'x-y': int})


class D(TypedDict):
    lower: int
    CONSTANT: str
    mixedCase: bool
    _mixedCase: list
    mixed_Case: set
@@ -4,11 +4,11 @@ if not X is Y:
#: E714
if not X.B is Y:
    pass

#: Okay
#: E714
if not X is Y is not Z:
    pass

#: Okay
if not X is not Y:
    pass
@@ -13,7 +13,6 @@ f = lambda: (yield from g())
class F:
    f = lambda x: 2 * x


f = object()
f.method = lambda: "Method"
f = {}

@@ -22,30 +21,3 @@ f = []
f.append(lambda x: x**2)
f = g = lambda x: x**2
lambda: "no-op"

# Annotated
from typing import Callable, ParamSpec

P = ParamSpec("P")

# ParamSpec cannot be used in this context, so do not preserve the annotation.
f: Callable[P, int] = lambda *args: len(args)
f: Callable[[], None] = lambda: None
f: Callable[..., None] = lambda a, b: None
f: Callable[[int], int] = lambda x: 2 * x

# Let's use the `Callable` type from `collections.abc` instead.
from collections.abc import Callable

f: Callable[[str, int], str] = lambda a, b: a * b
f: Callable[[str, int], tuple[str, int]] = lambda a, b: (a, b)
f: Callable[[str, int, list[str]], list[str]] = lambda a, b, /, c: [*c, a * b]


# Override `Callable`
class Callable:
    pass


# Do not copy the annotation from here on out.
f: Callable[[str, int], str] = lambda a, b: a * b
@@ -97,10 +97,10 @@ if length > options.max_line_length:
if os.path.exists(os.path.join(path, PEP8_BIN)):
    cmd = ([os.path.join(path, PEP8_BIN)] +
           self._pep8_options(targetfile))
#: W191 - okay
#: W191
'''
multiline string with tab in it'''
#: E101 (W191 okay)
#: E101 W191
'''multiline string
with tabs
and spaces

@@ -142,10 +142,4 @@ def test_keys(self):
x = [
    'abc'
]
#: W191 - okay
''' multiline string with tab in it, same lines'''
""" here we're using '''different delimiters'''"""
'''
multiline string with tab in it, different lines
'''
" single line string with tab in it"
#:
@@ -9,9 +9,6 @@ def f():

    # Here's a standalone comment that's over the limit.

x = 2
# Another standalone that is preceded by a newline and indent token and is over the limit.

print("Here's a string that's over the limit, but it's not a docstring.")
@@ -115,20 +115,6 @@ def f(x, *args, **kwargs):
    return x


def f(x, *, y, z):
    """Do something.

    Args:
        x: some first value

    Keyword Args:
        y (int): the other value
        z (int): the last value

    """
    return x, y, z


class Test:
    def f(self, /, arg1: int) -> None:
        """
@@ -11,9 +11,3 @@
"{}".format(1, 2, 3)  # F523
"{:{}}".format(1, 2)  # No issues
"{:{}}".format(1, 2, 3)  # F523

# With *args
"{0}{1}".format(*args)  # No issues
"{0}{1}".format(1, *args)  # No issues
"{0}{1}".format(1, 2, *args)  # No issues
"{0}{1}".format(1, 2, 3, *args)  # F523
@@ -1,13 +0,0 @@
def redef(value):
    match value:
        case True:

            def fun(x, y):
                return x

        case False:

            def fun(x, y):
                return y

    return fun
@@ -132,8 +132,3 @@ def in_ipython_notebook() -> bool:
    except NameError:
        return False  # not in notebook
    return True


def named_expr():
    if any((key := (value := x)) for x in ["ok"]):
        print(key)
@@ -121,8 +121,3 @@ def f(x: int):
            print("A")
        case y:
            pass


def f():
    if any((key := (value := x)) for x in ["ok"]):
        print(key)
@@ -1,16 +1,11 @@
x = 1  # type: ignore
x = 1  # type ignore
x = 1  # type:ignore
x = 1  # type: ignore[attr-defined]  # type: ignore

x = 1
x = 1  # type ignore
x = 1  # type ignore  # noqa
x = 1  # type: ignore[attr-defined]
x = 1  # type: ignore[attr-defined, name-defined]
x = 1  # type: ignore[attr-defined]  # type: ignore[type-mismatch]
x = 1  # type: ignore[type-mismatch]  # noqa
x = 1  # type: ignore [attr-defined]
x = 1  # type: ignore [attr-defined, name-defined]
x = 1  # type: ignore [type-mismatch]  # noqa
x = 1  # type: Union[int, str]
x = 1  # type: ignoreme
@@ -1,16 +0,0 @@
x = 1  # pyright: ignore
x = 1  # pyright:ignore
x = 1  # pyright: ignore[attr-defined]  # pyright: ignore

x = 1
x = 1  # pyright ignore
x = 1  # pyright ignore # noqa
x = 1  # pyright: ignore[attr-defined]
x = 1  # pyright: ignore[attr-defined, name-defined]
x = 1  # pyright: ignore[attr-defined]  # pyright: ignore[type-mismatch]
x = 1  # pyright: ignore[type-mismatch]  # noqa
x = 1  # pyright: ignore [attr-defined]
x = 1  # pyright: ignore [attr-defined, name-defined]
x = 1  # pyright: ignore [type-mismatch]  # noqa
x = 1  # pyright: Union[int, str]
x = 1  # pyright: ignoreme
@@ -1,6 +1,3 @@
import typing
from typing import cast

# For -> for, variable reused
for i in []:
    for i in []: # error
@@ -46,9 +43,6 @@ for i in []:

# For -> assignment
for i in []:
    # ignore typing cast
    i = cast(int, i)
    i = typing.cast(int, i)
    i = 5 # error

# For -> augmented assignment
@@ -59,10 +53,6 @@ for i in []:
for i in []:
    i: int = 5 # error

# For -> annotated assignment without value
for i in []:
    i: int # no error

# Async for -> for, variable reused
async for i in []:
    for i in []: # error

@@ -1,51 +0,0 @@
class TestClass:
    def __bool__(self):
        ...

    def __bool__(self, x): # too many mandatory args
        ...

    def __bool__(self, x=1): # additional optional args OK
        ...

    def __bool__(self, *args): # varargs OK
        ...

    def __bool__(): # ignored; should be caught by E0211/N805
        ...

    @staticmethod
    def __bool__():
        ...

    @staticmethod
    def __bool__(x): # too many mandatory args
        ...

    @staticmethod
    def __bool__(x=1): # additional optional args OK
        ...

    def __eq__(self, other): # multiple args
        ...

    def __eq__(self, other=1): # expected arg is optional
        ...

    def __eq__(self): # too few mandatory args
        ...

    def __eq__(self, other, other_other): # too many mandatory args
        ...

    def __round__(self): # allow zero additional args.
        ...

    def __round__(self, x): # allow one additional arg.
        ...

    def __round__(self, x, y): # disallow 2 args
        ...

    def __round__(self, x, y, z=2): # disallow 3 args even when one is optional
        ...
@@ -83,26 +83,3 @@ print('Hello %s (%s)' % bar['bop'])
print('Hello %(arg)s' % bar)
print('Hello %(arg)s' % bar.baz)
print('Hello %(arg)s' % bar['bop'])

# Hanging modulos
(
    "foo %s "
    "bar %s"
) % (x, y)

(
    "foo %(foo)s "
    "bar %(bar)s"
) % {"foo": x, "bar": y}

(
    """foo %s"""
    % (x,)
)

(
    """
foo %s
"""
    % (x,)
)

@@ -34,6 +34,28 @@ pytest.param('"%8s" % (None,)', id="unsafe width-string conversion"),
"%(and)s" % {"and": 2}

# OK (arguably false negatives)
(
    "foo %s "
    "bar %s"
) % (x, y)

(
    "foo %(foo)s "
    "bar %(bar)s"
) % {"foo": x, "bar": y}

(
    """foo %s"""
    % (x,)
)

(
    """
foo %s
"""
    % (x,)
)

'Hello %s' % bar

'Hello %s' % bar.baz

@@ -46,14 +46,6 @@ print("foo {} ".format(x))

'({}={{0!e}})'.format(a)

"{[b]}".format(a)

'{[b]}'.format(a)

"""{[b]}""".format(a)

'''{[b]}'''.format(a)

###
# Non-errors
###

@@ -1,27 +0,0 @@
import typing
from dataclasses import dataclass, field
from typing import ClassVar, Sequence

KNOWINGLY_MUTABLE_DEFAULT = []


@dataclass()
class A:
    mutable_default: list[int] = []
    immutable_annotation: typing.Sequence[int] = []
    without_annotation = []
    ignored_via_comment: list[int] = []  # noqa: RUF008
    correct_code: list[int] = KNOWINGLY_MUTABLE_DEFAULT
    perfectly_fine: list[int] = field(default_factory=list)
    class_variable: typing.ClassVar[list[int]] = []


@dataclass
class B:
    mutable_default: list[int] = []
    immutable_annotation: Sequence[int] = []
    without_annotation = []
    ignored_via_comment: list[int] = []  # noqa: RUF008
    correct_code: list[int] = KNOWINGLY_MUTABLE_DEFAULT
    perfectly_fine: list[int] = field(default_factory=list)
    class_variable: ClassVar[list[int]] = []
@@ -1,42 +0,0 @@
import datetime
import re
import typing
from dataclasses import dataclass, field
from pathlib import Path
from typing import ClassVar, NamedTuple


def default_function() -> list[int]:
    return []


class ImmutableType(NamedTuple):
    something: int = 8


@dataclass()
class A:
    hidden_mutable_default: list[int] = default_function()
    class_variable: typing.ClassVar[list[int]] = default_function()
    another_class_var: ClassVar[list[int]] = default_function()

    fine_path: Path = Path()
    fine_date: datetime.date = datetime.date(2042, 1, 1)
    fine_timedelta: datetime.timedelta = datetime.timedelta(hours=7)
    fine_tuple: tuple[int] = tuple([1])
    fine_regex: re.Pattern = re.compile(r".*")


DEFAULT_IMMUTABLETYPE_FOR_ALL_DATACLASSES = ImmutableType(40)
DEFAULT_A_FOR_ALL_DATACLASSES = A([1, 2, 3])


@dataclass
class B:
    hidden_mutable_default: list[int] = default_function()
    another_dataclass: A = A()
    not_optimal: ImmutableType = ImmutableType(20)
    good_variant: ImmutableType = DEFAULT_IMMUTABLETYPE_FOR_ALL_DATACLASSES
    okay_variant: A = DEFAULT_A_FOR_ALL_DATACLASSES

    fine_dataclass_function: list[int] = field(default_factory=list)
@@ -1,19 +1,18 @@
//! Interface for generating autofix edits from higher-level actions (e.g., "remove an argument").
use anyhow::{bail, Result};
use itertools::Itertools;
use libcst_native::{
    Codegen, CodegenState, ImportNames, ParenthesizableWhitespace, SmallStatement, Statement,
};
use ruff_text_size::{TextLen, TextRange, TextSize};
use rustpython_parser::ast::{ExcepthandlerKind, Expr, Keyword, Stmt, StmtKind};
use rustpython_parser::ast::{ExcepthandlerKind, Expr, Keyword, Location, Stmt, StmtKind};
use rustpython_parser::{lexer, Mode, Tok};

use ruff_diagnostics::Edit;
use ruff_python_ast::context::Context;
use ruff_python_ast::helpers;
use ruff_python_ast::helpers::to_absolute;
use ruff_python_ast::imports::{AnyImport, Import};
use ruff_python_ast::newlines::NewlineWithTrailingNewline;
use ruff_python_ast::source_code::{Indexer, Locator, Stylist};
use ruff_python_semantic::context::Context;

use crate::cst::helpers::compose_module_path;
use crate::cst::matchers::match_module;
@@ -102,17 +101,20 @@ fn is_lone_child(child: &Stmt, parent: &Stmt, deleted: &[&Stmt]) -> Result<bool>

/// Return the location of a trailing semicolon following a `Stmt`, if it's part
/// of a multi-statement line.
fn trailing_semicolon(stmt: &Stmt, locator: &Locator) -> Option<TextSize> {
    let contents = locator.after(stmt.end());

    for line in NewlineWithTrailingNewline::from(contents) {
        let trimmed = line.trim_start();

fn trailing_semicolon(stmt: &Stmt, locator: &Locator) -> Option<Location> {
    let contents = locator.skip(stmt.end_location.unwrap());
    for (row, line) in NewlineWithTrailingNewline::from(contents).enumerate() {
        let trimmed = line.trim();
        if trimmed.starts_with(';') {
            let colon_offset = line.text_len() - trimmed.text_len();
            return Some(stmt.end() + line.start() + colon_offset);
            let column = line
                .char_indices()
                .find_map(|(column, char)| if char == ';' { Some(column) } else { None })
                .unwrap();
            return Some(to_absolute(
                Location::new(row + 1, column),
                stmt.end_location.unwrap(),
            ));
        }

        if !trimmed.starts_with('\\') {
            break;
        }
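As a review aside: the two `trailing_semicolon` bodies above perform the same scan, one in byte offsets (`TextSize`), one in `(row, column)` terms. Below is a minimal standalone sketch of the row/column variant, with plain tuples and `str::lines` standing in for ruff's `Location`, `Locator`, and `NewlineWithTrailingNewline`; it illustrates the scan, not the crate's actual API.

```rust
/// Scan the text that follows a statement and return the (row, column)
/// of a trailing semicolon, relative to the end of that statement.
/// Rows are 1-based to mirror the diff above; `None` means the statement
/// is not part of a multi-statement line.
fn trailing_semicolon(rest: &str) -> Option<(usize, usize)> {
    for (row, line) in rest.lines().enumerate() {
        let trimmed = line.trim();
        if trimmed.starts_with(';') {
            // Column of the `;` within this line.
            let column = line
                .char_indices()
                .find_map(|(col, c)| (c == ';').then_some(col))?;
            return Some((row + 1, column));
        }
        // Only an explicit continuation can carry the semicolon to the next line.
        if !trimmed.starts_with('\\') {
            break;
        }
    }
    None
}

fn main() {
    assert_eq!(trailing_semicolon("; y = 1"), Some((1, 0)));
    assert_eq!(trailing_semicolon(" \\\n  ; y = 1"), Some((2, 2)));
    assert_eq!(trailing_semicolon("\ny = 1"), None);
    println!("ok");
}
```

The real helper also normalizes `\r\n` line endings and converts the relative location to an absolute one via `to_absolute`; that plumbing is elided here.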
@@ -121,36 +123,42 @@ fn trailing_semicolon(stmt: &Stmt, locator: &Locator) -> Option<TextSize> {
}

/// Find the next valid break for a `Stmt` after a semicolon.
fn next_stmt_break(semicolon: TextSize, locator: &Locator) -> TextSize {
    let start_location = semicolon + TextSize::from(1);

    let contents = &locator.contents()[usize::from(start_location)..];
    for line in NewlineWithTrailingNewline::from(contents) {
fn next_stmt_break(semicolon: Location, locator: &Locator) -> Location {
    let start_location = Location::new(semicolon.row(), semicolon.column() + 1);
    let contents = locator.skip(start_location);
    for (row, line) in NewlineWithTrailingNewline::from(contents).enumerate() {
        let trimmed = line.trim();
        // Skip past any continuations.
        if trimmed.starts_with('\\') {
            continue;
        }

        return start_location
            + if trimmed.is_empty() {
                // If the line is empty, then despite the previous statement ending in a
                // semicolon, we know that it's not a multi-statement line.
                line.start()
            } else {
                // Otherwise, find the start of the next statement. (Or, anything that isn't
                // whitespace.)
                let relative_offset = line.find(|c: char| !c.is_whitespace()).unwrap();
                line.start() + TextSize::try_from(relative_offset).unwrap()
            };
        return if trimmed.is_empty() {
            // If the line is empty, then despite the previous statement ending in a
            // semicolon, we know that it's not a multi-statement line.
            to_absolute(Location::new(row + 1, 0), start_location)
        } else {
            // Otherwise, find the start of the next statement. (Or, anything that isn't
            // whitespace.)
            let column = line
                .char_indices()
                .find_map(|(column, char)| {
                    if char.is_whitespace() {
                        None
                    } else {
                        Some(column)
                    }
                })
                .unwrap();
            to_absolute(Location::new(row + 1, column), start_location)
        };
    }

    locator.line_end(start_location)
    Location::new(start_location.row() + 1, 0)
}

/// Return `true` if a `Stmt` occurs at the end of a file.
fn is_end_of_file(stmt: &Stmt, locator: &Locator) -> bool {
    stmt.end() == locator.contents().text_len()
    let contents = locator.skip(stmt.end_location.unwrap());
    contents.is_empty()
}

/// Return the `Fix` to use when deleting a `Stmt`.
@@ -181,23 +189,33 @@ pub fn delete_stmt(
    {
        // If removing this node would lead to an invalid syntax tree, replace
        // it with a `pass`.
        Ok(Edit::range_replacement("pass".to_string(), stmt.range()))
        Ok(Edit::replacement(
            "pass".to_string(),
            stmt.location,
            stmt.end_location.unwrap(),
        ))
    } else {
        Ok(if let Some(semicolon) = trailing_semicolon(stmt, locator) {
            let next = next_stmt_break(semicolon, locator);
            Edit::deletion(stmt.start(), next)
        } else if helpers::has_leading_content(stmt, locator) {
            Edit::range_deletion(stmt.range())
        } else if helpers::preceded_by_continuation(stmt, indexer, locator) {
            if is_end_of_file(stmt, locator) && locator.is_at_start_of_line(stmt.start()) {
            Edit::deletion(stmt.location, next)
        } else if helpers::match_leading_content(stmt, locator) {
            Edit::deletion(stmt.location, stmt.end_location.unwrap())
        } else if helpers::preceded_by_continuation(stmt, indexer) {
            if is_end_of_file(stmt, locator) && stmt.location.column() == 0 {
                // Special-case: a file can't end in a continuation.
                Edit::range_replacement(stylist.line_ending().to_string(), stmt.range())
                Edit::replacement(
                    stylist.line_ending().to_string(),
                    stmt.location,
                    stmt.end_location.unwrap(),
                )
            } else {
                Edit::range_deletion(stmt.range())
                Edit::deletion(stmt.location, stmt.end_location.unwrap())
            }
        } else {
            let range = locator.full_lines_range(stmt.range());
            Edit::range_deletion(range)
            Edit::deletion(
                Location::new(stmt.location.row(), 0),
                Location::new(stmt.end_location.unwrap().row() + 1, 0),
            )
        })
    }
}
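The `delete_stmt` fall-through above is easier to audit as a flat decision tree. Here is a sketch with simplified stand-ins: plain byte offsets instead of `Location`/`TextSize`, and precomputed booleans in place of the `helpers::*` queries. All names in it are illustrative, not ruff's API.

```rust
/// A simplified stand-in for ruff's edit type: replace `start..end` with `content`.
#[derive(Debug, PartialEq)]
struct Edit {
    start: usize,
    end: usize,
    content: String,
}

fn delete_stmt(
    stmt_range: (usize, usize),
    line_range: (usize, usize),        // the statement's lines, padded to full lines
    would_break_syntax: bool,          // e.g., the only statement in a block
    trailing_semicolon: Option<usize>, // start of the next statement, if any
    has_leading_content: bool,
    preceded_by_continuation: bool,
    at_end_of_file_and_line_start: bool,
    line_ending: &str,
) -> Edit {
    let (start, end) = stmt_range;
    if would_break_syntax {
        // Removing the node outright would leave an empty block.
        Edit { start, end, content: "pass".to_string() }
    } else if let Some(next) = trailing_semicolon {
        // `x = 1; y = 2`: delete through the semicolon to the next break.
        Edit { start, end: next, content: String::new() }
    } else if has_leading_content {
        Edit { start, end, content: String::new() }
    } else if preceded_by_continuation {
        if at_end_of_file_and_line_start {
            // Special-case: a file can't end in a continuation.
            Edit { start, end, content: line_ending.to_string() }
        } else {
            Edit { start, end, content: String::new() }
        }
    } else {
        // Default: take out the statement's full lines.
        Edit { start: line_range.0, end: line_range.1, content: String::new() }
    }
}

fn main() {
    let edit = delete_stmt((4, 9), (0, 10), false, None, false, false, false, "\n");
    assert_eq!(edit, Edit { start: 0, end: 10, content: String::new() });
}
```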
@@ -212,7 +230,7 @@ pub fn remove_unused_imports<'a>(
    indexer: &Indexer,
    stylist: &Stylist,
) -> Result<Edit> {
    let module_text = locator.slice(stmt.range());
    let module_text = locator.slice(stmt);
    let mut tree = match_module(module_text)?;

    let Some(Statement::Simple(body)) = tree.body.first_mut() else {
@@ -318,7 +336,11 @@ pub fn remove_unused_imports<'a>(
        };
        tree.codegen(&mut state);

        Ok(Edit::range_replacement(state.to_string(), stmt.range()))
        Ok(Edit::replacement(
            state.to_string(),
            stmt.location,
            stmt.end_location.unwrap(),
        ))
    }
}

@@ -330,14 +352,15 @@ pub fn remove_unused_imports<'a>(
/// For this behavior, set `remove_parentheses` to `true`.
pub fn remove_argument(
    locator: &Locator,
    call_at: TextSize,
    expr_range: TextRange,
    call_at: Location,
    expr_at: Location,
    expr_end: Location,
    args: &[Expr],
    keywords: &[Keyword],
    remove_parentheses: bool,
) -> Result<Edit> {
    // TODO(sbrugman): Preserve trailing comments.
    let contents = locator.after(call_at);
    let contents = locator.skip(call_at);

    let mut fix_start = None;
    let mut fix_end = None;
@@ -350,13 +373,13 @@ pub fn remove_argument(
    if n_arguments == 1 {
        // Case 1: there is only one argument.
        let mut count: usize = 0;
        for (tok, range) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
        for (start, tok, end) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
            if matches!(tok, Tok::Lpar) {
                if count == 0 {
                    fix_start = Some(if remove_parentheses {
                        range.start()
                        start
                    } else {
                        range.start() + TextSize::from(1)
                        Location::new(start.row(), start.column() + 1)
                    });
                }
                count += 1;
@@ -366,9 +389,9 @@ pub fn remove_argument(
                count -= 1;
                if count == 0 {
                    fix_end = Some(if remove_parentheses {
                        range.end()
                        end
                    } else {
                        range.end() - TextSize::from(1)
                        Location::new(end.row(), end.column() - 1)
                    });
                    break;
                }
@@ -376,27 +399,27 @@ pub fn remove_argument(
        }
    } else if args
        .iter()
        .map(Expr::start)
        .chain(keywords.iter().map(Keyword::start))
        .any(|location| location > expr_range.start())
        .map(|node| node.location)
        .chain(keywords.iter().map(|node| node.location))
        .any(|location| location > expr_at)
    {
        // Case 2: argument or keyword is _not_ the last node.
        let mut seen_comma = false;
        for (tok, range) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
        for (start, tok, end) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
            if seen_comma {
                if matches!(tok, Tok::NonLogicalNewline) {
                    // Also delete any non-logical newlines after the comma.
                    continue;
                }
                fix_end = Some(if matches!(tok, Tok::Newline) {
                    range.end()
                    end
                } else {
                    range.start()
                    start
                });
                break;
            }
            if range.start() == expr_range.start() {
                fix_start = Some(range.start());
            if start == expr_at {
                fix_start = Some(start);
            }
            if fix_start.is_some() && matches!(tok, Tok::Comma) {
                seen_comma = true;
@@ -405,13 +428,13 @@ pub fn remove_argument(
    } else {
        // Case 3: argument or keyword is the last node, so we have to find the last
        // comma in the stmt.
        for (tok, range) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
            if range.start() == expr_range.start() {
                fix_end = Some(expr_range.end());
        for (start, tok, _) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
            if start == expr_at {
                fix_end = Some(expr_end);
                break;
            }
            if matches!(tok, Tok::Comma) {
                fix_start = Some(range.start());
                fix_start = Some(start);
            }
        }
    }
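Cases 2 and 3 above both reduce to scanning for a comma at parenthesis depth one. A toy version over raw characters follows; the real code walks `lex_located` tokens precisely so that commas inside string literals are not miscounted, which this sketch deliberately ignores.

```rust
/// Return the byte range to delete in order to drop the argument that starts
/// at `arg_start`, assuming it is not the last argument and the call text is
/// well-formed. A character-level sketch of the comma scan in `remove_argument`.
fn removal_range(call: &str, arg_start: usize) -> Option<(usize, usize)> {
    let mut fix_start = None;
    let mut depth = 0usize;
    for (i, c) in call.char_indices() {
        match c {
            '(' => depth += 1,
            ')' => depth -= 1, // assumes balanced parentheses
            ',' if depth == 1 && fix_start.is_some() => {
                // Delete from the argument start through the comma (and the
                // following space, if any), so the neighbors stay well-formed.
                let mut end = i + 1;
                if call[end..].starts_with(' ') {
                    end += 1;
                }
                return Some((fix_start?, end));
            }
            _ => {}
        }
        if i == arg_start {
            fix_start = Some(i);
        }
    }
    None
}

fn main() {
    let call = "f(a, b, c)";
    // Remove `a` (starts at byte 2): delete bytes 2..5, leaving "f(b, c)".
    let (start, end) = removal_range(call, 2).unwrap();
    let fixed = format!("{}{}", &call[..start], &call[end..]);
    assert_eq!(fixed, "f(b, c)");
}
```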
@@ -458,8 +481,11 @@ pub fn get_or_import_symbol(
        //
        // By adding this no-op edit, we force the `unused-imports` fix to conflict with the
        // `sys-exit-alias` fix, and thus will avoid applying both fixes in the same pass.
        let import_edit =
            Edit::range_replacement(locator.slice(source.range()).to_string(), source.range());
        let import_edit = Edit::replacement(
            locator.slice(source).to_string(),
            source.location,
            source.end_location.unwrap(),
        );
        Ok((import_edit, binding))
    } else {
        if let Some(stmt) = importer.get_import_from(module) {
@@ -500,12 +526,12 @@ pub fn get_or_import_symbol(
#[cfg(test)]
mod tests {
    use anyhow::Result;
    use ruff_text_size::TextSize;
    use rustpython_parser as parser;
    use rustpython_parser::ast::Location;

    use ruff_python_ast::source_code::Locator;

    use crate::autofix::actions::{next_stmt_break, trailing_semicolon};
    use crate::autofix::helpers::{next_stmt_break, trailing_semicolon};

    #[test]
    fn find_semicolon() -> Result<()> {
@@ -519,13 +545,19 @@ mod tests {
        let program = parser::parse_program(contents, "<filename>")?;
        let stmt = program.first().unwrap();
        let locator = Locator::new(contents);
        assert_eq!(trailing_semicolon(stmt, &locator), Some(TextSize::from(5)));
        assert_eq!(
            trailing_semicolon(stmt, &locator),
            Some(Location::new(1, 5))
        );

        let contents = "x = 1 ; y = 1";
        let program = parser::parse_program(contents, "<filename>")?;
        let stmt = program.first().unwrap();
        let locator = Locator::new(contents);
        assert_eq!(trailing_semicolon(stmt, &locator), Some(TextSize::from(6)));
        assert_eq!(
            trailing_semicolon(stmt, &locator),
            Some(Location::new(1, 6))
        );

        let contents = r#"
x = 1 \
@@ -535,7 +567,10 @@ x = 1 \
        let program = parser::parse_program(contents, "<filename>")?;
        let stmt = program.first().unwrap();
        let locator = Locator::new(contents);
        assert_eq!(trailing_semicolon(stmt, &locator), Some(TextSize::from(10)));
        assert_eq!(
            trailing_semicolon(stmt, &locator),
            Some(Location::new(2, 2))
        );

        Ok(())
    }
@@ -545,15 +580,15 @@ x = 1 \
        let contents = "x = 1; y = 1";
        let locator = Locator::new(contents);
        assert_eq!(
            next_stmt_break(TextSize::from(4), &locator),
            TextSize::from(5)
            next_stmt_break(Location::new(1, 4), &locator),
            Location::new(1, 5)
        );

        let contents = "x = 1 ; y = 1";
        let locator = Locator::new(contents);
        assert_eq!(
            next_stmt_break(TextSize::from(5), &locator),
            TextSize::from(6)
            next_stmt_break(Location::new(1, 5), &locator),
            Location::new(1, 6)
        );

        let contents = r#"
@@ -563,8 +598,8 @@ x = 1 \
        .trim();
        let locator = Locator::new(contents);
        assert_eq!(
            next_stmt_break(TextSize::from(10), &locator),
            TextSize::from(12)
            next_stmt_break(Location::new(2, 2), &locator),
            Location::new(2, 4)
        );
    }
}
@@ -1,28 +1,24 @@
use std::collections::BTreeSet;

use itertools::Itertools;
use ruff_text_size::{TextRange, TextSize};
use rustc_hash::FxHashMap;
use rustpython_parser::ast::Location;

use ruff_diagnostics::{Diagnostic, Edit, Fix};
use ruff_python_ast::source_code::Locator;
use ruff_python_ast::types::Range;

use crate::linter::FixTable;
use crate::registry::{AsRule, Rule};

pub mod actions;
pub mod helpers;

/// Auto-fix errors in a file, and write the fixed source code to disk.
pub fn fix_file(diagnostics: &[Diagnostic], locator: &Locator) -> Option<(String, FixTable)> {
    let mut with_fixes = diagnostics
        .iter()
        .filter(|diag| !diag.fix.is_empty())
        .peekable();

    if with_fixes.peek().is_none() {
    if diagnostics.iter().all(|check| check.fix.is_empty()) {
        None
    } else {
        Some(apply_fixes(with_fixes, locator))
        Some(apply_fixes(diagnostics.iter(), locator))
    }
}

@@ -32,7 +28,7 @@ fn apply_fixes<'a>(
    locator: &'a Locator<'a>,
) -> (String, FixTable) {
    let mut output = String::with_capacity(locator.len());
    let mut last_pos: Option<TextSize> = None;
    let mut last_pos: Option<Location> = None;
    let mut applied: BTreeSet<&Edit> = BTreeSet::default();
    let mut fixed = FxHashMap::default();

@@ -56,7 +52,7 @@ fn apply_fixes<'a>(
        // Best-effort approach: if this fix overlaps with a fix we've already applied,
        // skip it.
        if last_pos.map_or(false, |last_pos| {
            fix.min_start()
            fix.location()
                .map_or(false, |fix_location| last_pos >= fix_location)
        }) {
            continue;
@@ -64,14 +60,14 @@ fn apply_fixes<'a>(

        for edit in fix.edits() {
            // Add all contents from `last_pos` to `fix.location`.
            let slice = locator.slice(TextRange::new(last_pos.unwrap_or_default(), edit.start()));
            let slice = locator.slice(Range::new(last_pos.unwrap_or_default(), edit.location));
            output.push_str(slice);

            // Add the patch itself.
            output.push_str(edit.content().unwrap_or_default());
            output.push_str(&edit.content);

            // Track that the edit was applied.
            last_pos = Some(edit.end());
            last_pos = Some(edit.end_location);
            applied.insert(edit);
        }

@@ -79,7 +75,7 @@ fn apply_fixes<'a>(
    }

    // Add the remaining content.
    let slice = locator.after(last_pos.unwrap_or_default());
    let slice = locator.skip(last_pos.unwrap_or_default());
    output.push_str(slice);

    (output, fixed)
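The loop above is the heart of fix application on both sides of the diff: walk edits in order, copy the untouched stretch, splice in the patch, and remember where you left off. A self-contained version over byte offsets (the `Edit` type here is a simplified assumption, not either of the types in the diff):

```rust
struct Edit {
    start: usize,
    end: usize,
    content: String,
}

/// Apply edits to `source`, assuming they are sorted by start position.
fn apply_edits(source: &str, edits: &[Edit]) -> String {
    let mut output = String::with_capacity(source.len());
    let mut last_pos = 0;
    for edit in edits {
        // Skip anything that overlaps an already-applied edit (best effort).
        if edit.start < last_pos {
            continue;
        }
        // Copy the untouched stretch, then the patch itself.
        output.push_str(&source[last_pos..edit.start]);
        output.push_str(&edit.content);
        last_pos = edit.end;
    }
    // Copy the remaining content.
    output.push_str(&source[last_pos..]);
    output
}

fn main() {
    let source = "class A(object):\n    pass\n";
    // Delete bytes 7..15, i.e. "(object)", mirroring the deletion test below.
    let edits = [Edit { start: 7, end: 15, content: String::new() }];
    assert_eq!(apply_edits(source, &edits), "class A:\n    pass\n");
}
```

This is also why `cmp_fix` below sorts fixes by start position first: the splice loop only works if edits arrive in document order.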
@@ -87,8 +83,8 @@ fn apply_fixes<'a>(

/// Compare two fixes.
fn cmp_fix(rule1: Rule, rule2: Rule, fix1: &Fix, fix2: &Fix) -> std::cmp::Ordering {
    fix1.min_start()
        .cmp(&fix2.min_start())
    fix1.location()
        .cmp(&fix2.location())
        .then_with(|| match (&rule1, &rule2) {
            // Apply `EndsInPeriod` fixes before `NewLineAfterLastParagraph` fixes.
            (Rule::EndsInPeriod, Rule::NewLineAfterLastParagraph) => std::cmp::Ordering::Less,
@@ -99,7 +95,7 @@ fn cmp_fix(rule1: Rule, rule2: Rule, fix1: &Fix, fix2: &Fix) -> std::cmp::Orderi

#[cfg(test)]
mod tests {
    use ruff_text_size::TextSize;
    use rustpython_parser::ast::Location;

    use ruff_diagnostics::Diagnostic;
    use ruff_diagnostics::Edit;
@@ -113,7 +109,8 @@ mod tests {
            .map(|edit| Diagnostic {
                // The choice of rule here is arbitrary.
                kind: MissingNewlineAtEndOfFile.into(),
                range: edit.range(),
                location: edit.location,
                end_location: edit.end_location,
                fix: edit.into(),
                parent: None,
            })
@@ -138,11 +135,11 @@ class A(object):
        "#
        .trim(),
        );
        let diagnostics = create_diagnostics([Edit::replacement(
            "Bar".to_string(),
            TextSize::new(8),
            TextSize::new(14),
        )]);
        let diagnostics = create_diagnostics([Edit {
            content: "Bar".to_string(),
            location: Location::new(1, 8),
            end_location: Location::new(1, 14),
        }]);
        let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
        assert_eq!(
            contents,
@@ -164,7 +161,11 @@ class A(object):
        "#
        .trim(),
        );
        let diagnostics = create_diagnostics([Edit::deletion(TextSize::new(7), TextSize::new(15))]);
        let diagnostics = create_diagnostics([Edit {
            content: String::new(),
            location: Location::new(1, 7),
            end_location: Location::new(1, 15),
        }]);
        let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
        assert_eq!(
            contents,
@@ -187,8 +188,16 @@ class A(object, object, object):
        .trim(),
        );
        let diagnostics = create_diagnostics([
            Edit::deletion(TextSize::from(8), TextSize::from(16)),
            Edit::deletion(TextSize::from(22), TextSize::from(30)),
            Edit {
                content: String::new(),
                location: Location::new(1, 8),
                end_location: Location::new(1, 16),
            },
            Edit {
                content: String::new(),
                location: Location::new(1, 22),
                end_location: Location::new(1, 30),
            },
        ]);
        let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);

@@ -213,8 +222,16 @@ class A(object):
        .trim(),
        );
        let diagnostics = create_diagnostics([
            Edit::deletion(TextSize::from(7), TextSize::from(15)),
            Edit::replacement("ignored".to_string(), TextSize::from(9), TextSize::from(11)),
            Edit {
                content: String::new(),
                location: Location::new(1, 7),
                end_location: Location::new(1, 15),
            },
            Edit {
                content: "ignored".to_string(),
                location: Location::new(1, 9),
                end_location: Location::new(1, 11),
            },
        ]);
        let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
        assert_eq!(
@@ -1,14 +1,14 @@
use ruff_text_size::TextRange;
use ruff_python_ast::scope::ScopeStack;
use rustpython_parser::ast::{Expr, Stmt};

use ruff_python_ast::types::Range;
use ruff_python_ast::types::RefEquality;
use ruff_python_semantic::analyze::visibility::{Visibility, VisibleScope};
use ruff_python_semantic::scope::ScopeId;
use ruff_python_ast::visibility::{Visibility, VisibleScope};

use crate::checkers::ast::AnnotationContext;
use crate::docstrings::definition::Definition;

type Context<'a> = (ScopeId, Vec<RefEquality<'a, Stmt>>);
type Context<'a> = (ScopeStack, Vec<RefEquality<'a, Stmt>>);

/// A collection of AST nodes that are deferred for later analysis.
/// Used to, e.g., store functions, whose bodies shouldn't be analyzed until all
@@ -16,7 +16,7 @@ type Context<'a> = (ScopeId, Vec<RefEquality<'a, Stmt>>);
#[derive(Default)]
pub struct Deferred<'a> {
    pub definitions: Vec<(Definition<'a>, Visibility, Context<'a>)>,
    pub string_type_definitions: Vec<(TextRange, &'a str, AnnotationContext, Context<'a>)>,
    pub string_type_definitions: Vec<(Range, &'a str, AnnotationContext, Context<'a>)>,
    pub type_definitions: Vec<(&'a Expr, AnnotationContext, Context<'a>)>,
    pub functions: Vec<(&'a Stmt, Context<'a>, VisibleScope)>,
    pub lambdas: Vec<(&'a Expr, Context<'a>)>,
@@ -1,15 +1,12 @@
//! Lint rules based on import analysis.
use std::borrow::Cow;

use std::path::Path;

use rustpython_parser::ast::{StmtKind, Suite};
use rustpython_parser::ast::Suite;

use ruff_diagnostics::Diagnostic;
use ruff_python_ast::helpers::to_module_path;
use ruff_python_ast::imports::{ImportMap, ModuleImport};
use ruff_python_ast::source_code::{Indexer, Locator, Stylist};
use ruff_python_ast::visitor::Visitor;
use ruff_python_stdlib::path::is_python_stub_file;

use crate::directives::IsortDirectives;
use crate::registry::Rule;
@@ -17,60 +14,6 @@ use crate::rules::isort;
use crate::rules::isort::track::{Block, ImportTracker};
use crate::settings::{flags, Settings};

fn extract_import_map(path: &Path, package: Option<&Path>, blocks: &[&Block]) -> Option<ImportMap> {
    let Some(package) = package else {
        return None;
    };
    let Some(module_path) = to_module_path(package, path) else {
        return None;
    };

    let num_imports = blocks.iter().map(|block| block.imports.len()).sum();
    let mut module_imports = Vec::with_capacity(num_imports);
    for stmt in blocks.iter().flat_map(|block| &block.imports) {
        match &stmt.node {
            StmtKind::Import { names } => {
                module_imports.extend(
                    names
                        .iter()
                        .map(|name| ModuleImport::new(name.node.name.clone(), stmt.range())),
                );
            }
            StmtKind::ImportFrom {
                module,
                names,
                level,
            } => {
                let level = level.unwrap_or(0);
                let module = if let Some(module) = module {
                    if level == 0 {
                        Cow::Borrowed(module)
                    } else {
                        if module_path.len() <= level {
                            continue;
                        }
                        let prefix = module_path[..module_path.len() - level].join(".");
                        Cow::Owned(format!("{prefix}.{module}"))
                    }
                } else {
                    if module_path.len() <= level {
                        continue;
                    }
                    Cow::Owned(module_path[..module_path.len() - level].join("."))
                };
                module_imports.extend(names.iter().map(|name| {
                    ModuleImport::new(format!("{}.{}", module, name.node.name), name.range())
                }));
            }
            _ => panic!("Expected StmtKind::Import | StmtKind::ImportFrom"),
        }
    }

    let mut import_map = ImportMap::default();
    import_map.insert(module_path.join("."), module_imports);
    Some(import_map)
}

#[allow(clippy::too_many_arguments)]
pub fn check_imports(
    python_ast: &Suite,
@@ -82,12 +25,10 @@ pub fn check_imports(
    autofix: flags::Autofix,
    path: &Path,
    package: Option<&Path>,
) -> (Vec<Diagnostic>, Option<ImportMap>) {
    let is_stub = is_python_stub_file(path);

) -> Vec<Diagnostic> {
    // Extract all imports from the AST.
    let tracker = {
        let mut tracker = ImportTracker::new(locator, directives, is_stub);
        let mut tracker = ImportTracker::new(locator, directives, path);
        tracker.visit_body(python_ast);
        tracker
    };
@@ -108,12 +49,8 @@ pub fn check_imports(
    }
    if settings.rules.enabled(Rule::MissingRequiredImport) {
        diagnostics.extend(isort::rules::add_required_imports(
            &blocks, python_ast, locator, stylist, settings, autofix, is_stub,
            &blocks, python_ast, locator, stylist, settings, autofix,
        ));
    }

    // Extract import map.
    let imports = extract_import_map(path, package, &blocks);

    (diagnostics, imports)
    diagnostics
}

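The interesting arithmetic in the removed `extract_import_map` is how a relative `from ... import` is resolved against the current module's dotted path. The same logic in isolation (the signature and types are simplified assumptions):

```rust
/// Given the dotted path of the *current* module and a `from ... import`
/// with `level` leading dots, compute the dotted module the import refers to.
/// Returns `None` when the import climbs above the package root, mirroring
/// the `continue` branches in the removed code.
fn resolve_from_import(module_path: &[&str], module: Option<&str>, level: usize) -> Option<String> {
    match module {
        Some(module) if level == 0 => Some(module.to_string()),
        Some(module) => {
            // `from ..sibling import x` inside `pkg.sub.mod`:
            // strip `level` trailing segments, then append `sibling`.
            if module_path.len() <= level {
                return None;
            }
            let prefix = module_path[..module_path.len() - level].join(".");
            Some(format!("{prefix}.{module}"))
        }
        None => {
            // Bare `from .. import x`.
            if module_path.len() <= level {
                return None;
            }
            Some(module_path[..module_path.len() - level].join("."))
        }
    }
}

fn main() {
    let path = ["pkg", "sub", "mod"];
    assert_eq!(resolve_from_import(&path, Some("util"), 0), Some("util".into()));
    assert_eq!(resolve_from_import(&path, Some("util"), 2), Some("pkg.util".into()));
    assert_eq!(resolve_from_import(&path, None, 1), Some("pkg.sub".into()));
    assert_eq!(resolve_from_import(&path, Some("x"), 3), None);
}
```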
@@ -1,9 +1,9 @@
use ruff_text_size::TextRange;
use rustpython_parser::ast::Location;
use rustpython_parser::lexer::LexResult;

use ruff_diagnostics::{Diagnostic, DiagnosticKind, Fix};
use ruff_diagnostics::{Diagnostic, Fix};
use ruff_python_ast::source_code::{Locator, Stylist};
use ruff_python_ast::token_kind::TokenKind;
use ruff_python_ast::types::Range;

use crate::registry::{AsRule, Rule};
use crate::rules::pycodestyle::rules::logical_lines::{
@@ -37,7 +37,7 @@ pub fn check_logical_lines(
    settings: &Settings,
    autofix: flags::Autofix,
) -> Vec<Diagnostic> {
    let mut context = LogicalLinesContext::new(settings);
    let mut diagnostics = vec![];

    #[cfg(feature = "logical_lines")]
    let should_fix_missing_whitespace =
@@ -59,51 +59,120 @@ pub fn check_logical_lines(

    for line in &LogicalLines::from_tokens(tokens, locator) {
        if line.flags().contains(TokenFlags::OPERATOR) {
            space_around_operator(&line, &mut context);
            whitespace_around_named_parameter_equals(&line, &mut context);
            missing_whitespace_around_operator(&line, &mut context);
            missing_whitespace(&line, should_fix_missing_whitespace, &mut context);
        }
            for (location, kind) in space_around_operator(&line) {
                if settings.rules.enabled(kind.rule()) {
                    diagnostics.push(Diagnostic {
                        kind,
                        location,
                        end_location: location,
                        fix: Fix::empty(),
                        parent: None,
                    });
                }
            }

            for (location, kind) in whitespace_around_named_parameter_equals(&line.tokens()) {
                if settings.rules.enabled(kind.rule()) {
                    diagnostics.push(Diagnostic {
                        kind,
                        location,
                        end_location: location,
                        fix: Fix::empty(),
                        parent: None,
                    });
                }
            }
            for (location, kind) in missing_whitespace_around_operator(&line.tokens()) {
                if settings.rules.enabled(kind.rule()) {
                    diagnostics.push(Diagnostic {
                        kind,
                        location,
                        end_location: location,
                        fix: Fix::empty(),
                        parent: None,
                    });
                }
            }

            for diagnostic in missing_whitespace(&line, should_fix_missing_whitespace) {
                if settings.rules.enabled(diagnostic.kind.rule()) {
                    diagnostics.push(diagnostic);
                }
            }
        }
        if line
            .flags()
            .contains(TokenFlags::OPERATOR | TokenFlags::PUNCTUATION)
        {
            extraneous_whitespace(&line, &mut context);
            for (location, kind) in extraneous_whitespace(&line) {
                if settings.rules.enabled(kind.rule()) {
                    diagnostics.push(Diagnostic {
                        kind,
                        location,
                        end_location: location,
                        fix: Fix::empty(),
                        parent: None,
                    });
                }
            }
        }
        if line.flags().contains(TokenFlags::KEYWORD) {
            whitespace_around_keywords(&line, &mut context);
            missing_whitespace_after_keyword(&line, &mut context);
        }
            for (location, kind) in whitespace_around_keywords(&line) {
                if settings.rules.enabled(kind.rule()) {
                    diagnostics.push(Diagnostic {
                        kind,
                        location,
                        end_location: location,
                        fix: Fix::empty(),
                        parent: None,
                    });
                }
            }

            for (location, kind) in missing_whitespace_after_keyword(&line.tokens()) {
                if settings.rules.enabled(kind.rule()) {
                    diagnostics.push(Diagnostic {
                        kind,
                        location,
                        end_location: location,
                        fix: Fix::empty(),
                        parent: None,
                    });
                }
            }
        }
        if line.flags().contains(TokenFlags::COMMENT) {
            whitespace_before_comment(&line, locator, prev_line.is_none(), &mut context);
            for (range, kind) in whitespace_before_comment(&line.tokens(), locator) {
                if settings.rules.enabled(kind.rule()) {
                    diagnostics.push(Diagnostic {
                        kind,
                        location: range.location,
                        end_location: range.end_location,
                        fix: Fix::empty(),
                        parent: None,
                    });
                }
            }
        }

        if line.flags().contains(TokenFlags::BRACKET) {
            whitespace_before_parameters(
                &line,
            for diagnostic in whitespace_before_parameters(
                &line.tokens(),
                should_fix_whitespace_before_parameters,
                &mut context,
            );
            ) {
                if settings.rules.enabled(diagnostic.kind.rule()) {
                    diagnostics.push(diagnostic);
                }
            }
        }

        // Extract the indentation level.
        let Some(first_token) = line.first_token() else {
            continue;
        };

        let range = if first_token.kind() == TokenKind::Indent {
            first_token.range()
        } else {
            TextRange::new(locator.line_start(first_token.start()), first_token.start())
        };

        let indent_level = expand_indent(locator.slice(range));

        let Some(start_loc) = line.first_token_location() else { continue; };
        let start_line = locator.slice(Range::new(Location::new(start_loc.row(), 0), start_loc));
        let indent_level = expand_indent(start_line);
        let indent_size = 4;

        for kind in indentation(
        for (location, kind) in indentation(
            &line,
            prev_line.as_ref(),
            indent_char,
@@ -112,7 +181,13 @@ pub fn check_logical_lines(
            indent_size,
        ) {
            if settings.rules.enabled(kind.rule()) {
                context.push(kind, range);
                diagnostics.push(Diagnostic {
                    kind,
                    location,
                    end_location: location,
                    fix: Fix::empty(),
                    parent: None,
                });
            }
        }

@@ -121,40 +196,7 @@ pub fn check_logical_lines(
        prev_indent_level = Some(indent_level);
    }
}
    context.diagnostics
}

#[derive(Debug, Clone)]
pub(crate) struct LogicalLinesContext<'a> {
    settings: &'a Settings,
    diagnostics: Vec<Diagnostic>,
}

impl<'a> LogicalLinesContext<'a> {
    fn new(settings: &'a Settings) -> Self {
        Self {
            settings,
            diagnostics: Vec::new(),
        }
    }

    pub fn push<K: Into<DiagnosticKind>>(&mut self, kind: K, range: TextRange) {
        let kind = kind.into();
        if self.settings.rules.enabled(kind.rule()) {
            self.diagnostics.push(Diagnostic {
                kind,
                range,
                fix: Fix::empty(),
                parent: None,
            });
        }
    }

    pub fn push_diagnostic(&mut self, diagnostic: Diagnostic) {
        if self.settings.rules.enabled(diagnostic.kind.rule()) {
            self.diagnostics.push(diagnostic);
        }
    }
    diagnostics
}

#[cfg(test)]

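One side of this diff funnels every check through a `LogicalLinesContext` that owns the enabled-rule filter; the other repeats the `settings.rules.enabled(..)` guard at each call site. A minimal sketch of the context pattern, with stand-in types rather than ruff's `Settings` and `Diagnostic`:

```rust
use std::collections::HashSet;

#[derive(Debug)]
struct Diagnostic {
    rule: &'static str,
    offset: usize,
}

struct Context<'a> {
    enabled: &'a HashSet<&'static str>,
    diagnostics: Vec<Diagnostic>,
}

impl<'a> Context<'a> {
    fn new(enabled: &'a HashSet<&'static str>) -> Self {
        Self { enabled, diagnostics: Vec::new() }
    }

    /// Every check calls this unconditionally; the enabled-rule filter
    /// lives in exactly one place.
    fn push(&mut self, rule: &'static str, offset: usize) {
        if self.enabled.contains(rule) {
            self.diagnostics.push(Diagnostic { rule, offset });
        }
    }
}

fn main() {
    let enabled: HashSet<_> = ["E225"].into();
    let mut ctx = Context::new(&enabled);
    ctx.push("E225", 10); // kept
    ctx.push("E201", 4); // filtered out
    assert_eq!(ctx.diagnostics.len(), 1);
    println!("{:?}", ctx.diagnostics);
}
```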
@@ -1,13 +1,15 @@
//! `NoQA` enforcement and validation.

use itertools::Itertools;
use ruff_text_size::{TextLen, TextRange, TextSize};
use nohash_hasher::IntMap;
use rustpython_parser::ast::Location;

use ruff_diagnostics::{Diagnostic, Edit};
use ruff_python_ast::source_code::Locator;
use ruff_python_ast::newlines::StrExt;
use ruff_python_ast::types::Range;

use crate::codes::NoqaCode;
use crate::noqa;
use crate::noqa::{Directive, FileExemption, NoqaDirectives, NoqaMapping};
use crate::noqa::{Directive, FileExemption};
use crate::registry::{AsRule, Rule};
use crate::rule_redirects::get_redirect_target;
use crate::rules::ruff::rules::{UnusedCodes, UnusedNOQA};
@@ -15,25 +17,37 @@ use crate::settings::{flags, Settings};

pub fn check_noqa(
    diagnostics: &mut Vec<Diagnostic>,
    locator: &Locator,
    comment_ranges: &[TextRange],
    noqa_line_for: &NoqaMapping,
    contents: &str,
    commented_lines: &[usize],
    noqa_line_for: &IntMap<usize, usize>,
    settings: &Settings,
    autofix: flags::Autofix,
) -> Vec<usize> {
    let enforce_noqa = settings.rules.enabled(Rule::UnusedNOQA);

    let lines: Vec<&str> = contents.universal_newlines().collect();

    // Identify any codes that are globally exempted (within the current file).
    let exemption = noqa::file_exemption(locator.contents(), comment_ranges);
    let exemption = noqa::file_exemption(&lines, commented_lines);

    // Map from line number to `noqa` directive on that line, along with any codes
    // that were matched by the directive.
    let mut noqa_directives: IntMap<usize, (Directive, Vec<NoqaCode>)> = IntMap::default();

    // Extract all `noqa` directives.
    let mut noqa_directives = NoqaDirectives::from_commented_ranges(comment_ranges, locator);
    if enforce_noqa {
        for lineno in commented_lines {
            noqa_directives
                .entry(lineno - 1)
                .or_insert_with(|| (noqa::extract_noqa_directive(lines[lineno - 1]), vec![]));
        }
    }

    // Indices of diagnostics that were ignored by a `noqa` directive.
    let mut ignored_diagnostics = vec![];

    // Remove any ignored diagnostics.
    'outer: for (index, diagnostic) in diagnostics.iter().enumerate() {
    for (index, diagnostic) in diagnostics.iter().enumerate() {
        if matches!(diagnostic.kind.rule(), Rule::BlanketNOQA) {
            continue;
        }
@@ -54,65 +68,92 @@ pub fn check_noqa(
            FileExemption::None => {}
        }

        let noqa_offsets = diagnostic
            .parent
            .into_iter()
            .chain(std::iter::once(diagnostic.start()))
            .map(|position| noqa_line_for.resolve(position))
            .unique();
        let diagnostic_lineno = diagnostic.location.row();

        for noqa_offset in noqa_offsets {
            if let Some(directive_line) = noqa_directives.find_line_with_directive_mut(noqa_offset)
            {
                let suppressed = match &directive_line.directive {
                    Directive::All(..) => {
                        directive_line
                            .matches
                            .push(diagnostic.kind.rule().noqa_code());
                        ignored_diagnostics.push(index);
                        true
                    }
                    Directive::Codes(.., codes, _) => {
                        if noqa::includes(diagnostic.kind.rule(), codes) {
                            directive_line
                                .matches
                                .push(diagnostic.kind.rule().noqa_code());
        // Is the violation ignored by a `noqa` directive on the parent line?
        if let Some(parent_lineno) = diagnostic.parent.map(|location| location.row()) {
            if parent_lineno != diagnostic_lineno {
                let noqa_lineno = noqa_line_for.get(&parent_lineno).unwrap_or(&parent_lineno);
                if commented_lines.contains(noqa_lineno) {
                    let noqa = noqa_directives.entry(noqa_lineno - 1).or_insert_with(|| {
                        (noqa::extract_noqa_directive(lines[noqa_lineno - 1]), vec![])
                    });
                    match noqa {
                        (Directive::All(..), matches) => {
                            matches.push(diagnostic.kind.rule().noqa_code());
                            ignored_diagnostics.push(index);
                            true
                        } else {
                            false
                            continue;
                        }
                        (Directive::Codes(.., codes, _), matches) => {
                            if noqa::includes(diagnostic.kind.rule(), codes) {
                                matches.push(diagnostic.kind.rule().noqa_code());
                                ignored_diagnostics.push(index);
                                continue;
                            }
                        }
                        (Directive::None, ..) => {}
                    }
                    Directive::None => unreachable!(),
                };

                if suppressed {
                    continue 'outer;
                }
            }
        }

        // Is the diagnostic ignored by a `noqa` directive on the same line?
        let noqa_lineno = noqa_line_for
            .get(&diagnostic_lineno)
            .unwrap_or(&diagnostic_lineno);
        if commented_lines.contains(noqa_lineno) {
            let noqa = noqa_directives
                .entry(noqa_lineno - 1)
                .or_insert_with(|| (noqa::extract_noqa_directive(lines[noqa_lineno - 1]), vec![]));
            match noqa {
                (Directive::All(..), matches) => {
                    matches.push(diagnostic.kind.rule().noqa_code());
                    ignored_diagnostics.push(index);
                    continue;
                }
                (Directive::Codes(.., codes, _), matches) => {
                    if noqa::includes(diagnostic.kind.rule(), codes) {
                        matches.push(diagnostic.kind.rule().noqa_code());
                        ignored_diagnostics.push(index);
                        continue;
                    }
                }
                (Directive::None, ..) => {}
            }
        }
    }

    // Enforce that the noqa directive was actually used (RUF100).
    if enforce_noqa {
        for line in noqa_directives.lines() {
            match &line.directive {
                Directive::All(leading_spaces, noqa_range, trailing_spaces) => {
                    if line.matches.is_empty() {
                        let mut diagnostic =
                            Diagnostic::new(UnusedNOQA { codes: None }, *noqa_range);
        for (row, (directive, matches)) in noqa_directives {
            match directive {
                Directive::All(leading_spaces, start_byte, end_byte, trailing_spaces) => {
                    if matches.is_empty() {
                        let start_char = lines[row][..start_byte].chars().count();
                        let end_char =
                            start_char + lines[row][start_byte..end_byte].chars().count();

                        let mut diagnostic = Diagnostic::new(
                            UnusedNOQA { codes: None },
                            Range::new(
                                Location::new(row + 1, start_char),
                                Location::new(row + 1, end_char),
                            ),
                        );
                        if autofix.into() && settings.rules.should_fix(diagnostic.kind.rule()) {
                            diagnostic.set_fix(delete_noqa(
                                *leading_spaces,
                                *noqa_range,
                                *trailing_spaces,
                                locator,
                                row,
                                lines[row],
                                leading_spaces,
                                start_byte,
                                end_byte,
                                trailing_spaces,
                            ));
                        }
                        diagnostics.push(diagnostic);
                    }
                }
                Directive::Codes(leading_spaces, range, codes, trailing_spaces) => {
                Directive::Codes(leading_spaces, start_byte, end_byte, codes, trailing_spaces) => {
                    let mut disabled_codes = vec![];
                    let mut unknown_codes = vec![];
                    let mut unmatched_codes = vec![];
@@ -125,9 +166,7 @@ pub fn check_noqa(
                            break;
                        }

                        if line.matches.iter().any(|m| *m == code)
                            || settings.external.contains(code)
                        {
                        if matches.iter().any(|m| *m == code) || settings.external.contains(code) {
                            valid_codes.push(code);
                        } else {
                            if let Ok(rule) = Rule::from_code(code) {
@@ -150,6 +189,10 @@ pub fn check_noqa(
                        && unknown_codes.is_empty()
                        && unmatched_codes.is_empty())
                    {
                        let start_char = lines[row][..start_byte].chars().count();
                        let end_char =
                            start_char + lines[row][start_byte..end_byte].chars().count();

                        let mut diagnostic = Diagnostic::new(
                            UnusedNOQA {
                                codes: Some(UnusedCodes {
@@ -167,20 +210,26 @@ pub fn check_noqa(
                                        .collect(),
                                }),
                            },
                            *range,
                            Range::new(
                                Location::new(row + 1, start_char),
                                Location::new(row + 1, end_char),
                            ),
                        );
                        if autofix.into() && settings.rules.should_fix(diagnostic.kind.rule()) {
                            if valid_codes.is_empty() {
                                diagnostic.set_fix(delete_noqa(
                                    *leading_spaces,
                                    *range,
                                    *trailing_spaces,
                                    locator,
                                    row,
                                    lines[row],
                                    leading_spaces,
                                    start_byte,
                                    end_byte,
                                    trailing_spaces,
                                ));
                            } else {
                                diagnostic.set_fix(Edit::range_replacement(
                                diagnostic.set_fix(Edit::replacement(
                                    format!("# noqa: {}", valid_codes.join(", ")),
                                    *range,
                                    Location::new(row + 1, start_char),
                                    Location::new(row + 1, end_char),
                                ));
                            }
                        }
@@ -198,37 +247,39 @@ pub fn check_noqa(

/// Generate an [`Edit`] to delete a `noqa` directive.
fn delete_noqa(
    leading_spaces: TextSize,
    noqa_range: TextRange,
    trailing_spaces: TextSize,
    locator: &Locator,
    row: usize,
    line: &str,
    leading_spaces: usize,
    start_byte: usize,
    end_byte: usize,
    trailing_spaces: usize,
) -> Edit {
    let line_range = locator.line_range(noqa_range.start());

    // Ex) `# noqa`
    if line_range
        == TextRange::new(
            noqa_range.start() - leading_spaces,
            noqa_range.end() + trailing_spaces,
        )
    {
        let full_line_end = locator.full_line_end(line_range.end());
        Edit::deletion(line_range.start(), full_line_end)
    }
    // Ex) `x = 1 # noqa`
    else if noqa_range.end() + trailing_spaces == line_range.end() {
        Edit::deletion(noqa_range.start() - leading_spaces, line_range.end())
    }
    // Ex) `x = 1 # noqa # type: ignore`
    else if locator.contents()[usize::from(noqa_range.end() + trailing_spaces)..].starts_with('#')
    {
        Edit::deletion(noqa_range.start(), noqa_range.end() + trailing_spaces)
    }
    // Ex) `x = 1 # noqa here`
    else {
    if start_byte - leading_spaces == 0 && end_byte == line.len() {
        // Ex) `# noqa`
        Edit::deletion(Location::new(row + 1, 0), Location::new(row + 2, 0))
    } else if end_byte == line.len() {
        // Ex) `x = 1 # noqa`
        let start_char = line[..start_byte].chars().count();
        let end_char = start_char + line[start_byte..end_byte].chars().count();
        Edit::deletion(
            noqa_range.start() + "# ".text_len(),
            noqa_range.end() + trailing_spaces,
            Location::new(row + 1, start_char - leading_spaces),
            Location::new(row + 1, end_char + trailing_spaces),
        )
    } else if line[end_byte..].trim_start().starts_with('#') {
        // Ex) `x = 1 # noqa # type: ignore`
        let start_char = line[..start_byte].chars().count();
        let end_char = start_char + line[start_byte..end_byte].chars().count();
        Edit::deletion(
            Location::new(row + 1, start_char),
            Location::new(row + 1, end_char + trailing_spaces),
        )
    } else {
        // Ex) `x = 1 # noqa here`
        let start_char = line[..start_byte].chars().count();
        let end_char = start_char + line[start_byte..end_byte].chars().count();
        Edit::deletion(
            Location::new(row + 1, start_char + 1 + 1),
            Location::new(row + 1, end_char + trailing_spaces),
        )
    }
}
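The replacement `delete_noqa` above keeps converting byte offsets into character columns with `line[..start_byte].chars().count()`, because `Location` columns count characters while the machinery that finds the directive yields byte offsets. A small self-contained illustration of why the two can differ:

```rust
/// Convert a byte offset within `line` into a character column.
fn char_column(line: &str, byte_offset: usize) -> usize {
    line[..byte_offset].chars().count()
}

fn main() {
    // "é" is two bytes in UTF-8 but one character, so byte offsets and
    // character columns diverge after it.
    let line = "é = 1  # noqa";
    let start_byte = line.find("# noqa").unwrap();
    assert_eq!(start_byte, 8); // byte offset
    assert_eq!(char_column(line, start_byte), 7); // character column
    println!("byte {start_byte} -> column {}", char_column(line, start_byte));
}
```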
@@ -1,11 +1,10 @@
//! Lint rules based on checking physical lines.

use ruff_text_size::TextSize;
use std::path::Path;

use ruff_diagnostics::Diagnostic;
use ruff_python_ast::newlines::StrExt;
use ruff_python_ast::source_code::{Indexer, Locator, Stylist};
use ruff_python_ast::source_code::{Locator, Stylist};

use crate::registry::Rule;
use crate::rules::flake8_executable::helpers::{extract_shebang, ShebangDirective};
@@ -25,8 +24,8 @@ pub fn check_physical_lines(
    path: &Path,
    locator: &Locator,
    stylist: &Stylist,
    indexer: &Indexer,
    doc_lines: &[TextSize],
    commented_lines: &[usize],
    doc_lines: &[usize],
    settings: &Settings,
    autofix: flags::Autofix,
) -> Vec<Diagnostic> {
@@ -56,19 +55,17 @@ pub fn check_physical_lines(
    let fix_shebang_whitespace =
        autofix.into() && settings.rules.should_fix(Rule::ShebangLeadingWhitespace);

    let mut commented_lines_iter = indexer.comment_ranges().iter().peekable();
    let mut commented_lines_iter = commented_lines.iter().peekable();
    let mut doc_lines_iter = doc_lines.iter().peekable();
    let string_lines = indexer.triple_quoted_string_ranges();

    for (index, line) in locator.contents().universal_newlines().enumerate() {
        while commented_lines_iter
            .next_if(|comment_range| line.range().contains_range(**comment_range))
            .next_if(|lineno| &(index + 1) == *lineno)
            .is_some()
        {
            if enforce_unnecessary_coding_comment {
                if index < 2 {
                    if let Some(diagnostic) =
                        unnecessary_coding_comment(&line, fix_unnecessary_coding_comment)
                        unnecessary_coding_comment(index, line, fix_unnecessary_coding_comment)
                    {
                        diagnostics.push(diagnostic);
                    }
@@ -76,11 +73,15 @@ pub fn check_physical_lines(
            }

            if enforce_blanket_type_ignore {
                blanket_type_ignore(&mut diagnostics, &line);
                if let Some(diagnostic) = blanket_type_ignore(index, line) {
                    diagnostics.push(diagnostic);
                }
            }

            if enforce_blanket_noqa {
                blanket_noqa(&mut diagnostics, &line);
                if let Some(diagnostic) = blanket_noqa(index, line) {
                    diagnostics.push(diagnostic);
                }
            }

            if enforce_shebang_missing
@@ -89,31 +90,31 @@ pub fn check_physical_lines(
                || enforce_shebang_newline
                || enforce_shebang_python
            {
                let shebang = extract_shebang(&line);
                let shebang = extract_shebang(line);
                if enforce_shebang_not_executable {
                    if let Some(diagnostic) = shebang_not_executable(path, line.range(), &shebang) {
                    if let Some(diagnostic) = shebang_not_executable(path, index, &shebang) {
                        diagnostics.push(diagnostic);
                    }
                }
                if enforce_shebang_missing {
                    if !has_any_shebang && matches!(shebang, ShebangDirective::Match(..)) {
                    if !has_any_shebang && matches!(shebang, ShebangDirective::Match(_, _, _, _)) {
                        has_any_shebang = true;
                    }
                }
                if enforce_shebang_whitespace {
                    if let Some(diagnostic) =
                        shebang_whitespace(line.range(), &shebang, fix_shebang_whitespace)
                        shebang_whitespace(index, &shebang, fix_shebang_whitespace)
                    {
                        diagnostics.push(diagnostic);
                    }
                }
                if enforce_shebang_newline {
                    if let Some(diagnostic) = shebang_newline(line.range(), &shebang, index == 0) {
                    if let Some(diagnostic) = shebang_newline(index, &shebang) {
                        diagnostics.push(diagnostic);
                    }
                }
                if enforce_shebang_python {
                    if let Some(diagnostic) = shebang_python(line.range(), &shebang) {
                    if let Some(diagnostic) = shebang_python(index, &shebang) {
                        diagnostics.push(diagnostic);
                    }
                }
@@ -121,40 +122,40 @@ pub fn check_physical_lines(
        }

        while doc_lines_iter
            .next_if(|doc_line_start| line.range().contains(**doc_line_start))
            .next_if(|lineno| &(index + 1) == *lineno)
            .is_some()
        {
            if enforce_doc_line_too_long {
                if let Some(diagnostic) = doc_line_too_long(&line, settings) {
                if let Some(diagnostic) = doc_line_too_long(index, line, settings) {
                    diagnostics.push(diagnostic);
                }
            }
        }

        if enforce_mixed_spaces_and_tabs {
            if let Some(diagnostic) = mixed_spaces_and_tabs(&line) {
            if let Some(diagnostic) = mixed_spaces_and_tabs(index, line) {
                diagnostics.push(diagnostic);
            }
        }

        if enforce_line_too_long {
            if let Some(diagnostic) = line_too_long(&line, settings) {
            if let Some(diagnostic) = line_too_long(index, line, settings) {
                diagnostics.push(diagnostic);
            }
        }

        if enforce_bidirectional_unicode {
            diagnostics.extend(pylint::rules::bidirectional_unicode(&line));
            diagnostics.extend(pylint::rules::bidirectional_unicode(index, line));
        }

        if enforce_trailing_whitespace || enforce_blank_line_contains_whitespace {
            if let Some(diagnostic) = trailing_whitespace(&line, settings, autofix) {
            if let Some(diagnostic) = trailing_whitespace(index, line, settings, autofix) {
                diagnostics.push(diagnostic);
            }
        }

        if enforce_tab_indentation {
            if let Some(diagnostic) = tab_indentation(&line, string_lines) {
            if let Some(diagnostic) = tab_indentation(index, line) {
                diagnostics.push(diagnostic);
            }
        }
@@ -185,7 +186,7 @@ mod tests {
    use rustpython_parser::Mode;
    use std::path::Path;

    use ruff_python_ast::source_code::{Indexer, Locator, Stylist};
    use ruff_python_ast::source_code::{Locator, Stylist};

    use crate::registry::Rule;
    use crate::settings::{flags, Settings};
@@ -197,7 +198,6 @@ mod tests {
        let line = "'\u{4e9c}' * 2"; // 7 in UTF-32, 9 in UTF-8.
        let locator = Locator::new(line);
        let tokens: Vec<_> = lex(line, Mode::Module).collect();
        let indexer = Indexer::from_tokens(&tokens, &locator);
        let stylist = Stylist::from_tokens(&tokens, &locator);

        let check_with_max_line_length = |line_length: usize| {
@@ -205,7 +205,7 @@ mod tests {
                Path::new("foo.py"),
                &locator,
                &stylist,
                &indexer,
                &[],
                &[],
                &Settings {
                    line_length,

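`check_physical_lines` above consumes its sorted side-tables (comment lines, docstring lines) with peekable iterators while walking the file once. The pattern in isolation, using 1-based line numbers as the outgoing version of the code does (the data here is made up for the example):

```rust
fn main() {
    let lines = ["import os", "# comment", "x = 1", "# another"];
    let commented_lines = [2usize, 4]; // sorted, 1-based
    let mut commented = commented_lines.iter().peekable();

    for (index, line) in lines.iter().enumerate() {
        // Advance the side-table only while it matches the current line, so
        // the whole pass stays O(lines + comments).
        while commented.next_if(|lineno| **lineno == index + 1).is_some() {
            println!("comment-only checks run on line {}: {line}", index + 1);
        }
        // ...per-line checks (line length, tabs, trailing whitespace) go here.
    }
}
```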
@@ -64,7 +64,7 @@ pub fn check_tokens(
    // RUF001, RUF002, RUF003
    if enforce_ambiguous_unicode_character {
        let mut state_machine = StateMachine::default();
        for &(ref tok, range) in tokens.iter().flatten() {
        for &(start, ref tok, end) in tokens.iter().flatten() {
            let is_docstring = if enforce_ambiguous_unicode_character {
                state_machine.consume(tok)
            } else {
@@ -74,7 +74,8 @@ pub fn check_tokens(
            if matches!(tok, Tok::String { .. } | Tok::Comment(_)) {
                diagnostics.extend(ruff::rules::ambiguous_unicode_character(
                    locator,
                    range,
                    start,
                    end,
                    if matches!(tok, Tok::String { .. }) {
                        if is_docstring {
                            Context::Docstring
@@ -93,10 +94,10 @@ pub fn check_tokens(

    // ERA001
    if enforce_commented_out_code {
        for (tok, range) in tokens.iter().flatten() {
        for (start, tok, end) in tokens.iter().flatten() {
            if matches!(tok, Tok::Comment(_)) {
                if let Some(diagnostic) =
                    eradicate::rules::commented_out_code(locator, *range, settings, autofix)
                    eradicate::rules::commented_out_code(locator, *start, *end, settings, autofix)
                {
                    diagnostics.push(diagnostic);
                }
@@ -106,11 +107,12 @@ pub fn check_tokens(

    // W605
    if enforce_invalid_escape_sequence {
        for (tok, range) in tokens.iter().flatten() {
        for (start, tok, end) in tokens.iter().flatten() {
            if matches!(tok, Tok::String { .. }) {
                diagnostics.extend(pycodestyle::rules::invalid_escape_sequence(
                    locator,
                    *range,
                    *start,
                    *end,
                    autofix.into() && settings.rules.should_fix(Rule::InvalidEscapeSequence),
                ));
            }
@@ -118,10 +120,10 @@ pub fn check_tokens(
    }
    // PLE2510, PLE2512, PLE2513
    if enforce_invalid_string_character {
        for (tok, range) in tokens.iter().flatten() {
        for (start, tok, end) in tokens.iter().flatten() {
            if matches!(tok, Tok::String { .. }) {
                diagnostics.extend(
                    pylint::rules::invalid_string_characters(locator, *range, autofix.into())
                    pylint::rules::invalid_string_characters(locator, *start, *end, autofix.into())
                        .into_iter()
                        .filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())),
                );
@@ -153,7 +155,6 @@ pub fn check_tokens(
            flake8_implicit_str_concat::rules::implicit(
                tokens,
                &settings.flake8_implicit_str_concat,
                locator,
            )
            .into_iter()
            .filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())),