Compare commits
200 Commits
5246_try30
...
deps/parse
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
35cc48a64c | ||
|
|
0d4f1d86ad | ||
|
|
834910947e | ||
|
|
e34cfeb475 | ||
|
|
bfaa1f9530 | ||
|
|
52aa2fc875 | ||
|
|
e574a6a769 | ||
|
|
b9346a4fd6 | ||
|
|
8001a2f121 | ||
|
|
7dd30f0270 | ||
|
|
21063544f7 | ||
|
|
fb336898a5 | ||
|
|
f5f8eb31ed | ||
|
|
be6c744856 | ||
|
|
94998aedef | ||
|
|
1c0376a72d | ||
|
|
de2a13fcd7 | ||
|
|
cfec636046 | ||
|
|
ae431df146 | ||
|
|
2cd117ba81 | ||
|
|
a956226d95 | ||
|
|
1dd52ad139 | ||
|
|
d692ed0896 | ||
|
|
01b05fe247 | ||
|
|
59dfd0e793 | ||
|
|
c7ff743d30 | ||
|
|
b01a4d8446 | ||
|
|
f012ed2d77 | ||
|
|
06b5c6c06f | ||
|
|
4782675bf9 | ||
|
|
f2e995f78d | ||
|
|
6824b67f44 | ||
|
|
8ccd697020 | ||
|
|
2de6f30929 | ||
|
|
df2efe81c8 | ||
|
|
fa4855e6fe | ||
|
|
3cda89ecaf | ||
|
|
e1c119fde3 | ||
|
|
daa4b72d5f | ||
|
|
f029f8b784 | ||
|
|
bf248ede93 | ||
|
|
086f8a3c12 | ||
|
|
3dc73395ea | ||
|
|
7c32e98d10 | ||
|
|
81b88dcfb9 | ||
|
|
8187bf9f7e | ||
|
|
513de13c46 | ||
|
|
816f7644a9 | ||
|
|
fb46579d30 | ||
|
|
a961f75e13 | ||
|
|
5a4516b812 | ||
|
|
875e04e369 | ||
|
|
12489d3305 | ||
|
|
73228e914c | ||
|
|
af2a087806 | ||
|
|
51a313cca4 | ||
|
|
48309cad08 | ||
|
|
2c2e5b2704 | ||
|
|
5d135d4e0e | ||
|
|
06a04c10e2 | ||
|
|
fee0f43925 | ||
|
|
25e491ad6f | ||
|
|
e7b059cc5c | ||
|
|
5dd5ee0c5b | ||
|
|
f48ab2d621 | ||
|
|
cf48ad7b21 | ||
|
|
f44acc047a | ||
|
|
8420008e79 | ||
|
|
932c9a4789 | ||
|
|
549173b395 | ||
|
|
b1781abffb | ||
|
|
68e0f97354 | ||
|
|
e9771c9c63 | ||
|
|
067b2a6ce6 | ||
|
|
30702c2977 | ||
|
|
34b79ead3d | ||
|
|
19f475ae1f | ||
|
|
2b03bd18f4 | ||
|
|
c87faca884 | ||
|
|
6dbc6d2e59 | ||
|
|
6ce252f0ed | ||
|
|
c029c8b37a | ||
|
|
0ead9a16ac | ||
|
|
653429bef9 | ||
|
|
f0aa6bd4d3 | ||
|
|
5665968b42 | ||
|
|
33a91773f7 | ||
|
|
0666added9 | ||
|
|
7566ca8ff7 | ||
|
|
5dd9e56748 | ||
|
|
f8173daf4c | ||
|
|
511ec0d7bc | ||
|
|
30bec3fcfa | ||
|
|
8b9193ab1f | ||
|
|
62a24e1028 | ||
|
|
f1d367655b | ||
|
|
0c8ec80d7b | ||
|
|
df15ad9696 | ||
|
|
8665a1a19d | ||
|
|
9a8ba58b4c | ||
|
|
715250a179 | ||
|
|
d30e9125eb | ||
|
|
212fd86bf0 | ||
|
|
4b58a9c092 | ||
|
|
b7794f855b | ||
|
|
15c7b6bcf7 | ||
|
|
1782fb8c30 | ||
|
|
987111f5fb | ||
|
|
9f486fa841 | ||
|
|
4dee49d6fa | ||
|
|
e7e2f44440 | ||
|
|
93bfa239b7 | ||
|
|
14f2158e5d | ||
|
|
b8a6ce43a2 | ||
|
|
5ab9538573 | ||
|
|
d19839fe0f | ||
|
|
8dc06d1035 | ||
|
|
120e9d37f1 | ||
|
|
28fe2d334a | ||
|
|
3562d809b2 | ||
|
|
4cac75bc27 | ||
|
|
ed872145fe | ||
|
|
35b04c2fab | ||
|
|
ae4a7ef0ed | ||
|
|
cab3a507bc | ||
|
|
82317ba1fd | ||
|
|
24bcbb85a1 | ||
|
|
089a671adb | ||
|
|
bd8f65814c | ||
|
|
1e894f328c | ||
|
|
52b22ceb6e | ||
|
|
c9d7c0d7d5 | ||
|
|
eb69fe37bf | ||
|
|
27011448ea | ||
|
|
b4d6b7c230 | ||
|
|
fa1341b0db | ||
|
|
401d172e47 | ||
|
|
6a4b216362 | ||
|
|
9dd05424c4 | ||
|
|
ac2e374a5a | ||
|
|
38fa305f35 | ||
|
|
456273a92e | ||
|
|
507961f27d | ||
|
|
a1c559eaa4 | ||
|
|
d0dae7e576 | ||
|
|
efe7c393d1 | ||
|
|
0b9af031fb | ||
|
|
0f9d7283e7 | ||
|
|
bb7303f867 | ||
|
|
60d318ddcf | ||
|
|
5640c310bb | ||
|
|
072358e26b | ||
|
|
aaab9f1597 | ||
|
|
b22e6c3d38 | ||
|
|
40ddc1604c | ||
|
|
bf4b96c5de | ||
|
|
b11492e940 | ||
|
|
cd4718988a | ||
|
|
5908b39102 | ||
|
|
edfe76d673 | ||
|
|
5e5a96ca28 | ||
|
|
3650aaa8b3 | ||
|
|
cc822082a7 | ||
|
|
87ca6171cf | ||
|
|
9713ee4b80 | ||
|
|
528bf2df3a | ||
|
|
8184235f93 | ||
|
|
25981420c4 | ||
|
|
b56b8915ca | ||
|
|
bf02c77fd7 | ||
|
|
ba7041b6bf | ||
|
|
5dff3195d4 | ||
|
|
23363cafd1 | ||
|
|
e4596ebc35 | ||
|
|
c9e02c52a8 | ||
|
|
d097b49371 | ||
|
|
ea270da289 | ||
|
|
cdb9fda3b8 | ||
|
|
a0c0b74b6d | ||
|
|
1a2e444799 | ||
|
|
6f548d9872 | ||
|
|
5a74a8e5a1 | ||
|
|
c5bfd1e877 | ||
|
|
9e1039f823 | ||
|
|
9478454b96 | ||
|
|
9a8e5f7877 | ||
|
|
6fd71e6f53 | ||
|
|
dd60a3865c | ||
|
|
0726dc25c2 | ||
|
|
634ed8975c | ||
|
|
5100c56273 | ||
|
|
26a268a3ec | ||
|
|
324455f580 | ||
|
|
da1c320bfa | ||
|
|
485d997d35 | ||
|
|
d7214e77e6 | ||
|
|
952c623102 | ||
|
|
0a26201643 | ||
|
|
0e67757edb | ||
|
|
c395e44bd7 |
67
.github/workflows/ci.yaml
vendored
67
.github/workflows/ci.yaml
vendored
@@ -16,7 +16,7 @@ env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RUSTUP_MAX_RETRIES: 10
|
||||
PACKAGE_NAME: ruff
|
||||
PYTHON_VERSION: "3.7" # to build abi3 wheels
|
||||
PYTHON_VERSION: "3.11" # to build abi3 wheels
|
||||
|
||||
jobs:
|
||||
cargo-fmt:
|
||||
@@ -31,17 +31,6 @@ jobs:
|
||||
cargo-clippy:
|
||||
name: "cargo clippy"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: "Install Rust toolchain"
|
||||
run: |
|
||||
rustup component add clippy
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- run: cargo clippy --workspace --all-targets --all-features -- -D warnings
|
||||
|
||||
cargo-clippy-wasm:
|
||||
name: "cargo clippy (wasm)"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: "Install Rust toolchain"
|
||||
@@ -49,7 +38,10 @@ jobs:
|
||||
rustup component add clippy
|
||||
rustup target add wasm32-unknown-unknown
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- run: cargo clippy -p ruff_wasm --target wasm32-unknown-unknown --all-features -- -D warnings
|
||||
- name: "Clippy"
|
||||
run: cargo clippy --workspace --all-targets --all-features -- -D warnings
|
||||
- name: "Clippy (wasm)"
|
||||
run: cargo clippy -p ruff_wasm --target wasm32-unknown-unknown --all-features -- -D warnings
|
||||
|
||||
cargo-test:
|
||||
strategy:
|
||||
@@ -62,21 +54,19 @@ jobs:
|
||||
- name: "Install Rust toolchain"
|
||||
run: rustup show
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- run: cargo install cargo-insta
|
||||
# cargo insta 1.30.0 fails for some reason (https://github.com/mitsuhiko/insta/issues/392)
|
||||
- run: cargo install cargo-insta@=1.29.0
|
||||
- run: pip install black[d]==23.1.0
|
||||
- name: "Run tests (Ubuntu)"
|
||||
if: ${{ matrix.os == 'ubuntu-latest' }}
|
||||
run: |
|
||||
cargo insta test --all --all-features --delete-unreferenced-snapshots
|
||||
git diff --exit-code
|
||||
run: cargo insta test --all --all-features --unreferenced reject
|
||||
- name: "Run tests (Windows)"
|
||||
if: ${{ matrix.os == 'windows-latest' }}
|
||||
shell: bash
|
||||
run: |
|
||||
cargo insta test --all --all-features
|
||||
git diff --exit-code
|
||||
# We can't reject unreferenced snapshots on windows because flake8_executable can't run on windows
|
||||
run: cargo insta test --all --all-features
|
||||
- run: cargo test --package ruff_cli --test black_compatibility_test -- --ignored
|
||||
# Skipped as it's currently broken. The resource were moved from the
|
||||
# TODO: Skipped as it's currently broken. The resource were moved from the
|
||||
# ruff_cli to ruff crate, but this test was not updated.
|
||||
if: false
|
||||
# Check for broken links in the documentation.
|
||||
@@ -152,7 +142,7 @@ jobs:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: "3.11"
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
|
||||
- uses: actions/download-artifact@v3
|
||||
name: Download Ruff binary
|
||||
@@ -236,7 +226,7 @@ jobs:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: "3.11"
|
||||
python-version: ${{ env.PYTHON_VERSION }}
|
||||
- name: "Install Rust toolchain"
|
||||
run: rustup show
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
@@ -260,13 +250,24 @@ jobs:
|
||||
docs:
|
||||
name: "mkdocs"
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-python@v4
|
||||
- name: "Add SSH key"
|
||||
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
|
||||
uses: webfactory/ssh-agent@v0.8.0
|
||||
with:
|
||||
ssh-private-key: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY }}
|
||||
- name: "Install Rust toolchain"
|
||||
run: rustup show
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: "Install Insiders dependencies"
|
||||
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
|
||||
run: pip install -r docs/requirements-insiders.txt
|
||||
- name: "Install dependencies"
|
||||
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
|
||||
run: pip install -r docs/requirements.txt
|
||||
- name: "Update README File"
|
||||
run: python scripts/transform_readme.py --target mkdocs
|
||||
@@ -274,5 +275,23 @@ jobs:
|
||||
run: python scripts/generate_mkdocs.py
|
||||
- name: "Check docs formatting"
|
||||
run: python scripts/check_docs_formatted.py
|
||||
- name: "Build Insiders docs"
|
||||
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
|
||||
run: mkdocs build --strict -f mkdocs.insiders.yml
|
||||
- name: "Build docs"
|
||||
run: mkdocs build --strict
|
||||
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
|
||||
run: mkdocs build --strict -f mkdocs.generated.yml
|
||||
|
||||
check-formatter-stability:
|
||||
name: "Check formatter stability"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: "Install Rust toolchain"
|
||||
run: rustup show
|
||||
- name: "Cache rust"
|
||||
uses: Swatinem/rust-cache@v2
|
||||
- name: "Clone CPython 3.10"
|
||||
run: git clone --branch 3.10 --depth 1 https://github.com/python/cpython.git crates/ruff/resources/test/cpython
|
||||
- name: "Check stability"
|
||||
run: cargo run --bin ruff_dev -- format-dev --stability-check crates/ruff/resources/test/cpython
|
||||
|
||||
20
.github/workflows/docs.yaml
vendored
20
.github/workflows/docs.yaml
vendored
@@ -10,20 +10,34 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
CF_API_TOKEN_EXISTS: ${{ secrets.CF_API_TOKEN != '' }}
|
||||
MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-python@v4
|
||||
- name: "Add SSH key"
|
||||
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
|
||||
uses: webfactory/ssh-agent@v0.8.0
|
||||
with:
|
||||
ssh-private-key: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY }}
|
||||
- name: "Install Rust toolchain"
|
||||
run: rustup show
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: "Install Insiders dependencies"
|
||||
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
|
||||
run: pip install -r docs/requirements-insiders.txt
|
||||
- name: "Install dependencies"
|
||||
run: |
|
||||
pip install -r docs/requirements.txt
|
||||
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
|
||||
run: pip install -r docs/requirements.txt
|
||||
- name: "Copy README File"
|
||||
run: |
|
||||
python scripts/transform_readme.py --target mkdocs
|
||||
python scripts/generate_mkdocs.py
|
||||
mkdocs build --strict
|
||||
- name: "Build Insiders docs"
|
||||
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
|
||||
run: mkdocs build --strict -f mkdocs.insiders.yml
|
||||
- name: "Build docs"
|
||||
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
|
||||
run: mkdocs build --strict -f mkdocs.generated.yml
|
||||
- name: "Deploy to Cloudflare Pages"
|
||||
if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
|
||||
uses: cloudflare/wrangler-action@2.0.0
|
||||
|
||||
2
.github/workflows/flake8-to-ruff.yaml
vendored
2
.github/workflows/flake8-to-ruff.yaml
vendored
@@ -9,7 +9,7 @@ concurrency:
|
||||
env:
|
||||
PACKAGE_NAME: flake8-to-ruff
|
||||
CRATE_NAME: flake8_to_ruff
|
||||
PYTHON_VERSION: "3.7" # to build abi3 wheels
|
||||
PYTHON_VERSION: "3.11"
|
||||
CARGO_INCREMENTAL: 0
|
||||
CARGO_NET_RETRY: 10
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
4
.github/workflows/release.yaml
vendored
4
.github/workflows/release.yaml
vendored
@@ -9,7 +9,7 @@ on:
|
||||
sha:
|
||||
description: "Optionally, the full sha of the commit to be released"
|
||||
type: string
|
||||
push:
|
||||
pull_request:
|
||||
paths:
|
||||
# When we change pyproject.toml, we want to ensure that the maturin builds still work
|
||||
- pyproject.toml
|
||||
@@ -20,7 +20,7 @@ concurrency:
|
||||
|
||||
env:
|
||||
PACKAGE_NAME: ruff
|
||||
PYTHON_VERSION: "3.7" # to build abi3 wheels
|
||||
PYTHON_VERSION: "3.11"
|
||||
CARGO_INCREMENTAL: 0
|
||||
CARGO_NET_RETRY: 10
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -1,8 +1,7 @@
|
||||
# Benchmarking cpython (CONTRIBUTING.md)
|
||||
crates/ruff/resources/test/cpython
|
||||
# generate_mkdocs.py
|
||||
mkdocs.yml
|
||||
.overrides
|
||||
mkdocs.generated.yml
|
||||
# check_ecosystem.py
|
||||
ruff-old
|
||||
github_search*.jsonl
|
||||
@@ -11,7 +10,7 @@ schemastore
|
||||
# `maturin develop` and ecosystem_all_check.sh
|
||||
.venv*
|
||||
# Formatter debugging (crates/ruff_python_formatter/README.md)
|
||||
scratch.py
|
||||
scratch.*
|
||||
# Created by `perf` (CONTRIBUTING.md)
|
||||
perf.data
|
||||
perf.data.old
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
# default to true for all rules
|
||||
default: true
|
||||
|
||||
# MD007/unordered-list-indent
|
||||
MD007:
|
||||
indent: 4
|
||||
|
||||
# MD033/no-inline-html
|
||||
MD033: false
|
||||
|
||||
@@ -8,7 +12,4 @@ MD033: false
|
||||
MD041: false
|
||||
|
||||
# MD013/line-length
|
||||
MD013:
|
||||
line_length: 100
|
||||
code_blocks: false
|
||||
ignore_code_blocks: true
|
||||
MD013: false
|
||||
|
||||
@@ -22,6 +22,7 @@ repos:
|
||||
hooks:
|
||||
- id: mdformat
|
||||
additional_dependencies:
|
||||
- mdformat-mkdocs
|
||||
- mdformat-black
|
||||
- black==23.1.0 # Must be the latest version of Black
|
||||
|
||||
|
||||
@@ -1,5 +1,41 @@
|
||||
# Breaking Changes
|
||||
|
||||
## 0.0.277
|
||||
|
||||
### `.ipynb_checkpoints`, `.pyenv`, `.pytest_cache`, and `.vscode` are now excluded by default ([#5513](https://github.com/astral-sh/ruff/pull/5513))
|
||||
|
||||
Ruff maintains a list of default exclusions, which now consists of the following patterns:
|
||||
|
||||
- `.bzr`
|
||||
- `.direnv`
|
||||
- `.eggs`
|
||||
- `.git`
|
||||
- `.git-rewrite`
|
||||
- `.hg`
|
||||
- `.ipynb_checkpoints`
|
||||
- `.mypy_cache`
|
||||
- `.nox`
|
||||
- `.pants.d`
|
||||
- `.pyenv`
|
||||
- `.pytest_cache`
|
||||
- `.pytype`
|
||||
- `.ruff_cache`
|
||||
- `.svn`
|
||||
- `.tox`
|
||||
- `.venv`
|
||||
- `.vscode`
|
||||
- `__pypackages__`
|
||||
- `_build`
|
||||
- `buck-out`
|
||||
- `build`
|
||||
- `dist`
|
||||
- `node_modules`
|
||||
- `venv`
|
||||
|
||||
Previously, the `.ipynb_checkpoints`, `.pyenv`, `.pytest_cache`, and `.vscode` directories were not
|
||||
excluded by default. This change brings Ruff's default exclusions in line with other tools like
|
||||
Black.
|
||||
|
||||
## 0.0.276
|
||||
|
||||
### The `keep-runtime-typing` setting has been reinstated ([#5470](https://github.com/astral-sh/ruff/pull/5470))
|
||||
@@ -12,12 +48,12 @@ Taking `UP006` (rewrite `List[int]` to `list[int]`) as an example, the setting n
|
||||
follows:
|
||||
|
||||
- On Python 3.7 and Python 3.8, setting `keep-runtime-typing = true` will cause Ruff to ignore
|
||||
`UP006` violations, even if `from __future__ import annotations` is present in the file.
|
||||
While such annotations are valid in Python 3.7 and Python 3.8 when combined with
|
||||
`from __future__ import annotations`, they aren't supported by libraries like Pydantic and
|
||||
FastAPI, which rely on runtime type checking.
|
||||
`UP006` violations, even if `from __future__ import annotations` is present in the file.
|
||||
While such annotations are valid in Python 3.7 and Python 3.8 when combined with
|
||||
`from __future__ import annotations`, they aren't supported by libraries like Pydantic and
|
||||
FastAPI, which rely on runtime type checking.
|
||||
- On Python 3.9 and above, the setting has no effect, as `list[int]` is a valid type annotation,
|
||||
and libraries like Pydantic and FastAPI support it without issue.
|
||||
and libraries like Pydantic and FastAPI support it without issue.
|
||||
|
||||
In short: `keep-runtime-typing` can be used to ensure that Ruff doesn't introduce type annotations
|
||||
that are not supported at runtime by the current Python version, which are unsupported by libraries
|
||||
@@ -167,25 +203,25 @@ This change is largely backwards compatible -- most users should experience
|
||||
no change in behavior. However, please note the following exceptions:
|
||||
|
||||
- Subcommands will now fail when invoked with unsupported arguments, instead
|
||||
of silently ignoring them. For example, the following will now fail:
|
||||
of silently ignoring them. For example, the following will now fail:
|
||||
|
||||
```console
|
||||
ruff --clean --respect-gitignore
|
||||
```
|
||||
```console
|
||||
ruff --clean --respect-gitignore
|
||||
```
|
||||
|
||||
(the `clean` command doesn't support `--respect-gitignore`.)
|
||||
(the `clean` command doesn't support `--respect-gitignore`.)
|
||||
|
||||
- The semantics of `ruff <arg>` have changed slightly when `<arg>` is a valid subcommand.
|
||||
For example, prior to this release, running `ruff rule` would run `ruff` over a file or
|
||||
directory called `rule`. Now, `ruff rule` would invoke the `rule` subcommand. This should
|
||||
only impact projects with files or directories named `rule`, `check`, `explain`, `clean`,
|
||||
or `generate-shell-completion`.
|
||||
For example, prior to this release, running `ruff rule` would run `ruff` over a file or
|
||||
directory called `rule`. Now, `ruff rule` would invoke the `rule` subcommand. This should
|
||||
only impact projects with files or directories named `rule`, `check`, `explain`, `clean`,
|
||||
or `generate-shell-completion`.
|
||||
|
||||
- Scripts that invoke ruff should supply `--` before any positional arguments.
|
||||
(The semantics of `ruff -- <arg>` have not changed.)
|
||||
(The semantics of `ruff -- <arg>` have not changed.)
|
||||
|
||||
- `--explain` previously treated `--format grouped` as a synonym for `--format text`.
|
||||
This is no longer supported; instead, use `--format text`.
|
||||
This is no longer supported; instead, use `--format text`.
|
||||
|
||||
## 0.0.226
|
||||
|
||||
|
||||
@@ -6,10 +6,10 @@
|
||||
- [Scope](#scope)
|
||||
- [Enforcement](#enforcement)
|
||||
- [Enforcement Guidelines](#enforcement-guidelines)
|
||||
- [1. Correction](#1-correction)
|
||||
- [2. Warning](#2-warning)
|
||||
- [3. Temporary Ban](#3-temporary-ban)
|
||||
- [4. Permanent Ban](#4-permanent-ban)
|
||||
- [1. Correction](#1-correction)
|
||||
- [2. Warning](#2-warning)
|
||||
- [3. Temporary Ban](#3-temporary-ban)
|
||||
- [4. Permanent Ban](#4-permanent-ban)
|
||||
- [Attribution](#attribution)
|
||||
|
||||
## Our Pledge
|
||||
@@ -33,20 +33,20 @@ community include:
|
||||
- Being respectful of differing opinions, viewpoints, and experiences
|
||||
- Giving and gracefully accepting constructive feedback
|
||||
- Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
and learning from the experience
|
||||
- Focusing on what is best not just for us as individuals, but for the
|
||||
overall community
|
||||
overall community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
- The use of sexualized language or imagery, and sexual attention or
|
||||
advances of any kind
|
||||
advances of any kind
|
||||
- Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
- Public or private harassment
|
||||
- Publishing others' private information, such as a physical or email
|
||||
address, without their explicit permission
|
||||
address, without their explicit permission
|
||||
- Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
professional setting
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
|
||||
354
CONTRIBUTING.md
354
CONTRIBUTING.md
@@ -3,16 +3,29 @@
|
||||
Welcome! We're happy to have you here. Thank you in advance for your contribution to Ruff.
|
||||
|
||||
- [The Basics](#the-basics)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Development](#development)
|
||||
- [Project Structure](#project-structure)
|
||||
- [Example: Adding a new lint rule](#example-adding-a-new-lint-rule)
|
||||
- [Rule naming convention](#rule-naming-convention)
|
||||
- [Rule testing: fixtures and snapshots](#rule-testing-fixtures-and-snapshots)
|
||||
- [Example: Adding a new configuration option](#example-adding-a-new-configuration-option)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Development](#development)
|
||||
- [Project Structure](#project-structure)
|
||||
- [Example: Adding a new lint rule](#example-adding-a-new-lint-rule)
|
||||
- [Rule naming convention](#rule-naming-convention)
|
||||
- [Rule testing: fixtures and snapshots](#rule-testing-fixtures-and-snapshots)
|
||||
- [Example: Adding a new configuration option](#example-adding-a-new-configuration-option)
|
||||
- [MkDocs](#mkdocs)
|
||||
- [Release Process](#release-process)
|
||||
- [Benchmarks](#benchmarking-and-profiling)
|
||||
- [Creating a new release](#creating-a-new-release)
|
||||
- [Ecosystem CI](#ecosystem-ci)
|
||||
- [Benchmarking and Profiling](#benchmarking-and-profiling)
|
||||
- [CPython Benchmark](#cpython-benchmark)
|
||||
- [Microbenchmarks](#microbenchmarks)
|
||||
- [Benchmark-driven Development](#benchmark-driven-development)
|
||||
- [PR Summary](#pr-summary)
|
||||
- [Tips](#tips)
|
||||
- [Profiling Projects](#profiling-projects)
|
||||
- [Linux](#linux)
|
||||
- [Mac](#mac)
|
||||
- [`cargo dev`](#cargo-dev)
|
||||
- [Subsystems](#subsystems)
|
||||
- [Compilation Pipeline](#compilation-pipeline)
|
||||
|
||||
## The Basics
|
||||
|
||||
@@ -23,7 +36,10 @@ For small changes (e.g., bug fixes), feel free to submit a PR.
|
||||
For larger changes (e.g., new lint rules, new functionality, new configuration options), consider
|
||||
creating an [**issue**](https://github.com/astral-sh/ruff/issues) outlining your proposed change.
|
||||
You can also join us on [**Discord**](https://discord.gg/c9MhzV8aU5) to discuss your idea with the
|
||||
community.
|
||||
community. We've labeled [beginner-friendly tasks](https://github.com/astral-sh/ruff/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
|
||||
in the issue tracker, along with [bugs](https://github.com/astral-sh/ruff/issues?q=is%3Aissue+is%3Aopen+label%3Abug)
|
||||
and [improvements](https://github.com/astral-sh/ruff/issues?q=is%3Aissue+is%3Aopen+label%3Aaccepted)
|
||||
that are ready for contributions.
|
||||
|
||||
If you're looking for a place to start, we recommend implementing a new lint rule (see:
|
||||
[_Adding a new lint rule_](#example-adding-a-new-lint-rule), which will allow you to learn from and
|
||||
@@ -34,6 +50,8 @@ As a concrete example: consider taking on one of the rules from the [`flake8-pyi
|
||||
plugin, and looking to the originating [Python source](https://github.com/PyCQA/flake8-pyi) for
|
||||
guidance.
|
||||
|
||||
If you have suggestions on how we might improve the contributing documentation, [let us know](https://github.com/astral-sh/ruff/discussions/5693)!
|
||||
|
||||
### Prerequisites
|
||||
|
||||
Ruff is written in Rust. You'll need to install the
|
||||
@@ -92,48 +110,56 @@ The vast majority of the code, including all lint rules, lives in the `ruff` cra
|
||||
At time of writing, the repository includes the following crates:
|
||||
|
||||
- `crates/ruff`: library crate containing all lint rules and the core logic for running them.
|
||||
If you're working on a rule, this is the crate for you.
|
||||
- `crates/ruff_benchmark`: binary crate for running micro-benchmarks.
|
||||
- `crates/ruff_cache`: library crate for caching lint results.
|
||||
- `crates/ruff_cli`: binary crate containing Ruff's command-line interface.
|
||||
- `crates/ruff_dev`: binary crate containing utilities used in the development of Ruff itself (e.g.,
|
||||
`cargo dev generate-all`).
|
||||
- `crates/ruff_diagnostics`: library crate for the lint diagnostics APIs.
|
||||
- `crates/ruff_formatter`: library crate for generic code formatting logic based on an intermediate
|
||||
representation.
|
||||
`cargo dev generate-all`), see the [`cargo dev`](#cargo-dev) section below.
|
||||
- `crates/ruff_diagnostics`: library crate for the rule-independent abstractions in the lint
|
||||
diagnostics APIs.
|
||||
- `crates/ruff_formatter`: library crate for language agnostic code formatting logic based on an
|
||||
intermediate representation. The backend for `ruff_python_formatter`.
|
||||
- `crates/ruff_index`: library crate inspired by `rustc_index`.
|
||||
- `crates/ruff_macros`: library crate containing macros used by Ruff.
|
||||
- `crates/ruff_python_ast`: library crate containing Python-specific AST types and utilities.
|
||||
- `crates/ruff_python_formatter`: library crate containing Python-specific code formatting logic.
|
||||
- `crates/ruff_macros`: proc macro crate containing macros used by Ruff.
|
||||
- `crates/ruff_python_ast`: library crate containing Python-specific AST types and utilities. Note
|
||||
that the AST schema itself is defined in the
|
||||
[rustpython-ast](https://github.com/astral-sh/RustPython-Parser) crate.
|
||||
- `crates/ruff_python_formatter`: library crate implementing the Python formatter. Emits an
|
||||
intermediate representation for each node, which `ruff_formatter` prints based on the configured
|
||||
line length.
|
||||
- `crates/ruff_python_semantic`: library crate containing Python-specific semantic analysis logic,
|
||||
including Ruff's semantic model.
|
||||
- `crates/ruff_python_stdlib`: library crate containing Python-specific standard library data.
|
||||
including Ruff's semantic model. Used to resolve queries like "What import does this variable
|
||||
refer to?"
|
||||
- `crates/ruff_python_stdlib`: library crate containing Python-specific standard library data, e.g.
|
||||
the names of all built-in exceptions and which standard library types are immutable.
|
||||
- `crates/ruff_python_whitespace`: library crate containing Python-specific whitespace analysis
|
||||
logic.
|
||||
logic (indentation and newlines).
|
||||
- `crates/ruff_rustpython`: library crate containing `RustPython`-specific utilities.
|
||||
- `crates/ruff_testing_macros`: library crate containing macros used for testing Ruff.
|
||||
- `crates/ruff_textwrap`: library crate to indent and dedent Python source code.
|
||||
- `crates/ruff_wasm`: library crate for exposing Ruff as a WebAssembly module.
|
||||
- `crates/ruff_wasm`: library crate for exposing Ruff as a WebAssembly module. Powers the
|
||||
[Ruff Playground](https://play.ruff.rs/).
|
||||
|
||||
### Example: Adding a new lint rule
|
||||
|
||||
At a high level, the steps involved in adding a new lint rule are as follows:
|
||||
|
||||
1. Determine a name for the new rule as per our [rule naming convention](#rule-naming-convention)
|
||||
(e.g., `AssertFalse`, as in, "allow `assert False`").
|
||||
(e.g., `AssertFalse`, as in, "allow `assert False`").
|
||||
|
||||
1. Create a file for your rule (e.g., `crates/ruff/src/rules/flake8_bugbear/rules/assert_false.rs`).
|
||||
|
||||
1. In that file, define a violation struct (e.g., `pub struct AssertFalse`). You can grep for
|
||||
`#[violation]` to see examples.
|
||||
`#[violation]` to see examples.
|
||||
|
||||
1. In that file, define a function that adds the violation to the diagnostic list as appropriate
|
||||
(e.g., `pub(crate) fn assert_false`) based on whatever inputs are required for the rule (e.g.,
|
||||
an `ast::StmtAssert` node).
|
||||
(e.g., `pub(crate) fn assert_false`) based on whatever inputs are required for the rule (e.g.,
|
||||
an `ast::StmtAssert` node).
|
||||
|
||||
1. Define the logic for triggering the violation in `crates/ruff/src/checkers/ast/mod.rs` (for
|
||||
AST-based checks), `crates/ruff/src/checkers/tokens.rs` (for token-based checks),
|
||||
`crates/ruff/src/checkers/lines.rs` (for text-based checks), or
|
||||
`crates/ruff/src/checkers/filesystem.rs` (for filesystem-based checks).
|
||||
AST-based checks), `crates/ruff/src/checkers/tokens.rs` (for token-based checks),
|
||||
`crates/ruff/src/checkers/lines.rs` (for text-based checks), or
|
||||
`crates/ruff/src/checkers/filesystem.rs` (for filesystem-based checks).
|
||||
|
||||
1. Map the violation struct to a rule code in `crates/ruff/src/codes.rs` (e.g., `B011`).
|
||||
|
||||
@@ -166,13 +192,13 @@ suppression comment would be framed as "allow `assert False`".
|
||||
As such, rule names should...
|
||||
|
||||
- Highlight the pattern that is being linted against, rather than the preferred alternative.
|
||||
For example, `AssertFalse` guards against `assert False` statements.
|
||||
For example, `AssertFalse` guards against `assert False` statements.
|
||||
|
||||
- _Not_ contain instructions on how to fix the violation, which instead belong in the rule
|
||||
documentation and the `autofix_title`.
|
||||
documentation and the `autofix_title`.
|
||||
|
||||
- _Not_ contain a redundant prefix, like `Disallow` or `Banned`, which are already implied by the
|
||||
convention.
|
||||
convention.
|
||||
|
||||
When re-implementing rules from other linters, we prioritize adhering to this convention over
|
||||
preserving the original rule name.
|
||||
@@ -187,25 +213,25 @@ Ruff's output for each fixture, which you can then commit alongside your changes
|
||||
Once you've completed the code for the rule itself, you can define tests with the following steps:
|
||||
|
||||
1. Add a Python file to `crates/ruff/resources/test/fixtures/[linter]` that contains the code you
|
||||
want to test. The file name should match the rule name (e.g., `E402.py`), and it should include
|
||||
examples of both violations and non-violations.
|
||||
want to test. The file name should match the rule name (e.g., `E402.py`), and it should include
|
||||
examples of both violations and non-violations.
|
||||
|
||||
1. Run Ruff locally against your file and verify the output is as expected. Once you're satisfied
|
||||
with the output (you see the violations you expect, and no others), proceed to the next step.
|
||||
For example, if you're adding a new rule named `E402`, you would run:
|
||||
with the output (you see the violations you expect, and no others), proceed to the next step.
|
||||
For example, if you're adding a new rule named `E402`, you would run:
|
||||
|
||||
```shell
|
||||
cargo run -p ruff_cli -- check crates/ruff/resources/test/fixtures/pycodestyle/E402.py --no-cache
|
||||
```
|
||||
```shell
|
||||
cargo run -p ruff_cli -- check crates/ruff/resources/test/fixtures/pycodestyle/E402.py --no-cache
|
||||
```
|
||||
|
||||
1. Add the test to the relevant `crates/ruff/src/rules/[linter]/mod.rs` file. If you're contributing
|
||||
a rule to a pre-existing set, you should be able to find a similar example to pattern-match
|
||||
against. If you're adding a new linter, you'll need to create a new `mod.rs` file (see,
|
||||
e.g., `crates/ruff/src/rules/flake8_bugbear/mod.rs`)
|
||||
a rule to a pre-existing set, you should be able to find a similar example to pattern-match
|
||||
against. If you're adding a new linter, you'll need to create a new `mod.rs` file (see,
|
||||
e.g., `crates/ruff/src/rules/flake8_bugbear/mod.rs`)
|
||||
|
||||
1. Run `cargo test`. Your test will fail, but you'll be prompted to follow-up
|
||||
with `cargo insta review`. Run `cargo insta review`, review and accept the generated snapshot,
|
||||
then commit the snapshot file alongside the rest of your changes.
|
||||
with `cargo insta review`. Run `cargo insta review`, review and accept the generated snapshot,
|
||||
then commit the snapshot file alongside the rest of your changes.
|
||||
|
||||
1. Run `cargo test` again to ensure that your test passes.
|
||||
|
||||
@@ -243,21 +269,25 @@ To preview any changes to the documentation locally:
|
||||
|
||||
1. Install MkDocs and Material for MkDocs with:
|
||||
|
||||
```shell
|
||||
pip install -r docs/requirements.txt
|
||||
```
|
||||
```shell
|
||||
pip install -r docs/requirements.txt
|
||||
```
|
||||
|
||||
1. Generate the MkDocs site with:
|
||||
|
||||
```shell
|
||||
python scripts/generate_mkdocs.py
|
||||
```
|
||||
```shell
|
||||
python scripts/generate_mkdocs.py
|
||||
```
|
||||
|
||||
1. Run the development server with:
|
||||
|
||||
```shell
|
||||
mkdocs serve
|
||||
```
|
||||
```shell
|
||||
# For contributors.
|
||||
mkdocs serve -f mkdocs.generated.yml
|
||||
|
||||
# For members of the Astral org, which has access to MkDocs Insiders via sponsorship.
|
||||
mkdocs serve -f mkdocs.insiders.yml
|
||||
```
|
||||
|
||||
The documentation should then be available locally at
|
||||
[http://127.0.0.1:8000/docs/](http://127.0.0.1:8000/docs/).
|
||||
@@ -278,20 +308,19 @@ even patch releases may contain [non-backwards-compatible changes](https://semve
|
||||
1. Create a PR with the version and `BREAKING_CHANGES.md` updated
|
||||
1. Merge the PR
|
||||
1. Run the release workflow with the version number (without starting `v`) as input. Make sure
|
||||
main has your merged PR as last commit
|
||||
main has your merged PR as last commit
|
||||
1. The release workflow will do the following:
|
||||
1. Build all the assets. If this fails (even though we tested in step 4), we haven’t tagged or
|
||||
uploaded anything, you can restart after pushing a fix
|
||||
1. Upload to pypi
|
||||
1. Create and push the git tag (from pyproject.toml). We create the git tag only here
|
||||
because we can't change it ([#4468](https://github.com/charliermarsh/ruff/issues/4468)), so
|
||||
we want to make sure everything up to and including publishing to pypi worked.
|
||||
1. Attach artifacts to draft GitHub release
|
||||
1. Trigger downstream repositories. This can fail without causing fallout, it is possible (if
|
||||
inconvenient) to trigger the downstream jobs manually
|
||||
1. Create release notes in GitHub UI and promote from draft to proper release (<https://github.com/charliermarsh/ruff/releases/new>)
|
||||
1. Build all the assets. If this fails (even though we tested in step 4), we haven’t tagged or
|
||||
uploaded anything, you can restart after pushing a fix.
|
||||
1. Upload to PyPI.
|
||||
1. Create and push the Git tag (as extracted from `pyproject.toml`). We create the Git tag only
|
||||
after building the wheels and uploading to PyPI, since we can't delete or modify the tag ([#4468](https://github.com/charliermarsh/ruff/issues/4468)).
|
||||
1. Attach artifacts to draft GitHub release
|
||||
1. Trigger downstream repositories. This can fail non-catastrophically, as we can run any
|
||||
downstream jobs manually if needed.
|
||||
1. Create release notes in GitHub UI and promote from draft.
|
||||
1. If needed, [update the schemastore](https://github.com/charliermarsh/ruff/blob/main/scripts/update_schemastore.py)
|
||||
1. If needed, update ruff-lsp and ruff-vscode
|
||||
1. If needed, update the `ruff-lsp` and `ruff-vscode` repositories.
|
||||
|
||||
## Ecosystem CI
|
||||
|
||||
@@ -390,6 +419,13 @@ Summary
|
||||
159.43 ± 2.48 times faster than 'pycodestyle crates/ruff/resources/test/cpython'
|
||||
```
|
||||
|
||||
To benchmark a subset of rules, e.g. `LineTooLong` and `DocLineTooLong`:
|
||||
|
||||
```shell
|
||||
cargo build --release && hyperfine --warmup 10 \
|
||||
"./target/release/ruff ./crates/ruff/resources/test/cpython/ --no-cache -e --select W505,E501"
|
||||
```
|
||||
|
||||
You can run `poetry install` from `./scripts/benchmarks` to create a working environment for the
|
||||
above. All reported benchmarks were computed using the versions specified by
|
||||
`./scripts/benchmarks/pyproject.toml` on Python 3.11.
|
||||
@@ -434,7 +470,7 @@ Benchmark 1: find . -type f -name "*.py" | xargs -P 0 pyupgrade --py311-plus
|
||||
Range (min … max): 29.813 s … 30.356 s 10 runs
|
||||
```
|
||||
|
||||
## Microbenchmarks
|
||||
### Microbenchmarks
|
||||
|
||||
The `ruff_benchmark` crate benchmarks the linter and the formatter on individual files.
|
||||
|
||||
@@ -444,7 +480,7 @@ You can run the benchmarks with
|
||||
cargo benchmark
|
||||
```
|
||||
|
||||
### Benchmark driven Development
|
||||
#### Benchmark-driven Development
|
||||
|
||||
Ruff uses [Criterion.rs](https://bheisler.github.io/criterion.rs/book/) for benchmarks. You can use
|
||||
`--save-baseline=<name>` to store an initial baseline benchmark (e.g. on `main`) and then use
|
||||
@@ -459,7 +495,7 @@ cargo benchmark --save-baseline=main
|
||||
cargo benchmark --baseline=main
|
||||
```
|
||||
|
||||
### PR Summary
|
||||
#### PR Summary
|
||||
|
||||
You can use `--save-baseline` and `critcmp` to get a pretty comparison between two recordings.
|
||||
This is useful to illustrate the improvements of a PR.
|
||||
@@ -480,21 +516,21 @@ You must install [`critcmp`](https://github.com/BurntSushi/critcmp) for the comp
|
||||
cargo install critcmp
|
||||
```
|
||||
|
||||
### Tips
|
||||
#### Tips
|
||||
|
||||
- Use `cargo benchmark <filter>` to only run specific benchmarks. For example: `cargo benchmark linter/pydantic`
|
||||
to only run the pydantic tests.
|
||||
to only run the pydantic tests.
|
||||
- Use `cargo benchmark --quiet` for a more cleaned up output (without statistical relevance)
|
||||
- Use `cargo benchmark --quick` to get faster results (more prone to noise)
|
||||
|
||||
## Profiling Projects
|
||||
### Profiling Projects
|
||||
|
||||
You can either use the microbenchmarks from above or a project directory for benchmarking. There
|
||||
are a lot of profiling tools out there,
|
||||
[The Rust Performance Book](https://nnethercote.github.io/perf-book/profiling.html) lists some
|
||||
examples.
|
||||
|
||||
### Linux
|
||||
#### Linux
|
||||
|
||||
Install `perf` and build `ruff_benchmark` with the `release-debug` profile and then run it with perf
|
||||
|
||||
@@ -527,7 +563,7 @@ An alternative is to convert the perf data to `flamegraph.svg` using
|
||||
flamegraph --perfdata perf.data
|
||||
```
|
||||
|
||||
### Mac
|
||||
#### Mac
|
||||
|
||||
Install [`cargo-instruments`](https://crates.io/crates/cargo-instruments):
|
||||
|
||||
@@ -542,7 +578,179 @@ cargo instruments -t time --bench linter --profile release-debug -p ruff_benchma
|
||||
```
|
||||
|
||||
- `-t`: Specifies what to profile. Useful options are `time` to profile the wall time and `alloc`
|
||||
for profiling the allocations.
|
||||
for profiling the allocations.
|
||||
- You may want to pass an additional filter to run a single test file
|
||||
|
||||
Otherwise, follow the instructions from the Linux section.
|
||||
|
||||
## `cargo dev`
|
||||
|
||||
`cargo dev` is a shortcut for `cargo run --package ruff_dev --bin ruff_dev`. You can run some useful
|
||||
utils with it:
|
||||
|
||||
- `cargo dev print-ast <file>`: Print the AST of a python file using the
|
||||
[RustPython parser](https://github.com/astral-sh/RustPython-Parser/tree/main/parser) that is
|
||||
mainly used in Ruff. For `if True: pass # comment`, you can see the syntax tree, the byte offsets
|
||||
for start and stop of each node and also how the `:` token, the comment and whitespace are not
|
||||
represented anymore:
|
||||
|
||||
```text
|
||||
[
|
||||
If(
|
||||
StmtIf {
|
||||
range: 0..13,
|
||||
test: Constant(
|
||||
ExprConstant {
|
||||
range: 3..7,
|
||||
value: Bool(
|
||||
true,
|
||||
),
|
||||
kind: None,
|
||||
},
|
||||
),
|
||||
body: [
|
||||
Pass(
|
||||
StmtPass {
|
||||
range: 9..13,
|
||||
},
|
||||
),
|
||||
],
|
||||
orelse: [],
|
||||
},
|
||||
),
|
||||
]
|
||||
```
|
||||
|
||||
- `cargo dev print-tokens <file>`: Print the tokens that the AST is built upon. Again for
|
||||
`if True: pass # comment`:
|
||||
|
||||
```text
|
||||
0 If 2
|
||||
3 True 7
|
||||
7 Colon 8
|
||||
9 Pass 13
|
||||
14 Comment(
|
||||
"# comment",
|
||||
) 23
|
||||
23 Newline 24
|
||||
```
|
||||
|
||||
- `cargo dev print-cst <file>`: Print the CST of a python file using
|
||||
[LibCST](https://github.com/Instagram/LibCST), which is used in addition to the RustPython parser
|
||||
in Ruff. E.g. for `if True: pass # comment` everything including the whitespace is represented:
|
||||
|
||||
```text
|
||||
Module {
|
||||
body: [
|
||||
Compound(
|
||||
If(
|
||||
If {
|
||||
test: Name(
|
||||
Name {
|
||||
value: "True",
|
||||
lpar: [],
|
||||
rpar: [],
|
||||
},
|
||||
),
|
||||
body: SimpleStatementSuite(
|
||||
SimpleStatementSuite {
|
||||
body: [
|
||||
Pass(
|
||||
Pass {
|
||||
semicolon: None,
|
||||
},
|
||||
),
|
||||
],
|
||||
leading_whitespace: SimpleWhitespace(
|
||||
" ",
|
||||
),
|
||||
trailing_whitespace: TrailingWhitespace {
|
||||
whitespace: SimpleWhitespace(
|
||||
" ",
|
||||
),
|
||||
comment: Some(
|
||||
Comment(
|
||||
"# comment",
|
||||
),
|
||||
),
|
||||
newline: Newline(
|
||||
None,
|
||||
Real,
|
||||
),
|
||||
},
|
||||
},
|
||||
),
|
||||
orelse: None,
|
||||
leading_lines: [],
|
||||
whitespace_before_test: SimpleWhitespace(
|
||||
" ",
|
||||
),
|
||||
whitespace_after_test: SimpleWhitespace(
|
||||
"",
|
||||
),
|
||||
is_elif: false,
|
||||
},
|
||||
),
|
||||
),
|
||||
],
|
||||
header: [],
|
||||
footer: [],
|
||||
default_indent: " ",
|
||||
default_newline: "\n",
|
||||
has_trailing_newline: true,
|
||||
encoding: "utf-8",
|
||||
}
|
||||
```
|
||||
|
||||
- `cargo dev generate-all`: Update `ruff.schema.json`, `docs/configuration.md` and `docs/rules`.
|
||||
You can also set `RUFF_UPDATE_SCHEMA=1` to update `ruff.schema.json` during `cargo test`.
|
||||
- `cargo dev generate-cli-help`, `cargo dev generate-docs` and `cargo dev generate-json-schema`:
|
||||
Update just `docs/configuration.md`, `docs/rules` and `ruff.schema.json` respectively.
|
||||
- `cargo dev generate-options`: Generate a markdown-compatible table of all `pyproject.toml`
|
||||
options. Used for <https://beta.ruff.rs/docs/settings/>
|
||||
- `cargo dev generate-rules-table`: Generate a markdown-compatible table of all rules. Used for <https://beta.ruff.rs/docs/rules/>
|
||||
- `cargo dev round-trip <python file or jupyter notebook>`: Read a Python file or Jupyter Notebook,
|
||||
parse it, serialize the parsed representation and write it back. Used to check how good our
|
||||
representation is so that fixes don't rewrite irrelevant parts of a file.
|
||||
- `cargo dev format_dev`: See ruff_python_formatter README.md
|
||||
|
||||
## Subsystems
|
||||
|
||||
### Compilation Pipeline
|
||||
|
||||
If we view Ruff as a compiler, in which the inputs are paths to Python files and the outputs are
|
||||
diagnostics, then our current compilation pipeline proceeds as follows:
|
||||
|
||||
1. **File discovery**: Given paths like `foo/`, locate all Python files in any specified subdirectories, taking into account our hierarchical settings system and any `exclude` options.
|
||||
|
||||
1. **Package resolution**: Determine the “package root” for every file by traversing over its parent directories and looking for `__init__.py` files.
|
||||
|
||||
1. **Cache initialization**: For every “package root”, initialize an empty cache.
|
||||
|
||||
1. **Analysis**: For every file, in parallel:
|
||||
|
||||
1. **Cache read**: If the file is cached (i.e., its modification timestamp hasn't changed since it was last analyzed), short-circuit, and return the cached diagnostics.
|
||||
|
||||
1. **Tokenization**: Run the lexer over the file to generate a token stream.
|
||||
|
||||
1. **Indexing**: Extract metadata from the token stream, such as: comment ranges, `# noqa` locations, `# isort: off` locations, “doc lines”, etc.
|
||||
|
||||
1. **Token-based rule evaluation**: Run any lint rules that are based on the contents of the token stream (e.g., commented-out code).
|
||||
|
||||
1. **Filesystem-based rule evaluation**: Run any lint rules that are based on the contents of the filesystem (e.g., lack of `__init__.py` file in a package).
|
||||
|
||||
1. **Logical line-based rule evaluation**: Run any lint rules that are based on logical lines (e.g., stylistic rules).
|
||||
|
||||
1. **Parsing**: Run the parser over the token stream to produce an AST. (This consumes the token stream, so anything that relies on the token stream needs to happen before parsing.)
|
||||
|
||||
1. **AST-based rule evaluation**: Run any lint rules that are based on the AST. This includes the vast majority of lint rules. As part of this step, we also build the semantic model for the current file as we traverse over the AST. Some lint rules are evaluated eagerly, as we iterate over the AST, while others are evaluated in a deferred manner (e.g., unused imports, since we can’t determine whether an import is unused until we’ve finished analyzing the entire file), after we’ve finished the initial traversal.
|
||||
|
||||
1. **Import-based rule evaluation**: Run any lint rules that are based on the module’s imports (e.g., import sorting). These could, in theory, be included in the AST-based rule evaluation phase — they’re just separated for simplicity.
|
||||
|
||||
1. **Physical line-based rule evaluation**: Run any lint rules that are based on physical lines (e.g., line-length).
|
||||
|
||||
1. **Suppression enforcement**: Remove any violations that are suppressed via `# noqa` directives or `per-file-ignores`.
|
||||
|
||||
1. **Cache write**: Write the generated diagnostics to the package cache using the file as a key.
|
||||
|
||||
1. **Reporting**: Print diagnostics in the specified format (text, JSON, etc.), to the specified output channel (stdout, a file, etc.).
|
||||
|
||||
448
Cargo.lock
generated
448
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
20
Cargo.toml
20
Cargo.toml
@@ -45,19 +45,21 @@ strum = { version = "0.24.1", features = ["strum_macros"] }
|
||||
strum_macros = { version = "0.24.3" }
|
||||
syn = { version = "2.0.15" }
|
||||
test-case = { version = "3.0.0" }
|
||||
thiserror = { version = "1.0.43" }
|
||||
toml = { version = "0.7.2" }
|
||||
wsl = { version = "0.1.0" }
|
||||
|
||||
# v0.0.1
|
||||
libcst = { git = "https://github.com/charliermarsh/LibCST", rev = "80e4c1399f95e5beb532fdd1e209ad2dbb470438" }
|
||||
# v1.0.1
|
||||
libcst = { git = "https://github.com/Instagram/LibCST.git", rev = "3cacca1a1029f05707e50703b49fe3dd860aa839", default-features = false }
|
||||
|
||||
# Please tag the RustPython version everytime you update its revision here and in fuzz/Cargo.toml
|
||||
# Please tag the RustPython version every time you update its revision here and in fuzz/Cargo.toml
|
||||
# Tagging the version ensures that older ruff versions continue to build from source even when we rebase our RustPython fork.
|
||||
# Current tag: v0.0.7
|
||||
ruff_text_size = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "c174bbf1f29527edd43d432326327f16f47ab9e0" }
|
||||
rustpython-ast = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "c174bbf1f29527edd43d432326327f16f47ab9e0" , default-features = false, features = ["num-bigint"]}
|
||||
rustpython-format = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "c174bbf1f29527edd43d432326327f16f47ab9e0", default-features = false, features = ["num-bigint"] }
|
||||
rustpython-literal = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "c174bbf1f29527edd43d432326327f16f47ab9e0", default-features = false }
|
||||
rustpython-parser = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "c174bbf1f29527edd43d432326327f16f47ab9e0" , default-features = false, features = ["full-lexer", "num-bigint"] }
|
||||
# Note: As of tag v0.0.8 we are cherry-picking commits instead of rebasing so the tag is not necessary
|
||||
ruff_text_size = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "126652b684910c29a7bcc32293d4ca0f81454e34" }
|
||||
rustpython-ast = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "126652b684910c29a7bcc32293d4ca0f81454e34" , default-features = false, features = ["num-bigint"]}
|
||||
rustpython-format = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "126652b684910c29a7bcc32293d4ca0f81454e34", default-features = false, features = ["num-bigint"] }
|
||||
rustpython-literal = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "126652b684910c29a7bcc32293d4ca0f81454e34", default-features = false }
|
||||
rustpython-parser = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "126652b684910c29a7bcc32293d4ca0f81454e34" , default-features = false, features = ["full-lexer", "num-bigint"] }
|
||||
|
||||
[profile.release]
|
||||
lto = "fat"
|
||||
|
||||
25
README.md
25
README.md
@@ -32,9 +32,10 @@ An extremely fast Python linter, written in Rust.
|
||||
- 🔧 Autofix support, for automatic error correction (e.g., automatically remove unused imports)
|
||||
- 📏 Over [500 built-in rules](https://beta.ruff.rs/docs/rules/)
|
||||
- ⚖️ [Near-parity](https://beta.ruff.rs/docs/faq/#how-does-ruff-compare-to-flake8) with the
|
||||
built-in Flake8 rule set
|
||||
built-in Flake8 rule set
|
||||
- 🔌 Native re-implementations of dozens of Flake8 plugins, like flake8-bugbear
|
||||
- ⌨️ First-party editor integrations for [VS Code](https://github.com/astral-sh/ruff-vscode) and [more](https://github.com/astral-sh/ruff-lsp)
|
||||
- ⌨️ First-party [editor integrations](https://beta.ruff.rs/docs/editor-integrations/) for
|
||||
[VS Code](https://github.com/astral-sh/ruff-vscode) and [more](https://github.com/astral-sh/ruff-lsp)
|
||||
- 🌎 Monorepo-friendly, with [hierarchical and cascading configuration](https://beta.ruff.rs/docs/configuration/#pyprojecttoml-discovery)
|
||||
|
||||
Ruff aims to be orders of magnitude faster than alternative tools while integrating more
|
||||
@@ -139,7 +140,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com) hook:
|
||||
```yaml
|
||||
- repo: https://github.com/astral-sh/ruff-pre-commit
|
||||
# Ruff version.
|
||||
rev: v0.0.276
|
||||
rev: v0.0.278
|
||||
hooks:
|
||||
- id: ruff
|
||||
```
|
||||
@@ -347,6 +348,7 @@ Ruff is released under the MIT license.
|
||||
Ruff is used by a number of major open-source projects and companies, including:
|
||||
|
||||
- Amazon ([AWS SAM](https://github.com/aws/serverless-application-model))
|
||||
- Anthropic ([Python SDK](https://github.com/anthropics/anthropic-sdk-python))
|
||||
- [Apache Airflow](https://github.com/apache/airflow)
|
||||
- AstraZeneca ([Magnus](https://github.com/AstraZeneca/magnus-core))
|
||||
- Benchling ([Refac](https://github.com/benchling/refac))
|
||||
@@ -356,26 +358,30 @@ Ruff is used by a number of major open-source projects and companies, including:
|
||||
- [DVC](https://github.com/iterative/dvc)
|
||||
- [Dagger](https://github.com/dagger/dagger)
|
||||
- [Dagster](https://github.com/dagster-io/dagster)
|
||||
- Databricks ([MLflow](https://github.com/mlflow/mlflow))
|
||||
- [FastAPI](https://github.com/tiangolo/fastapi)
|
||||
- [Gradio](https://github.com/gradio-app/gradio)
|
||||
- [Great Expectations](https://github.com/great-expectations/great_expectations)
|
||||
- [HTTPX](https://github.com/encode/httpx)
|
||||
- Hugging Face ([Transformers](https://github.com/huggingface/transformers),
|
||||
[Datasets](https://github.com/huggingface/datasets),
|
||||
[Diffusers](https://github.com/huggingface/diffusers))
|
||||
[Datasets](https://github.com/huggingface/datasets),
|
||||
[Diffusers](https://github.com/huggingface/diffusers))
|
||||
- [Hatch](https://github.com/pypa/hatch)
|
||||
- [Home Assistant](https://github.com/home-assistant/core)
|
||||
- ING Bank ([popmon](https://github.com/ing-bank/popmon), [probatus](https://github.com/ing-bank/probatus))
|
||||
- [Ibis](https://github.com/ibis-project/ibis)
|
||||
- [Jupyter](https://github.com/jupyter-server/jupyter_server)
|
||||
- [LangChain](https://github.com/hwchase17/langchain)
|
||||
- [LlamaIndex](https://github.com/jerryjliu/llama_index)
|
||||
- Matrix ([Synapse](https://github.com/matrix-org/synapse))
|
||||
- [MegaLinter](https://github.com/oxsecurity/megalinter)
|
||||
- Meltano ([Meltano CLI](https://github.com/meltano/meltano), [Singer SDK](https://github.com/meltano/sdk))
|
||||
- Microsoft ([Semantic Kernel](https://github.com/microsoft/semantic-kernel),
|
||||
[ONNX Runtime](https://github.com/microsoft/onnxruntime),
|
||||
[LightGBM](https://github.com/microsoft/LightGBM))
|
||||
- Modern Treasury ([Python SDK](https://github.com/Modern-Treasury/modern-treasury-python-sdk))
|
||||
- Mozilla ([Firefox](https://github.com/mozilla/gecko-dev))
|
||||
- [MegaLinter](https://github.com/oxsecurity/megalinter)
|
||||
- Microsoft ([Semantic Kernel](https://github.com/microsoft/semantic-kernel),
|
||||
[ONNX Runtime](https://github.com/microsoft/onnxruntime),
|
||||
[LightGBM](https://github.com/microsoft/LightGBM))
|
||||
- [Mypy](https://github.com/python/mypy)
|
||||
- Netflix ([Dispatch](https://github.com/Netflix/dispatch))
|
||||
- [Neon](https://github.com/neondatabase/neon)
|
||||
- [ONNX](https://github.com/onnx/onnx)
|
||||
@@ -411,6 +417,7 @@ Ruff is used by a number of major open-source projects and companies, including:
|
||||
- [featuretools](https://github.com/alteryx/featuretools)
|
||||
- [meson-python](https://github.com/mesonbuild/meson-python)
|
||||
- [nox](https://github.com/wntrblm/nox)
|
||||
- [pip](https://github.com/pypa/pip)
|
||||
|
||||
### Show Your Support
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@
|
||||
extend-exclude = ["resources", "snapshots"]
|
||||
|
||||
[default.extend-words]
|
||||
trivias = "trivias"
|
||||
hel = "hel"
|
||||
whos = "whos"
|
||||
spawnve = "spawnve"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "flake8-to-ruff"
|
||||
version = "0.0.276"
|
||||
version = "0.0.278"
|
||||
description = """
|
||||
Convert Flake8 configuration files to Ruff configuration files.
|
||||
"""
|
||||
|
||||
@@ -82,12 +82,12 @@ flake8-to-ruff path/to/.flake8 --plugin flake8-builtins --plugin flake8-quotes
|
||||
## Limitations
|
||||
|
||||
1. Ruff only supports a subset of the Flake8 configuration options. `flake8-to-ruff` will warn on and
|
||||
ignore unsupported options in the `.flake8` file (or equivalent). (Similarly, Ruff has a few
|
||||
configuration options that don't exist in Flake8.)
|
||||
ignore unsupported options in the `.flake8` file (or equivalent). (Similarly, Ruff has a few
|
||||
configuration options that don't exist in Flake8.)
|
||||
1. Ruff will omit any rule codes that are unimplemented or unsupported by Ruff, including rule
|
||||
codes from unsupported plugins. (See the
|
||||
[documentation](https://beta.ruff.rs/docs/faq/#how-does-ruff-compare-to-flake8) for the complete
|
||||
list of supported plugins.)
|
||||
codes from unsupported plugins. (See the
|
||||
[documentation](https://beta.ruff.rs/docs/faq/#how-does-ruff-compare-to-flake8) for the complete
|
||||
list of supported plugins.)
|
||||
|
||||
## License
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "ruff"
|
||||
version = "0.0.276"
|
||||
version = "0.0.278"
|
||||
publish = false
|
||||
authors = { workspace = true }
|
||||
edition = { workspace = true }
|
||||
@@ -73,11 +73,12 @@ shellexpand = { workspace = true }
|
||||
smallvec = { workspace = true }
|
||||
strum = { workspace = true }
|
||||
strum_macros = { workspace = true }
|
||||
thiserror = { version = "1.0.38" }
|
||||
thiserror = { version = "1.0.43" }
|
||||
toml = { workspace = true }
|
||||
typed-arena = { version = "2.0.2" }
|
||||
unicode-width = { version = "0.1.10" }
|
||||
unicode_names2 = { version = "0.6.0", git = "https://github.com/youknowone/unicode_names2.git", rev = "4ce16aa85cbcdd9cc830410f1a72ef9a235f2fde" }
|
||||
wsl = { version = "0.1.0" }
|
||||
|
||||
[dev-dependencies]
|
||||
insta = { workspace = true }
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from typing import Any, Type
|
||||
from typing import Annotated, Any, Optional, Type, Union
|
||||
from typing_extensions import override
|
||||
|
||||
# Error
|
||||
@@ -95,27 +95,27 @@ class Foo:
|
||||
def foo(self: "Foo", a: int, *params: str, **options: Any) -> int:
|
||||
pass
|
||||
|
||||
# ANN401
|
||||
# OK
|
||||
@override
|
||||
def foo(self: "Foo", a: Any, *params: str, **options: str) -> int:
|
||||
pass
|
||||
|
||||
# ANN401
|
||||
# OK
|
||||
@override
|
||||
def foo(self: "Foo", a: int, *params: str, **options: str) -> Any:
|
||||
pass
|
||||
|
||||
# ANN401
|
||||
# OK
|
||||
@override
|
||||
def foo(self: "Foo", a: int, *params: Any, **options: Any) -> int:
|
||||
pass
|
||||
|
||||
# ANN401
|
||||
# OK
|
||||
@override
|
||||
def foo(self: "Foo", a: int, *params: Any, **options: str) -> int:
|
||||
pass
|
||||
|
||||
# ANN401
|
||||
# OK
|
||||
@override
|
||||
def foo(self: "Foo", a: int, *params: str, **options: Any) -> int:
|
||||
pass
|
||||
@@ -137,3 +137,18 @@ class Foo:
|
||||
|
||||
# OK
|
||||
def f(*args: *tuple[int]) -> None: ...
|
||||
def f(a: object) -> None: ...
|
||||
def f(a: str | bytes) -> None: ...
|
||||
def f(a: Union[str, bytes]) -> None: ...
|
||||
def f(a: Optional[str]) -> None: ...
|
||||
def f(a: Annotated[str, ...]) -> None: ...
|
||||
def f(a: "Union[str, bytes]") -> None: ...
|
||||
def f(a: int + int) -> None: ...
|
||||
|
||||
# ANN401
|
||||
def f(a: Any | int) -> None: ...
|
||||
def f(a: int | Any) -> None: ...
|
||||
def f(a: Union[str, bytes, Any]) -> None: ...
|
||||
def f(a: Optional[Any]) -> None: ...
|
||||
def f(a: Annotated[Any, ...]) -> None: ...
|
||||
def f(a: "Union[str, bytes, Any]") -> None: ...
|
||||
|
||||
@@ -177,6 +177,9 @@ def str_okay(value=str("foo")):
|
||||
def bool_okay(value=bool("bar")):
|
||||
pass
|
||||
|
||||
# Allow immutable bytes() value
|
||||
def bytes_okay(value=bytes(1)):
|
||||
pass
|
||||
|
||||
# Allow immutable int() value
|
||||
def int_okay(value=int("12")):
|
||||
|
||||
27
crates/ruff/resources/test/fixtures/flake8_bugbear/B034.py
vendored
Normal file
27
crates/ruff/resources/test/fixtures/flake8_bugbear/B034.py
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
import re
|
||||
from re import sub
|
||||
|
||||
# B034
|
||||
re.sub("a", "b", "aaa", re.IGNORECASE)
|
||||
re.sub("a", "b", "aaa", 5)
|
||||
re.sub("a", "b", "aaa", 5, re.IGNORECASE)
|
||||
re.subn("a", "b", "aaa", re.IGNORECASE)
|
||||
re.subn("a", "b", "aaa", 5)
|
||||
re.subn("a", "b", "aaa", 5, re.IGNORECASE)
|
||||
re.split(" ", "a a a a", re.I)
|
||||
re.split(" ", "a a a a", 2)
|
||||
re.split(" ", "a a a a", 2, re.I)
|
||||
sub("a", "b", "aaa", re.IGNORECASE)
|
||||
|
||||
# OK
|
||||
re.sub("a", "b", "aaa")
|
||||
re.sub("a", "b", "aaa", flags=re.IGNORECASE)
|
||||
re.sub("a", "b", "aaa", count=5)
|
||||
re.sub("a", "b", "aaa", count=5, flags=re.IGNORECASE)
|
||||
re.subn("a", "b", "aaa")
|
||||
re.subn("a", "b", "aaa", flags=re.IGNORECASE)
|
||||
re.subn("a", "b", "aaa", count=5)
|
||||
re.subn("a", "b", "aaa", count=5, flags=re.IGNORECASE)
|
||||
re.split(" ", "a a a a", flags=re.I)
|
||||
re.split(" ", "a a a a", maxsplit=2)
|
||||
re.split(" ", "a a a a", maxsplit=2, flags=re.I)
|
||||
@@ -14,9 +14,10 @@ except AssertionError:
|
||||
except Exception as err:
|
||||
assert err
|
||||
raise Exception("No cause here...")
|
||||
except BaseException as base_err:
|
||||
# Might use this instead of bare raise with the `.with_traceback()` method
|
||||
raise base_err
|
||||
except BaseException as err:
|
||||
raise err
|
||||
except BaseException as err:
|
||||
raise some_other_err
|
||||
finally:
|
||||
raise Exception("Nothing to chain from, so no warning here")
|
||||
|
||||
|
||||
@@ -12,7 +12,8 @@ set(reversed(x))
|
||||
sorted(list(x))
|
||||
sorted(tuple(x))
|
||||
sorted(sorted(x))
|
||||
sorted(sorted(x, key=lambda y: y))
|
||||
sorted(sorted(x, key=foo, reverse=False), reverse=False, key=foo)
|
||||
sorted(sorted(x, reverse=True), reverse=True)
|
||||
sorted(reversed(x))
|
||||
sorted(list(x), key=lambda y: y)
|
||||
tuple(
|
||||
@@ -21,3 +22,9 @@ tuple(
|
||||
"o"]
|
||||
)
|
||||
)
|
||||
|
||||
# Nested sorts with differing keyword arguments. Not flagged.
|
||||
sorted(sorted(x, key=lambda y: y))
|
||||
sorted(sorted(x, key=lambda y: y), key=lambda x: x)
|
||||
sorted(sorted(x), reverse=True)
|
||||
sorted(sorted(x, reverse=False), reverse=True)
|
||||
|
||||
@@ -25,10 +25,15 @@ map(lambda x=2, y=1: x + y, nums, nums)
|
||||
set(map(lambda x, y: x, nums, nums))
|
||||
|
||||
|
||||
def myfunc(arg1: int, arg2: int = 4):
|
||||
def func(arg1: int, arg2: int = 4):
|
||||
return 2 * arg1 + arg2
|
||||
|
||||
|
||||
list(map(myfunc, nums))
|
||||
# Non-error: `func` is not a lambda.
|
||||
list(map(func, nums))
|
||||
|
||||
[x for x in nums]
|
||||
# False positive: need to preserve the late-binding of `x` in the inner lambda.
|
||||
map(lambda x: lambda: x, range(4))
|
||||
|
||||
# Error: the `x` is overridden by the inner lambda.
|
||||
map(lambda x: lambda x: x, range(4))
|
||||
|
||||
@@ -19,3 +19,6 @@ from datetime import datetime
|
||||
|
||||
# no args unqualified
|
||||
datetime(2000, 1, 1, 0, 0, 0)
|
||||
|
||||
# uses `astimezone` method
|
||||
datetime(2000, 1, 1, 0, 0, 0).astimezone()
|
||||
|
||||
@@ -7,3 +7,6 @@ from datetime import datetime
|
||||
|
||||
# unqualified
|
||||
datetime.today()
|
||||
|
||||
# uses `astimezone` method
|
||||
datetime.today().astimezone()
|
||||
|
||||
@@ -7,3 +7,6 @@ from datetime import datetime
|
||||
|
||||
# unqualified
|
||||
datetime.utcnow()
|
||||
|
||||
# uses `astimezone` method
|
||||
datetime.utcnow().astimezone()
|
||||
|
||||
@@ -7,3 +7,6 @@ from datetime import datetime
|
||||
|
||||
# unqualified
|
||||
datetime.utcfromtimestamp(1234)
|
||||
|
||||
# uses `astimezone` method
|
||||
datetime.utcfromtimestamp(1234).astimezone()
|
||||
|
||||
@@ -16,3 +16,6 @@ from datetime import datetime
|
||||
|
||||
# no args unqualified
|
||||
datetime.now()
|
||||
|
||||
# uses `astimezone` method
|
||||
datetime.now().astimezone()
|
||||
|
||||
@@ -16,3 +16,6 @@ from datetime import datetime
|
||||
|
||||
# no args unqualified
|
||||
datetime.fromtimestamp(1234)
|
||||
|
||||
# uses `astimezone` method
|
||||
datetime.fromtimestamp(1234).astimezone()
|
||||
|
||||
@@ -5,15 +5,18 @@ import matplotlib.pyplot # unconventional
|
||||
import numpy # unconventional
|
||||
import pandas # unconventional
|
||||
import seaborn # unconventional
|
||||
import tkinter # unconventional
|
||||
|
||||
import altair as altr # unconventional
|
||||
import matplotlib.pyplot as plot # unconventional
|
||||
import numpy as nmp # unconventional
|
||||
import pandas as pdas # unconventional
|
||||
import seaborn as sbrn # unconventional
|
||||
import tkinter as tkr # unconventional
|
||||
|
||||
import altair as alt # conventional
|
||||
import matplotlib.pyplot as plt # conventional
|
||||
import numpy as np # conventional
|
||||
import pandas as pd # conventional
|
||||
import seaborn as sns # conventional
|
||||
import tkinter as tk # conventional
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
import typing
|
||||
|
||||
# Shouldn't affect non-union field types.
|
||||
field1: str
|
||||
|
||||
@@ -30,3 +32,45 @@ field10: (str | int) | str # PYI016: Duplicate union member `str`
|
||||
|
||||
# Should emit for nested unions.
|
||||
field11: dict[int | int, str]
|
||||
|
||||
# Should emit for unions with more than two cases
|
||||
field12: int | int | int # Error
|
||||
field13: int | int | int | int # Error
|
||||
|
||||
# Should emit for unions with more than two cases, even if not directly adjacent
|
||||
field14: int | int | str | int # Error
|
||||
|
||||
# Should emit for duplicate literal types; also covered by PYI030
|
||||
field15: typing.Literal[1] | typing.Literal[1] # Error
|
||||
|
||||
# Shouldn't emit if in new parent type
|
||||
field16: int | dict[int, str] # OK
|
||||
|
||||
# Shouldn't emit if not in a union parent
|
||||
field17: dict[int, int] # OK
|
||||
|
||||
# Should emit in cases with newlines
|
||||
field18: typing.Union[
|
||||
set[
|
||||
int # foo
|
||||
],
|
||||
set[
|
||||
int # bar
|
||||
],
|
||||
] # Error, newline and comment will not be emitted in message
|
||||
|
||||
# Should emit in cases with `typing.Union` instead of `|`
|
||||
field19: typing.Union[int, int] # Error
|
||||
|
||||
# Should emit in cases with nested `typing.Union`
|
||||
field20: typing.Union[int, typing.Union[int, str]] # Error
|
||||
|
||||
# Should emit in cases with mixed `typing.Union` and `|`
|
||||
field21: typing.Union[int, int | str] # Error
|
||||
|
||||
# Should emit only once in cases with multiple nested `typing.Union`
|
||||
field22: typing.Union[int, typing.Union[int, typing.Union[int, int]]] # Error
|
||||
|
||||
# Should emit in cases with newlines
|
||||
field23: set[ # foo
|
||||
int] | set[int]
|
||||
|
||||
24
crates/ruff/resources/test/fixtures/flake8_pyi/PYI030.py
vendored
Normal file
24
crates/ruff/resources/test/fixtures/flake8_pyi/PYI030.py
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
from typing import Literal
|
||||
# Shouldn't emit for any cases in the non-stub file for compatibility with flake8-pyi.
|
||||
# Note that this rule could be applied here in the future.
|
||||
|
||||
field1: Literal[1] # OK
|
||||
field2: Literal[1] | Literal[2] # OK
|
||||
|
||||
def func1(arg1: Literal[1] | Literal[2]): # OK
|
||||
print(arg1)
|
||||
|
||||
|
||||
def func2() -> Literal[1] | Literal[2]: # OK
|
||||
return "my Literal[1]ing"
|
||||
|
||||
|
||||
field3: Literal[1] | Literal[2] | str # OK
|
||||
field4: str | Literal[1] | Literal[2] # OK
|
||||
field5: Literal[1] | str | Literal[2] # OK
|
||||
field6: Literal[1] | bool | Literal[2] | str # OK
|
||||
field7 = Literal[1] | Literal[2] # OK
|
||||
field8: Literal[1] | (Literal[2] | str) # OK
|
||||
field9: Literal[1] | (Literal[2] | str) # OK
|
||||
field10: (Literal[1] | str) | Literal[2] # OK
|
||||
field11: dict[Literal[1] | Literal[2], str] # OK
|
||||
86
crates/ruff/resources/test/fixtures/flake8_pyi/PYI030.pyi
vendored
Normal file
86
crates/ruff/resources/test/fixtures/flake8_pyi/PYI030.pyi
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
import typing
|
||||
import typing_extensions
|
||||
from typing import Literal
|
||||
|
||||
# Shouldn't affect non-union field types.
|
||||
field1: Literal[1] # OK
|
||||
|
||||
# Should emit for duplicate field types.
|
||||
field2: Literal[1] | Literal[2] # Error
|
||||
|
||||
# Should emit for union types in arguments.
|
||||
def func1(arg1: Literal[1] | Literal[2]): # Error
|
||||
print(arg1)
|
||||
|
||||
|
||||
# Should emit for unions in return types.
|
||||
def func2() -> Literal[1] | Literal[2]: # Error
|
||||
return "my Literal[1]ing"
|
||||
|
||||
|
||||
# Should emit in longer unions, even if not directly adjacent.
|
||||
field3: Literal[1] | Literal[2] | str # Error
|
||||
field4: str | Literal[1] | Literal[2] # Error
|
||||
field5: Literal[1] | str | Literal[2] # Error
|
||||
field6: Literal[1] | bool | Literal[2] | str # Error
|
||||
|
||||
# Should emit for non-type unions.
|
||||
field7 = Literal[1] | Literal[2] # Error
|
||||
|
||||
# Should emit for parenthesized unions.
|
||||
field8: Literal[1] | (Literal[2] | str) # Error
|
||||
|
||||
# Should handle user parentheses when fixing.
|
||||
field9: Literal[1] | (Literal[2] | str) # Error
|
||||
field10: (Literal[1] | str) | Literal[2] # Error
|
||||
|
||||
# Should emit for union in generic parent type.
|
||||
field11: dict[Literal[1] | Literal[2], str] # Error
|
||||
|
||||
# Should emit for unions with more than two cases
|
||||
field12: Literal[1] | Literal[2] | Literal[3] # Error
|
||||
field13: Literal[1] | Literal[2] | Literal[3] | Literal[4] # Error
|
||||
|
||||
# Should emit for unions with more than two cases, even if not directly adjacent
|
||||
field14: Literal[1] | Literal[2] | str | Literal[3] # Error
|
||||
|
||||
# Should emit for unions with mixed literal internal types
|
||||
field15: Literal[1] | Literal["foo"] | Literal[True] # Error
|
||||
|
||||
# Shouldn't emit for duplicate field types with same value; covered by Y016
|
||||
field16: Literal[1] | Literal[1] # OK
|
||||
|
||||
# Shouldn't emit if in new parent type
|
||||
field17: Literal[1] | dict[Literal[2], str] # OK
|
||||
|
||||
# Shouldn't emit if not in a union parent
|
||||
field18: dict[Literal[1], Literal[2]] # OK
|
||||
|
||||
# Should respect name of literal type used
|
||||
field19: typing.Literal[1] | typing.Literal[2] # Error
|
||||
|
||||
# Should emit in cases with newlines
|
||||
field20: typing.Union[
|
||||
Literal[
|
||||
1 # test
|
||||
],
|
||||
Literal[2],
|
||||
] # Error, newline and comment will not be emitted in message
|
||||
|
||||
# Should handle multiple unions with multiple members
|
||||
field21: Literal[1, 2] | Literal[3, 4] # Error
|
||||
|
||||
# Should emit in cases with `typing.Union` instead of `|`
|
||||
field22: typing.Union[Literal[1], Literal[2]] # Error
|
||||
|
||||
# Should emit in cases with `typing_extensions.Literal`
|
||||
field23: typing_extensions.Literal[1] | typing_extensions.Literal[2] # Error
|
||||
|
||||
# Should emit in cases with nested `typing.Union`
|
||||
field24: typing.Union[Literal[1], typing.Union[Literal[2], str]] # Error
|
||||
|
||||
# Should emit in cases with mixed `typing.Union` and `|`
|
||||
field25: typing.Union[Literal[1], Literal[2] | str] # Error
|
||||
|
||||
# Should emit only once in cases with multiple nested `typing.Union`
|
||||
field24: typing.Union[Literal[1], typing.Union[Literal[2], typing.Union[Literal[3], Literal[4]]]] # Error
|
||||
75
crates/ruff/resources/test/fixtures/flake8_pyi/PYI036.py
vendored
Normal file
75
crates/ruff/resources/test/fixtures/flake8_pyi/PYI036.py
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
import builtins
|
||||
import types
|
||||
import typing
|
||||
from collections.abc import Awaitable
|
||||
from types import TracebackType
|
||||
from typing import Any, Type
|
||||
|
||||
import _typeshed
|
||||
import typing_extensions
|
||||
from _typeshed import Unused
|
||||
|
||||
class GoodOne:
|
||||
def __exit__(self, *args: object) -> None: ...
|
||||
async def __aexit__(self, *args) -> str: ...
|
||||
|
||||
class GoodTwo:
|
||||
def __exit__(self, typ: type[builtins.BaseException] | None, *args: builtins.object) -> bool | None: ...
|
||||
async def __aexit__(self, /, typ: Type[BaseException] | None, *args: object, **kwargs) -> bool: ...
|
||||
|
||||
class GoodThree:
|
||||
def __exit__(self, __typ: typing.Type[BaseException] | None, exc: BaseException | None, *args: object) -> None: ...
|
||||
async def __aexit__(self, typ: typing_extensions.Type[BaseException] | None, __exc: BaseException | None, *args: object) -> None: ...
|
||||
|
||||
class GoodFour:
|
||||
def __exit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None) -> None: ...
|
||||
async def __aexit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: types.TracebackType | None, *args: list[None]) -> None: ...
|
||||
|
||||
class GoodFive:
|
||||
def __exit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None, weird_extra_arg: int = ..., *args: int, **kwargs: str) -> None: ...
|
||||
async def __aexit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None) -> Awaitable[None]: ...
|
||||
|
||||
class GoodSix:
|
||||
def __exit__(self, typ: object, exc: builtins.object, tb: object) -> None: ...
|
||||
async def __aexit__(self, typ: object, exc: object, tb: builtins.object) -> None: ...
|
||||
|
||||
class GoodSeven:
|
||||
def __exit__(self, *args: Unused) -> bool: ...
|
||||
async def __aexit__(self, typ: Type[BaseException] | None, *args: _typeshed.Unused) -> Awaitable[None]: ...
|
||||
|
||||
class GoodEight:
|
||||
def __exit__(self, __typ: typing.Type[BaseException] | None, exc: BaseException | None, *args: _typeshed.Unused) -> bool: ...
|
||||
async def __aexit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None, weird_extra_arg: int = ..., *args: Unused, **kwargs: Unused) -> Awaitable[None]: ...
|
||||
|
||||
class GoodNine:
|
||||
def __exit__(self, __typ: typing.Union[typing.Type[BaseException] , None], exc: typing.Union[BaseException , None], *args: _typeshed.Unused) -> bool: ...
|
||||
async def __aexit__(self, typ: typing.Union[typing.Type[BaseException], None], exc: typing.Union[BaseException , None], tb: typing.Union[TracebackType , None], weird_extra_arg: int = ..., *args: Unused, **kwargs: Unused) -> Awaitable[None]: ...
|
||||
|
||||
class GoodTen:
|
||||
def __exit__(self, __typ: typing.Optional[typing.Type[BaseException]], exc: typing.Optional[BaseException], *args: _typeshed.Unused) -> bool: ...
|
||||
async def __aexit__(self, typ: typing.Optional[typing.Type[BaseException]], exc: typing.Optional[BaseException], tb: typing.Optional[TracebackType], weird_extra_arg: int = ..., *args: Unused, **kwargs: Unused) -> Awaitable[None]: ...
|
||||
|
||||
|
||||
class BadOne:
|
||||
def __exit__(self, *args: Any) -> None: ... # PYI036: Bad star-args annotation
|
||||
async def __aexit__(self) -> None: ... # PYI036: Missing args
|
||||
|
||||
class BadTwo:
|
||||
def __exit__(self, typ, exc, tb, weird_extra_arg) -> None: ... # PYI036: Extra arg must have default
|
||||
async def __aexit__(self, typ, exc, tb, *, weird_extra_arg) -> None: ...# PYI036: Extra arg must have default
|
||||
|
||||
class BadThree:
|
||||
def __exit__(self, typ: type[BaseException], exc: BaseException | None, tb: TracebackType | None) -> None: ... # PYI036: First arg has bad annotation
|
||||
async def __aexit__(self, __typ: type[BaseException] | None, __exc: BaseException, __tb: TracebackType) -> bool | None: ... # PYI036: Second arg has bad annotation
|
||||
|
||||
class BadFour:
|
||||
def __exit__(self, typ: typing.Optional[type[BaseException]], exc: typing.Union[BaseException, None], tb: TracebackType) -> None: ... # PYI036: Third arg has bad annotation
|
||||
async def __aexit__(self, __typ: type[BaseException] | None, __exc: BaseException | None, __tb: typing.Union[TracebackType, None, int]) -> bool | None: ... # PYI036: Third arg has bad annotation
|
||||
|
||||
class BadFive:
|
||||
def __exit__(self, typ: BaseException | None, *args: list[str]) -> bool: ... # PYI036: Bad star-args annotation
|
||||
async def __aexit__(self, /, typ: type[BaseException] | None, *args: Any) -> Awaitable[None]: ... # PYI036: Bad star-args annotation
|
||||
|
||||
class BadSix:
|
||||
def __exit__(self, typ, exc, tb, weird_extra_arg, extra_arg2 = None) -> None: ... # PYI036: Extra arg must have default
|
||||
async def __aexit__(self, typ, exc, tb, *, weird_extra_arg) -> None: ... # PYI036: kwargs must have default
|
||||
75
crates/ruff/resources/test/fixtures/flake8_pyi/PYI036.pyi
vendored
Normal file
75
crates/ruff/resources/test/fixtures/flake8_pyi/PYI036.pyi
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
import builtins
|
||||
import types
|
||||
import typing
|
||||
from collections.abc import Awaitable
|
||||
from types import TracebackType
|
||||
from typing import Any, Type
|
||||
|
||||
import _typeshed
|
||||
import typing_extensions
|
||||
from _typeshed import Unused
|
||||
|
||||
class GoodOne:
|
||||
def __exit__(self, *args: object) -> None: ...
|
||||
async def __aexit__(self, *args) -> str: ...
|
||||
|
||||
class GoodTwo:
|
||||
def __exit__(self, typ: type[builtins.BaseException] | None, *args: builtins.object) -> bool | None: ...
|
||||
async def __aexit__(self, /, typ: Type[BaseException] | None, *args: object, **kwargs) -> bool: ...
|
||||
|
||||
class GoodThree:
|
||||
def __exit__(self, __typ: typing.Type[BaseException] | None, exc: BaseException | None, *args: object) -> None: ...
|
||||
async def __aexit__(self, typ: typing_extensions.Type[BaseException] | None, __exc: BaseException | None, *args: object) -> None: ...
|
||||
|
||||
class GoodFour:
|
||||
def __exit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None) -> None: ...
|
||||
async def __aexit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: types.TracebackType | None, *args: list[None]) -> None: ...
|
||||
|
||||
class GoodFive:
|
||||
def __exit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None, weird_extra_arg: int = ..., *args: int, **kwargs: str) -> None: ...
|
||||
async def __aexit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None) -> Awaitable[None]: ...
|
||||
|
||||
class GoodSix:
|
||||
def __exit__(self, typ: object, exc: builtins.object, tb: object) -> None: ...
|
||||
async def __aexit__(self, typ: object, exc: object, tb: builtins.object) -> None: ...
|
||||
|
||||
class GoodSeven:
|
||||
def __exit__(self, *args: Unused) -> bool: ...
|
||||
async def __aexit__(self, typ: Type[BaseException] | None, *args: _typeshed.Unused) -> Awaitable[None]: ...
|
||||
|
||||
class GoodEight:
|
||||
def __exit__(self, __typ: typing.Type[BaseException] | None, exc: BaseException | None, *args: _typeshed.Unused) -> bool: ...
|
||||
async def __aexit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None, weird_extra_arg: int = ..., *args: Unused, **kwargs: Unused) -> Awaitable[None]: ...
|
||||
|
||||
class GoodNine:
|
||||
def __exit__(self, __typ: typing.Union[typing.Type[BaseException] , None], exc: typing.Union[BaseException , None], *args: _typeshed.Unused) -> bool: ...
|
||||
async def __aexit__(self, typ: typing.Union[typing.Type[BaseException], None], exc: typing.Union[BaseException , None], tb: typing.Union[TracebackType , None], weird_extra_arg: int = ..., *args: Unused, **kwargs: Unused) -> Awaitable[None]: ...
|
||||
|
||||
class GoodTen:
|
||||
def __exit__(self, __typ: typing.Optional[typing.Type[BaseException]], exc: typing.Optional[BaseException], *args: _typeshed.Unused) -> bool: ...
|
||||
async def __aexit__(self, typ: typing.Optional[typing.Type[BaseException]], exc: typing.Optional[BaseException], tb: typing.Optional[TracebackType], weird_extra_arg: int = ..., *args: Unused, **kwargs: Unused) -> Awaitable[None]: ...
|
||||
|
||||
|
||||
class BadOne:
|
||||
def __exit__(self, *args: Any) -> None: ... # PYI036: Bad star-args annotation
|
||||
async def __aexit__(self) -> None: ... # PYI036: Missing args
|
||||
|
||||
class BadTwo:
|
||||
def __exit__(self, typ, exc, tb, weird_extra_arg) -> None: ... # PYI036: Extra arg must have default
|
||||
async def __aexit__(self, typ, exc, tb, *, weird_extra_arg1, weird_extra_arg2) -> None: ...# PYI036: kwargs must have default
|
||||
|
||||
class BadThree:
|
||||
def __exit__(self, typ: type[BaseException], exc: BaseException | None, tb: TracebackType | None) -> None: ... # PYI036: First arg has bad annotation
|
||||
async def __aexit__(self, __typ: type[BaseException] | None, __exc: BaseException, __tb: TracebackType) -> bool | None: ... # PYI036: Second arg has bad annotation
|
||||
|
||||
class BadFour:
|
||||
def __exit__(self, typ: typing.Optional[type[BaseException]], exc: typing.Union[BaseException, None], tb: TracebackType) -> None: ... # PYI036: Third arg has bad annotation
|
||||
async def __aexit__(self, __typ: type[BaseException] | None, __exc: BaseException | None, __tb: typing.Union[TracebackType, None, int]) -> bool | None: ... # PYI036: Third arg has bad annotation
|
||||
|
||||
class BadFive:
|
||||
def __exit__(self, typ: BaseException | None, *args: list[str]) -> bool: ... # PYI036: Bad star-args annotation
|
||||
async def __aexit__(self, /, typ: type[BaseException] | None, *args: Any) -> Awaitable[None]: ... # PYI036: Bad star-args annotation
|
||||
|
||||
class BadSix:
|
||||
def __exit__(self, typ, exc, tb, weird_extra_arg, extra_arg2 = None) -> None: ... # PYI036: Extra arg must have default
|
||||
async def __aexit__(self, typ, exc, tb, *, weird_extra_arg) -> None: ... # PYI036: kwargs must have default
|
||||
47
crates/ruff/resources/test/fixtures/flake8_pyi/PYI041.py
vendored
Normal file
47
crates/ruff/resources/test/fixtures/flake8_pyi/PYI041.py
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
from typing import (
|
||||
Union,
|
||||
)
|
||||
|
||||
from typing_extensions import (
|
||||
TypeAlias,
|
||||
)
|
||||
|
||||
TA0: TypeAlias = int
|
||||
TA1: TypeAlias = int | float | bool
|
||||
TA2: TypeAlias = Union[int, float, bool]
|
||||
|
||||
|
||||
def good1(arg: int) -> int | bool:
|
||||
...
|
||||
|
||||
|
||||
def good2(arg: int, arg2: int | bool) -> None:
|
||||
...
|
||||
|
||||
|
||||
def f0(arg1: float | int) -> None:
|
||||
...
|
||||
|
||||
|
||||
def f1(arg1: float, *, arg2: float | list[str] | type[bool] | complex) -> None:
|
||||
...
|
||||
|
||||
|
||||
def f2(arg1: int, /, arg2: int | int | float) -> None:
|
||||
...
|
||||
|
||||
|
||||
def f3(arg1: int, *args: Union[int | int | float]) -> None:
|
||||
...
|
||||
|
||||
|
||||
async def f4(**kwargs: int | int | float) -> None:
|
||||
...
|
||||
|
||||
|
||||
class Foo:
|
||||
def good(self, arg: int) -> None:
|
||||
...
|
||||
|
||||
def bad(self, arg: int | float | complex) -> None:
|
||||
...
|
||||
39
crates/ruff/resources/test/fixtures/flake8_pyi/PYI041.pyi
vendored
Normal file
39
crates/ruff/resources/test/fixtures/flake8_pyi/PYI041.pyi
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
from typing import (
|
||||
Union,
|
||||
)
|
||||
|
||||
from typing_extensions import (
|
||||
TypeAlias,
|
||||
)
|
||||
|
||||
# Type aliases not flagged
|
||||
TA0: TypeAlias = int
|
||||
TA1: TypeAlias = int | float | bool
|
||||
TA2: TypeAlias = Union[int, float, bool]
|
||||
|
||||
|
||||
def good1(arg: int) -> int | bool: ...
|
||||
|
||||
|
||||
def good2(arg: int, arg2: int | bool) -> None: ...
|
||||
|
||||
|
||||
def f0(arg1: float | int) -> None: ... # PYI041
|
||||
|
||||
|
||||
def f1(arg1: float, *, arg2: float | list[str] | type[bool] | complex) -> None: ... # PYI041
|
||||
|
||||
|
||||
def f2(arg1: int, /, arg2: int | int | float) -> None: ... # PYI041
|
||||
|
||||
|
||||
def f3(arg1: int, *args: Union[int | int | float]) -> None: ... # PYI041
|
||||
|
||||
|
||||
async def f4(**kwargs: int | int | float) -> None: ... # PYI041
|
||||
|
||||
|
||||
class Foo:
|
||||
def good(self, arg: int) -> None: ...
|
||||
|
||||
def bad(self, arg: int | float | complex) -> None: ... # PYI041
|
||||
@@ -29,6 +29,26 @@ raise TypeError(
|
||||
# Hello, world!
|
||||
)
|
||||
|
||||
# OK
|
||||
raise AssertionError
|
||||
|
||||
# OK
|
||||
raise AttributeError("test message")
|
||||
|
||||
|
||||
def return_error():
|
||||
return ValueError("Something")
|
||||
|
||||
|
||||
# OK
|
||||
raise return_error()
|
||||
|
||||
|
||||
class Class:
|
||||
@staticmethod
|
||||
def error():
|
||||
return ValueError("Something")
|
||||
|
||||
|
||||
# OK
|
||||
raise Class.error()
|
||||
|
||||
@@ -4,3 +4,10 @@ class Bad(str): # SLOT000
|
||||
|
||||
class Good(str): # Ok
|
||||
__slots__ = ["foo"]
|
||||
|
||||
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class Fine(str, Enum): # Ok
|
||||
__slots__ = ["foo"]
|
||||
|
||||
9
crates/ruff/resources/test/fixtures/isort/case_sensitive.py
vendored
Normal file
9
crates/ruff/resources/test/fixtures/isort/case_sensitive.py
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
import A
|
||||
import B
|
||||
import b
|
||||
import C
|
||||
import d
|
||||
import E
|
||||
import f
|
||||
from g import a, B, c
|
||||
from h import A, b, C
|
||||
@@ -26,3 +26,9 @@ def f():
|
||||
import os # isort:skip
|
||||
import collections
|
||||
import abc
|
||||
|
||||
|
||||
def f():
|
||||
import sys; import os # isort:skip
|
||||
import sys; import os # isort:skip # isort:skip
|
||||
import sys; import os
|
||||
|
||||
@@ -19,3 +19,13 @@ if True:
|
||||
|
||||
import D
|
||||
import B
|
||||
|
||||
|
||||
import e
|
||||
import f
|
||||
|
||||
# isort: split
|
||||
# isort: split
|
||||
|
||||
import d
|
||||
import c
|
||||
|
||||
27
crates/ruff/resources/test/fixtures/pandas_vet/PD101.py
vendored
Normal file
27
crates/ruff/resources/test/fixtures/pandas_vet/PD101.py
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
import pandas as pd
|
||||
|
||||
|
||||
data = pd.Series(range(1000))
|
||||
|
||||
# PD101
|
||||
data.nunique() <= 1
|
||||
data.nunique(dropna=True) <= 1
|
||||
data.nunique(dropna=False) <= 1
|
||||
data.nunique() == 1
|
||||
data.nunique(dropna=True) == 1
|
||||
data.nunique(dropna=False) == 1
|
||||
data.nunique() != 1
|
||||
data.nunique(dropna=True) != 1
|
||||
data.nunique(dropna=False) != 1
|
||||
data.nunique() > 1
|
||||
data.dropna().nunique() == 1
|
||||
data[data.notnull()].nunique() == 1
|
||||
|
||||
# No violation of this rule
|
||||
data.nunique() == 0 # empty
|
||||
data.nunique() >= 1 # not-empty
|
||||
data.nunique() < 1 # empty
|
||||
data.nunique() == 2 # not constant
|
||||
data.unique() == 1 # not `nunique`
|
||||
|
||||
{"hello": "world"}.nunique() == 1 # no pd.Series
|
||||
20
crates/ruff/resources/test/fixtures/pandas_vet/pandas_use_of_dot_read_table.py
vendored
Normal file
20
crates/ruff/resources/test/fixtures/pandas_vet/pandas_use_of_dot_read_table.py
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
import pandas as pd
|
||||
|
||||
# Errors.
|
||||
df = pd.read_table("data.csv", sep=",")
|
||||
df = pd.read_table("data.csv", sep=",", header=0)
|
||||
filename = "data.csv"
|
||||
df = pd.read_table(filename, sep=",")
|
||||
df = pd.read_table(filename, sep=",", header=0)
|
||||
|
||||
# Non-errors.
|
||||
df = pd.read_csv("data.csv")
|
||||
df = pd.read_table("data.tsv")
|
||||
df = pd.read_table("data.tsv", sep="\t")
|
||||
df = pd.read_table("data.tsv", sep=",,")
|
||||
df = pd.read_table("data.tsv", sep=", ")
|
||||
df = pd.read_table("data.tsv", sep=" ,")
|
||||
df = pd.read_table("data.tsv", sep=" , ")
|
||||
not_pd.read_table("data.csv", sep=",")
|
||||
data = read_table("data.csv", sep=",")
|
||||
data = read_table
|
||||
@@ -1,71 +1,101 @@
|
||||
some_dict = {"a": 12, "b": 32, "c": 44}
|
||||
|
||||
for _, value in some_dict.items(): # PERF102
|
||||
print(value)
|
||||
|
||||
def f():
|
||||
for _, value in some_dict.items(): # PERF102
|
||||
print(value)
|
||||
|
||||
|
||||
for key, _ in some_dict.items(): # PERF102
|
||||
print(key)
|
||||
def f():
|
||||
for key, _ in some_dict.items(): # PERF102
|
||||
print(key)
|
||||
|
||||
|
||||
for weird_arg_name, _ in some_dict.items(): # PERF102
|
||||
print(weird_arg_name)
|
||||
def f():
|
||||
for weird_arg_name, _ in some_dict.items(): # PERF102
|
||||
print(weird_arg_name)
|
||||
|
||||
|
||||
for name, (_, _) in some_dict.items(): # PERF102
|
||||
pass
|
||||
def f():
|
||||
for name, (_, _) in some_dict.items(): # PERF102
|
||||
print(name)
|
||||
|
||||
|
||||
for name, (value1, _) in some_dict.items(): # OK
|
||||
pass
|
||||
def f():
|
||||
for name, (value1, _) in some_dict.items(): # OK
|
||||
print(name, value1)
|
||||
|
||||
|
||||
for (key1, _), (_, _) in some_dict.items(): # PERF102
|
||||
pass
|
||||
def f():
|
||||
for (key1, _), (_, _) in some_dict.items(): # PERF102
|
||||
print(key1)
|
||||
|
||||
|
||||
for (_, (_, _)), (value, _) in some_dict.items(): # PERF102
|
||||
pass
|
||||
def f():
|
||||
for (_, (_, _)), (value, _) in some_dict.items(): # PERF102
|
||||
print(value)
|
||||
|
||||
|
||||
for (_, key2), (value1, _) in some_dict.items(): # OK
|
||||
pass
|
||||
def f():
|
||||
for (_, key2), (value1, _) in some_dict.items(): # OK
|
||||
print(key2, value1)
|
||||
|
||||
|
||||
for ((_, key2), (value1, _)) in some_dict.items(): # OK
|
||||
pass
|
||||
def f():
|
||||
for ((_, key2), (value1, _)) in some_dict.items(): # OK
|
||||
print(key2, value1)
|
||||
|
||||
|
||||
for ((_, key2), (_, _)) in some_dict.items(): # PERF102
|
||||
pass
|
||||
def f():
|
||||
for ((_, key2), (_, _)) in some_dict.items(): # PERF102
|
||||
print(key2)
|
||||
|
||||
|
||||
for (_, _, _, variants), (r_language, _, _, _) in some_dict.items(): # OK
|
||||
pass
|
||||
def f():
|
||||
for (_, _, _, variants), (r_language, _, _, _) in some_dict.items(): # OK
|
||||
print(variants, r_language)
|
||||
|
||||
|
||||
for (_, _, (_, variants)), (_, (_, (r_language, _))) in some_dict.items(): # OK
|
||||
pass
|
||||
def f():
|
||||
for (_, _, (_, variants)), (_, (_, (r_language, _))) in some_dict.items(): # OK
|
||||
print(variants, r_language)
|
||||
|
||||
|
||||
for key, value in some_dict.items(): # OK
|
||||
print(key, value)
|
||||
def f():
|
||||
for key, value in some_dict.items(): # OK
|
||||
print(key, value)
|
||||
|
||||
|
||||
for _, value in some_dict.items(12): # OK
|
||||
print(value)
|
||||
def f():
|
||||
for _, value in some_dict.items(12): # OK
|
||||
print(value)
|
||||
|
||||
|
||||
for key in some_dict.keys(): # OK
|
||||
print(key)
|
||||
def f():
|
||||
for key in some_dict.keys(): # OK
|
||||
print(key)
|
||||
|
||||
|
||||
for value in some_dict.values(): # OK
|
||||
print(value)
|
||||
def f():
|
||||
for value in some_dict.values(): # OK
|
||||
print(value)
|
||||
|
||||
|
||||
for name, (_, _) in (some_function()).items(): # PERF102
|
||||
pass
|
||||
def f():
|
||||
for name, (_, _) in (some_function()).items(): # PERF102
|
||||
print(name)
|
||||
|
||||
for name, (_, _) in (some_function().some_attribute).items(): # PERF102
|
||||
pass
|
||||
|
||||
def f():
|
||||
for name, (_, _) in (some_function().some_attribute).items(): # PERF102
|
||||
print(name)
|
||||
|
||||
|
||||
def f():
|
||||
for name, unused_value in some_dict.items(): # PERF102
|
||||
print(name)
|
||||
|
||||
|
||||
def f():
|
||||
for unused_name, value in some_dict.items(): # PERF102
|
||||
print(value)
|
||||
|
||||
@@ -30,3 +30,18 @@ def f():
|
||||
result = []
|
||||
for i in items:
|
||||
result.append(i) # OK
|
||||
|
||||
|
||||
def f():
|
||||
items = [1, 2, 3, 4]
|
||||
result = {}
|
||||
for i in items:
|
||||
result[i].append(i) # OK
|
||||
|
||||
|
||||
def f():
|
||||
items = [1, 2, 3, 4]
|
||||
result = []
|
||||
for i in items:
|
||||
if i not in result:
|
||||
result.append(i) # OK
|
||||
|
||||
@@ -17,3 +17,10 @@ def f():
|
||||
result = []
|
||||
for i in items:
|
||||
result.append(i * i) # OK
|
||||
|
||||
|
||||
def f():
|
||||
items = [1, 2, 3, 4]
|
||||
result = {}
|
||||
for i in items:
|
||||
result[i].append(i * i) # OK
|
||||
|
||||
@@ -36,3 +36,4 @@ if (True) == TrueElement or x == TrueElement:
|
||||
assert (not foo) in bar
|
||||
assert {"x": not foo} in bar
|
||||
assert [42, not foo] in bar
|
||||
assert not (re.search(r"^.:\\Users\\[^\\]*\\Downloads\\.*") is None)
|
||||
|
||||
@@ -36,3 +36,4 @@ if (True) == TrueElement or x == TrueElement:
|
||||
assert (not foo) in bar
|
||||
assert {"x": not foo} in bar
|
||||
assert [42, not foo] in bar
|
||||
assert not (re.search(r"^.:\\Users\\[^\\]*\\Downloads\\.*") is None)
|
||||
|
||||
@@ -48,3 +48,8 @@ x = {
|
||||
|
||||
x = {"a": 1, "a": 1}
|
||||
x = {"a": 1, "b": 2, "a": 1}
|
||||
|
||||
x = {
|
||||
('a', 'b'): 'asdf',
|
||||
('a', 'b'): 'qwer',
|
||||
}
|
||||
|
||||
@@ -80,3 +80,8 @@ def multiple_assignment():
|
||||
global CONSTANT # [global-statement]
|
||||
CONSTANT = 1
|
||||
CONSTANT = 2
|
||||
|
||||
|
||||
def no_assignment():
|
||||
"""Shouldn't warn"""
|
||||
global CONSTANT
|
||||
|
||||
34
crates/ruff/resources/test/fixtures/pylint/repeated_equality_comparison_target.py
vendored
Normal file
34
crates/ruff/resources/test/fixtures/pylint/repeated_equality_comparison_target.py
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
# Errors.
|
||||
foo == "a" or foo == "b"
|
||||
|
||||
foo != "a" and foo != "b"
|
||||
|
||||
foo == "a" or foo == "b" or foo == "c"
|
||||
|
||||
foo != "a" and foo != "b" and foo != "c"
|
||||
|
||||
foo == a or foo == "b" or foo == 3 # Mixed types.
|
||||
|
||||
# False negatives (the current implementation doesn't support Yoda conditions).
|
||||
"a" == foo or "b" == foo or "c" == foo
|
||||
|
||||
"a" != foo and "b" != foo and "c" != foo
|
||||
|
||||
"a" == foo or foo == "b" or "c" == foo
|
||||
|
||||
# OK
|
||||
foo == "a" and foo == "b" and foo == "c" # `and` mixed with `==`.
|
||||
|
||||
foo != "a" or foo != "b" or foo != "c" # `or` mixed with `!=`.
|
||||
|
||||
foo == a or foo == b() or foo == c # Call expression.
|
||||
|
||||
foo != a or foo() != b or foo != c # Call expression.
|
||||
|
||||
foo in {"a", "b", "c"} # Uses membership test already.
|
||||
|
||||
foo not in {"a", "b", "c"} # Uses membership test already.
|
||||
|
||||
foo == "a" # Single comparison.
|
||||
|
||||
foo != "a" # Single comparison.
|
||||
37
crates/ruff/resources/test/fixtures/pylint/type_bivariance.py
vendored
Normal file
37
crates/ruff/resources/test/fixtures/pylint/type_bivariance.py
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
from typing import ParamSpec, TypeVar
|
||||
|
||||
# Errors.
|
||||
|
||||
T = TypeVar("T", covariant=True, contravariant=True)
|
||||
T = TypeVar(name="T", covariant=True, contravariant=True)
|
||||
|
||||
T = ParamSpec("T", covariant=True, contravariant=True)
|
||||
T = ParamSpec(name="T", covariant=True, contravariant=True)
|
||||
|
||||
# Non-errors.
|
||||
|
||||
T = TypeVar("T")
|
||||
T = TypeVar("T", covariant=False)
|
||||
T = TypeVar("T", contravariant=False)
|
||||
T = TypeVar("T", covariant=False, contravariant=False)
|
||||
T = TypeVar("T", covariant=True)
|
||||
T = TypeVar("T", covariant=True, contravariant=False)
|
||||
T = TypeVar(name="T", covariant=True, contravariant=False)
|
||||
T = TypeVar(name="T", covariant=True)
|
||||
T = TypeVar("T", contravariant=True)
|
||||
T = TypeVar("T", covariant=False, contravariant=True)
|
||||
T = TypeVar(name="T", covariant=False, contravariant=True)
|
||||
T = TypeVar(name="T", contravariant=True)
|
||||
|
||||
T = ParamSpec("T")
|
||||
T = ParamSpec("T", covariant=False)
|
||||
T = ParamSpec("T", contravariant=False)
|
||||
T = ParamSpec("T", covariant=False, contravariant=False)
|
||||
T = ParamSpec("T", covariant=True)
|
||||
T = ParamSpec("T", covariant=True, contravariant=False)
|
||||
T = ParamSpec(name="T", covariant=True, contravariant=False)
|
||||
T = ParamSpec(name="T", covariant=True)
|
||||
T = ParamSpec("T", contravariant=True)
|
||||
T = ParamSpec("T", covariant=False, contravariant=True)
|
||||
T = ParamSpec(name="T", covariant=False, contravariant=True)
|
||||
T = ParamSpec(name="T", contravariant=True)
|
||||
68
crates/ruff/resources/test/fixtures/pylint/type_name_incorrect_variance.py
vendored
Normal file
68
crates/ruff/resources/test/fixtures/pylint/type_name_incorrect_variance.py
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
from typing import ParamSpec, TypeVar
|
||||
|
||||
# Errors.
|
||||
|
||||
T = TypeVar("T", covariant=True)
|
||||
T = TypeVar("T", covariant=True, contravariant=False)
|
||||
T = TypeVar("T", contravariant=True)
|
||||
T = TypeVar("T", covariant=False, contravariant=True)
|
||||
P = ParamSpec("P", covariant=True)
|
||||
P = ParamSpec("P", covariant=True, contravariant=False)
|
||||
P = ParamSpec("P", contravariant=True)
|
||||
P = ParamSpec("P", covariant=False, contravariant=True)
|
||||
|
||||
T_co = TypeVar("T_co")
|
||||
T_co = TypeVar("T_co", covariant=False)
|
||||
T_co = TypeVar("T_co", contravariant=False)
|
||||
T_co = TypeVar("T_co", covariant=False, contravariant=False)
|
||||
T_co = TypeVar("T_co", contravariant=True)
|
||||
T_co = TypeVar("T_co", covariant=False, contravariant=True)
|
||||
P_co = ParamSpec("P_co")
|
||||
P_co = ParamSpec("P_co", covariant=False)
|
||||
P_co = ParamSpec("P_co", contravariant=False)
|
||||
P_co = ParamSpec("P_co", covariant=False, contravariant=False)
|
||||
P_co = ParamSpec("P_co", contravariant=True)
|
||||
P_co = ParamSpec("P_co", covariant=False, contravariant=True)
|
||||
|
||||
T_contra = TypeVar("T_contra")
|
||||
T_contra = TypeVar("T_contra", covariant=False)
|
||||
T_contra = TypeVar("T_contra", contravariant=False)
|
||||
T_contra = TypeVar("T_contra", covariant=False, contravariant=False)
|
||||
T_contra = TypeVar("T_contra", covariant=True)
|
||||
T_contra = TypeVar("T_contra", covariant=True, contravariant=False)
|
||||
P_contra = ParamSpec("P_contra")
|
||||
P_contra = ParamSpec("P_contra", covariant=False)
|
||||
P_contra = ParamSpec("P_contra", contravariant=False)
|
||||
P_contra = ParamSpec("P_contra", covariant=False, contravariant=False)
|
||||
P_contra = ParamSpec("P_contra", covariant=True)
|
||||
P_contra = ParamSpec("P_contra", covariant=True, contravariant=False)
|
||||
|
||||
# Non-errors.
|
||||
|
||||
T = TypeVar("T")
|
||||
T = TypeVar("T", covariant=False)
|
||||
T = TypeVar("T", contravariant=False)
|
||||
T = TypeVar("T", covariant=False, contravariant=False)
|
||||
P = ParamSpec("P")
|
||||
P = ParamSpec("P", covariant=False)
|
||||
P = ParamSpec("P", contravariant=False)
|
||||
P = ParamSpec("P", covariant=False, contravariant=False)
|
||||
|
||||
T_co = TypeVar("T_co", covariant=True)
|
||||
T_co = TypeVar("T_co", covariant=True, contravariant=False)
|
||||
P_co = ParamSpec("P_co", covariant=True)
|
||||
P_co = ParamSpec("P_co", covariant=True, contravariant=False)
|
||||
|
||||
T_contra = TypeVar("T_contra", contravariant=True)
|
||||
T_contra = TypeVar("T_contra", covariant=False, contravariant=True)
|
||||
P_contra = ParamSpec("P_contra", contravariant=True)
|
||||
P_contra = ParamSpec("P_contra", covariant=False, contravariant=True)
|
||||
|
||||
# Bivariate types are errors, but not covered by this check.
|
||||
|
||||
T = TypeVar("T", covariant=True, contravariant=True)
|
||||
P = ParamSpec("P", covariant=True, contravariant=True)
|
||||
T_co = TypeVar("T_co", covariant=True, contravariant=True)
|
||||
P_co = ParamSpec("P_co", covariant=True, contravariant=True)
|
||||
T_contra = TypeVar("T_contra", covariant=True, contravariant=True)
|
||||
P_contra = ParamSpec("P_contra", covariant=True, contravariant=True)
|
||||
56
crates/ruff/resources/test/fixtures/pylint/type_param_name_mismatch.py
vendored
Normal file
56
crates/ruff/resources/test/fixtures/pylint/type_param_name_mismatch.py
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
from typing import TypeVar, ParamSpec, NewType, TypeVarTuple
|
||||
|
||||
# Errors.
|
||||
|
||||
X = TypeVar("T")
|
||||
X = TypeVar(name="T")
|
||||
|
||||
Y = ParamSpec("T")
|
||||
Y = ParamSpec(name="T")
|
||||
|
||||
Z = NewType("T", int)
|
||||
Z = NewType(name="T", tp=int)
|
||||
|
||||
Ws = TypeVarTuple("Ts")
|
||||
Ws = TypeVarTuple(name="Ts")
|
||||
|
||||
# Non-errors.
|
||||
|
||||
T = TypeVar("T")
|
||||
T = TypeVar(name="T")
|
||||
|
||||
T = ParamSpec("T")
|
||||
T = ParamSpec(name="T")
|
||||
|
||||
T = NewType("T", int)
|
||||
T = NewType(name="T", tp=int)
|
||||
|
||||
Ts = TypeVarTuple("Ts")
|
||||
Ts = TypeVarTuple(name="Ts")
|
||||
|
||||
# Errors, but not covered by this rule.
|
||||
|
||||
# Non-string literal name.
|
||||
T = TypeVar(some_str)
|
||||
T = TypeVar(name=some_str)
|
||||
T = TypeVar(1)
|
||||
T = TypeVar(name=1)
|
||||
T = ParamSpec(some_str)
|
||||
T = ParamSpec(name=some_str)
|
||||
T = ParamSpec(1)
|
||||
T = ParamSpec(name=1)
|
||||
T = NewType(some_str, int)
|
||||
T = NewType(name=some_str, tp=int)
|
||||
T = NewType(1, int)
|
||||
T = NewType(name=1, tp=int)
|
||||
Ts = TypeVarTuple(some_str)
|
||||
Ts = TypeVarTuple(name=some_str)
|
||||
Ts = TypeVarTuple(1)
|
||||
Ts = TypeVarTuple(name=1)
|
||||
|
||||
# No names provided.
|
||||
T = TypeVar()
|
||||
T = ParamSpec()
|
||||
T = NewType()
|
||||
T = NewType(tp=int)
|
||||
Ts = TypeVarTuple()
|
||||
@@ -27,6 +27,14 @@ def f(x: typing.Union[(str, int), float]) -> None:
|
||||
...
|
||||
|
||||
|
||||
def f(x: typing.Union[(int,)]) -> None:
|
||||
...
|
||||
|
||||
|
||||
def f(x: typing.Union[()]) -> None:
|
||||
...
|
||||
|
||||
|
||||
def f(x: "Union[str, int, Union[float, bytes]]") -> None:
|
||||
...
|
||||
|
||||
|
||||
@@ -4,23 +4,9 @@ import typing
|
||||
# with complex annotations
|
||||
MyType = NamedTuple("MyType", [("a", int), ("b", tuple[str, ...])])
|
||||
|
||||
# with default values as list
|
||||
MyType = NamedTuple(
|
||||
"MyType",
|
||||
[("a", int), ("b", str), ("c", list[bool])],
|
||||
defaults=["foo", [True]],
|
||||
)
|
||||
|
||||
# with namespace
|
||||
MyType = typing.NamedTuple("MyType", [("a", int), ("b", str)])
|
||||
|
||||
# too many default values (OK)
|
||||
MyType = NamedTuple(
|
||||
"MyType",
|
||||
[("a", int), ("b", str)],
|
||||
defaults=[1, "bar", "baz"],
|
||||
)
|
||||
|
||||
# invalid identifiers (OK)
|
||||
MyType = NamedTuple("MyType", [("x-y", int), ("b", tuple[str, ...])])
|
||||
|
||||
@@ -29,3 +15,10 @@ MyType = typing.NamedTuple("MyType")
|
||||
|
||||
# empty fields
|
||||
MyType = typing.NamedTuple("MyType", [])
|
||||
|
||||
# keywords
|
||||
MyType = typing.NamedTuple("MyType", a=int, b=tuple[str, ...])
|
||||
|
||||
# unfixable
|
||||
MyType = typing.NamedTuple("MyType", [("a", int)], [("b", str)])
|
||||
MyType = typing.NamedTuple("MyType", [("a", int)], b=str)
|
||||
|
||||
@@ -54,6 +54,14 @@ print("foo {} ".format(x))
|
||||
|
||||
'''{[b]}'''.format(a)
|
||||
|
||||
"{}".format(
|
||||
1
|
||||
)
|
||||
|
||||
"123456789 {}".format(
|
||||
1111111111111111111111111111111111111111111111111111111111111111111111111,
|
||||
)
|
||||
|
||||
###
|
||||
# Non-errors
|
||||
###
|
||||
@@ -87,6 +95,9 @@ r'"\N{snowman} {}".format(a)'
|
||||
|
||||
"{a}" "{b}".format(a=1, b=1)
|
||||
|
||||
"123456789 {}".format(
|
||||
11111111111111111111111111111111111111111111111111111111111111111111111111,
|
||||
)
|
||||
|
||||
async def c():
|
||||
return "{}".format(await 3)
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
"""A mirror of UP037_1.py, with `from __future__ import annotations`."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import (
|
||||
108
crates/ruff/resources/test/fixtures/pyupgrade/UP037_1.py
vendored
Normal file
108
crates/ruff/resources/test/fixtures/pyupgrade/UP037_1.py
vendored
Normal file
@@ -0,0 +1,108 @@
|
||||
"""A mirror of UP037_0.py, without `from __future__ import annotations`."""
|
||||
|
||||
from typing import (
|
||||
Annotated,
|
||||
Callable,
|
||||
List,
|
||||
Literal,
|
||||
NamedTuple,
|
||||
Tuple,
|
||||
TypeVar,
|
||||
TypedDict,
|
||||
cast,
|
||||
)
|
||||
|
||||
from mypy_extensions import Arg, DefaultArg, DefaultNamedArg, NamedArg, VarArg
|
||||
|
||||
|
||||
def foo(var: "MyClass") -> "MyClass":
|
||||
x: "MyClass"
|
||||
|
||||
|
||||
def foo(*, inplace: "bool"):
|
||||
pass
|
||||
|
||||
|
||||
def foo(*args: "str", **kwargs: "int"):
|
||||
pass
|
||||
|
||||
|
||||
x: Tuple["MyClass"]
|
||||
|
||||
x: Callable[["MyClass"], None]
|
||||
|
||||
|
||||
class Foo(NamedTuple):
|
||||
x: "MyClass"
|
||||
|
||||
|
||||
class D(TypedDict):
|
||||
E: TypedDict("E", foo="int", total=False)
|
||||
|
||||
|
||||
class D(TypedDict):
|
||||
E: TypedDict("E", {"foo": "int"})
|
||||
|
||||
|
||||
x: Annotated["str", "metadata"]
|
||||
|
||||
x: Arg("str", "name")
|
||||
|
||||
x: DefaultArg("str", "name")
|
||||
|
||||
x: NamedArg("str", "name")
|
||||
|
||||
x: DefaultNamedArg("str", "name")
|
||||
|
||||
x: DefaultNamedArg("str", name="name")
|
||||
|
||||
x: VarArg("str")
|
||||
|
||||
x: List[List[List["MyClass"]]]
|
||||
|
||||
x: NamedTuple("X", [("foo", "int"), ("bar", "str")])
|
||||
|
||||
x: NamedTuple("X", fields=[("foo", "int"), ("bar", "str")])
|
||||
|
||||
x: NamedTuple(typename="X", fields=[("foo", "int")])
|
||||
|
||||
X: MyCallable("X")
|
||||
|
||||
|
||||
# OK
|
||||
class D(TypedDict):
|
||||
E: TypedDict("E")
|
||||
|
||||
|
||||
x: Annotated[()]
|
||||
|
||||
x: DefaultNamedArg(name="name", quox="str")
|
||||
|
||||
x: DefaultNamedArg(name="name")
|
||||
|
||||
x: NamedTuple("X", [("foo",), ("bar",)])
|
||||
|
||||
x: NamedTuple("X", ["foo", "bar"])
|
||||
|
||||
x: NamedTuple()
|
||||
|
||||
x: Literal["foo", "bar"]
|
||||
|
||||
x = cast(x, "str")
|
||||
|
||||
|
||||
def foo(x, *args, **kwargs):
|
||||
...
|
||||
|
||||
|
||||
def foo(*, inplace):
|
||||
...
|
||||
|
||||
|
||||
x: Annotated[1:2] = ...
|
||||
|
||||
x = TypeVar("x", "str", "int")
|
||||
|
||||
x = cast("str", x)
|
||||
|
||||
X = List["MyClass"]
|
||||
@@ -6,6 +6,7 @@ from fractions import Fraction
|
||||
from pathlib import Path
|
||||
from typing import ClassVar, NamedTuple
|
||||
|
||||
|
||||
def default_function() -> list[int]:
|
||||
return []
|
||||
|
||||
@@ -25,12 +26,13 @@ class A:
|
||||
fine_timedelta: datetime.timedelta = datetime.timedelta(hours=7)
|
||||
fine_tuple: tuple[int] = tuple([1])
|
||||
fine_regex: re.Pattern = re.compile(r".*")
|
||||
fine_float: float = float('-inf')
|
||||
fine_float: float = float("-inf")
|
||||
fine_int: int = int(12)
|
||||
fine_complex: complex = complex(1, 2)
|
||||
fine_str: str = str("foo")
|
||||
fine_bool: bool = bool("foo")
|
||||
fine_fraction: Fraction = Fraction(1,2)
|
||||
fine_fraction: Fraction = Fraction(1, 2)
|
||||
|
||||
|
||||
DEFAULT_IMMUTABLETYPE_FOR_ALL_DATACLASSES = ImmutableType(40)
|
||||
DEFAULT_A_FOR_ALL_DATACLASSES = A([1, 2, 3])
|
||||
@@ -45,3 +47,25 @@ class B:
|
||||
okay_variant: A = DEFAULT_A_FOR_ALL_DATACLASSES
|
||||
|
||||
fine_dataclass_function: list[int] = field(default_factory=list)
|
||||
|
||||
|
||||
class IntConversionDescriptor:
|
||||
def __init__(self, *, default):
|
||||
self._default = default
|
||||
|
||||
def __set_name__(self, owner, name):
|
||||
self._name = "_" + name
|
||||
|
||||
def __get__(self, obj, type):
|
||||
if obj is None:
|
||||
return self._default
|
||||
|
||||
return getattr(obj, self._name, self._default)
|
||||
|
||||
def __set__(self, obj, value):
|
||||
setattr(obj, self._name, int(value))
|
||||
|
||||
|
||||
@dataclass
|
||||
class InventoryItem:
|
||||
quantity_on_hand: IntConversionDescriptor = IntConversionDescriptor(default=100)
|
||||
|
||||
@@ -34,3 +34,7 @@ f"{ascii(bla)}" # OK
|
||||
" intermediary content "
|
||||
f" that flows {repr(obj)} of type {type(obj)}.{additional_message}" # RUF010
|
||||
)
|
||||
|
||||
|
||||
# OK
|
||||
f"{str({})}"
|
||||
|
||||
@@ -48,6 +48,10 @@ def f(arg: typing.Optional[int] = None):
|
||||
# Union
|
||||
|
||||
|
||||
def f(arg: Union[None] = None):
|
||||
pass
|
||||
|
||||
|
||||
def f(arg: Union[None, int] = None):
|
||||
pass
|
||||
|
||||
@@ -68,6 +72,10 @@ def f(arg: Union = None): # RUF013
|
||||
pass
|
||||
|
||||
|
||||
def f(arg: Union[int] = None): # RUF013
|
||||
pass
|
||||
|
||||
|
||||
def f(arg: Union[int, str] = None): # RUF013
|
||||
pass
|
||||
|
||||
@@ -106,10 +114,18 @@ def f(arg: None = None):
|
||||
pass
|
||||
|
||||
|
||||
def f(arg: Literal[None] = None):
|
||||
pass
|
||||
|
||||
|
||||
def f(arg: Literal[1, 2, None, 3] = None):
|
||||
pass
|
||||
|
||||
|
||||
def f(arg: Literal[1] = None): # RUF013
|
||||
pass
|
||||
|
||||
|
||||
def f(arg: Literal[1, "foo"] = None): # RUF013
|
||||
pass
|
||||
|
||||
|
||||
44
crates/ruff/resources/test/fixtures/ruff/RUF015.py
vendored
Normal file
44
crates/ruff/resources/test/fixtures/ruff/RUF015.py
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
x = range(10)
|
||||
|
||||
# RUF015
|
||||
list(x)[0]
|
||||
list(x)[:1]
|
||||
list(x)[:1:1]
|
||||
list(x)[:1:2]
|
||||
tuple(x)[0]
|
||||
tuple(x)[:1]
|
||||
tuple(x)[:1:1]
|
||||
tuple(x)[:1:2]
|
||||
list(i for i in x)[0]
|
||||
list(i for i in x)[:1]
|
||||
list(i for i in x)[:1:1]
|
||||
list(i for i in x)[:1:2]
|
||||
[i for i in x][0]
|
||||
[i for i in x][:1]
|
||||
[i for i in x][:1:1]
|
||||
[i for i in x][:1:2]
|
||||
|
||||
# OK (not indexing (solely) the first element)
|
||||
list(x)
|
||||
list(x)[1]
|
||||
list(x)[-1]
|
||||
list(x)[1:]
|
||||
list(x)[:3:2]
|
||||
list(x)[::2]
|
||||
list(x)[::]
|
||||
[i for i in x]
|
||||
[i for i in x][1]
|
||||
[i for i in x][-1]
|
||||
[i for i in x][1:]
|
||||
[i for i in x][:3:2]
|
||||
[i for i in x][::2]
|
||||
[i for i in x][::]
|
||||
|
||||
# OK (doesn't mirror the underlying list)
|
||||
[i + 1 for i in x][0]
|
||||
[i for i in x if i > 5][0]
|
||||
[(i, i + 1) for i in x][0]
|
||||
|
||||
# OK (multiple generators)
|
||||
y = range(10)
|
||||
[i + j for i in x for j in y][0]
|
||||
115
crates/ruff/resources/test/fixtures/ruff/RUF016.py
vendored
Normal file
115
crates/ruff/resources/test/fixtures/ruff/RUF016.py
vendored
Normal file
@@ -0,0 +1,115 @@
|
||||
# Should not emit for valid access with index
|
||||
var = "abc"[0]
|
||||
var = f"abc"[0]
|
||||
var = [1, 2, 3][0]
|
||||
var = (1, 2, 3)[0]
|
||||
var = b"abc"[0]
|
||||
|
||||
# Should not emit for valid access with slice
|
||||
var = "abc"[0:2]
|
||||
var = f"abc"[0:2]
|
||||
var = b"abc"[0:2]
|
||||
var = [1, 2, 3][0:2]
|
||||
var = (1, 2, 3)[0:2]
|
||||
var = [1, 2, 3][None:2]
|
||||
var = [1, 2, 3][0:None]
|
||||
var = [1, 2, 3][:2]
|
||||
var = [1, 2, 3][0:]
|
||||
|
||||
# Should emit for invalid access on strings
|
||||
var = "abc"["x"]
|
||||
var = f"abc"["x"]
|
||||
|
||||
# Should emit for invalid access on bytes
|
||||
var = b"abc"["x"]
|
||||
|
||||
# Should emit for invalid access on lists and tuples
|
||||
var = [1, 2, 3]["x"]
|
||||
var = (1, 2, 3)["x"]
|
||||
|
||||
# Should emit for invalid access on list comprehensions
|
||||
var = [x for x in range(10)]["x"]
|
||||
|
||||
# Should emit for invalid access using tuple
|
||||
var = "abc"[1, 2]
|
||||
|
||||
# Should emit for invalid access using string
|
||||
var = [1, 2]["x"]
|
||||
|
||||
# Should emit for invalid access using float
|
||||
var = [1, 2][0.25]
|
||||
|
||||
# Should emit for invalid access using dict
|
||||
var = [1, 2][{"x": "y"}]
|
||||
|
||||
# Should emit for invalid access using dict comp
|
||||
var = [1, 2][{x: "y" for x in range(2)}]
|
||||
|
||||
# Should emit for invalid access using list
|
||||
var = [1, 2][2, 3]
|
||||
|
||||
# Should emit for invalid access using list comp
|
||||
var = [1, 2][[x for x in range(2)]]
|
||||
|
||||
# Should emit on invalid access using set
|
||||
var = [1, 2][{"x", "y"}]
|
||||
|
||||
# Should emit on invalid access using set comp
|
||||
var = [1, 2][{x for x in range(2)}]
|
||||
|
||||
# Should emit on invalid access using bytes
|
||||
var = [1, 2][b"x"]
|
||||
|
||||
# Should emit for non-integer slice start
|
||||
var = [1, 2, 3]["x":2]
|
||||
var = [1, 2, 3][f"x":2]
|
||||
var = [1, 2, 3][1.2:2]
|
||||
var = [1, 2, 3][{"x"}:2]
|
||||
var = [1, 2, 3][{x for x in range(2)}:2]
|
||||
var = [1, 2, 3][{"x": x for x in range(2)}:2]
|
||||
var = [1, 2, 3][[x for x in range(2)]:2]
|
||||
|
||||
# Should emit for non-integer slice end
|
||||
var = [1, 2, 3][0:"x"]
|
||||
var = [1, 2, 3][0:f"x"]
|
||||
var = [1, 2, 3][0:1.2]
|
||||
var = [1, 2, 3][0:{"x"}]
|
||||
var = [1, 2, 3][0:{x for x in range(2)}]
|
||||
var = [1, 2, 3][0:{"x": x for x in range(2)}]
|
||||
var = [1, 2, 3][0:[x for x in range(2)]]
|
||||
|
||||
# Should emit for non-integer slice step
|
||||
var = [1, 2, 3][0:1:"x"]
|
||||
var = [1, 2, 3][0:1:f"x"]
|
||||
var = [1, 2, 3][0:1:1.2]
|
||||
var = [1, 2, 3][0:1:{"x"}]
|
||||
var = [1, 2, 3][0:1:{x for x in range(2)}]
|
||||
var = [1, 2, 3][0:1:{"x": x for x in range(2)}]
|
||||
var = [1, 2, 3][0:1:[x for x in range(2)]]
|
||||
|
||||
# Should emit for non-integer slice start and end; should emit twice with specific ranges
|
||||
var = [1, 2, 3]["x":"y"]
|
||||
|
||||
# Should emit once for repeated invalid access
|
||||
var = [1, 2, 3]["x"]["y"]["z"]
|
||||
|
||||
# Cannot emit on invalid access using variable in index
|
||||
x = "x"
|
||||
var = "abc"[x]
|
||||
|
||||
# Cannot emit on invalid access using call
|
||||
def func():
|
||||
return 1
|
||||
var = "abc"[func()]
|
||||
|
||||
# Cannot emit on invalid access using a variable in parent
|
||||
x = [1, 2, 3]
|
||||
var = x["y"]
|
||||
|
||||
# Cannot emit for invalid access on byte array
|
||||
var = bytearray(b"abc")["x"]
|
||||
|
||||
# Cannot emit for slice bound using variable
|
||||
x = "x"
|
||||
var = [1, 2, 3][0:x]
|
||||
var = [1, 2, 3][x:1]
|
||||
@@ -62,6 +62,5 @@ def fine():
|
||||
def fine():
|
||||
try:
|
||||
raise ValueError("a doesn't exist")
|
||||
|
||||
except TypeError: # A different exception is caught
|
||||
print("A different exception is caught")
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
use anyhow::{bail, Result};
|
||||
use ruff_text_size::{TextLen, TextRange, TextSize};
|
||||
use rustpython_parser::ast::{self, ExceptHandler, Expr, Keyword, Ranged, Stmt};
|
||||
use rustpython_parser::{lexer, Mode, Tok};
|
||||
use rustpython_parser::{lexer, Mode};
|
||||
|
||||
use ruff_diagnostics::Edit;
|
||||
use ruff_python_ast::helpers;
|
||||
@@ -98,7 +98,7 @@ pub(crate) fn remove_argument(
|
||||
// Case 1: there is only one argument.
|
||||
let mut count = 0u32;
|
||||
for (tok, range) in lexer::lex_starts_at(contents, Mode::Module, call_at).flatten() {
|
||||
if matches!(tok, Tok::Lpar) {
|
||||
if tok.is_lpar() {
|
||||
if count == 0 {
|
||||
fix_start = Some(if remove_parentheses {
|
||||
range.start()
|
||||
@@ -109,7 +109,7 @@ pub(crate) fn remove_argument(
|
||||
count = count.saturating_add(1);
|
||||
}
|
||||
|
||||
if matches!(tok, Tok::Rpar) {
|
||||
if tok.is_rpar() {
|
||||
count = count.saturating_sub(1);
|
||||
if count == 0 {
|
||||
fix_end = Some(if remove_parentheses {
|
||||
@@ -131,11 +131,11 @@ pub(crate) fn remove_argument(
|
||||
let mut seen_comma = false;
|
||||
for (tok, range) in lexer::lex_starts_at(contents, Mode::Module, call_at).flatten() {
|
||||
if seen_comma {
|
||||
if matches!(tok, Tok::NonLogicalNewline) {
|
||||
if tok.is_non_logical_newline() {
|
||||
// Also delete any non-logical newlines after the comma.
|
||||
continue;
|
||||
}
|
||||
fix_end = Some(if matches!(tok, Tok::Newline) {
|
||||
fix_end = Some(if tok.is_newline() {
|
||||
range.end()
|
||||
} else {
|
||||
range.start()
|
||||
@@ -145,7 +145,7 @@ pub(crate) fn remove_argument(
|
||||
if range.start() == expr_range.start() {
|
||||
fix_start = Some(range.start());
|
||||
}
|
||||
if fix_start.is_some() && matches!(tok, Tok::Comma) {
|
||||
if fix_start.is_some() && tok.is_comma() {
|
||||
seen_comma = true;
|
||||
}
|
||||
}
|
||||
@@ -157,7 +157,7 @@ pub(crate) fn remove_argument(
|
||||
fix_end = Some(expr_range.end());
|
||||
break;
|
||||
}
|
||||
if matches!(tok, Tok::Comma) {
|
||||
if tok.is_comma() {
|
||||
fix_start = Some(range.start());
|
||||
}
|
||||
}
|
||||
@@ -317,10 +317,10 @@ mod tests {
|
||||
Some(TextSize::from(6))
|
||||
);
|
||||
|
||||
let contents = r#"
|
||||
let contents = r"
|
||||
x = 1 \
|
||||
; y = 1
|
||||
"#
|
||||
"
|
||||
.trim();
|
||||
let program = Suite::parse(contents, "<filename>")?;
|
||||
let stmt = program.first().unwrap();
|
||||
@@ -349,10 +349,10 @@ x = 1 \
|
||||
TextSize::from(6)
|
||||
);
|
||||
|
||||
let contents = r#"
|
||||
let contents = r"
|
||||
x = 1 \
|
||||
; y = 1
|
||||
"#
|
||||
"
|
||||
.trim();
|
||||
let locator = Locator::new(contents);
|
||||
assert_eq!(
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -2,6 +2,7 @@
|
||||
|
||||
use itertools::Itertools;
|
||||
use ruff_text_size::{TextLen, TextRange, TextSize};
|
||||
use rustpython_parser::ast::Ranged;
|
||||
|
||||
use ruff_diagnostics::{Diagnostic, Edit, Fix};
|
||||
use ruff_python_ast::source_code::Locator;
|
||||
@@ -22,7 +23,7 @@ pub(crate) fn check_noqa(
|
||||
settings: &Settings,
|
||||
) -> Vec<usize> {
|
||||
// Identify any codes that are globally exempted (within the current file).
|
||||
let exemption = noqa::file_exemption(locator.contents(), comment_ranges);
|
||||
let exemption = FileExemption::try_extract(locator.contents(), comment_ranges, locator);
|
||||
|
||||
// Extract all `noqa` directives.
|
||||
let mut noqa_directives = NoqaDirectives::from_commented_ranges(comment_ranges, locator);
|
||||
@@ -37,19 +38,19 @@ pub(crate) fn check_noqa(
|
||||
}
|
||||
|
||||
match &exemption {
|
||||
FileExemption::All => {
|
||||
Some(FileExemption::All) => {
|
||||
// If the file is exempted, ignore all diagnostics.
|
||||
ignored_diagnostics.push(index);
|
||||
continue;
|
||||
}
|
||||
FileExemption::Codes(codes) => {
|
||||
Some(FileExemption::Codes(codes)) => {
|
||||
// If the diagnostic is ignored by a global exemption, ignore it.
|
||||
if codes.contains(&diagnostic.kind.rule().noqa_code()) {
|
||||
ignored_diagnostics.push(index);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
FileExemption::None => {}
|
||||
None => {}
|
||||
}
|
||||
|
||||
let noqa_offsets = diagnostic
|
||||
@@ -63,15 +64,15 @@ pub(crate) fn check_noqa(
|
||||
if let Some(directive_line) = noqa_directives.find_line_with_directive_mut(noqa_offset)
|
||||
{
|
||||
let suppressed = match &directive_line.directive {
|
||||
Directive::All(..) => {
|
||||
Directive::All(_) => {
|
||||
directive_line
|
||||
.matches
|
||||
.push(diagnostic.kind.rule().noqa_code());
|
||||
ignored_diagnostics.push(index);
|
||||
true
|
||||
}
|
||||
Directive::Codes(.., codes, _) => {
|
||||
if noqa::includes(diagnostic.kind.rule(), codes) {
|
||||
Directive::Codes(directive) => {
|
||||
if noqa::includes(diagnostic.kind.rule(), directive.codes()) {
|
||||
directive_line
|
||||
.matches
|
||||
.push(diagnostic.kind.rule().noqa_code());
|
||||
@@ -81,7 +82,6 @@ pub(crate) fn check_noqa(
|
||||
false
|
||||
}
|
||||
}
|
||||
Directive::None => unreachable!(),
|
||||
};
|
||||
|
||||
if suppressed {
|
||||
@@ -95,36 +95,31 @@ pub(crate) fn check_noqa(
|
||||
if analyze_directives && settings.rules.enabled(Rule::UnusedNOQA) {
|
||||
for line in noqa_directives.lines() {
|
||||
match &line.directive {
|
||||
Directive::All(leading_spaces, noqa_range, trailing_spaces) => {
|
||||
Directive::All(directive) => {
|
||||
if line.matches.is_empty() {
|
||||
let mut diagnostic =
|
||||
Diagnostic::new(UnusedNOQA { codes: None }, *noqa_range);
|
||||
Diagnostic::new(UnusedNOQA { codes: None }, directive.range());
|
||||
if settings.rules.should_fix(diagnostic.kind.rule()) {
|
||||
#[allow(deprecated)]
|
||||
diagnostic.set_fix_from_edit(delete_noqa(
|
||||
*leading_spaces,
|
||||
*noqa_range,
|
||||
*trailing_spaces,
|
||||
locator,
|
||||
));
|
||||
diagnostic.set_fix_from_edit(delete_noqa(directive.range(), locator));
|
||||
}
|
||||
diagnostics.push(diagnostic);
|
||||
}
|
||||
}
|
||||
Directive::Codes(leading_spaces, range, codes, trailing_spaces) => {
|
||||
Directive::Codes(directive) => {
|
||||
let mut disabled_codes = vec![];
|
||||
let mut unknown_codes = vec![];
|
||||
let mut unmatched_codes = vec![];
|
||||
let mut valid_codes = vec![];
|
||||
let mut self_ignore = false;
|
||||
for code in codes {
|
||||
for code in directive.codes() {
|
||||
let code = get_redirect_target(code).unwrap_or(code);
|
||||
if Rule::UnusedNOQA.noqa_code() == code {
|
||||
self_ignore = true;
|
||||
break;
|
||||
}
|
||||
|
||||
if line.matches.iter().any(|m| *m == code)
|
||||
if line.matches.iter().any(|match_| *match_ == code)
|
||||
|| settings.external.contains(code)
|
||||
{
|
||||
valid_codes.push(code);
|
||||
@@ -166,29 +161,24 @@ pub(crate) fn check_noqa(
|
||||
.collect(),
|
||||
}),
|
||||
},
|
||||
*range,
|
||||
directive.range(),
|
||||
);
|
||||
if settings.rules.should_fix(diagnostic.kind.rule()) {
|
||||
if valid_codes.is_empty() {
|
||||
#[allow(deprecated)]
|
||||
diagnostic.set_fix_from_edit(delete_noqa(
|
||||
*leading_spaces,
|
||||
*range,
|
||||
*trailing_spaces,
|
||||
locator,
|
||||
));
|
||||
diagnostic
|
||||
.set_fix_from_edit(delete_noqa(directive.range(), locator));
|
||||
} else {
|
||||
#[allow(deprecated)]
|
||||
diagnostic.set_fix(Fix::unspecified(Edit::range_replacement(
|
||||
format!("# noqa: {}", valid_codes.join(", ")),
|
||||
*range,
|
||||
directive.range(),
|
||||
)));
|
||||
}
|
||||
}
|
||||
diagnostics.push(diagnostic);
|
||||
}
|
||||
}
|
||||
Directive::None => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -198,38 +188,46 @@ pub(crate) fn check_noqa(
|
||||
}
|
||||
|
||||
/// Generate a [`Edit`] to delete a `noqa` directive.
|
||||
fn delete_noqa(
|
||||
leading_spaces: TextSize,
|
||||
noqa_range: TextRange,
|
||||
trailing_spaces: TextSize,
|
||||
locator: &Locator,
|
||||
) -> Edit {
|
||||
let line_range = locator.line_range(noqa_range.start());
|
||||
fn delete_noqa(range: TextRange, locator: &Locator) -> Edit {
|
||||
let line_range = locator.line_range(range.start());
|
||||
|
||||
// Compute the leading space.
|
||||
let prefix = locator.slice(TextRange::new(line_range.start(), range.start()));
|
||||
let leading_space = prefix
|
||||
.rfind(|c: char| !c.is_whitespace())
|
||||
.map_or(prefix.len(), |i| prefix.len() - i - 1);
|
||||
let leading_space_len = TextSize::try_from(leading_space).unwrap();
|
||||
|
||||
// Compute the trailing space.
|
||||
let suffix = locator.slice(TextRange::new(range.end(), line_range.end()));
|
||||
let trailing_space = suffix
|
||||
.find(|c: char| !c.is_whitespace())
|
||||
.map_or(suffix.len(), |i| i);
|
||||
let trailing_space_len = TextSize::try_from(trailing_space).unwrap();
|
||||
|
||||
// Ex) `# noqa`
|
||||
if line_range
|
||||
== TextRange::new(
|
||||
noqa_range.start() - leading_spaces,
|
||||
noqa_range.end() + trailing_spaces,
|
||||
range.start() - leading_space_len,
|
||||
range.end() + trailing_space_len,
|
||||
)
|
||||
{
|
||||
let full_line_end = locator.full_line_end(line_range.end());
|
||||
Edit::deletion(line_range.start(), full_line_end)
|
||||
}
|
||||
// Ex) `x = 1 # noqa`
|
||||
else if noqa_range.end() + trailing_spaces == line_range.end() {
|
||||
Edit::deletion(noqa_range.start() - leading_spaces, line_range.end())
|
||||
else if range.end() + trailing_space_len == line_range.end() {
|
||||
Edit::deletion(range.start() - leading_space_len, line_range.end())
|
||||
}
|
||||
// Ex) `x = 1 # noqa # type: ignore`
|
||||
else if locator.contents()[usize::from(noqa_range.end() + trailing_spaces)..].starts_with('#')
|
||||
{
|
||||
Edit::deletion(noqa_range.start(), noqa_range.end() + trailing_spaces)
|
||||
else if locator.contents()[usize::from(range.end() + trailing_space_len)..].starts_with('#') {
|
||||
Edit::deletion(range.start(), range.end() + trailing_space_len)
|
||||
}
|
||||
// Ex) `x = 1 # noqa here`
|
||||
else {
|
||||
Edit::deletion(
|
||||
noqa_range.start() + "# ".text_len(),
|
||||
noqa_range.end() + trailing_spaces,
|
||||
range.start() + "# ".text_len(),
|
||||
range.end() + trailing_space_len,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,9 +7,9 @@ use ruff_diagnostics::Diagnostic;
|
||||
use ruff_python_ast::source_code::{Indexer, Locator, Stylist};
|
||||
use ruff_python_whitespace::UniversalNewlines;
|
||||
|
||||
use crate::comments::shebang::ShebangDirective;
|
||||
use crate::registry::Rule;
|
||||
use crate::rules::flake8_copyright::rules::missing_copyright_notice;
|
||||
use crate::rules::flake8_executable::helpers::{extract_shebang, ShebangDirective};
|
||||
use crate::rules::flake8_executable::rules::{
|
||||
shebang_missing, shebang_newline, shebang_not_executable, shebang_python, shebang_whitespace,
|
||||
};
|
||||
@@ -87,32 +87,33 @@ pub(crate) fn check_physical_lines(
|
||||
|| enforce_shebang_newline
|
||||
|| enforce_shebang_python
|
||||
{
|
||||
let shebang = extract_shebang(&line);
|
||||
if enforce_shebang_not_executable {
|
||||
if let Some(diagnostic) = shebang_not_executable(path, line.range(), &shebang) {
|
||||
diagnostics.push(diagnostic);
|
||||
if let Some(shebang) = ShebangDirective::try_extract(&line) {
|
||||
has_any_shebang = true;
|
||||
if enforce_shebang_not_executable {
|
||||
if let Some(diagnostic) =
|
||||
shebang_not_executable(path, line.range(), &shebang)
|
||||
{
|
||||
diagnostics.push(diagnostic);
|
||||
}
|
||||
}
|
||||
}
|
||||
if enforce_shebang_missing {
|
||||
if !has_any_shebang && matches!(shebang, ShebangDirective::Match(..)) {
|
||||
has_any_shebang = true;
|
||||
if enforce_shebang_whitespace {
|
||||
if let Some(diagnostic) =
|
||||
shebang_whitespace(line.range(), &shebang, fix_shebang_whitespace)
|
||||
{
|
||||
diagnostics.push(diagnostic);
|
||||
}
|
||||
}
|
||||
}
|
||||
if enforce_shebang_whitespace {
|
||||
if let Some(diagnostic) =
|
||||
shebang_whitespace(line.range(), &shebang, fix_shebang_whitespace)
|
||||
{
|
||||
diagnostics.push(diagnostic);
|
||||
if enforce_shebang_newline {
|
||||
if let Some(diagnostic) =
|
||||
shebang_newline(line.range(), &shebang, index == 0)
|
||||
{
|
||||
diagnostics.push(diagnostic);
|
||||
}
|
||||
}
|
||||
}
|
||||
if enforce_shebang_newline {
|
||||
if let Some(diagnostic) = shebang_newline(line.range(), &shebang, index == 0) {
|
||||
diagnostics.push(diagnostic);
|
||||
}
|
||||
}
|
||||
if enforce_shebang_python {
|
||||
if let Some(diagnostic) = shebang_python(line.range(), &shebang) {
|
||||
diagnostics.push(diagnostic);
|
||||
if enforce_shebang_python {
|
||||
if let Some(diagnostic) = shebang_python(line.range(), &shebang) {
|
||||
diagnostics.push(diagnostic);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,6 +3,9 @@
|
||||
use rustpython_parser::lexer::LexResult;
|
||||
use rustpython_parser::Tok;
|
||||
|
||||
use ruff_diagnostics::Diagnostic;
|
||||
use ruff_python_ast::source_code::{Indexer, Locator};
|
||||
|
||||
use crate::directives::TodoComment;
|
||||
use crate::lex::docstring_detection::StateMachine;
|
||||
use crate::registry::{AsRule, Rule};
|
||||
@@ -12,8 +15,6 @@ use crate::rules::{
|
||||
flake8_todos, pycodestyle, pylint, pyupgrade, ruff,
|
||||
};
|
||||
use crate::settings::Settings;
|
||||
use ruff_diagnostics::Diagnostic;
|
||||
use ruff_python_ast::source_code::{Indexer, Locator};
|
||||
|
||||
pub(crate) fn check_tokens(
|
||||
locator: &Locator,
|
||||
@@ -88,10 +89,11 @@ pub(crate) fn check_tokens(
|
||||
};
|
||||
|
||||
if matches!(tok, Tok::String { .. } | Tok::Comment(_)) {
|
||||
diagnostics.extend(ruff::rules::ambiguous_unicode_character(
|
||||
ruff::rules::ambiguous_unicode_character(
|
||||
&mut diagnostics,
|
||||
locator,
|
||||
range,
|
||||
if matches!(tok, Tok::String { .. }) {
|
||||
if tok.is_string() {
|
||||
if is_docstring {
|
||||
Context::Docstring
|
||||
} else {
|
||||
@@ -101,93 +103,77 @@ pub(crate) fn check_tokens(
|
||||
Context::Comment
|
||||
},
|
||||
settings,
|
||||
));
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ERA001
|
||||
if enforce_commented_out_code {
|
||||
diagnostics.extend(eradicate::rules::commented_out_code(
|
||||
locator, indexer, settings,
|
||||
));
|
||||
eradicate::rules::commented_out_code(&mut diagnostics, locator, indexer, settings);
|
||||
}
|
||||
|
||||
// W605
|
||||
if enforce_invalid_escape_sequence {
|
||||
for (tok, range) in tokens.iter().flatten() {
|
||||
if matches!(tok, Tok::String { .. }) {
|
||||
diagnostics.extend(pycodestyle::rules::invalid_escape_sequence(
|
||||
if tok.is_string() {
|
||||
pycodestyle::rules::invalid_escape_sequence(
|
||||
&mut diagnostics,
|
||||
locator,
|
||||
*range,
|
||||
settings.rules.should_fix(Rule::InvalidEscapeSequence),
|
||||
));
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
// PLE2510, PLE2512, PLE2513
|
||||
if enforce_invalid_string_character {
|
||||
for (tok, range) in tokens.iter().flatten() {
|
||||
if matches!(tok, Tok::String { .. }) {
|
||||
diagnostics.extend(
|
||||
pylint::rules::invalid_string_characters(locator, *range)
|
||||
.into_iter()
|
||||
.filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())),
|
||||
);
|
||||
if tok.is_string() {
|
||||
pylint::rules::invalid_string_characters(&mut diagnostics, *range, locator);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// E701, E702, E703
|
||||
if enforce_compound_statements {
|
||||
diagnostics.extend(
|
||||
pycodestyle::rules::compound_statements(tokens, locator, indexer, settings)
|
||||
.into_iter()
|
||||
.filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())),
|
||||
pycodestyle::rules::compound_statements(
|
||||
&mut diagnostics,
|
||||
tokens,
|
||||
locator,
|
||||
indexer,
|
||||
settings,
|
||||
);
|
||||
}
|
||||
|
||||
// Q001, Q002, Q003
|
||||
if enforce_quotes {
|
||||
diagnostics.extend(
|
||||
flake8_quotes::rules::from_tokens(tokens, locator, settings)
|
||||
.into_iter()
|
||||
.filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())),
|
||||
);
|
||||
flake8_quotes::rules::from_tokens(&mut diagnostics, tokens, locator, settings);
|
||||
}
|
||||
|
||||
// ISC001, ISC002
|
||||
if enforce_implicit_string_concatenation {
|
||||
diagnostics.extend(
|
||||
flake8_implicit_str_concat::rules::implicit(
|
||||
tokens,
|
||||
&settings.flake8_implicit_str_concat,
|
||||
locator,
|
||||
)
|
||||
.into_iter()
|
||||
.filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())),
|
||||
flake8_implicit_str_concat::rules::implicit(
|
||||
&mut diagnostics,
|
||||
tokens,
|
||||
&settings.flake8_implicit_str_concat,
|
||||
locator,
|
||||
);
|
||||
}
|
||||
|
||||
// COM812, COM818, COM819
|
||||
if enforce_trailing_comma {
|
||||
diagnostics.extend(
|
||||
flake8_commas::rules::trailing_commas(tokens, locator, settings)
|
||||
.into_iter()
|
||||
.filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())),
|
||||
);
|
||||
flake8_commas::rules::trailing_commas(&mut diagnostics, tokens, locator, settings);
|
||||
}
|
||||
|
||||
// UP034
|
||||
if enforce_extraneous_parenthesis {
|
||||
diagnostics.extend(
|
||||
pyupgrade::rules::extraneous_parentheses(tokens, locator, settings).into_iter(),
|
||||
);
|
||||
pyupgrade::rules::extraneous_parentheses(&mut diagnostics, tokens, locator, settings);
|
||||
}
|
||||
|
||||
// PYI033
|
||||
if enforce_type_comment_in_stub && is_stub {
|
||||
diagnostics.extend(flake8_pyi::rules::type_comment_in_stub(locator, indexer));
|
||||
flake8_pyi::rules::type_comment_in_stub(&mut diagnostics, locator, indexer);
|
||||
}
|
||||
|
||||
// TD001, TD002, TD003, TD004, TD005, TD006, TD007
|
||||
@@ -203,18 +189,12 @@ pub(crate) fn check_tokens(
|
||||
})
|
||||
.collect();
|
||||
|
||||
diagnostics.extend(
|
||||
flake8_todos::rules::todos(&todo_comments, locator, indexer, settings)
|
||||
.into_iter()
|
||||
.filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())),
|
||||
);
|
||||
flake8_todos::rules::todos(&mut diagnostics, &todo_comments, locator, indexer, settings);
|
||||
|
||||
diagnostics.extend(
|
||||
flake8_fixme::rules::todos(&todo_comments)
|
||||
.into_iter()
|
||||
.filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())),
|
||||
);
|
||||
flake8_fixme::rules::todos(&mut diagnostics, &todo_comments);
|
||||
}
|
||||
|
||||
diagnostics.retain(|diagnostic| settings.rules.enabled(diagnostic.kind.rule()));
|
||||
|
||||
diagnostics
|
||||
}
|
||||
|
||||
@@ -14,6 +14,18 @@ use crate::rules;
|
||||
#[derive(PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub struct NoqaCode(&'static str, &'static str);
|
||||
|
||||
impl NoqaCode {
|
||||
/// Return the prefix for the [`NoqaCode`], e.g., `SIM` for `SIM101`.
|
||||
pub fn prefix(&self) -> &str {
|
||||
self.0
|
||||
}
|
||||
|
||||
/// Return the suffix for the [`NoqaCode`], e.g., `101` for `SIM101`.
|
||||
pub fn suffix(&self) -> &str {
|
||||
self.1
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for NoqaCode {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
std::fmt::Display::fmt(self, f)
|
||||
@@ -156,6 +168,9 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
|
||||
(Pyflakes, "901") => (RuleGroup::Unspecified, rules::pyflakes::rules::RaiseNotImplemented),
|
||||
|
||||
// pylint
|
||||
(Pylint, "C0105") => (RuleGroup::Unspecified, rules::pylint::rules::TypeNameIncorrectVariance),
|
||||
(Pylint, "C0131") => (RuleGroup::Unspecified, rules::pylint::rules::TypeBivariance),
|
||||
(Pylint, "C0132") => (RuleGroup::Unspecified, rules::pylint::rules::TypeParamNameMismatch),
|
||||
(Pylint, "C0205") => (RuleGroup::Unspecified, rules::pylint::rules::SingleStringSlots),
|
||||
(Pylint, "C0414") => (RuleGroup::Unspecified, rules::pylint::rules::UselessImportAlias),
|
||||
(Pylint, "C1901") => (RuleGroup::Nursery, rules::pylint::rules::CompareToEmptyString),
|
||||
@@ -194,6 +209,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
|
||||
(Pylint, "R0915") => (RuleGroup::Unspecified, rules::pylint::rules::TooManyStatements),
|
||||
(Pylint, "R1701") => (RuleGroup::Unspecified, rules::pylint::rules::RepeatedIsinstanceCalls),
|
||||
(Pylint, "R1711") => (RuleGroup::Unspecified, rules::pylint::rules::UselessReturn),
|
||||
(Pylint, "R1714") => (RuleGroup::Unspecified, rules::pylint::rules::RepeatedEqualityComparisonTarget),
|
||||
(Pylint, "R1722") => (RuleGroup::Unspecified, rules::pylint::rules::SysExitAlias),
|
||||
(Pylint, "R2004") => (RuleGroup::Unspecified, rules::pylint::rules::MagicValueComparison),
|
||||
(Pylint, "R5501") => (RuleGroup::Unspecified, rules::pylint::rules::CollapsibleElseIf),
|
||||
@@ -251,6 +267,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
|
||||
(Flake8Bugbear, "031") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::ReuseOfGroupbyGenerator),
|
||||
(Flake8Bugbear, "032") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::UnintentionalTypeAnnotation),
|
||||
(Flake8Bugbear, "033") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::DuplicateValue),
|
||||
(Flake8Bugbear, "034") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::ReSubPositionalArgs),
|
||||
(Flake8Bugbear, "904") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::RaiseWithoutFromInsideExcept),
|
||||
(Flake8Bugbear, "905") => (RuleGroup::Unspecified, rules::flake8_bugbear::rules::ZipWithoutExplicitStrict),
|
||||
|
||||
@@ -375,8 +392,8 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
|
||||
(Flake8Simplify, "401") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::IfElseBlockInsteadOfDictGet),
|
||||
(Flake8Simplify, "910") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::DictGetWithNoneDefault),
|
||||
|
||||
// copyright
|
||||
(Copyright, "001") => (RuleGroup::Nursery, rules::flake8_copyright::rules::MissingCopyrightNotice),
|
||||
// flake8-copyright
|
||||
(Flake8Copyright, "001") => (RuleGroup::Nursery, rules::flake8_copyright::rules::MissingCopyrightNotice),
|
||||
|
||||
// pyupgrade
|
||||
(Pyupgrade, "001") => (RuleGroup::Unspecified, rules::pyupgrade::rules::UselessMetaclassType),
|
||||
@@ -587,6 +604,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
|
||||
(PandasVet, "012") => (RuleGroup::Unspecified, rules::pandas_vet::rules::PandasUseOfDotReadTable),
|
||||
(PandasVet, "013") => (RuleGroup::Unspecified, rules::pandas_vet::rules::PandasUseOfDotStack),
|
||||
(PandasVet, "015") => (RuleGroup::Unspecified, rules::pandas_vet::rules::PandasUseOfPdMerge),
|
||||
(PandasVet, "101") => (RuleGroup::Unspecified, rules::pandas_vet::rules::PandasNuniqueConstantSeriesCheck),
|
||||
(PandasVet, "901") => (RuleGroup::Unspecified, rules::pandas_vet::rules::PandasDfVariableName),
|
||||
|
||||
// flake8-errmsg
|
||||
@@ -616,10 +634,13 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
|
||||
(Flake8Pyi, "024") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::CollectionsNamedTuple),
|
||||
(Flake8Pyi, "025") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnaliasedCollectionsAbcSetImport),
|
||||
(Flake8Pyi, "029") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::StrOrReprDefinedInStub),
|
||||
(Flake8Pyi, "030") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnnecessaryLiteralUnion),
|
||||
(Flake8Pyi, "032") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::AnyEqNeAnnotation),
|
||||
(Flake8Pyi, "033") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::TypeCommentInStub),
|
||||
(Flake8Pyi, "034") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::NonSelfReturnType),
|
||||
(Flake8Pyi, "035") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::UnassignedSpecialVariableInStub),
|
||||
(Flake8Pyi, "036") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::BadExitAnnotation),
|
||||
(Flake8Pyi, "041") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::RedundantNumericUnion),
|
||||
(Flake8Pyi, "042") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::SnakeCaseTypeAlias),
|
||||
(Flake8Pyi, "043") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::TSuffixedTypeAlias),
|
||||
(Flake8Pyi, "044") => (RuleGroup::Unspecified, rules::flake8_pyi::rules::FutureAnnotationsInStub),
|
||||
@@ -763,6 +784,8 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
|
||||
(Ruff, "013") => (RuleGroup::Unspecified, rules::ruff::rules::ImplicitOptional),
|
||||
#[cfg(feature = "unreachable-code")]
|
||||
(Ruff, "014") => (RuleGroup::Nursery, rules::ruff::rules::UnreachableCode),
|
||||
(Ruff, "015") => (RuleGroup::Unspecified, rules::ruff::rules::UnnecessaryIterableAllocationForFirstElement),
|
||||
(Ruff, "016") => (RuleGroup::Unspecified, rules::ruff::rules::InvalidIndexType),
|
||||
(Ruff, "100") => (RuleGroup::Unspecified, rules::ruff::rules::UnusedNOQA),
|
||||
(Ruff, "200") => (RuleGroup::Unspecified, rules::ruff::rules::InvalidPyprojectToml),
|
||||
|
||||
|
||||
1
crates/ruff/src/comments/mod.rs
Normal file
1
crates/ruff/src/comments/mod.rs
Normal file
@@ -0,0 +1 @@
|
||||
pub(crate) mod shebang;
|
||||
67
crates/ruff/src/comments/shebang.rs
Normal file
67
crates/ruff/src/comments/shebang.rs
Normal file
@@ -0,0 +1,67 @@
|
||||
use ruff_python_whitespace::{is_python_whitespace, Cursor};
|
||||
use ruff_text_size::{TextLen, TextSize};
|
||||
|
||||
/// A shebang directive (e.g., `#!/usr/bin/env python3`).
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub(crate) struct ShebangDirective<'a> {
|
||||
/// The offset of the directive contents (e.g., `/usr/bin/env python3`) from the start of the
|
||||
/// line.
|
||||
pub(crate) offset: TextSize,
|
||||
/// The contents of the directive (e.g., `"/usr/bin/env python3"`).
|
||||
pub(crate) contents: &'a str,
|
||||
}
|
||||
|
||||
impl<'a> ShebangDirective<'a> {
|
||||
/// Parse a shebang directive from a line, or return `None` if the line does not contain a
|
||||
/// shebang directive.
|
||||
pub(crate) fn try_extract(line: &'a str) -> Option<Self> {
|
||||
let mut cursor = Cursor::new(line);
|
||||
|
||||
// Trim whitespace.
|
||||
cursor.eat_while(is_python_whitespace);
|
||||
|
||||
// Trim the `#!` prefix.
|
||||
if !cursor.eat_char('#') {
|
||||
return None;
|
||||
}
|
||||
if !cursor.eat_char('!') {
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(Self {
|
||||
offset: line.text_len() - cursor.text_len(),
|
||||
contents: cursor.chars().as_str(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use insta::assert_debug_snapshot;
|
||||
|
||||
use super::ShebangDirective;
|
||||
|
||||
#[test]
|
||||
fn shebang_non_match() {
|
||||
let source = "not a match";
|
||||
assert_debug_snapshot!(ShebangDirective::try_extract(source));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn shebang_end_of_line() {
|
||||
let source = "print('test') #!/usr/bin/python";
|
||||
assert_debug_snapshot!(ShebangDirective::try_extract(source));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn shebang_match() {
|
||||
let source = "#!/usr/bin/env python";
|
||||
assert_debug_snapshot!(ShebangDirective::try_extract(source));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn shebang_leading_space() {
|
||||
let source = " #!/usr/bin/env python";
|
||||
assert_debug_snapshot!(ShebangDirective::try_extract(source));
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,5 @@
|
||||
---
|
||||
source: crates/ruff/src/comments/shebang.rs
|
||||
expression: "ShebangDirective::try_extract(source)"
|
||||
---
|
||||
None
|
||||
@@ -0,0 +1,10 @@
|
||||
---
|
||||
source: crates/ruff/src/comments/shebang.rs
|
||||
expression: "ShebangDirective::try_extract(source)"
|
||||
---
|
||||
Some(
|
||||
ShebangDirective {
|
||||
offset: 4,
|
||||
contents: "/usr/bin/env python",
|
||||
},
|
||||
)
|
||||
@@ -0,0 +1,10 @@
|
||||
---
|
||||
source: crates/ruff/src/comments/shebang.rs
|
||||
expression: "ShebangDirective::try_extract(source)"
|
||||
---
|
||||
Some(
|
||||
ShebangDirective {
|
||||
offset: 2,
|
||||
contents: "/usr/bin/env python",
|
||||
},
|
||||
)
|
||||
@@ -0,0 +1,5 @@
|
||||
---
|
||||
source: crates/ruff/src/comments/shebang.rs
|
||||
expression: "ShebangDirective::try_extract(source)"
|
||||
---
|
||||
None
|
||||
@@ -427,22 +427,22 @@ ghi
|
||||
NoqaMapping::from_iter([TextRange::new(TextSize::from(6), TextSize::from(28))])
|
||||
);
|
||||
|
||||
let contents = r#"x = \
|
||||
1"#;
|
||||
let contents = r"x = \
|
||||
1";
|
||||
assert_eq!(
|
||||
noqa_mappings(contents),
|
||||
NoqaMapping::from_iter([TextRange::new(TextSize::from(0), TextSize::from(6))])
|
||||
);
|
||||
|
||||
let contents = r#"from foo import \
|
||||
let contents = r"from foo import \
|
||||
bar as baz, \
|
||||
qux as quux"#;
|
||||
qux as quux";
|
||||
assert_eq!(
|
||||
noqa_mappings(contents),
|
||||
NoqaMapping::from_iter([TextRange::new(TextSize::from(0), TextSize::from(36))])
|
||||
);
|
||||
|
||||
let contents = r#"
|
||||
let contents = r"
|
||||
# Foo
|
||||
from foo import \
|
||||
bar as baz, \
|
||||
@@ -450,7 +450,7 @@ from foo import \
|
||||
x = \
|
||||
1
|
||||
y = \
|
||||
2"#;
|
||||
2";
|
||||
assert_eq!(
|
||||
noqa_mappings(contents),
|
||||
NoqaMapping::from_iter([
|
||||
|
||||
@@ -14,6 +14,7 @@ pub const VERSION: &str = env!("CARGO_PKG_VERSION");
|
||||
mod autofix;
|
||||
mod checkers;
|
||||
mod codes;
|
||||
mod comments;
|
||||
mod cst;
|
||||
pub mod directives;
|
||||
mod doc_lines;
|
||||
@@ -38,6 +39,7 @@ mod rule_selector;
|
||||
pub mod rules;
|
||||
pub mod settings;
|
||||
pub mod source_kind;
|
||||
pub mod upstream_categories;
|
||||
|
||||
#[cfg(any(test, fuzzing))]
|
||||
pub mod test;
|
||||
|
||||
@@ -51,7 +51,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn output() {
|
||||
let mut emitter = AzureEmitter::default();
|
||||
let mut emitter = AzureEmitter;
|
||||
let content = capture_emitter_output(&mut emitter, &create_messages());
|
||||
|
||||
assert_snapshot!(content);
|
||||
|
||||
@@ -66,7 +66,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn output() {
|
||||
let mut emitter = GithubEmitter::default();
|
||||
let mut emitter = GithubEmitter;
|
||||
let content = capture_emitter_output(&mut emitter, &create_messages());
|
||||
|
||||
assert_snapshot!(content);
|
||||
|
||||
@@ -108,7 +108,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn output() {
|
||||
let mut emitter = JsonEmitter::default();
|
||||
let mut emitter = JsonEmitter;
|
||||
let content = capture_emitter_output(&mut emitter, &create_messages());
|
||||
|
||||
assert_snapshot!(content);
|
||||
|
||||
@@ -24,14 +24,14 @@ impl Emitter for JsonLinesEmitter {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::message::json_lines::JsonLinesEmitter;
|
||||
use insta::assert_snapshot;
|
||||
|
||||
use crate::message::json_lines::JsonLinesEmitter;
|
||||
use crate::message::tests::{capture_emitter_output, create_messages};
|
||||
|
||||
#[test]
|
||||
fn output() {
|
||||
let mut emitter = JsonLinesEmitter::default();
|
||||
let mut emitter = JsonLinesEmitter;
|
||||
let content = capture_emitter_output(&mut emitter, &create_messages());
|
||||
|
||||
assert_snapshot!(content);
|
||||
|
||||
@@ -93,7 +93,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn output() {
|
||||
let mut emitter = JunitEmitter::default();
|
||||
let mut emitter = JunitEmitter;
|
||||
let content = capture_emitter_output(&mut emitter, &create_messages());
|
||||
|
||||
assert_snapshot!(content);
|
||||
|
||||
@@ -49,7 +49,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn output() {
|
||||
let mut emitter = PylintEmitter::default();
|
||||
let mut emitter = PylintEmitter;
|
||||
let content = capture_emitter_output(&mut emitter, &create_messages());
|
||||
|
||||
assert_snapshot!(content);
|
||||
|
||||
@@ -1,123 +1,188 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::error::Error;
|
||||
use std::fmt::{Display, Write};
|
||||
use std::fs;
|
||||
use std::ops::Add;
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::Result;
|
||||
use itertools::Itertools;
|
||||
use log::warn;
|
||||
use once_cell::sync::Lazy;
|
||||
use regex::Regex;
|
||||
use ruff_text_size::{TextLen, TextRange, TextSize};
|
||||
use rustpython_parser::ast::Ranged;
|
||||
|
||||
use ruff_diagnostics::Diagnostic;
|
||||
use ruff_python_ast::source_code::Locator;
|
||||
use ruff_python_whitespace::{LineEnding, PythonWhitespace};
|
||||
use ruff_python_whitespace::LineEnding;
|
||||
|
||||
use crate::codes::NoqaCode;
|
||||
use crate::registry::{AsRule, Rule, RuleSet};
|
||||
use crate::rule_redirects::get_redirect_target;
|
||||
|
||||
static NOQA_LINE_REGEX: Lazy<Regex> = Lazy::new(|| {
|
||||
Regex::new(
|
||||
r"(?P<leading_spaces>\s*)(?P<noqa>(?i:# noqa)(?::\s?(?P<codes>(?:[A-Z]+[0-9]+)(?:[,\s]+[A-Z]+[0-9]+)*))?)(?P<trailing_spaces>\s*)",
|
||||
)
|
||||
.unwrap()
|
||||
});
|
||||
|
||||
/// A directive to ignore a set of rules for a given line of Python source code (e.g.,
|
||||
/// `# noqa: F401, F841`).
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum Directive<'a> {
|
||||
None,
|
||||
// (leading spaces, noqa_range, trailing_spaces)
|
||||
All(TextSize, TextRange, TextSize),
|
||||
// (leading spaces, start_offset, end_offset, codes, trailing_spaces)
|
||||
Codes(TextSize, TextRange, Vec<&'a str>, TextSize),
|
||||
/// The `noqa` directive ignores all rules (e.g., `# noqa`).
|
||||
All(All),
|
||||
/// The `noqa` directive ignores specific rules (e.g., `# noqa: F401, F841`).
|
||||
Codes(Codes<'a>),
|
||||
}
|
||||
|
||||
/// Extract the noqa `Directive` from a line of Python source code.
|
||||
pub(crate) fn extract_noqa_directive<'a>(range: TextRange, locator: &'a Locator) -> Directive<'a> {
|
||||
let text = &locator.contents()[range];
|
||||
match NOQA_LINE_REGEX.captures(text) {
|
||||
Some(caps) => match (
|
||||
caps.name("leading_spaces"),
|
||||
caps.name("noqa"),
|
||||
caps.name("codes"),
|
||||
caps.name("trailing_spaces"),
|
||||
) {
|
||||
(Some(leading_spaces), Some(noqa), Some(codes), Some(trailing_spaces)) => {
|
||||
let codes = codes
|
||||
.as_str()
|
||||
.split(|c: char| c.is_whitespace() || c == ',')
|
||||
.map(str::trim)
|
||||
.filter(|code| !code.is_empty())
|
||||
.collect_vec();
|
||||
let start = range.start() + TextSize::try_from(noqa.start()).unwrap();
|
||||
if codes.is_empty() {
|
||||
#[allow(deprecated)]
|
||||
let line = locator.compute_line_index(start);
|
||||
warn!("Expected rule codes on `noqa` directive: \"{line}\"");
|
||||
}
|
||||
Directive::Codes(
|
||||
leading_spaces.as_str().text_len(),
|
||||
TextRange::at(start, noqa.as_str().text_len()),
|
||||
codes,
|
||||
trailing_spaces.as_str().text_len(),
|
||||
)
|
||||
impl<'a> Directive<'a> {
|
||||
/// Extract the noqa `Directive` from a line of Python source code.
|
||||
pub(crate) fn try_extract(text: &'a str, offset: TextSize) -> Result<Option<Self>, ParseError> {
|
||||
for (char_index, char) in text.char_indices() {
|
||||
// Only bother checking for the `noqa` literal if the character is `n` or `N`.
|
||||
if !matches!(char, 'n' | 'N') {
|
||||
continue;
|
||||
}
|
||||
|
||||
(Some(leading_spaces), Some(noqa), None, Some(trailing_spaces)) => Directive::All(
|
||||
leading_spaces.as_str().text_len(),
|
||||
TextRange::at(
|
||||
range.start() + TextSize::try_from(noqa.start()).unwrap(),
|
||||
noqa.as_str().text_len(),
|
||||
),
|
||||
trailing_spaces.as_str().text_len(),
|
||||
),
|
||||
_ => Directive::None,
|
||||
},
|
||||
None => Directive::None,
|
||||
}
|
||||
}
|
||||
|
||||
enum ParsedExemption<'a> {
|
||||
None,
|
||||
All,
|
||||
Codes(Vec<&'a str>),
|
||||
}
|
||||
|
||||
/// Return a [`ParsedExemption`] for a given comment line.
|
||||
fn parse_file_exemption(line: &str) -> ParsedExemption {
|
||||
let line = line.trim_whitespace_start();
|
||||
|
||||
if line.starts_with("# flake8: noqa")
|
||||
|| line.starts_with("# flake8: NOQA")
|
||||
|| line.starts_with("# flake8: NoQA")
|
||||
{
|
||||
return ParsedExemption::All;
|
||||
}
|
||||
|
||||
if let Some(remainder) = line
|
||||
.strip_prefix("# ruff: noqa")
|
||||
.or_else(|| line.strip_prefix("# ruff: NOQA"))
|
||||
.or_else(|| line.strip_prefix("# ruff: NoQA"))
|
||||
{
|
||||
if remainder.is_empty() {
|
||||
return ParsedExemption::All;
|
||||
} else if let Some(codes) = remainder.strip_prefix(':') {
|
||||
let codes = codes
|
||||
.split(|c: char| c.is_whitespace() || c == ',')
|
||||
.map(str::trim)
|
||||
.filter(|code| !code.is_empty())
|
||||
.collect_vec();
|
||||
if codes.is_empty() {
|
||||
warn!("Expected rule codes on `noqa` directive: \"{line}\"");
|
||||
// Determine the start of the `noqa` literal.
|
||||
if !matches!(
|
||||
text[char_index..].as_bytes(),
|
||||
[b'n' | b'N', b'o' | b'O', b'q' | b'Q', b'a' | b'A', ..]
|
||||
) {
|
||||
continue;
|
||||
}
|
||||
return ParsedExemption::Codes(codes);
|
||||
|
||||
let noqa_literal_start = char_index;
|
||||
let noqa_literal_end = noqa_literal_start + "noqa".len();
|
||||
|
||||
// Determine the start of the comment.
|
||||
let mut comment_start = noqa_literal_start;
|
||||
|
||||
// Trim any whitespace between the `#` character and the `noqa` literal.
|
||||
comment_start = text[..comment_start].trim_end().len();
|
||||
|
||||
// The next character has to be the `#` character.
|
||||
if text[..comment_start]
|
||||
.chars()
|
||||
.last()
|
||||
.map_or(false, |c| c != '#')
|
||||
{
|
||||
continue;
|
||||
}
|
||||
comment_start -= '#'.len_utf8();
|
||||
|
||||
// If the next character is `:`, then it's a list of codes. Otherwise, it's a directive
|
||||
// to ignore all rules.
|
||||
return Ok(Some(
|
||||
if text[noqa_literal_end..]
|
||||
.chars()
|
||||
.next()
|
||||
.map_or(false, |c| c == ':')
|
||||
{
|
||||
// E.g., `# noqa: F401, F841`.
|
||||
let mut codes_start = noqa_literal_end;
|
||||
|
||||
// Skip the `:` character.
|
||||
codes_start += ':'.len_utf8();
|
||||
|
||||
// Skip any whitespace between the `:` and the codes.
|
||||
codes_start += text[codes_start..]
|
||||
.find(|c: char| !c.is_whitespace())
|
||||
.unwrap_or(0);
|
||||
|
||||
// Extract the comma-separated list of codes.
|
||||
let mut codes = vec![];
|
||||
let mut codes_end = codes_start;
|
||||
let mut leading_space = 0;
|
||||
while let Some(code) = Self::lex_code(&text[codes_end + leading_space..]) {
|
||||
codes.push(code);
|
||||
codes_end += leading_space;
|
||||
codes_end += code.len();
|
||||
|
||||
// Codes can be comma- or whitespace-delimited. Compute the length of the
|
||||
// delimiter, but only add it in the next iteration, once we find the next
|
||||
// code.
|
||||
if let Some(space_between) =
|
||||
text[codes_end..].find(|c: char| !(c.is_whitespace() || c == ','))
|
||||
{
|
||||
leading_space = space_between;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// If we didn't identify any codes, warn.
|
||||
if codes.is_empty() {
|
||||
return Err(ParseError::MissingCodes);
|
||||
}
|
||||
|
||||
let range = TextRange::new(
|
||||
TextSize::try_from(comment_start).unwrap(),
|
||||
TextSize::try_from(codes_end).unwrap(),
|
||||
);
|
||||
|
||||
Self::Codes(Codes {
|
||||
range: range.add(offset),
|
||||
codes,
|
||||
})
|
||||
} else {
|
||||
// E.g., `# noqa`.
|
||||
let range = TextRange::new(
|
||||
TextSize::try_from(comment_start).unwrap(),
|
||||
TextSize::try_from(noqa_literal_end).unwrap(),
|
||||
);
|
||||
Self::All(All {
|
||||
range: range.add(offset),
|
||||
})
|
||||
},
|
||||
));
|
||||
}
|
||||
warn!("Unexpected suffix on `noqa` directive: \"{line}\"");
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
ParsedExemption::None
|
||||
/// Lex an individual rule code (e.g., `F401`).
|
||||
#[inline]
|
||||
fn lex_code(line: &str) -> Option<&str> {
|
||||
// Extract, e.g., the `F` in `F401`.
|
||||
let prefix = line.chars().take_while(char::is_ascii_uppercase).count();
|
||||
// Extract, e.g., the `401` in `F401`.
|
||||
let suffix = line[prefix..]
|
||||
.chars()
|
||||
.take_while(char::is_ascii_digit)
|
||||
.count();
|
||||
if prefix > 0 && suffix > 0 {
|
||||
Some(&line[..prefix + suffix])
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct All {
|
||||
range: TextRange,
|
||||
}
|
||||
|
||||
impl Ranged for All {
|
||||
/// The range of the `noqa` directive.
|
||||
fn range(&self) -> TextRange {
|
||||
self.range
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct Codes<'a> {
|
||||
range: TextRange,
|
||||
codes: Vec<&'a str>,
|
||||
}
|
||||
|
||||
impl Codes<'_> {
|
||||
/// The codes that are ignored by the `noqa` directive.
|
||||
pub(crate) fn codes(&self) -> &[&str] {
|
||||
&self.codes
|
||||
}
|
||||
}
|
||||
|
||||
impl Ranged for Codes<'_> {
|
||||
/// The range of the `noqa` directive.
|
||||
fn range(&self) -> TextRange {
|
||||
self.range
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `true` if the string list of `codes` includes `code` (or an alias
|
||||
@@ -138,50 +203,230 @@ pub(crate) fn rule_is_ignored(
|
||||
) -> bool {
|
||||
let offset = noqa_line_for.resolve(offset);
|
||||
let line_range = locator.line_range(offset);
|
||||
match extract_noqa_directive(line_range, locator) {
|
||||
Directive::None => false,
|
||||
Directive::All(..) => true,
|
||||
Directive::Codes(.., codes, _) => includes(code, &codes),
|
||||
match Directive::try_extract(locator.slice(line_range), line_range.start()) {
|
||||
Ok(Some(Directive::All(_))) => true,
|
||||
Ok(Some(Directive::Codes(Codes { codes, range: _ }))) => includes(code, &codes),
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// The file-level exemptions extracted from a given Python file.
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum FileExemption {
|
||||
None,
|
||||
/// The file is exempt from all rules.
|
||||
All,
|
||||
/// The file is exempt from the given rules.
|
||||
Codes(Vec<NoqaCode>),
|
||||
}
|
||||
|
||||
/// Extract the [`FileExemption`] for a given Python source file, enumerating any rules that are
|
||||
/// globally ignored within the file.
|
||||
pub(crate) fn file_exemption(contents: &str, comment_ranges: &[TextRange]) -> FileExemption {
|
||||
let mut exempt_codes: Vec<NoqaCode> = vec![];
|
||||
impl FileExemption {
|
||||
/// Extract the [`FileExemption`] for a given Python source file, enumerating any rules that are
|
||||
/// globally ignored within the file.
|
||||
pub(crate) fn try_extract(
|
||||
contents: &str,
|
||||
comment_ranges: &[TextRange],
|
||||
locator: &Locator,
|
||||
) -> Option<Self> {
|
||||
let mut exempt_codes: Vec<NoqaCode> = vec![];
|
||||
|
||||
for range in comment_ranges {
|
||||
match parse_file_exemption(&contents[*range]) {
|
||||
ParsedExemption::All => {
|
||||
return FileExemption::All;
|
||||
for range in comment_ranges {
|
||||
match ParsedFileExemption::try_extract(&contents[*range]) {
|
||||
Err(err) => {
|
||||
#[allow(deprecated)]
|
||||
let line = locator.compute_line_index(range.start());
|
||||
warn!("Invalid `# noqa` directive on line {line}: {err}");
|
||||
}
|
||||
Ok(Some(ParsedFileExemption::All)) => {
|
||||
return Some(Self::All);
|
||||
}
|
||||
Ok(Some(ParsedFileExemption::Codes(codes))) => {
|
||||
exempt_codes.extend(codes.into_iter().filter_map(|code| {
|
||||
if let Ok(rule) = Rule::from_code(get_redirect_target(code).unwrap_or(code))
|
||||
{
|
||||
Some(rule.noqa_code())
|
||||
} else {
|
||||
#[allow(deprecated)]
|
||||
let line = locator.compute_line_index(range.start());
|
||||
warn!("Invalid code provided to `# ruff: noqa` on line {line}: {code}");
|
||||
None
|
||||
}
|
||||
}));
|
||||
}
|
||||
Ok(None) => {}
|
||||
}
|
||||
ParsedExemption::Codes(codes) => {
|
||||
exempt_codes.extend(codes.into_iter().filter_map(|code| {
|
||||
if let Ok(rule) = Rule::from_code(get_redirect_target(code).unwrap_or(code)) {
|
||||
Some(rule.noqa_code())
|
||||
} else {
|
||||
warn!("Invalid code provided to `# ruff: noqa`: {}", code);
|
||||
None
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
if exempt_codes.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(Self::Codes(exempt_codes))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// An individual file-level exemption (e.g., `# ruff: noqa` or `# ruff: noqa: F401, F841`). Like
|
||||
/// [`FileExemption`], but only for a single line, as opposed to an aggregated set of exemptions
|
||||
/// across a source file.
|
||||
#[derive(Debug)]
|
||||
enum ParsedFileExemption<'a> {
|
||||
/// The file-level exemption ignores all rules (e.g., `# ruff: noqa`).
|
||||
All,
|
||||
/// The file-level exemption ignores specific rules (e.g., `# ruff: noqa: F401, F841`).
|
||||
Codes(Vec<&'a str>),
|
||||
}
|
||||
|
||||
impl<'a> ParsedFileExemption<'a> {
|
||||
/// Return a [`ParsedFileExemption`] for a given comment line.
|
||||
fn try_extract(line: &'a str) -> Result<Option<Self>, ParseError> {
|
||||
let line = Self::lex_whitespace(line);
|
||||
let Some(line) = Self::lex_char(line, '#') else {
|
||||
return Ok(None);
|
||||
};
|
||||
let line = Self::lex_whitespace(line);
|
||||
|
||||
let Some(line) = Self::lex_flake8(line).or_else(|| Self::lex_ruff(line)) else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
let line = Self::lex_whitespace(line);
|
||||
let Some(line) = Self::lex_char(line, ':') else {
|
||||
return Ok(None);
|
||||
};
|
||||
let line = Self::lex_whitespace(line);
|
||||
let Some(line) = Self::lex_noqa(line) else {
|
||||
return Ok(None);
|
||||
};
|
||||
let line = Self::lex_whitespace(line);
|
||||
|
||||
Ok(Some(if line.is_empty() {
|
||||
// Ex) `# ruff: noqa`
|
||||
Self::All
|
||||
} else {
|
||||
// Ex) `# ruff: noqa: F401, F841`
|
||||
let Some(line) = Self::lex_char(line, ':') else {
|
||||
return Err(ParseError::InvalidSuffix);
|
||||
};
|
||||
let line = Self::lex_whitespace(line);
|
||||
|
||||
// Extract the codes from the line (e.g., `F401, F841`).
|
||||
let mut codes = vec![];
|
||||
let mut line = line;
|
||||
while let Some(code) = Self::lex_code(line) {
|
||||
codes.push(code);
|
||||
line = &line[code.len()..];
|
||||
|
||||
// Codes can be comma- or whitespace-delimited.
|
||||
if let Some(rest) = Self::lex_delimiter(line).map(Self::lex_whitespace) {
|
||||
line = rest;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
ParsedExemption::None => {}
|
||||
|
||||
// If we didn't identify any codes, warn.
|
||||
if codes.is_empty() {
|
||||
return Err(ParseError::MissingCodes);
|
||||
}
|
||||
|
||||
Self::Codes(codes)
|
||||
}))
|
||||
}
|
||||
|
||||
/// Lex optional leading whitespace.
|
||||
#[inline]
|
||||
fn lex_whitespace(line: &str) -> &str {
|
||||
line.trim_start()
|
||||
}
|
||||
|
||||
/// Lex a specific character, or return `None` if the character is not the first character in
|
||||
/// the line.
|
||||
#[inline]
|
||||
fn lex_char(line: &str, c: char) -> Option<&str> {
|
||||
let mut chars = line.chars();
|
||||
if chars.next() == Some(c) {
|
||||
Some(chars.as_str())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
if exempt_codes.is_empty() {
|
||||
FileExemption::None
|
||||
} else {
|
||||
FileExemption::Codes(exempt_codes)
|
||||
/// Lex the "flake8" prefix of a `noqa` directive.
|
||||
#[inline]
|
||||
fn lex_flake8(line: &str) -> Option<&str> {
|
||||
line.strip_prefix("flake8")
|
||||
}
|
||||
|
||||
/// Lex the "ruff" prefix of a `noqa` directive.
|
||||
#[inline]
|
||||
fn lex_ruff(line: &str) -> Option<&str> {
|
||||
line.strip_prefix("ruff")
|
||||
}
|
||||
|
||||
/// Lex a `noqa` directive with case-insensitive matching.
|
||||
#[inline]
|
||||
fn lex_noqa(line: &str) -> Option<&str> {
|
||||
match line.as_bytes() {
|
||||
[b'n' | b'N', b'o' | b'O', b'q' | b'Q', b'a' | b'A', ..] => Some(&line["noqa".len()..]),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Lex a code delimiter, which can either be a comma or whitespace.
|
||||
#[inline]
|
||||
fn lex_delimiter(line: &str) -> Option<&str> {
|
||||
let mut chars = line.chars();
|
||||
if let Some(c) = chars.next() {
|
||||
if c == ',' || c.is_whitespace() {
|
||||
Some(chars.as_str())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Lex an individual rule code (e.g., `F401`).
|
||||
#[inline]
|
||||
fn lex_code(line: &str) -> Option<&str> {
|
||||
// Extract, e.g., the `F` in `F401`.
|
||||
let prefix = line.chars().take_while(char::is_ascii_uppercase).count();
|
||||
// Extract, e.g., the `401` in `F401`.
|
||||
let suffix = line[prefix..]
|
||||
.chars()
|
||||
.take_while(char::is_ascii_digit)
|
||||
.count();
|
||||
if prefix > 0 && suffix > 0 {
|
||||
Some(&line[..prefix + suffix])
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The error returned when a `noqa` directive or file-level exemption comment is recognizably a
/// directive but cannot be parsed.
#[derive(Debug)]
pub(crate) enum ParseError {
    /// The `noqa` directive was missing valid codes (e.g., `# noqa: unused-import` instead of `# noqa: F401`).
    MissingCodes,
    /// The `noqa` directive used an invalid suffix (e.g., `# noqa; F401` instead of `# noqa: F401`).
    InvalidSuffix,
}
|
||||
|
||||
impl Display for ParseError {
|
||||
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
ParseError::MissingCodes => fmt.write_str("expected a comma-separated list of codes (e.g., `# noqa: F401, F841`)."),
|
||||
ParseError::InvalidSuffix => {
|
||||
fmt.write_str("expected `:` followed by a comma-separated list of codes (e.g., `# noqa: F401, F841`).")
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Marker impl: `ParseError` has no underlying source error, so the default methods suffice.
impl Error for ParseError {}
|
||||
|
||||
/// Adds noqa comments to suppress all diagnostics of a file.
|
||||
pub(crate) fn add_noqa(
|
||||
path: &Path,
|
||||
@@ -215,23 +460,23 @@ fn add_noqa_inner(
|
||||
|
||||
// Whether the file is exempted from all checks.
|
||||
// Codes that are globally exempted (within the current file).
|
||||
let exemption = file_exemption(locator.contents(), commented_ranges);
|
||||
let exemption = FileExemption::try_extract(locator.contents(), commented_ranges, locator);
|
||||
let directives = NoqaDirectives::from_commented_ranges(commented_ranges, locator);
|
||||
|
||||
// Mark any non-ignored diagnostics.
|
||||
for diagnostic in diagnostics {
|
||||
match &exemption {
|
||||
FileExemption::All => {
|
||||
Some(FileExemption::All) => {
|
||||
// If the file is exempted, don't add any noqa directives.
|
||||
continue;
|
||||
}
|
||||
FileExemption::Codes(codes) => {
|
||||
Some(FileExemption::Codes(codes)) => {
|
||||
// If the diagnostic is ignored by a global exemption, don't add a noqa directive.
|
||||
if codes.contains(&diagnostic.kind.rule().noqa_code()) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
FileExemption::None => {}
|
||||
None => {}
|
||||
}
|
||||
|
||||
// Is the violation ignored by a `noqa` directive on the parent line?
|
||||
@@ -240,28 +485,27 @@ fn add_noqa_inner(
|
||||
directives.find_line_with_directive(noqa_line_for.resolve(parent))
|
||||
{
|
||||
match &directive_line.directive {
|
||||
Directive::All(..) => {
|
||||
Directive::All(_) => {
|
||||
continue;
|
||||
}
|
||||
Directive::Codes(.., codes, _) => {
|
||||
Directive::Codes(Codes { codes, range: _ }) => {
|
||||
if includes(diagnostic.kind.rule(), codes) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
Directive::None => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let noqa_offset = noqa_line_for.resolve(diagnostic.start());
|
||||
|
||||
// Or ignored by the directive itself
|
||||
// Or ignored by the directive itself?
|
||||
if let Some(directive_line) = directives.find_line_with_directive(noqa_offset) {
|
||||
match &directive_line.directive {
|
||||
Directive::All(..) => {
|
||||
Directive::All(_) => {
|
||||
continue;
|
||||
}
|
||||
Directive::Codes(.., codes, _) => {
|
||||
Directive::Codes(Codes { codes, range: _ }) => {
|
||||
let rule = diagnostic.kind.rule();
|
||||
if !includes(rule, codes) {
|
||||
matches_by_line
|
||||
@@ -274,7 +518,6 @@ fn add_noqa_inner(
|
||||
}
|
||||
continue;
|
||||
}
|
||||
Directive::None => {}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -296,7 +539,7 @@ fn add_noqa_inner(
|
||||
let line = locator.full_line(offset);
|
||||
|
||||
match directive {
|
||||
None | Some(Directive::None) => {
|
||||
None => {
|
||||
// Add existing content.
|
||||
output.push_str(line.trim_end());
|
||||
|
||||
@@ -308,10 +551,10 @@ fn add_noqa_inner(
|
||||
output.push_str(&line_ending);
|
||||
count += 1;
|
||||
}
|
||||
Some(Directive::All(..)) => {
|
||||
Some(Directive::All(_)) => {
|
||||
// Does not get inserted into the map.
|
||||
}
|
||||
Some(Directive::Codes(_, noqa_range, existing, _)) => {
|
||||
Some(Directive::Codes(Codes { range, codes })) => {
|
||||
// Reconstruct the line based on the preserved rule codes.
|
||||
// This enables us to tally the number of edits.
|
||||
let output_start = output.len();
|
||||
@@ -319,7 +562,7 @@ fn add_noqa_inner(
|
||||
// Add existing content.
|
||||
output.push_str(
|
||||
locator
|
||||
.slice(TextRange::new(offset, noqa_range.start()))
|
||||
.slice(TextRange::new(offset, range.start()))
|
||||
.trim_end(),
|
||||
);
|
||||
|
||||
@@ -331,8 +574,8 @@ fn add_noqa_inner(
|
||||
&mut output,
|
||||
rules
|
||||
.iter()
|
||||
.map(|r| r.noqa_code().to_string())
|
||||
.chain(existing.iter().map(ToString::to_string))
|
||||
.map(|rule| rule.noqa_code().to_string())
|
||||
.chain(codes.iter().map(ToString::to_string))
|
||||
.sorted_unstable(),
|
||||
);
|
||||
|
||||
@@ -366,9 +609,11 @@ fn push_codes<I: Display>(str: &mut String, codes: impl Iterator<Item = I>) {
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct NoqaDirectiveLine<'a> {
|
||||
// The range of the text line for which the noqa directive applies.
|
||||
/// The range of the text line for which the noqa directive applies.
|
||||
pub(crate) range: TextRange,
|
||||
/// The noqa directive.
|
||||
pub(crate) directive: Directive<'a>,
|
||||
/// The codes that are ignored by the directive.
|
||||
pub(crate) matches: Vec<NoqaCode>,
|
||||
}
|
||||
|
||||
@@ -384,21 +629,23 @@ impl<'a> NoqaDirectives<'a> {
|
||||
) -> Self {
|
||||
let mut directives = Vec::new();
|
||||
|
||||
for comment_range in comment_ranges {
|
||||
let line_range = locator.line_range(comment_range.start());
|
||||
let directive = match extract_noqa_directive(line_range, locator) {
|
||||
Directive::None => {
|
||||
continue;
|
||||
for range in comment_ranges {
|
||||
match Directive::try_extract(locator.slice(*range), range.start()) {
|
||||
Err(err) => {
|
||||
#[allow(deprecated)]
|
||||
let line = locator.compute_line_index(range.start());
|
||||
warn!("Invalid `# noqa` directive on line {line}: {err}");
|
||||
}
|
||||
directive @ (Directive::All(..) | Directive::Codes(..)) => directive,
|
||||
};
|
||||
|
||||
// noqa comments are guaranteed to be single line.
|
||||
directives.push(NoqaDirectiveLine {
|
||||
range: line_range,
|
||||
directive,
|
||||
matches: Vec::new(),
|
||||
});
|
||||
Ok(Some(directive)) => {
|
||||
// noqa comments are guaranteed to be single line.
|
||||
directives.push(NoqaDirectiveLine {
|
||||
range: locator.line_range(range.start()),
|
||||
directive,
|
||||
matches: Vec::new(),
|
||||
});
|
||||
}
|
||||
Ok(None) => {}
|
||||
}
|
||||
}
|
||||
|
||||
// Extend a mapping at the end of the file to also include the EOF token.
|
||||
@@ -460,7 +707,7 @@ impl NoqaMapping {
|
||||
}
|
||||
|
||||
/// Returns the re-mapped position or `position` if no mapping exists.
|
||||
pub fn resolve(&self, offset: TextSize) -> TextSize {
|
||||
pub(crate) fn resolve(&self, offset: TextSize) -> TextSize {
|
||||
let index = self.ranges.binary_search_by(|range| {
|
||||
if range.end() < offset {
|
||||
std::cmp::Ordering::Less
|
||||
@@ -478,7 +725,7 @@ impl NoqaMapping {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn push_mapping(&mut self, range: TextRange) {
|
||||
pub(crate) fn push_mapping(&mut self, range: TextRange) {
|
||||
if let Some(last_range) = self.ranges.last_mut() {
|
||||
// Strictly sorted insertion
|
||||
if last_range.end() <= range.start() {
|
||||
@@ -511,28 +758,190 @@ impl FromIterator<TextRange> for NoqaMapping {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use insta::assert_debug_snapshot;
|
||||
use ruff_text_size::{TextRange, TextSize};
|
||||
|
||||
use ruff_diagnostics::Diagnostic;
|
||||
use ruff_python_ast::source_code::Locator;
|
||||
use ruff_python_whitespace::LineEnding;
|
||||
|
||||
use crate::noqa::{add_noqa_inner, NoqaMapping, NOQA_LINE_REGEX};
|
||||
use crate::noqa::{add_noqa_inner, Directive, NoqaMapping, ParsedFileExemption};
|
||||
use crate::rules::pycodestyle::rules::AmbiguousVariableName;
|
||||
use crate::rules::pyflakes;
|
||||
use crate::rules::pyflakes::rules::UnusedVariable;
|
||||
|
||||
#[test]
|
||||
fn regex() {
|
||||
assert!(NOQA_LINE_REGEX.is_match("# noqa"));
|
||||
assert!(NOQA_LINE_REGEX.is_match("# NoQA"));
|
||||
fn noqa_all() {
|
||||
let source = "# noqa";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
assert!(NOQA_LINE_REGEX.is_match("# noqa: F401"));
|
||||
assert!(NOQA_LINE_REGEX.is_match("# NoQA: F401"));
|
||||
assert!(NOQA_LINE_REGEX.is_match("# noqa: F401, E501"));
|
||||
#[test]
|
||||
fn noqa_code() {
|
||||
let source = "# noqa: F401";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
assert!(NOQA_LINE_REGEX.is_match("# noqa:F401"));
|
||||
assert!(NOQA_LINE_REGEX.is_match("# NoQA:F401"));
|
||||
assert!(NOQA_LINE_REGEX.is_match("# noqa:F401, E501"));
|
||||
#[test]
|
||||
fn noqa_codes() {
|
||||
let source = "# noqa: F401, F841";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn noqa_all_case_insensitive() {
|
||||
let source = "# NOQA";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn noqa_code_case_insensitive() {
|
||||
let source = "# NOQA: F401";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn noqa_codes_case_insensitive() {
|
||||
let source = "# NOQA: F401, F841";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn noqa_leading_space() {
|
||||
let source = "# # noqa: F401";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn noqa_trailing_space() {
|
||||
let source = "# noqa: F401 #";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn noqa_all_no_space() {
|
||||
let source = "#noqa";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn noqa_code_no_space() {
|
||||
let source = "#noqa:F401";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn noqa_codes_no_space() {
|
||||
let source = "#noqa:F401,F841";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn noqa_all_multi_space() {
|
||||
let source = "# noqa";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn noqa_code_multi_space() {
|
||||
let source = "# noqa: F401";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn noqa_codes_multi_space() {
|
||||
let source = "# noqa: F401, F841";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn noqa_all_leading_comment() {
|
||||
let source = "# Some comment describing the noqa # noqa";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn noqa_code_leading_comment() {
|
||||
let source = "# Some comment describing the noqa # noqa: F401";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn noqa_codes_leading_comment() {
|
||||
let source = "# Some comment describing the noqa # noqa: F401, F841";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn noqa_all_trailing_comment() {
|
||||
let source = "# noqa # Some comment describing the noqa";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn noqa_code_trailing_comment() {
|
||||
let source = "# noqa: F401 # Some comment describing the noqa";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn noqa_codes_trailing_comment() {
|
||||
let source = "# noqa: F401, F841 # Some comment describing the noqa";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn noqa_invalid_codes() {
|
||||
let source = "# noqa: unused-import, F401, some other code";
|
||||
assert_debug_snapshot!(Directive::try_extract(source, TextSize::default()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn flake8_exemption_all() {
|
||||
let source = "# flake8: noqa";
|
||||
assert_debug_snapshot!(ParsedFileExemption::try_extract(source));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ruff_exemption_all() {
|
||||
let source = "# ruff: noqa";
|
||||
assert_debug_snapshot!(ParsedFileExemption::try_extract(source));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn flake8_exemption_all_no_space() {
|
||||
let source = "#flake8:noqa";
|
||||
assert_debug_snapshot!(ParsedFileExemption::try_extract(source));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ruff_exemption_all_no_space() {
|
||||
let source = "#ruff:noqa";
|
||||
assert_debug_snapshot!(ParsedFileExemption::try_extract(source));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn flake8_exemption_codes() {
|
||||
// Note: Flake8 doesn't support this; it's treated as a blanket exemption.
|
||||
let source = "# flake8: noqa: F401, F841";
|
||||
assert_debug_snapshot!(ParsedFileExemption::try_extract(source));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ruff_exemption_codes() {
|
||||
let source = "# ruff: noqa: F401, F841";
|
||||
assert_debug_snapshot!(ParsedFileExemption::try_extract(source));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn flake8_exemption_all_case_insensitive() {
|
||||
let source = "# flake8: NoQa";
|
||||
assert_debug_snapshot!(ParsedFileExemption::try_extract(source));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ruff_exemption_all_case_insensitive() {
|
||||
let source = "# ruff: NoQa";
|
||||
assert_debug_snapshot!(ParsedFileExemption::try_extract(source));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -550,7 +959,7 @@ mod tests {
|
||||
assert_eq!(output, format!("{contents}"));
|
||||
|
||||
let diagnostics = [Diagnostic::new(
|
||||
pyflakes::rules::UnusedVariable {
|
||||
UnusedVariable {
|
||||
name: "x".to_string(),
|
||||
},
|
||||
TextRange::new(TextSize::from(0), TextSize::from(0)),
|
||||
@@ -574,7 +983,7 @@ mod tests {
|
||||
TextRange::new(TextSize::from(0), TextSize::from(0)),
|
||||
),
|
||||
Diagnostic::new(
|
||||
pyflakes::rules::UnusedVariable {
|
||||
UnusedVariable {
|
||||
name: "x".to_string(),
|
||||
},
|
||||
TextRange::new(TextSize::from(0), TextSize::from(0)),
|
||||
@@ -598,7 +1007,7 @@ mod tests {
|
||||
TextRange::new(TextSize::from(0), TextSize::from(0)),
|
||||
),
|
||||
Diagnostic::new(
|
||||
pyflakes::rules::UnusedVariable {
|
||||
UnusedVariable {
|
||||
name: "x".to_string(),
|
||||
},
|
||||
TextRange::new(TextSize::from(0), TextSize::from(0)),
|
||||
|
||||
@@ -7,7 +7,9 @@ use ruff_diagnostics::Diagnostic;
|
||||
use ruff_python_ast::source_code::SourceFile;
|
||||
|
||||
use crate::message::Message;
|
||||
use crate::registry::Rule;
|
||||
use crate::rules::ruff::rules::InvalidPyprojectToml;
|
||||
use crate::settings::Settings;
|
||||
use crate::IOError;
|
||||
|
||||
/// Unlike [`pyproject_toml::PyProjectToml`], in our case `build_system` is also optional
|
||||
@@ -20,9 +22,11 @@ struct PyProjectToml {
|
||||
project: Option<Project>,
|
||||
}
|
||||
|
||||
pub fn lint_pyproject_toml(source_file: SourceFile) -> Result<Vec<Message>> {
|
||||
pub fn lint_pyproject_toml(source_file: SourceFile, settings: &Settings) -> Result<Vec<Message>> {
|
||||
let mut messages = vec![];
|
||||
|
||||
let err = match toml::from_str::<PyProjectToml>(source_file.source_text()) {
|
||||
Ok(_) => return Ok(Vec::default()),
|
||||
Ok(_) => return Ok(messages),
|
||||
Err(err) => err,
|
||||
};
|
||||
|
||||
@@ -32,17 +36,20 @@ pub fn lint_pyproject_toml(source_file: SourceFile) -> Result<Vec<Message>> {
|
||||
None => TextRange::default(),
|
||||
Some(range) => {
|
||||
let Ok(end) = TextSize::try_from(range.end) else {
|
||||
let diagnostic = Diagnostic::new(
|
||||
IOError {
|
||||
message: "pyproject.toml is larger than 4GB".to_string(),
|
||||
},
|
||||
TextRange::default(),
|
||||
);
|
||||
return Ok(vec![Message::from_diagnostic(
|
||||
diagnostic,
|
||||
source_file,
|
||||
TextSize::default(),
|
||||
)]);
|
||||
if settings.rules.enabled(Rule::IOError) {
|
||||
let diagnostic = Diagnostic::new(
|
||||
IOError {
|
||||
message: "pyproject.toml is larger than 4GB".to_string(),
|
||||
},
|
||||
TextRange::default(),
|
||||
);
|
||||
messages.push(Message::from_diagnostic(
|
||||
diagnostic,
|
||||
source_file,
|
||||
TextSize::default(),
|
||||
));
|
||||
}
|
||||
return Ok(messages);
|
||||
};
|
||||
TextRange::new(
|
||||
// start <= end, so if end < 4GB follows start < 4GB
|
||||
@@ -52,11 +59,15 @@ pub fn lint_pyproject_toml(source_file: SourceFile) -> Result<Vec<Message>> {
|
||||
}
|
||||
};
|
||||
|
||||
let toml_err = err.message().to_string();
|
||||
let diagnostic = Diagnostic::new(InvalidPyprojectToml { message: toml_err }, range);
|
||||
Ok(vec![Message::from_diagnostic(
|
||||
diagnostic,
|
||||
source_file,
|
||||
TextSize::default(),
|
||||
)])
|
||||
if settings.rules.enabled(Rule::InvalidPyprojectToml) {
|
||||
let toml_err = err.message().to_string();
|
||||
let diagnostic = Diagnostic::new(InvalidPyprojectToml { message: toml_err }, range);
|
||||
messages.push(Message::from_diagnostic(
|
||||
diagnostic,
|
||||
source_file,
|
||||
TextSize::default(),
|
||||
));
|
||||
}
|
||||
|
||||
Ok(messages)
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ pub use codes::Rule;
|
||||
use ruff_macros::RuleNamespace;
|
||||
pub use rule_set::{RuleSet, RuleSetIterator};
|
||||
|
||||
use crate::codes::{self, RuleCodePrefix};
|
||||
use crate::codes::{self};
|
||||
|
||||
mod rule_set;
|
||||
|
||||
@@ -18,8 +18,10 @@ pub trait AsRule {
|
||||
impl Rule {
|
||||
pub fn from_code(code: &str) -> Result<Self, FromCodeError> {
|
||||
let (linter, code) = Linter::parse_code(code).ok_or(FromCodeError::Unknown)?;
|
||||
let prefix: RuleCodePrefix = RuleCodePrefix::parse(&linter, code)?;
|
||||
Ok(prefix.rules().next().unwrap())
|
||||
linter
|
||||
.all_rules()
|
||||
.find(|rule| rule.noqa_code().suffix() == code)
|
||||
.ok_or(FromCodeError::Unknown)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -80,9 +82,9 @@ pub enum Linter {
|
||||
/// [flake8-commas](https://pypi.org/project/flake8-commas/)
|
||||
#[prefix = "COM"]
|
||||
Flake8Commas,
|
||||
/// Copyright-related rules
|
||||
/// [flake8-copyright](https://pypi.org/project/flake8-copyright/)
|
||||
#[prefix = "CPY"]
|
||||
Copyright,
|
||||
Flake8Copyright,
|
||||
/// [flake8-comprehensions](https://pypi.org/project/flake8-comprehensions/)
|
||||
#[prefix = "C4"]
|
||||
Flake8Comprehensions,
|
||||
@@ -110,7 +112,7 @@ pub enum Linter {
|
||||
/// [flake8-import-conventions](https://github.com/joaopalmeiro/flake8-import-conventions)
|
||||
#[prefix = "ICN"]
|
||||
Flake8ImportConventions,
|
||||
/// [flake8-logging-format](https://pypi.org/project/flake8-logging-format/0.9.0/)
|
||||
/// [flake8-logging-format](https://pypi.org/project/flake8-logging-format/)
|
||||
#[prefix = "G"]
|
||||
Flake8LoggingFormat,
|
||||
/// [flake8-no-pep420](https://pypi.org/project/flake8-no-pep420/)
|
||||
@@ -179,7 +181,7 @@ pub enum Linter {
|
||||
/// [Pylint](https://pypi.org/project/pylint/)
|
||||
#[prefix = "PL"]
|
||||
Pylint,
|
||||
/// [tryceratops](https://pypi.org/project/tryceratops/1.1.0/)
|
||||
/// [tryceratops](https://pypi.org/project/tryceratops/)
|
||||
#[prefix = "TRY"]
|
||||
Tryceratops,
|
||||
/// [flynt](https://pypi.org/project/flynt/)
|
||||
@@ -216,30 +218,6 @@ pub trait RuleNamespace: Sized {
|
||||
fn url(&self) -> Option<&'static str>;
|
||||
}
|
||||
|
||||
/// The prefix and name for an upstream linter category.
|
||||
pub struct UpstreamCategory(pub RuleCodePrefix, pub &'static str);
|
||||
|
||||
impl Linter {
|
||||
pub const fn upstream_categories(&self) -> Option<&'static [UpstreamCategory]> {
|
||||
match self {
|
||||
Linter::Pycodestyle => Some(&[
|
||||
UpstreamCategory(RuleCodePrefix::Pycodestyle(codes::Pycodestyle::E), "Error"),
|
||||
UpstreamCategory(
|
||||
RuleCodePrefix::Pycodestyle(codes::Pycodestyle::W),
|
||||
"Warning",
|
||||
),
|
||||
]),
|
||||
Linter::Pylint => Some(&[
|
||||
UpstreamCategory(RuleCodePrefix::Pylint(codes::Pylint::C), "Convention"),
|
||||
UpstreamCategory(RuleCodePrefix::Pylint(codes::Pylint::E), "Error"),
|
||||
UpstreamCategory(RuleCodePrefix::Pylint(codes::Pylint::R), "Refactor"),
|
||||
UpstreamCategory(RuleCodePrefix::Pylint(codes::Pylint::W), "Warning"),
|
||||
]),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(is_macro::Is, Copy, Clone)]
|
||||
pub enum LintSource {
|
||||
Ast,
|
||||
@@ -250,6 +228,7 @@ pub enum LintSource {
|
||||
Imports,
|
||||
Noqa,
|
||||
Filesystem,
|
||||
PyprojectToml,
|
||||
}
|
||||
|
||||
impl Rule {
|
||||
@@ -257,6 +236,7 @@ impl Rule {
|
||||
/// physical lines).
|
||||
pub const fn lint_source(&self) -> LintSource {
|
||||
match self {
|
||||
Rule::InvalidPyprojectToml => LintSource::PyprojectToml,
|
||||
Rule::UnusedNOQA => LintSource::Noqa,
|
||||
Rule::BlanketNOQA
|
||||
| Rule::BlanketTypeIgnore
|
||||
|
||||
@@ -248,8 +248,8 @@ impl Renamer {
|
||||
| BindingKind::LoopVar
|
||||
| BindingKind::Global
|
||||
| BindingKind::Nonlocal(_)
|
||||
| BindingKind::ClassDefinition
|
||||
| BindingKind::FunctionDefinition
|
||||
| BindingKind::ClassDefinition(_)
|
||||
| BindingKind::FunctionDefinition(_)
|
||||
| BindingKind::Deletion
|
||||
| BindingKind::UnboundException(_) => {
|
||||
Some(Edit::range_replacement(target.to_string(), binding.range))
|
||||
|
||||
@@ -51,11 +51,9 @@ pub(crate) fn variable_name_task_id(
|
||||
value: &Expr,
|
||||
) -> Option<Diagnostic> {
|
||||
// If we have more than one target, we can't do anything.
|
||||
if targets.len() != 1 {
|
||||
let [target] = targets else {
|
||||
return None;
|
||||
}
|
||||
|
||||
let target = &targets[0];
|
||||
};
|
||||
let Expr::Name(ast::ExprName { id, .. }) = target else {
|
||||
return None;
|
||||
};
|
||||
|
||||
@@ -48,12 +48,11 @@ fn is_standalone_comment(line: &str) -> bool {
|
||||
|
||||
/// ERA001
|
||||
pub(crate) fn commented_out_code(
|
||||
diagnostics: &mut Vec<Diagnostic>,
|
||||
locator: &Locator,
|
||||
indexer: &Indexer,
|
||||
settings: &Settings,
|
||||
) -> Vec<Diagnostic> {
|
||||
let mut diagnostics = vec![];
|
||||
|
||||
) {
|
||||
for range in indexer.comment_ranges() {
|
||||
let line = locator.full_lines(*range);
|
||||
|
||||
@@ -69,6 +68,4 @@ pub(crate) fn commented_out_code(
|
||||
diagnostics.push(diagnostic);
|
||||
}
|
||||
}
|
||||
|
||||
diagnostics
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use rustpython_parser::ast::{ArgWithDefault, Expr, Ranged, Stmt};
|
||||
use rustpython_parser::ast::{self, ArgWithDefault, Constant, Expr, Ranged, Stmt};
|
||||
|
||||
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Fix, Violation};
|
||||
use ruff_macros::{derive_message_formats, violation};
|
||||
@@ -6,12 +6,14 @@ use ruff_python_ast::cast;
|
||||
use ruff_python_ast::helpers::ReturnStatementVisitor;
|
||||
use ruff_python_ast::identifier::Identifier;
|
||||
use ruff_python_ast::statement_visitor::StatementVisitor;
|
||||
use ruff_python_ast::typing::parse_type_annotation;
|
||||
use ruff_python_semantic::analyze::visibility;
|
||||
use ruff_python_semantic::{Definition, Member, MemberKind, SemanticModel};
|
||||
use ruff_python_semantic::{Definition, Member, MemberKind};
|
||||
use ruff_python_stdlib::typing::simple_magic_return_type;
|
||||
|
||||
use crate::checkers::ast::Checker;
|
||||
use crate::registry::{AsRule, Rule};
|
||||
use crate::rules::ruff::typing::type_hint_resolves_to_any;
|
||||
|
||||
use super::super::fixes;
|
||||
use super::super::helpers::match_function_def;
|
||||
@@ -432,20 +434,46 @@ fn is_none_returning(body: &[Stmt]) -> bool {
|
||||
|
||||
/// ANN401
|
||||
fn check_dynamically_typed<F>(
|
||||
checker: &Checker,
|
||||
annotation: &Expr,
|
||||
func: F,
|
||||
diagnostics: &mut Vec<Diagnostic>,
|
||||
is_overridden: bool,
|
||||
semantic: &SemanticModel,
|
||||
) where
|
||||
F: FnOnce() -> String,
|
||||
{
|
||||
if !is_overridden && semantic.match_typing_expr(annotation, "Any") {
|
||||
diagnostics.push(Diagnostic::new(
|
||||
AnyType { name: func() },
|
||||
annotation.range(),
|
||||
));
|
||||
};
|
||||
if let Expr::Constant(ast::ExprConstant {
|
||||
range,
|
||||
value: Constant::Str(string),
|
||||
..
|
||||
}) = annotation
|
||||
{
|
||||
// Quoted annotations
|
||||
if let Ok((parsed_annotation, _)) = parse_type_annotation(string, *range, checker.locator) {
|
||||
if type_hint_resolves_to_any(
|
||||
&parsed_annotation,
|
||||
checker.semantic(),
|
||||
checker.locator,
|
||||
checker.settings.target_version.minor(),
|
||||
) {
|
||||
diagnostics.push(Diagnostic::new(
|
||||
AnyType { name: func() },
|
||||
annotation.range(),
|
||||
));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if type_hint_resolves_to_any(
|
||||
annotation,
|
||||
checker.semantic(),
|
||||
checker.locator,
|
||||
checker.settings.target_version.minor(),
|
||||
) {
|
||||
diagnostics.push(Diagnostic::new(
|
||||
AnyType { name: func() },
|
||||
annotation.range(),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate flake8-annotation checks for a given `Definition`.
|
||||
@@ -500,13 +528,12 @@ pub(crate) fn definition(
|
||||
// ANN401 for dynamically typed arguments
|
||||
if let Some(annotation) = &def.annotation {
|
||||
has_any_typed_arg = true;
|
||||
if checker.enabled(Rule::AnyType) {
|
||||
if checker.enabled(Rule::AnyType) && !is_overridden {
|
||||
check_dynamically_typed(
|
||||
checker,
|
||||
annotation,
|
||||
|| def.arg.to_string(),
|
||||
&mut diagnostics,
|
||||
is_overridden,
|
||||
checker.semantic(),
|
||||
);
|
||||
}
|
||||
} else {
|
||||
@@ -530,15 +557,9 @@ pub(crate) fn definition(
|
||||
if let Some(expr) = &arg.annotation {
|
||||
has_any_typed_arg = true;
|
||||
if !checker.settings.flake8_annotations.allow_star_arg_any {
|
||||
if checker.enabled(Rule::AnyType) {
|
||||
if checker.enabled(Rule::AnyType) && !is_overridden {
|
||||
let name = &arg.arg;
|
||||
check_dynamically_typed(
|
||||
expr,
|
||||
|| format!("*{name}"),
|
||||
&mut diagnostics,
|
||||
is_overridden,
|
||||
checker.semantic(),
|
||||
);
|
||||
check_dynamically_typed(checker, expr, || format!("*{name}"), &mut diagnostics);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -562,14 +583,13 @@ pub(crate) fn definition(
|
||||
if let Some(expr) = &arg.annotation {
|
||||
has_any_typed_arg = true;
|
||||
if !checker.settings.flake8_annotations.allow_star_arg_any {
|
||||
if checker.enabled(Rule::AnyType) {
|
||||
if checker.enabled(Rule::AnyType) && !is_overridden {
|
||||
let name = &arg.arg;
|
||||
check_dynamically_typed(
|
||||
checker,
|
||||
expr,
|
||||
|| format!("**{name}"),
|
||||
&mut diagnostics,
|
||||
is_overridden,
|
||||
checker.semantic(),
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -629,14 +649,8 @@ pub(crate) fn definition(
|
||||
// ANN201, ANN202, ANN401
|
||||
if let Some(expr) = &returns {
|
||||
has_typed_return = true;
|
||||
if checker.enabled(Rule::AnyType) {
|
||||
check_dynamically_typed(
|
||||
expr,
|
||||
|| name.to_string(),
|
||||
&mut diagnostics,
|
||||
is_overridden,
|
||||
checker.semantic(),
|
||||
);
|
||||
if checker.enabled(Rule::AnyType) && !is_overridden {
|
||||
check_dynamically_typed(checker, expr, || name.to_string(), &mut diagnostics);
|
||||
}
|
||||
} else if !(
|
||||
// Allow omission of return annotation if the function only returns `None`
|
||||
|
||||
@@ -186,4 +186,60 @@ annotation_presence.py:134:13: ANN101 Missing type annotation for `self` in meth
|
||||
135 | pass
|
||||
|
|
||||
|
||||
annotation_presence.py:149:10: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `a`
|
||||
|
|
||||
148 | # ANN401
|
||||
149 | def f(a: Any | int) -> None: ...
|
||||
| ^^^^^^^^^ ANN401
|
||||
150 | def f(a: int | Any) -> None: ...
|
||||
151 | def f(a: Union[str, bytes, Any]) -> None: ...
|
||||
|
|
||||
|
||||
annotation_presence.py:150:10: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `a`
|
||||
|
|
||||
148 | # ANN401
|
||||
149 | def f(a: Any | int) -> None: ...
|
||||
150 | def f(a: int | Any) -> None: ...
|
||||
| ^^^^^^^^^ ANN401
|
||||
151 | def f(a: Union[str, bytes, Any]) -> None: ...
|
||||
152 | def f(a: Optional[Any]) -> None: ...
|
||||
|
|
||||
|
||||
annotation_presence.py:151:10: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `a`
|
||||
|
|
||||
149 | def f(a: Any | int) -> None: ...
|
||||
150 | def f(a: int | Any) -> None: ...
|
||||
151 | def f(a: Union[str, bytes, Any]) -> None: ...
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^ ANN401
|
||||
152 | def f(a: Optional[Any]) -> None: ...
|
||||
153 | def f(a: Annotated[Any, ...]) -> None: ...
|
||||
|
|
||||
|
||||
annotation_presence.py:152:10: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `a`
|
||||
|
|
||||
150 | def f(a: int | Any) -> None: ...
|
||||
151 | def f(a: Union[str, bytes, Any]) -> None: ...
|
||||
152 | def f(a: Optional[Any]) -> None: ...
|
||||
| ^^^^^^^^^^^^^ ANN401
|
||||
153 | def f(a: Annotated[Any, ...]) -> None: ...
|
||||
154 | def f(a: "Union[str, bytes, Any]") -> None: ...
|
||||
|
|
||||
|
||||
annotation_presence.py:153:10: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `a`
|
||||
|
|
||||
151 | def f(a: Union[str, bytes, Any]) -> None: ...
|
||||
152 | def f(a: Optional[Any]) -> None: ...
|
||||
153 | def f(a: Annotated[Any, ...]) -> None: ...
|
||||
| ^^^^^^^^^^^^^^^^^^^ ANN401
|
||||
154 | def f(a: "Union[str, bytes, Any]") -> None: ...
|
||||
|
|
||||
|
||||
annotation_presence.py:154:10: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `a`
|
||||
|
|
||||
152 | def f(a: Optional[Any]) -> None: ...
|
||||
153 | def f(a: Annotated[Any, ...]) -> None: ...
|
||||
154 | def f(a: "Union[str, bytes, Any]") -> None: ...
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^ ANN401
|
||||
|
|
||||
|
||||
|
||||
|
||||
@@ -1,15 +1,39 @@
|
||||
use num_traits::ToPrimitive;
|
||||
use once_cell::sync::Lazy;
|
||||
use rustc_hash::FxHashMap;
|
||||
use rustpython_parser::ast::{self, Constant, Expr, Keyword, Operator, Ranged};
|
||||
|
||||
use ruff_diagnostics::{Diagnostic, Violation};
|
||||
use ruff_macros::{derive_message_formats, violation};
|
||||
use ruff_python_ast::call_path::compose_call_path;
|
||||
use ruff_python_ast::call_path::CallPath;
|
||||
use ruff_python_ast::helpers::SimpleCallArgs;
|
||||
use ruff_python_semantic::SemanticModel;
|
||||
|
||||
use crate::checkers::ast::Checker;
|
||||
|
||||
/// ## What it does
|
||||
/// Checks for files with overly permissive permissions.
|
||||
///
|
||||
/// ## Why is this bad?
|
||||
/// Overly permissive file permissions may allow unintended access and
|
||||
/// arbitrary code execution.
|
||||
///
|
||||
/// ## Example
|
||||
/// ```python
|
||||
/// import os
|
||||
///
|
||||
/// os.chmod("/etc/secrets.txt", 0o666) # rw-rw-rw-
|
||||
/// ```
|
||||
///
|
||||
/// Use instead:
|
||||
/// ```python
|
||||
/// import os
|
||||
///
|
||||
/// os.chmod("/etc/secrets.txt", 0o600) # rw-------
|
||||
/// ```
|
||||
///
|
||||
/// ## References
|
||||
/// - [Python documentation: `os.chmod`](https://docs.python.org/3/library/os.html#os.chmod)
|
||||
/// - [Python documentation: `stat`](https://docs.python.org/3/library/stat.html)
|
||||
/// - [Common Weakness Enumeration: CWE-732](https://cwe.mitre.org/data/definitions/732.html)
|
||||
#[violation]
|
||||
pub struct BadFilePermissions {
|
||||
mask: u16,
|
||||
@@ -19,84 +43,7 @@ impl Violation for BadFilePermissions {
|
||||
#[derive_message_formats]
|
||||
fn message(&self) -> String {
|
||||
let BadFilePermissions { mask } = self;
|
||||
format!("`os.chmod` setting a permissive mask `{mask:#o}` on file or directory",)
|
||||
}
|
||||
}
|
||||
|
||||
const WRITE_WORLD: u16 = 0o2;
|
||||
const EXECUTE_GROUP: u16 = 0o10;
|
||||
|
||||
static PYSTAT_MAPPING: Lazy<FxHashMap<&'static str, u16>> = Lazy::new(|| {
|
||||
FxHashMap::from_iter([
|
||||
("stat.ST_MODE", 0o0),
|
||||
("stat.S_IFDOOR", 0o0),
|
||||
("stat.S_IFPORT", 0o0),
|
||||
("stat.ST_INO", 0o1),
|
||||
("stat.S_IXOTH", 0o1),
|
||||
("stat.UF_NODUMP", 0o1),
|
||||
("stat.ST_DEV", 0o2),
|
||||
("stat.S_IWOTH", 0o2),
|
||||
("stat.UF_IMMUTABLE", 0o2),
|
||||
("stat.ST_NLINK", 0o3),
|
||||
("stat.ST_UID", 0o4),
|
||||
("stat.S_IROTH", 0o4),
|
||||
("stat.UF_APPEND", 0o4),
|
||||
("stat.ST_GID", 0o5),
|
||||
("stat.ST_SIZE", 0o6),
|
||||
("stat.ST_ATIME", 0o7),
|
||||
("stat.S_IRWXO", 0o7),
|
||||
("stat.ST_MTIME", 0o10),
|
||||
("stat.S_IXGRP", 0o10),
|
||||
("stat.UF_OPAQUE", 0o10),
|
||||
("stat.ST_CTIME", 0o11),
|
||||
("stat.S_IWGRP", 0o20),
|
||||
("stat.UF_NOUNLINK", 0o20),
|
||||
("stat.S_IRGRP", 0o40),
|
||||
("stat.UF_COMPRESSED", 0o40),
|
||||
("stat.S_IRWXG", 0o70),
|
||||
("stat.S_IEXEC", 0o100),
|
||||
("stat.S_IXUSR", 0o100),
|
||||
("stat.S_IWRITE", 0o200),
|
||||
("stat.S_IWUSR", 0o200),
|
||||
("stat.S_IREAD", 0o400),
|
||||
("stat.S_IRUSR", 0o400),
|
||||
("stat.S_IRWXU", 0o700),
|
||||
("stat.S_ISVTX", 0o1000),
|
||||
("stat.S_ISGID", 0o2000),
|
||||
("stat.S_ENFMT", 0o2000),
|
||||
("stat.S_ISUID", 0o4000),
|
||||
])
|
||||
});
|
||||
|
||||
fn get_int_value(expr: &Expr) -> Option<u16> {
|
||||
match expr {
|
||||
Expr::Constant(ast::ExprConstant {
|
||||
value: Constant::Int(value),
|
||||
..
|
||||
}) => value.to_u16(),
|
||||
Expr::Attribute(_) => {
|
||||
compose_call_path(expr).and_then(|path| PYSTAT_MAPPING.get(path.as_str()).copied())
|
||||
}
|
||||
Expr::BinOp(ast::ExprBinOp {
|
||||
left,
|
||||
op,
|
||||
right,
|
||||
range: _,
|
||||
}) => {
|
||||
if let (Some(left_value), Some(right_value)) =
|
||||
(get_int_value(left), get_int_value(right))
|
||||
{
|
||||
match op {
|
||||
Operator::BitAnd => Some(left_value & right_value),
|
||||
Operator::BitOr => Some(left_value | right_value),
|
||||
Operator::BitXor => Some(left_value ^ right_value),
|
||||
_ => None,
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
_ => None,
|
||||
format!("`os.chmod` setting a permissive mask `{mask:#o}` on file or directory")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -116,7 +63,7 @@ pub(crate) fn bad_file_permissions(
|
||||
{
|
||||
let call_args = SimpleCallArgs::new(args, keywords);
|
||||
if let Some(mode_arg) = call_args.argument("mode", 1) {
|
||||
if let Some(int_value) = get_int_value(mode_arg) {
|
||||
if let Some(int_value) = int_value(mode_arg, checker.semantic()) {
|
||||
if (int_value & WRITE_WORLD > 0) || (int_value & EXECUTE_GROUP > 0) {
|
||||
checker.diagnostics.push(Diagnostic::new(
|
||||
BadFilePermissions { mask: int_value },
|
||||
@@ -127,3 +74,75 @@ pub(crate) fn bad_file_permissions(
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const WRITE_WORLD: u16 = 0o2;
|
||||
const EXECUTE_GROUP: u16 = 0o10;
|
||||
|
||||
fn py_stat(call_path: &CallPath) -> Option<u16> {
|
||||
match call_path.as_slice() {
|
||||
["stat", "ST_MODE"] => Some(0o0),
|
||||
["stat", "S_IFDOOR"] => Some(0o0),
|
||||
["stat", "S_IFPORT"] => Some(0o0),
|
||||
["stat", "ST_INO"] => Some(0o1),
|
||||
["stat", "S_IXOTH"] => Some(0o1),
|
||||
["stat", "UF_NODUMP"] => Some(0o1),
|
||||
["stat", "ST_DEV"] => Some(0o2),
|
||||
["stat", "S_IWOTH"] => Some(0o2),
|
||||
["stat", "UF_IMMUTABLE"] => Some(0o2),
|
||||
["stat", "ST_NLINK"] => Some(0o3),
|
||||
["stat", "ST_UID"] => Some(0o4),
|
||||
["stat", "S_IROTH"] => Some(0o4),
|
||||
["stat", "UF_APPEND"] => Some(0o4),
|
||||
["stat", "ST_GID"] => Some(0o5),
|
||||
["stat", "ST_SIZE"] => Some(0o6),
|
||||
["stat", "ST_ATIME"] => Some(0o7),
|
||||
["stat", "S_IRWXO"] => Some(0o7),
|
||||
["stat", "ST_MTIME"] => Some(0o10),
|
||||
["stat", "S_IXGRP"] => Some(0o10),
|
||||
["stat", "UF_OPAQUE"] => Some(0o10),
|
||||
["stat", "ST_CTIME"] => Some(0o11),
|
||||
["stat", "S_IWGRP"] => Some(0o20),
|
||||
["stat", "UF_NOUNLINK"] => Some(0o20),
|
||||
["stat", "S_IRGRP"] => Some(0o40),
|
||||
["stat", "UF_COMPRESSED"] => Some(0o40),
|
||||
["stat", "S_IRWXG"] => Some(0o70),
|
||||
["stat", "S_IEXEC"] => Some(0o100),
|
||||
["stat", "S_IXUSR"] => Some(0o100),
|
||||
["stat", "S_IWRITE"] => Some(0o200),
|
||||
["stat", "S_IWUSR"] => Some(0o200),
|
||||
["stat", "S_IREAD"] => Some(0o400),
|
||||
["stat", "S_IRUSR"] => Some(0o400),
|
||||
["stat", "S_IRWXU"] => Some(0o700),
|
||||
["stat", "S_ISVTX"] => Some(0o1000),
|
||||
["stat", "S_ISGID"] => Some(0o2000),
|
||||
["stat", "S_ENFMT"] => Some(0o2000),
|
||||
["stat", "S_ISUID"] => Some(0o4000),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn int_value(expr: &Expr, model: &SemanticModel) -> Option<u16> {
|
||||
match expr {
|
||||
Expr::Constant(ast::ExprConstant {
|
||||
value: Constant::Int(value),
|
||||
..
|
||||
}) => value.to_u16(),
|
||||
Expr::Attribute(_) => model.resolve_call_path(expr).as_ref().and_then(py_stat),
|
||||
Expr::BinOp(ast::ExprBinOp {
|
||||
left,
|
||||
op,
|
||||
right,
|
||||
range: _,
|
||||
}) => {
|
||||
let left_value = int_value(left, model)?;
|
||||
let right_value = int_value(right, model)?;
|
||||
match op {
|
||||
Operator::BitAnd => Some(left_value & right_value),
|
||||
Operator::BitOr => Some(left_value | right_value),
|
||||
Operator::BitXor => Some(left_value ^ right_value),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,6 +5,21 @@ use ruff_macros::{derive_message_formats, violation};
|
||||
|
||||
use crate::checkers::ast::Checker;
|
||||
|
||||
/// ## What it does
|
||||
/// Checks for uses of the builtin `exec` function.
|
||||
///
|
||||
/// ## Why is this bad?
|
||||
/// The `exec()` function is insecure as it allows for arbitrary code
|
||||
/// execution.
|
||||
///
|
||||
/// ## Example
|
||||
/// ```python
|
||||
/// exec("print('Hello World')")
|
||||
/// ```
|
||||
///
|
||||
/// ## References
|
||||
/// - [Python documentation: `exec`](https://docs.python.org/3/library/functions.html#exec)
|
||||
/// - [Common Weakness Enumeration: CWE-78](https://cwe.mitre.org/data/definitions/78.html)
|
||||
#[violation]
|
||||
pub struct ExecBuiltin;
|
||||
|
||||
|
||||
@@ -3,6 +3,27 @@ use ruff_text_size::TextRange;
|
||||
use ruff_diagnostics::{Diagnostic, Violation};
|
||||
use ruff_macros::{derive_message_formats, violation};
|
||||
|
||||
/// ## What it does
|
||||
/// Checks for hardcoded bindings to all network interfaces (`0.0.0.0`).
|
||||
///
|
||||
/// ## Why is this bad?
|
||||
/// Binding to all network interfaces is insecure as it allows access from
|
||||
/// unintended interfaces, which may be poorly secured or unauthorized.
|
||||
///
|
||||
/// Instead, bind to specific interfaces.
|
||||
///
|
||||
/// ## Example
|
||||
/// ```python
|
||||
/// ALLOWED_HOSTS = ["0.0.0.0"]
|
||||
/// ```
|
||||
///
|
||||
/// Use instead:
|
||||
/// ```python
|
||||
/// ALLOWED_HOSTS = ["127.0.0.1", "localhost"]
|
||||
/// ```
|
||||
///
|
||||
/// ## References
|
||||
/// - [Common Weakness Enumeration: CWE-200](https://cwe.mitre.org/data/definitions/200.html)
|
||||
#[violation]
|
||||
pub struct HardcodedBindAllInterfaces;
|
||||
|
||||
|
||||
@@ -1,11 +1,42 @@
|
||||
use rustpython_parser::ast::{Arg, ArgWithDefault, Arguments, Expr, Ranged};
|
||||
|
||||
use crate::checkers::ast::Checker;
|
||||
use ruff_diagnostics::{Diagnostic, Violation};
|
||||
use ruff_macros::{derive_message_formats, violation};
|
||||
|
||||
use crate::checkers::ast::Checker;
|
||||
|
||||
use super::super::helpers::{matches_password_name, string_literal};
|
||||
|
||||
/// ## What it does
|
||||
/// Checks for potential uses of hardcoded passwords in function argument
|
||||
/// defaults.
|
||||
///
|
||||
/// ## Why is this bad?
|
||||
/// Including a hardcoded password in source code is a security risk, as an
|
||||
/// attacker could discover the password and use it to gain unauthorized
|
||||
/// access.
|
||||
///
|
||||
/// Instead, store passwords and other secrets in configuration files,
|
||||
/// environment variables, or other sources that are excluded from version
|
||||
/// control.
|
||||
///
|
||||
/// ## Example
|
||||
/// ```python
|
||||
/// def connect_to_server(password="hunter2"):
|
||||
/// ...
|
||||
/// ```
|
||||
///
|
||||
/// Use instead:
|
||||
/// ```python
|
||||
/// import os
|
||||
///
|
||||
///
|
||||
/// def connect_to_server(password=os.environ["PASSWORD"]):
|
||||
/// ...
|
||||
/// ```
|
||||
///
|
||||
/// ## References
|
||||
/// - [Common Weakness Enumeration: CWE-259](https://cwe.mitre.org/data/definitions/259.html)
|
||||
#[violation]
|
||||
pub struct HardcodedPasswordDefault {
|
||||
name: String,
|
||||
|
||||
@@ -1,11 +1,38 @@
|
||||
use rustpython_parser::ast::{Keyword, Ranged};
|
||||
|
||||
use crate::checkers::ast::Checker;
|
||||
use ruff_diagnostics::{Diagnostic, Violation};
|
||||
use ruff_macros::{derive_message_formats, violation};
|
||||
|
||||
use crate::checkers::ast::Checker;
|
||||
|
||||
use super::super::helpers::{matches_password_name, string_literal};
|
||||
|
||||
/// ## What it does
|
||||
/// Checks for potential uses of hardcoded passwords in function calls.
|
||||
///
|
||||
/// ## Why is this bad?
|
||||
/// Including a hardcoded password in source code is a security risk, as an
|
||||
/// attacker could discover the password and use it to gain unauthorized
|
||||
/// access.
|
||||
///
|
||||
/// Instead, store passwords and other secrets in configuration files,
|
||||
/// environment variables, or other sources that are excluded from version
|
||||
/// control.
|
||||
///
|
||||
/// ## Example
|
||||
/// ```python
|
||||
/// connect_to_server(password="hunter2")
|
||||
/// ```
|
||||
///
|
||||
/// Use instead:
|
||||
/// ```python
|
||||
/// import os
|
||||
///
|
||||
/// connect_to_server(password=os.environ["PASSWORD"])
|
||||
/// ```
|
||||
///
|
||||
/// ## References
|
||||
/// - [Common Weakness Enumeration: CWE-259](https://cwe.mitre.org/data/definitions/259.html)
|
||||
#[violation]
|
||||
pub struct HardcodedPasswordFuncArg {
|
||||
name: String,
|
||||
|
||||
@@ -7,6 +7,32 @@ use crate::checkers::ast::Checker;
|
||||
|
||||
use super::super::helpers::{matches_password_name, string_literal};
|
||||
|
||||
/// ## What it does
|
||||
/// Checks for potential uses of hardcoded passwords in strings.
|
||||
///
|
||||
/// ## Why is this bad?
|
||||
/// Including a hardcoded password in source code is a security risk, as an
|
||||
/// attacker could discover the password and use it to gain unauthorized
|
||||
/// access.
|
||||
///
|
||||
/// Instead, store passwords and other secrets in configuration files,
|
||||
/// environment variables, or other sources that are excluded from version
|
||||
/// control.
|
||||
///
|
||||
/// ## Example
|
||||
/// ```python
|
||||
/// SECRET_KEY = "hunter2"
|
||||
/// ```
|
||||
///
|
||||
/// Use instead:
|
||||
/// ```python
|
||||
/// import os
|
||||
///
|
||||
/// SECRET_KEY = os.environ["SECRET_KEY"]
|
||||
/// ```
|
||||
///
|
||||
/// ## References
|
||||
/// - [Common Weakness Enumeration: CWE-259](https://cwe.mitre.org/data/definitions/259.html)
|
||||
#[violation]
|
||||
pub struct HardcodedPasswordString {
|
||||
name: String,
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user