Compare commits

..

3 Commits

Author                  SHA1        Message                                                                    Date
Charlie Marsh           6e97c9438a  Merge branch 'main' into github-2923                                       2023-03-02 15:55:55 -05:00
Charlie Marsh           ea86edf12e  Rename, etc.                                                               2023-03-02 15:50:10 -05:00
Martin Kagamino Lehoux  2558384817  Replace tuples with type union in isinstance or issubclass calls (#2923)  2023-03-02 21:07:00 +01:00
1331 changed files with 20203 additions and 37190 deletions
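
For context on the headline change here, #2923 adds a rule that flags `isinstance` and `issubclass` calls passing a tuple of types where a PEP 604 union (`X | Y`) would do. A minimal sketch of the before/after (illustrative code, not taken from this diff; applies to Python 3.10+ targets):

```python
value = "hello"

# Flagged: a tuple of types passed to isinstance
assert isinstance(value, (int, str))

# Preferred: a PEP 604 type union, which the rule's autofix produces
assert isinstance(value, int | str)
```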

View File

@@ -1,6 +1,5 @@
[alias]
dev = "run --package ruff_dev --bin ruff_dev"
benchmark = "bench -p ruff_benchmark --"
[target.'cfg(all())']
rustflags = [

View File

@@ -1,11 +0,0 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
day: "monday"
time: "12:00"
timezone: "America/New_York"
commit-message:
prefix: "ci(deps)"

View File

@@ -1,133 +0,0 @@
name: Benchmark
on:
pull_request:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
jobs:
run-benchmark:
if: github.event_name == 'pull_request'
name: "Run | ${{ matrix.os }}"
strategy:
matrix:
os: [ubuntu-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
- name: "PR - Checkout Branch"
uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: "PR - Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v1
- name: "PR - Build benchmarks"
uses: actions-rs/cargo@v1
with:
command: bench
args: -p ruff_benchmark --no-run
- name: "PR - Run benchmarks"
run: cargo benchmark --save-baseline=pr
- name: "Main - Checkout Branch"
uses: actions/checkout@v3
with:
clean: false
ref: main
- name: "Main - Install Rust toolchain"
run: rustup show
- name: "Main - Build benchmarks"
uses: actions-rs/cargo@v1
with:
command: bench
args: -p ruff_benchmark --no-run
- name: "Main - Run benchmarks"
run: cargo benchmark --save-baseline=main
- name: "Upload benchmark results"
uses: actions/upload-artifact@v3
with:
name: benchmark-results-${{ matrix.os }}
path: ./target/criterion
# Cleanup
- name: Remove Criterion Artifact
uses: JesseTG/rm@v1.0.3
with:
path: ./target/criterion
benchmark-compare:
if: github.event_name == 'pull_request'
runs-on: ubuntu-latest
name: Compare
needs:
- run-benchmark
steps:
- name: "Install Rust toolchain"
run: rustup show
- name: "Install critcmp"
# Use the debug build: compiling critcmp in release mode takes much longer than any slowdown from running the debug binary.
run: cargo install --debug critcmp
- name: "Linux | Download PR benchmark results"
uses: actions/download-artifact@v3
with:
name: benchmark-results-ubuntu-latest
path: ./target/criterion
- name: "Linux | Compare benchmark results"
shell: bash
run: |
echo "### Benchmark" >> summary.md
echo "#### Linux" >> summary.md
echo "\`\`\`" >> summary.md
critcmp main pr >> summary.md
echo "\`\`\`" >> summary.md
echo "" >> summary.md
- name: "Linux | Cleanup benchmark results"
run: rm -rf ./target/criterion
- name: "Windows | Download PR benchmark results"
uses: actions/download-artifact@v3
with:
name: benchmark-results-windows-latest
path: ./target/criterion
- name: "Windows | Compare benchmark results"
shell: bash
run: |
echo "#### Windows" >> summary.md
echo "\`\`\`" >> summary.md
critcmp main pr >> summary.md
echo "\`\`\`" >> summary.md
echo "" >> summary.md
echo ${{ github.event.pull_request.number }} > pr-number
cat summary.md > $GITHUB_STEP_SUMMARY
- uses: actions/upload-artifact@v3
name: Upload PR Number
with:
name: pr-number
path: pr-number
- uses: actions/upload-artifact@v3
name: Upload Summary
with:
name: summary
path: summary.md

View File

@@ -17,6 +17,20 @@ env:
RUSTUP_MAX_RETRIES: 10
jobs:
cargo-build:
name: "cargo build"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v1
- run: cargo build --all
- run: ./target/debug/ruff_dev generate-all
- run: git diff --quiet README.md || echo "::error file=README.md::This file is outdated. Run 'cargo dev generate-all'."
- run: git diff --quiet ruff.schema.json || echo "::error file=ruff.schema.json::This file is outdated. Run 'cargo dev generate-all'."
- run: git diff --exit-code -- README.md ruff.schema.json docs
cargo-fmt:
name: "cargo fmt"
runs-on: ubuntu-latest
@@ -34,7 +48,7 @@ jobs:
- name: "Install Rust toolchain"
run: |
rustup component add clippy
- uses: Swatinem/rust-cache@v2
- uses: Swatinem/rust-cache@v1
- run: cargo clippy --workspace --all-targets --all-features -- -D warnings
cargo-clippy-wasm:
@@ -46,8 +60,8 @@ jobs:
run: |
rustup component add clippy
rustup target add wasm32-unknown-unknown
- uses: Swatinem/rust-cache@v2
- run: cargo clippy -p ruff_wasm --target wasm32-unknown-unknown --all-features -- -D warnings
- uses: Swatinem/rust-cache@v1
- run: cargo clippy -p ruff --target wasm32-unknown-unknown --all-features -- -D warnings
cargo-test:
strategy:
@@ -59,9 +73,9 @@ jobs:
- uses: actions/checkout@v3
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v2
- uses: Swatinem/rust-cache@v1
- run: cargo install cargo-insta
- run: pip install black[d]==23.1.0
- run: pip install black[d]==22.12.0
- name: "Run tests (Ubuntu)"
if: ${{ matrix.os == 'ubuntu-latest' }}
run: |
@@ -79,31 +93,6 @@ jobs:
env:
# Setting RUSTDOCFLAGS because `cargo doc --check` isn't yet implemented (https://github.com/rust-lang/cargo/issues/10025).
RUSTDOCFLAGS: "-D warnings"
- uses: actions/upload-artifact@v3
if: ${{ matrix.os == 'ubuntu-latest' }}
with:
name: ruff
path: target/debug/ruff
cargo-test-wasm:
runs-on: ubuntu-latest
name: "cargo test (wasm)"
steps:
- uses: actions/checkout@v3
- name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown
- uses: actions/setup-node@v3
with:
node-version: 18
cache: "npm"
cache-dependency-path: playground/package-lock.json
- uses: jetli/wasm-pack-action@v0.4.0
- uses: Swatinem/rust-cache@v2
- name: "Run wasm-pack"
run: |
cd crates/ruff_wasm
wasm-pack test --node
scripts:
name: "test scripts"
@@ -112,7 +101,7 @@ jobs:
- uses: actions/checkout@v3
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v2
- uses: Swatinem/rust-cache@v1
- run: ./scripts/add_rule.py --name DoTheThing --code PLC999 --linter pylint
- run: cargo check
- run: |
@@ -120,6 +109,21 @@ jobs:
./scripts/add_rule.py --name FirstRule --code TST001 --linter test
- run: cargo check
maturin-build:
name: "maturin build"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v1
- uses: actions/setup-python@v4
with:
python-version: "3.11"
- run: pip install maturin
- run: maturin build -b bin
- run: python scripts/transform_readme.py --target pypi
typos:
name: "spell check"
runs-on: ubuntu-latest
@@ -128,51 +132,3 @@ jobs:
- uses: crate-ci/typos@master
with:
files: .
ecosystem:
name: "ecosystem"
runs-on: ubuntu-latest
needs: cargo-test
# Only runs on pull requests, since that is the only way we can find the base version for comparison.
if: github.event_name == 'pull_request'
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: "3.11"
- uses: actions/download-artifact@v3
name: Download Ruff binary
id: ruff-target
with:
name: ruff
path: target/debug
- uses: dawidd6/action-download-artifact@v2
name: Download base results
with:
name: ruff
branch: ${{ github.event.pull_request.base.ref }}
check_artifacts: true
- name: Run ecosystem check
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ruff ${{ steps.ruff-target.outputs.download-path }}/ruff
scripts/check_ecosystem.py ruff ${{ steps.ruff-target.outputs.download-path }}/ruff | tee ecosystem-result
cat ecosystem-result > $GITHUB_STEP_SUMMARY
echo ${{ github.event.number }} > pr-number
- uses: actions/upload-artifact@v3
name: Upload PR Number
with:
name: pr-number
path: pr-number
- uses: actions/upload-artifact@v3
name: Upload Results
with:
name: ecosystem-result
path: ecosystem-result

View File

@@ -15,7 +15,7 @@ jobs:
- uses: actions/setup-python@v4
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v2
- uses: Swatinem/rust-cache@v1
- name: "Install dependencies"
run: |
pip install -r docs/requirements.txt

View File

@@ -133,7 +133,7 @@ jobs:
target: ${{ matrix.target }}
manylinux: auto
args: --no-default-features --release --out dist -m ./${{ env.CRATE_NAME }}/Cargo.toml
- uses: uraimo/run-on-arch-action@v2
- uses: uraimo/run-on-arch-action@v2.5.0
if: matrix.target != 'ppc64'
name: Install built wheel
with:
@@ -206,7 +206,7 @@ jobs:
target: ${{ matrix.platform.target }}
manylinux: musllinux_1_2
args: --release --out dist -m ./${{ env.CRATE_NAME }}/Cargo.toml
- uses: uraimo/run-on-arch-action@v2
- uses: uraimo/run-on-arch-action@master
name: Install built wheel
with:
arch: ${{ matrix.platform.arch }}

View File

@@ -28,7 +28,7 @@ jobs:
- uses: jetli/wasm-pack-action@v0.4.0
- uses: jetli/wasm-bindgen-action@v0.2.0
- name: "Run wasm-pack"
run: wasm-pack build --target web --out-dir ../../playground/src/pkg crates/ruff_wasm
run: wasm-pack build --target web --out-dir ../../playground/src/pkg crates/ruff
- name: "Install Node dependencies"
run: npm ci
working-directory: playground

View File

@@ -1,83 +0,0 @@
name: PR Check Comment
on:
workflow_run:
workflows: [CI, Benchmark]
types: [completed]
workflow_dispatch:
inputs:
workflow_run_id:
description: The ecosystem workflow that triggers the workflow run
required: true
permissions:
pull-requests: write
jobs:
comment:
runs-on: ubuntu-latest
steps:
- uses: dawidd6/action-download-artifact@v2
name: Download PR Number
with:
name: pr-number
run_id: ${{ github.event.workflow_run.id || github.event.inputs.workflow_run_id }}
if_no_artifact_found: ignore
- name: Extract PR Number
id: pr-number
run: |
if [[ -f pr-number ]]
then
echo "pr-number=$(<pr-number)" >> $GITHUB_OUTPUT
fi
- uses: dawidd6/action-download-artifact@v2
name: "Download Ecosystem Result"
id: download-ecosystem-result
if: steps.pr-number.outputs.pr-number
with:
name: ecosystem-result
workflow: ci.yaml
pr: ${{ steps.pr-number.outputs.pr-number }}
path: pr/ecosystem
if_no_artifact_found: ignore
- uses: dawidd6/action-download-artifact@v2
name: "Download Benchmark Result"
id: download-benchmark-result
if: steps.pr-number.outputs.pr-number
with:
name: summary
workflow: benchmark.yaml
pr: ${{ steps.pr-number.outputs.pr-number }}
path: pr/benchmark
if_no_artifact_found: ignore
- name: Generate Comment
id: generate-comment
if: steps.download-ecosystem-result.outputs.found_artifact == 'true' || steps.download-benchmark-result.outputs.found_artifact == 'true'
run: |
echo 'comment<<EOF' >> $GITHUB_OUTPUT
echo '## PR Check Results' >> $GITHUB_OUTPUT
if [[ -f pr/ecosystem/ecosystem-result ]]
then
echo "### Ecosystem" >> $GITHUB_OUTPUT
cat pr/ecosystem/ecosystem-result >> $GITHUB_OUTPUT
fi
if [[ -f pr/benchmark/summary.md ]]
then
cat pr/benchmark/summary.md >> $GITHUB_OUTPUT
fi
echo 'EOF' >> $GITHUB_OUTPUT
- name: Create or update comment
if: steps.generate-comment.outputs.comment
uses: thollander/actions-comment-pull-request@v2
with:
pr_number: ${{ steps.pr-number.outputs.pr-number }}
message: ${{ steps.generate-comment.outputs.comment }}
comment_tag: PR Check Results

View File

@@ -208,7 +208,7 @@ jobs:
target: ${{ matrix.platform.target }}
manylinux: auto
args: --release --out dist
- uses: uraimo/run-on-arch-action@v2
- uses: uraimo/run-on-arch-action@v2.5.0
if: matrix.platform.arch != 'ppc64'
name: Install built wheel
with:
@@ -309,7 +309,7 @@ jobs:
target: ${{ matrix.platform.target }}
manylinux: musllinux_1_2
args: --release --out dist
- uses: uraimo/run-on-arch-action@v2
- uses: uraimo/run-on-arch-action@master
name: Install built wheel
with:
arch: ${{ matrix.platform.arch }}

View File

@@ -1,7 +1,7 @@
fail_fast: true
repos:
- repo: https://github.com/abravalheri/validate-pyproject
rev: v0.12.1
rev: v0.10.1
hooks:
- id: validate-pyproject

View File

@@ -104,7 +104,7 @@ At a high level, the steps involved in adding a new lint rule are as follows:
1. Determine a name for the new rule as per our [rule naming convention](#rule-naming-convention).
1. Create a file for your rule (e.g., `crates/ruff/src/rules/flake8_bugbear/rules/abstract_base_class.rs`).
1. In that file, define a violation struct. You can grep for `#[violation]` to see examples.
1. In that file, define a violation struct. You can grep for `define_violation!` to see examples.
1. Map the violation struct to a rule code in `crates/ruff/src/registry.rs` (e.g., `E402`).
1. Define the logic for triggering the violation in `crates/ruff/src/checkers/ast.rs` (for AST-based
checks), `crates/ruff/src/checkers/tokens.rs` (for token-based checks), `crates/ruff/src/checkers/lines.rs`
@@ -115,7 +115,7 @@ At a high level, the steps involved in adding a new lint rule are as follows:
To define the violation, start by creating a dedicated file for your rule under the appropriate
rule linter (e.g., `crates/ruff/src/rules/flake8_bugbear/rules/abstract_base_class.rs`). That file should
contain a struct defined via `#[violation]`, along with a function that creates the violation
contain a struct defined via `define_violation!`, along with a function that creates the violation
based on any required inputs. (Many of the existing examples live in `crates/ruff/src/violations.rs`,
but we're looking to place new rules in their own files.)
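
For readers following along: below is a minimal sketch of the newer `#[violation]` style described above. The struct name, message, and module paths are illustrative assumptions for the 0.0.25x-era crate layout, not code taken from this diff:

```rust
use ruff_diagnostics::Violation; // import path approximate for this era of the crate layout
use ruff_macros::{derive_message_formats, violation};

// Hypothetical rule: the name and message are invented for illustration.
#[violation]
pub struct AbstractBaseClassWithoutAbstractMethod {
    pub name: String,
}

impl Violation for AbstractBaseClassWithoutAbstractMethod {
    #[derive_message_formats]
    fn message(&self) -> String {
        let AbstractBaseClassWithoutAbstractMethod { name } = self;
        format!("`{name}` is an abstract base class, but it has no abstract methods")
    }
}
```

The struct is then mapped to a rule code in `crates/ruff/src/registry.rs` and wired into the appropriate checker, per the steps above.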

Cargo.lock (generated, 365 changed lines)
View File

@@ -289,9 +289,9 @@ dependencies = [
[[package]]
name = "clap"
version = "4.1.8"
version = "4.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3d7ae14b20b94cb02149ed21a86c423859cbe18dc7ed69845cace50e52b40a5"
checksum = "ec0b0588d44d4d63a87dbd75c136c166bbfd9a86a31cb89e09906521c7d3f5e3"
dependencies = [
"bitflags",
"clap_derive",
@@ -308,19 +308,18 @@ version = "4.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd125be87bf4c255ebc50de0b7f4d2a6201e8ac3dc86e39c0ad081dc5e7236fe"
dependencies = [
"clap 4.1.8",
"clap 4.1.6",
]
[[package]]
name = "clap_complete_command"
version = "0.5.1"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "183495371ea78d4c9ff638bfc6497d46fed2396e4f9c50aebc1278a4a9919a3d"
checksum = "4160b4a4f72ef58bd766bad27c09e6ef1cc9d82a22f6a0f55d152985a4a48e31"
dependencies = [
"clap 4.1.8",
"clap 4.1.6",
"clap_complete",
"clap_complete_fig",
"clap_complete_nushell",
]
[[package]]
@@ -329,25 +328,15 @@ version = "4.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "63a06158a72dbb088f864887b4409fd22600ba379f3cee3040f842234cc5c2d0"
dependencies = [
"clap 4.1.8",
"clap_complete",
]
[[package]]
name = "clap_complete_nushell"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7fa41f5e6aa83bd151b70fd0ceaee703d68cd669522795dc812df9edad1252c"
dependencies = [
"clap 4.1.8",
"clap 4.1.6",
"clap_complete",
]
[[package]]
name = "clap_derive"
version = "4.1.8"
version = "4.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44bec8e5c9d09e439c4335b1af0abaab56dcf3b94999a936e1bb47b9134288f0"
checksum = "684a277d672e91966334af371f1a7b5833f9aa00b07c84e92fbce95e00208ce8"
dependencies = [
"heck",
"proc-macro-error",
@@ -438,9 +427,9 @@ dependencies = [
[[package]]
name = "console_log"
version = "0.2.1"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "878ad067d4089144a36ee412d665916c665430eb84c0057b9987f424a5d15c03"
checksum = "501a375961cef1a0d44767200e66e4a559283097e91d0730b1d75dfb2f8a1494"
dependencies = [
"log",
"web-sys",
@@ -565,16 +554,6 @@ dependencies = [
"typenum",
]
[[package]]
name = "ctor"
version = "0.1.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096"
dependencies = [
"quote",
"syn",
]
[[package]]
name = "cxx"
version = "1.0.91"
@@ -619,6 +598,17 @@ dependencies = [
"syn",
]
[[package]]
name = "derivative"
version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "diff"
version = "0.1.13"
@@ -780,10 +770,10 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
[[package]]
name = "flake8-to-ruff"
version = "0.0.256"
version = "0.0.253"
dependencies = [
"anyhow",
"clap 4.1.8",
"clap 4.1.6",
"colored",
"configparser",
"once_cell",
@@ -1090,6 +1080,12 @@ version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440"
[[package]]
name = "joinery"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72167d68f5fce3b8655487b8038691a3c9984ee769590f93f2a631f4ad64e4f5"
[[package]]
name = "js-sys"
version = "0.3.61"
@@ -1373,6 +1369,15 @@ dependencies = [
"version_check",
]
[[package]]
name = "nom8"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae01545c9c7fc4486ab7debaf2aad7003ac19431791868fb2e8066df97fad2f8"
dependencies = [
"memchr",
]
[[package]]
name = "notify"
version = "5.1.0"
@@ -1400,6 +1405,7 @@ dependencies = [
"autocfg",
"num-integer",
"num-traits",
"serde",
]
[[package]]
@@ -1409,6 +1415,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d"
dependencies = [
"num-traits",
"serde",
]
[[package]]
@@ -1440,6 +1447,27 @@ dependencies = [
"libc",
]
[[package]]
name = "num_enum"
version = "0.5.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e0072973714303aa6e3631c7e8e777970cf4bdd25dc4932e41031027b8bcc4e"
dependencies = [
"num_enum_derive",
]
[[package]]
name = "num_enum_derive"
version = "0.5.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0629cbd6b897944899b1f10496d9c4a7ac5878d45fd61bc22e9e79bfbbc29597"
dependencies = [
"proc-macro-crate",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "once_cell"
version = "1.17.1"
@@ -1458,15 +1486,6 @@ version = "6.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee"
[[package]]
name = "output_vt100"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66"
dependencies = [
"winapi",
]
[[package]]
name = "parking_lot"
version = "0.12.1"
@@ -1514,12 +1533,6 @@ dependencies = [
"once_cell",
]
[[package]]
name = "pathdiff"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd"
[[package]]
name = "peg"
version = "0.8.1"
@@ -1547,18 +1560,6 @@ version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fa00462b37ead6d11a82c9d568b26682d78e0477dc02d1966c013af80969739"
[[package]]
name = "pep440_rs"
version = "0.2.0"
source = "git+https://github.com/konstin/pep440-rs.git?rev=a8fef4ec47f4c25b070b39cdbe6a0b9847e49941#a8fef4ec47f4c25b070b39cdbe6a0b9847e49941"
dependencies = [
"lazy_static",
"regex",
"serde",
"tracing",
"unicode-width",
]
[[package]]
name = "percent-encoding"
version = "2.2.0"
@@ -1757,15 +1758,13 @@ dependencies = [
]
[[package]]
name = "pretty_assertions"
name = "proc-macro-crate"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a25e9bcb20aa780fd0bb16b72403a9064d6b3f22f026946029acb941a50af755"
checksum = "66618389e4ec1c7afe67d51a9bf34ff9236480f8d51e7489b7d5ab0303c13f34"
dependencies = [
"ctor",
"diff",
"output_vt100",
"yansi",
"once_cell",
"toml_edit",
]
[[package]]
@@ -1871,9 +1870,9 @@ dependencies = [
[[package]]
name = "rayon"
version = "1.7.0"
version = "1.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b"
checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7"
dependencies = [
"either",
"rayon-core",
@@ -1881,9 +1880,9 @@ dependencies = [
[[package]]
name = "rayon-core"
version = "1.11.0"
version = "1.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d"
checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b"
dependencies = [
"crossbeam-channel",
"crossbeam-deque",
@@ -1982,17 +1981,22 @@ dependencies = [
[[package]]
name = "ruff"
version = "0.0.256"
version = "0.0.253"
dependencies = [
"anyhow",
"bisection",
"bitflags",
"cfg-if",
"chrono",
"clap 4.1.8",
"clap 4.1.6",
"colored",
"console_error_panic_hook",
"console_log",
"criterion",
"derivative",
"dirs",
"fern",
"getrandom",
"glob",
"globset",
"ignore",
@@ -2000,6 +2004,7 @@ dependencies = [
"insta",
"is-macro",
"itertools",
"js-sys",
"libcst",
"log",
"natord",
@@ -2008,16 +2013,10 @@ dependencies = [
"num-traits",
"once_cell",
"path-absolutize",
"pathdiff",
"pep440_rs",
"pretty_assertions",
"regex",
"result-like",
"ruff_cache",
"ruff_diagnostics",
"ruff_macros",
"ruff_python_ast",
"ruff_python_stdlib",
"ruff_python",
"ruff_rustpython",
"rustc-hash",
"rustpython-common",
@@ -2025,6 +2024,7 @@ dependencies = [
"schemars",
"semver",
"serde",
"serde-wasm-bindgen",
"shellexpand",
"smallvec",
"strum",
@@ -2032,38 +2032,15 @@ dependencies = [
"test-case",
"textwrap",
"thiserror",
"titlecase",
"toml",
]
[[package]]
name = "ruff_benchmark"
version = "0.0.0"
dependencies = [
"criterion",
"mimalloc",
"once_cell",
"ruff",
"serde",
"serde_json",
"tikv-jemallocator",
"ureq",
"url",
]
[[package]]
name = "ruff_cache"
version = "0.0.0"
dependencies = [
"filetime",
"globset",
"itertools",
"regex",
"ruff_macros",
"wasm-bindgen",
"wasm-bindgen-test",
]
[[package]]
name = "ruff_cli"
version = "0.0.256"
version = "0.0.253"
dependencies = [
"annotate-snippets 0.9.1",
"anyhow",
@@ -2073,7 +2050,7 @@ dependencies = [
"bitflags",
"cachedir",
"chrono",
"clap 4.1.8",
"clap 4.1.6",
"clap_complete_command",
"clearscreen",
"colored",
@@ -2089,8 +2066,6 @@ dependencies = [
"rayon",
"regex",
"ruff",
"ruff_cache",
"ruff_diagnostics",
"rustc-hash",
"serde",
"serde_json",
@@ -2108,15 +2083,13 @@ name = "ruff_dev"
version = "0.0.0"
dependencies = [
"anyhow",
"clap 4.1.8",
"clap 4.1.6",
"itertools",
"libcst",
"once_cell",
"pretty_assertions",
"regex",
"ruff",
"ruff_cli",
"ruff_diagnostics",
"rustpython-common",
"rustpython-parser",
"schemars",
@@ -2126,15 +2099,6 @@ dependencies = [
"textwrap",
]
[[package]]
name = "ruff_diagnostics"
version = "0.0.0"
dependencies = [
"ruff_python_ast",
"rustpython-parser",
"serde",
]
[[package]]
name = "ruff_formatter"
version = "0.0.0"
@@ -2161,25 +2125,12 @@ dependencies = [
]
[[package]]
name = "ruff_python_ast"
name = "ruff_python"
version = "0.0.0"
dependencies = [
"anyhow",
"bitflags",
"is-macro",
"itertools",
"log",
"nohash-hasher",
"num-bigint",
"num-traits",
"once_cell",
"regex",
"ruff_python_stdlib",
"ruff_rustpython",
"rustc-hash",
"rustpython-common",
"rustpython-parser",
"smallvec",
]
[[package]]
@@ -2187,14 +2138,13 @@ name = "ruff_python_formatter"
version = "0.0.0"
dependencies = [
"anyhow",
"clap 4.1.8",
"clap 4.1.6",
"insta",
"is-macro",
"itertools",
"once_cell",
"ruff_formatter",
"ruff_python_ast",
"ruff_python_stdlib",
"ruff_python",
"ruff_rustpython",
"ruff_testing_macros",
"ruff_text_size",
@@ -2205,15 +2155,6 @@ dependencies = [
"test-case",
]
[[package]]
name = "ruff_python_stdlib"
version = "0.0.0"
dependencies = [
"once_cell",
"regex",
"rustc-hash",
]
[[package]]
name = "ruff_rustpython"
version = "0.0.0"
@@ -2229,6 +2170,7 @@ name = "ruff_testing_macros"
version = "0.0.0"
dependencies = [
"glob",
"proc-macro-error",
"proc-macro2",
"quote",
"syn",
@@ -2244,25 +2186,6 @@ dependencies = [
"static_assertions",
]
[[package]]
name = "ruff_wasm"
version = "0.0.0"
dependencies = [
"console_error_panic_hook",
"console_log",
"getrandom",
"js-sys",
"log",
"ruff",
"ruff_python_ast",
"ruff_rustpython",
"rustpython-parser",
"serde",
"serde-wasm-bindgen",
"wasm-bindgen",
"wasm-bindgen-test",
]
[[package]]
name = "rust-stemmers"
version = "1.2.0"
@@ -2308,7 +2231,7 @@ dependencies = [
[[package]]
name = "rustpython-ast"
version = "0.2.0"
source = "git+https://github.com/RustPython/RustPython.git?rev=c15f670f2c30cfae6b41a1874893590148c74bc4#c15f670f2c30cfae6b41a1874893590148c74bc4"
source = "git+https://github.com/RustPython/RustPython.git?rev=aa8336ee94492b52458ed8e1517238e5c6c2914c#aa8336ee94492b52458ed8e1517238e5c6c2914c"
dependencies = [
"num-bigint",
"rustpython-compiler-core",
@@ -2317,7 +2240,7 @@ dependencies = [
[[package]]
name = "rustpython-common"
version = "0.2.0"
source = "git+https://github.com/RustPython/RustPython.git?rev=c15f670f2c30cfae6b41a1874893590148c74bc4#c15f670f2c30cfae6b41a1874893590148c74bc4"
source = "git+https://github.com/RustPython/RustPython.git?rev=aa8336ee94492b52458ed8e1517238e5c6c2914c#aa8336ee94492b52458ed8e1517238e5c6c2914c"
dependencies = [
"ascii",
"bitflags",
@@ -2342,21 +2265,24 @@ dependencies = [
[[package]]
name = "rustpython-compiler-core"
version = "0.2.0"
source = "git+https://github.com/RustPython/RustPython.git?rev=c15f670f2c30cfae6b41a1874893590148c74bc4#c15f670f2c30cfae6b41a1874893590148c74bc4"
source = "git+https://github.com/RustPython/RustPython.git?rev=aa8336ee94492b52458ed8e1517238e5c6c2914c#aa8336ee94492b52458ed8e1517238e5c6c2914c"
dependencies = [
"bincode",
"bitflags",
"bstr 0.2.17",
"itertools",
"lz4_flex",
"num-bigint",
"num-complex",
"num_enum",
"serde",
"thiserror",
]
[[package]]
name = "rustpython-parser"
version = "0.2.0"
source = "git+https://github.com/RustPython/RustPython.git?rev=c15f670f2c30cfae6b41a1874893590148c74bc4#c15f670f2c30cfae6b41a1874893590148c74bc4"
source = "git+https://github.com/RustPython/RustPython.git?rev=aa8336ee94492b52458ed8e1517238e5c6c2914c#aa8336ee94492b52458ed8e1517238e5c6c2914c"
dependencies = [
"ahash",
"anyhow",
@@ -2371,7 +2297,7 @@ dependencies = [
"rustc-hash",
"rustpython-ast",
"rustpython-compiler-core",
"serde",
"thiserror",
"tiny-keccak",
"unic-emoji-char",
"unic-ucd-ident",
@@ -2401,9 +2327,9 @@ dependencies = [
[[package]]
name = "schemars"
version = "0.8.12"
version = "0.8.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02c613288622e5f0c3fdc5dbd4db1c5fbe752746b1d1a56a0630b78fd00de44f"
checksum = "2a5fb6c61f29e723026dc8e923d94c694313212abbecbbe5f55a7748eec5b307"
dependencies = [
"dyn-clone",
"schemars_derive",
@@ -2413,9 +2339,9 @@ dependencies = [
[[package]]
name = "schemars_derive"
version = "0.8.12"
version = "0.8.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "109da1e6b197438deb6db99952990c7f959572794b80ff93707d55a232545e7c"
checksum = "f188d036977451159430f3b8dc82ec76364a42b7e289c2b18a9a18f4470058e9"
dependencies = [
"proc-macro2",
"quote",
@@ -2468,9 +2394,9 @@ dependencies = [
[[package]]
name = "serde-wasm-bindgen"
version = "0.5.0"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3b143e2833c57ab9ad3ea280d21fd34e285a42837aeb0ee301f4f41890fa00e"
checksum = "e3b4c031cd0d9014307d82b8abf653c0290fbdaeb4c02d00c63cf52f728628bf"
dependencies = [
"js-sys",
"serde",
@@ -2627,9 +2553,9 @@ dependencies = [
[[package]]
name = "syn"
version = "1.0.109"
version = "1.0.108"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
checksum = "d56e159d99e6c2b93995d171050271edb50ecc5288fbc7cc17de8fdce4e58c14"
dependencies = [
"proc-macro2",
"quote",
@@ -2700,18 +2626,18 @@ checksum = "95059e91184749cb66be6dc994f67f182b6d897cb3df74a5bf66b5e709295fd8"
[[package]]
name = "test-case"
version = "3.0.0"
version = "2.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "679b019fb241da62cc449b33b224d19ebe1c6767b495569765115dd7f7f9fba4"
checksum = "21d6cf5a7dffb3f9dceec8e6b8ca528d9bd71d36c9f074defb548ce161f598c0"
dependencies = [
"test-case-macros",
]
[[package]]
name = "test-case-core"
version = "3.0.0"
name = "test-case-macros"
version = "2.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72dc21b5887f4032c4656502d085dc28f2afbb686f25f216472bb0526f4b1b88"
checksum = "e45b7bf6e19353ddd832745c8fcf77a17a93171df7151187f26623f2b75b5b26"
dependencies = [
"cfg-if",
"proc-macro-error",
@@ -2720,19 +2646,6 @@ dependencies = [
"syn",
]
[[package]]
name = "test-case-macros"
version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3786898e0be151a96f730fd529b0e8a10f5990fa2a7ea14e37ca27613c05190"
dependencies = [
"proc-macro-error",
"proc-macro2",
"quote",
"syn",
"test-case-core",
]
[[package]]
name = "textwrap"
version = "0.16.0"
@@ -2840,10 +2753,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "toml"
version = "0.7.2"
name = "titlecase"
version = "2.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7afcae9e3f0fe2c370fd4657108972cbb2fa9db1b9f84849cefd80741b01cb6"
checksum = "38397a8cdb017cfeb48bf6c154d6de975ac69ffeed35980fde199d2ee0842042"
dependencies = [
"joinery",
"lazy_static",
"regex",
]
[[package]]
name = "toml"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fb9d890e4dc9298b70f740f615f2e05b9db37dce531f6b24fb77ac993f9f217"
dependencies = [
"serde",
"serde_spanned",
@@ -2853,24 +2777,24 @@ dependencies = [
[[package]]
name = "toml_datetime"
version = "0.6.1"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622"
checksum = "4553f467ac8e3d374bc9a177a26801e5d0f9b211aa1673fb137a403afd1c9cf5"
dependencies = [
"serde",
]
[[package]]
name = "toml_edit"
version = "0.19.4"
version = "0.18.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a1eb0622d28f4b9c90adc4ea4b2b46b47663fde9ac5fafcb14a1369d5508825"
checksum = "56c59d8dd7d0dcbc6428bf7aa2f0e823e26e43b3c9aca15bbc9475d23e5fa12b"
dependencies = [
"indexmap",
"nom8",
"serde",
"serde_spanned",
"toml_datetime",
"winnow",
]
[[package]]
@@ -2881,21 +2805,9 @@ checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8"
dependencies = [
"cfg-if",
"pin-project-lite",
"tracing-attributes",
"tracing-core",
]
[[package]]
name = "tracing-attributes"
version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tracing-core"
version = "0.1.30"
@@ -3037,7 +2949,7 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c"
[[package]]
name = "unicode_names2"
version = "0.6.0"
source = "git+https://github.com/youknowone/unicode_names2.git?rev=4ce16aa85cbcdd9cc830410f1a72ef9a235f2fde#4ce16aa85cbcdd9cc830410f1a72ef9a235f2fde"
source = "git+https://github.com/youknowone/unicode_names2.git?tag=v0.6.0+character-alias#4ce16aa85cbcdd9cc830410f1a72ef9a235f2fde"
dependencies = [
"phf",
]
@@ -3373,15 +3285,6 @@ version = "0.42.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd"
[[package]]
name = "winnow"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "faf09497b8f8b5ac5d3bb4d05c0a99be20f26fd3d5f2db7b0716e946d5103658"
dependencies = [
"memchr",
]
[[package]]
name = "yaml-rust"
version = "0.4.5"
@@ -3391,12 +3294,6 @@ dependencies = [
"linked-hash-map",
]
[[package]]
name = "yansi"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"
[[package]]
name = "yansi-term"
version = "0.1.2"

View File

@@ -3,49 +3,25 @@ members = ["crates/*"]
[workspace.package]
edition = "2021"
rust-version = "1.67"
homepage = "https://beta.ruff.rs/docs/"
documentation = "https://beta.ruff.rs/docs/"
repository = "https://github.com/charliermarsh/ruff"
authors = ["Charlie Marsh <charlie.r.marsh@gmail.com>"]
rust-version = "1.67.0"
[workspace.dependencies]
anyhow = { version = "1.0.69" }
bitflags = { version = "1.3.2" }
chrono = { version = "0.4.23", default-features = false, features = ["clock"] }
clap = { version = "4.1.8", features = ["derive"] }
colored = { version = "2.0.0" }
filetime = { version = "0.2.20" }
glob = { version = "0.3.1" }
globset = { version = "0.4.10" }
ignore = { version = "0.4.20" }
insta = { version = "1.28.0" }
is-macro = { version = "0.2.2" }
anyhow = { version = "1.0.66" }
clap = { version = "4.0.1", features = ["derive"] }
itertools = { version = "0.10.5" }
is-macro = { version = "0.2.2" }
libcst = { git = "https://github.com/charliermarsh/LibCST", rev = "80e4c1399f95e5beb532fdd1e209ad2dbb470438" }
log = { version = "0.4.17" }
once_cell = { version = "1.17.1" }
path-absolutize = { version = "3.0.14" }
proc-macro2 = { version = "1.0.51" }
quote = { version = "1.0.23" }
regex = { version = "1.7.1" }
once_cell = { version = "1.16.0" }
regex = { version = "1.6.0" }
rustc-hash = { version = "1.1.0" }
rustpython-common = { git = "https://github.com/RustPython/RustPython.git", rev = "c15f670f2c30cfae6b41a1874893590148c74bc4" }
rustpython-parser = { features = [
"lalrpop",
"serde",
], git = "https://github.com/RustPython/RustPython.git", rev = "c15f670f2c30cfae6b41a1874893590148c74bc4" }
schemars = { version = "0.8.12" }
serde = { version = "1.0.152", features = ["derive"] }
serde_json = { version = "1.0.93" }
shellexpand = { version = "3.0.0" }
similar = { version = "2.2.1" }
rustpython-common = { git = "https://github.com/RustPython/RustPython.git", rev = "aa8336ee94492b52458ed8e1517238e5c6c2914c" }
rustpython-parser = { features = ["lalrpop"], git = "https://github.com/RustPython/RustPython.git", rev = "aa8336ee94492b52458ed8e1517238e5c6c2914c" }
schemars = { version = "0.8.11" }
serde = { version = "1.0.147", features = ["derive"] }
serde_json = { version = "1.0.87" }
strum = { version = "0.24.1", features = ["strum_macros"] }
strum_macros = { version = "0.24.3" }
syn = { version = "1.0.109" }
test-case = { version = "3.0.0" }
textwrap = { version = "0.16.0" }
toml = { version = "0.7.2" }
toml = { version = "0.6.0" }
[profile.release]
panic = "abort"
@@ -63,9 +39,3 @@ opt-level = 3
# https://github.com/bytecodealliance/wasm-tools/blob/b5c3d98e40590512a3b12470ef358d5c7b983b15/crates/wasmparser/src/limits.rs#L29
[profile.dev.package.rustpython-parser]
opt-level = 1
# Use the `--profile release-debug` flag to show symbols in release mode.
# e.g. `cargo build --profile release-debug`
[profile.release-debug]
inherits = "release"
debug = 1

View File

@@ -137,7 +137,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com) hook:
```yaml
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
rev: 'v0.0.256'
rev: 'v0.0.253'
hooks:
- id: ruff
```
@@ -306,13 +306,6 @@ Ruff is used in a number of major open-source projects, including:
- [meson-python](https://github.com/mesonbuild/meson-python)
- [ZenML](https://github.com/zenml-io/zenml)
- [delta-rs](https://github.com/delta-io/delta-rs)
- [Starlite](https://github.com/starlite-api/starlite)
- [telemetry-airflow (Mozilla)](https://github.com/mozilla/telemetry-airflow)
- [Stable Baselines3](https://github.com/DLR-RM/stable-baselines3)
- [PaddlePaddle](https://github.com/PaddlePaddle/Paddle)
- [nox](https://github.com/wntrblm/nox)
- [Neon](https://github.com/neondatabase/neon)
- [The Algorithms](https://github.com/TheAlgorithms/Python)
## License

View File

@@ -1,18 +1,17 @@
[package]
name = "flake8-to-ruff"
version = "0.0.256"
version = "0.0.253"
edition = { workspace = true }
rust-version = { workspace = true }
[dependencies]
ruff = { path = "../ruff", default-features = false }
anyhow = { workspace = true }
clap = { workspace = true }
colored = { workspace = true }
colored = { version = "2.0.0" }
configparser = { version = "3.0.2" }
once_cell = { workspace = true }
regex = { workspace = true }
ruff = { path = "../ruff", default-features = false }
rustc-hash = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }

View File

@@ -46,15 +46,8 @@ fn main() -> Result<()> {
.map(|tool| ExternalConfig {
black: tool.black.as_ref(),
isort: tool.isort.as_ref(),
..Default::default()
})
.unwrap_or_default();
let external_config = ExternalConfig {
project: pyproject
.as_ref()
.and_then(|pyproject| pyproject.project.as_ref()),
..external_config
};
// Create Ruff's pyproject.toml section.
let pyproject = flake8_to_ruff::convert(&config, &external_config, args.plugin)?;

View File

@@ -1,76 +1,82 @@
[package]
name = "ruff"
version = "0.0.256"
authors.workspace = true
edition.workspace = true
rust-version.workspace = true
documentation.workspace = true
homepage.workspace = true
repository.workspace = true
version = "0.0.253"
authors = ["Charlie Marsh <charlie.r.marsh@gmail.com>"]
edition = { workspace = true }
rust-version = { workspace = true }
documentation = "https://github.com/charliermarsh/ruff"
homepage = "https://github.com/charliermarsh/ruff"
repository = "https://github.com/charliermarsh/ruff"
readme = "README.md"
license = "MIT"
[lib]
name = "ruff"
crate-type = ["cdylib", "rlib"]
doctest = false
[dependencies]
ruff_cache = { path = "../ruff_cache" }
ruff_diagnostics = { path = "../ruff_diagnostics", features = ["serde"] }
ruff_macros = { path = "../ruff_macros" }
ruff_python_ast = { path = "../ruff_python_ast" }
ruff_python_stdlib = { path = "../ruff_python_stdlib" }
ruff_python = { path = "../ruff_python" }
ruff_rustpython = { path = "../ruff_rustpython" }
anyhow = { workspace = true }
bisection = { version = "0.1.0" }
bitflags = { workspace = true }
chrono = { workspace = true }
clap = { workspace = true, features = ["derive", "string"], optional = true }
colored = { workspace = true }
bitflags = { version = "1.3.2" }
cfg-if = { version = "1.0.0" }
chrono = { version = "0.4.21", default-features = false, features = ["clock"] }
clap = { workspace = true, features = ["derive", "env", "string"] }
colored = { version = "2.0.0" }
derivative = { version = "2.2.0" }
dirs = { version = "4.0.0" }
fern = { version = "0.6.1" }
glob = { workspace = true }
globset = { workspace = true }
ignore = { workspace = true }
imperative = { version = "1.0.4" }
glob = { version = "0.3.0" }
globset = { version = "0.4.9" }
ignore = { version = "0.4.18" }
imperative = { version = "1.0.3" }
is-macro = { workspace = true }
itertools = { workspace = true }
libcst = { workspace = true }
log = { workspace = true }
log = { version = "0.4.17" }
natord = { version = "1.0.9" }
nohash-hasher = { version = "0.2.0" }
num-bigint = { version = "0.4.3" }
num-traits = { version = "0.2.15" }
num-traits = "0.2.15"
once_cell = { workspace = true }
path-absolutize = { workspace = true, features = [
"once_cell_cache",
"use_unix_paths_on_wasm",
] }
pathdiff = { version = "0.2.1" }
pep440_rs = { git = "https://github.com/konstin/pep440-rs.git", features = [
"serde",
], rev = "a8fef4ec47f4c25b070b39cdbe6a0b9847e49941" }
path-absolutize = { version = "3.0.14", features = ["once_cell_cache", "use_unix_paths_on_wasm"] }
regex = { workspace = true }
result-like = { version = "0.4.6" }
result-like = "0.4.6"
rustc-hash = { workspace = true }
rustpython-common = { workspace = true }
rustpython-parser = { workspace = true }
schemars = { workspace = true }
semver = { version = "1.0.16" }
serde = { workspace = true }
shellexpand = { workspace = true }
shellexpand = { version = "3.0.0" }
smallvec = { version = "1.10.0" }
strum = { workspace = true }
strum_macros = { workspace = true }
textwrap = { workspace = true }
thiserror = { version = "1.0.38" }
textwrap = { version = "0.16.0" }
thiserror = { version = "1.0" }
titlecase = { version = "2.2.1" }
toml = { workspace = true }
# https://docs.rs/getrandom/0.2.7/getrandom/#webassembly-support
[target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dependencies]
getrandom = { version = "0.2.7", features = ["js"] }
console_error_panic_hook = { version = "0.1.7" }
console_log = { version = "0.2.0" }
serde-wasm-bindgen = { version = "0.4" }
js-sys = { version = "0.3.60" }
wasm-bindgen = { version = "0.2.83" }
[dev-dependencies]
insta = { version = "1.19.0", features = ["yaml", "redactions"] }
test-case = { version = "2.2.2" }
wasm-bindgen-test = { version = "0.3.33" }
[target.'cfg(not(target_family = "wasm"))'.dev-dependencies]
criterion = { version = "0.4.0" }
insta = { workspace = true, features = ["yaml", "redactions"] }
pretty_assertions = "1.3.0"
test-case = { workspace = true }
[features]
default = []

View File

@@ -1,11 +0,0 @@
import warnings
"""
Should emit:
B028 - on lines 8 and 9
"""
warnings.warn(DeprecationWarning("test"))
warnings.warn(DeprecationWarning("test"), source=None)
warnings.warn(DeprecationWarning("test"), source=None, stacklevel=2)
warnings.warn(DeprecationWarning("test"), stacklevel=1)

View File

@@ -1,47 +0,0 @@
"""
Should emit:
B030:
- line 12, column 8
- line 17, column 9
- line 22, column 21
- line 27, column 37
"""
try:
pass
except 1: # error
pass
try:
pass
except (1, ValueError): # error
pass
try:
pass
except (ValueError, (RuntimeError, (KeyError, TypeError))): # error
pass
try:
pass
except (ValueError, *(RuntimeError, (KeyError, TypeError))): # error
pass
try:
pass
except (ValueError, *(RuntimeError, TypeError)): # ok
pass
try:
pass
except (ValueError, *[RuntimeError, *(TypeError,)]): # ok
pass
def what_to_catch():
return ...
try:
pass
except what_to_catch(): # ok
pass

View File

@@ -8,9 +8,3 @@ any({x.id for x in bar})
# PIE 802
any([x.id for x in bar])
all([x.id for x in bar])
any( # first comment
[x.id for x in bar], # second comment
) # third comment
all( # first comment
[x.id for x in bar], # second comment
) # third comment

View File

@@ -6,8 +6,6 @@ obj.endswith("foo") or obj.endswith("bar")
obj.startswith(foo) or obj.startswith(bar)
# error
obj.startswith(foo) or obj.startswith("foo")
# error
obj.endswith(foo) or obj.startswith(foo) or obj.startswith("foo")
# ok
obj.startswith(("foo", "bar"))

View File

@@ -1,10 +1,3 @@
import math
import os
import sys
from math import inf
import numpy as np
def f12(
x,
y: str = os.pathsep, # Error PYI011 Only simple default values allowed for typed arguments
@@ -68,49 +61,3 @@ def f22(
x: complex = -42.5j # Error PYI011 Only simple default values allowed for typed arguments
+ 4.3j,
) -> None: ...
def f23(
x: bool = True, # OK
) -> None: ...
def f24(
x: float = 3.14, # OK
) -> None: ...
def f25(
x: float = -3.14, # OK
) -> None: ...
def f26(
x: complex = -3.14j, # OK
) -> None: ...
def f27(
x: complex = -3 - 3.14j, # OK
) -> None: ...
def f28(
x: float = math.tau, # OK
) -> None: ...
def f29(
x: float = math.inf, # OK
) -> None: ...
def f30(
x: float = -math.inf, # OK
) -> None: ...
def f31(
x: float = inf, # Error PYI011 Only simple default values allowed for typed arguments
) -> None: ...
def f32(
x: float = np.inf, # Error PYI011 Only simple default values allowed for typed arguments
) -> None: ...
def f33(
x: float = math.nan, # OK
) -> None: ...
def f34(
x: float = -math.nan, # Error PYI011 Only simple default values allowed for typed arguments
) -> None: ...
def f35(
x: complex = math.inf # Error PYI011 Only simple default values allowed for typed arguments
+ 1j,
) -> None: ...
def f36(
*, x: str = sys.version, # OK
) -> None: ...
def f37(
*, x: str = "" + "", # Error PYI011 Only simple default values allowed for typed arguments
) -> None: ...

View File

@@ -43,6 +43,3 @@ def f21(
def f22(
x=-42.5j + 4.3j, # Error PYI014
) -> None: ...
def f23(
x=True, # OK
) -> None: ...

View File

@@ -1,33 +0,0 @@
# From https://github.com/PyCQA/flake8-pyi/blob/4212bec43dbc4020a59b90e2957c9488575e57ba/tests/type_comments.pyi
from collections.abc import Sequence
from typing import TypeAlias
A: TypeAlias = None # type: int # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
B: TypeAlias = None # type: str # And here's an extra comment about why it's that type # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
C: TypeAlias = None #type: int # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
D: TypeAlias = None # type: int # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
E: TypeAlias = None# type: int # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
F: TypeAlias = None#type:int # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
def func(
arg1, # type: dict[str, int] # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
arg2 # type: Sequence[bytes] # And here's some more info about this arg # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
): ...
class Foo:
Attr: TypeAlias = None # type: set[str] # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
G: TypeAlias = None # type: ignore
H: TypeAlias = None # type: ignore[attr-defined]
I: TypeAlias = None #type: ignore
J: TypeAlias = None # type: ignore
K: TypeAlias = None# type: ignore
L: TypeAlias = None#type:ignore
# Whole line commented out # type: int
M: TypeAlias = None # type: can't parse me!
class Bar:
N: TypeAlias = None # type: can't parse me either!
# This whole line is commented out and indented # type: str

View File

@@ -1,33 +0,0 @@
# From https://github.com/PyCQA/flake8-pyi/blob/4212bec43dbc4020a59b90e2957c9488575e57ba/tests/type_comments.pyi
from collections.abc import Sequence
from typing import TypeAlias
A: TypeAlias = None # type: int # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
B: TypeAlias = None # type: str # And here's an extra comment about why it's that type # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
C: TypeAlias = None #type: int # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
D: TypeAlias = None # type: int # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
E: TypeAlias = None# type: int # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
F: TypeAlias = None#type:int # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
def func(
arg1, # type: dict[str, int] # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
arg2 # type: Sequence[bytes] # And here's some more info about this arg # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
): ...
class Foo:
Attr: TypeAlias = None # type: set[str] # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
G: TypeAlias = None # type: ignore
H: TypeAlias = None # type: ignore[attr-defined]
I: TypeAlias = None #type: ignore
J: TypeAlias = None # type: ignore
K: TypeAlias = None# type: ignore
L: TypeAlias = None#type:ignore
# Whole line commented out # type: int
M: TypeAlias = None # type: can't parse me!
class Bar:
N: TypeAlias = None # type: can't parse me either!
# This whole line is commented out and indented # type: str

View File

@@ -293,30 +293,3 @@ def x(y):
def foo(baz: str) -> str:
return baz
def end_of_statement():
def example():
if True:
return ""
def example():
if True:
return ""
def example():
if True:
return "" # type: ignore
def example():
if True:
return "" ;
def example():
if True:
return "" \
; # type: ignore

View File

@@ -6,6 +6,22 @@ def x():
return a # error
def x():
a = 1
print(a)
a = 2
return a # error
def x():
a = 1
if True:
return a # error
a = 2
print(a)
return a
# Can be refactored false positives
# https://github.com/afonasev/flake8-return/issues/47#issuecomment-1122571066
def get_bar_if_exists(obj):
@@ -149,6 +165,44 @@ def my_func():
return foo
# Refactored incorrect false positives
# See test cases above marked: "Can be refactored false positives"
# https://github.com/afonasev/flake8-return/issues/47#issuecomment-1122571066
def get_bar_if_exists(obj):
if hasattr(obj, "bar"):
return str(obj.bar)
return ""
# https://github.com/afonasev/flake8-return/issues/47#issue-641117366
def x():
formatted = _USER_AGENT_FORMATTER.format(format_string, **values)
# clean up after any blank components
return formatted.replace("()", "").replace(" ", " ").strip()
# https://github.com/afonasev/flake8-return/issues/47#issue-641117366
def user_agent_username(username=None):
if not username:
return ""
username = username.replace(" ", "_") # Avoid spaces or %20.
try:
username.encode("ascii") # just test,
# but not actually use it
except UnicodeEncodeError:
username = quote(username.encode("utf-8"))
else:
# % is legal in the default $wgLegalTitleChars
# This is so that ops know the real pywikibot will not
# allow a useragent in the username to allow through a
# hand-coded percent-encoded value.
if "%" in username:
username = quote(username)
return username
# https://github.com/afonasev/flake8-return/issues/116#issue-1367575481
def no_exception_loop():
success = False
@@ -206,44 +260,3 @@ def nonlocal_assignment():
nonlocal X
X = 1
return X
def decorator() -> Flask:
app = Flask(__name__)
@app.route('/hello')
def hello() -> str:
"""Hello endpoint."""
return 'Hello, World!'
return app
def default():
y = 1
def f(x = y) -> X:
return x
return y
def get_queryset(option_1, option_2):
queryset: Any = None
queryset = queryset.filter(a=1)
if option_1:
queryset = queryset.annotate(b=Value(2))
if option_2:
queryset = queryset.filter(c=3)
return queryset
def get_queryset():
queryset = Model.filter(a=1)
queryset = queryset.filter(c=3)
return queryset
def get_queryset():
queryset = Model.filter(a=1)
return queryset # error

View File

@@ -6,8 +6,6 @@
"yoda" <= compare # SIM300
"yoda" < compare # SIM300
42 > age # SIM300
-42 > age # SIM300
+42 > age # SIM300
YODA == age # SIM300
YODA > age # SIM300
YODA >= age # SIM300

View File

@@ -1,8 +1,7 @@
from typing import TYPE_CHECKING, Any, ClassVar
import attrs
from ....import unknown
from ..protocol import commands, definitions, responses
from ..server import example
from .. import server

View File

@@ -1,3 +0,0 @@
from typing import Tuple
x: Tuple

View File

@@ -1,38 +0,0 @@
from __future__ import annotations
import pathlib
from typing import TYPE_CHECKING
import pydantic
from pydantic import BaseModel
if TYPE_CHECKING:
import datetime # TCH004
from array import array # TCH004
import pandas # TCH004
import pyproj
class A(pydantic.BaseModel):
x: datetime.datetime
class B(BaseModel):
x: pandas.DataFrame
class C(BaseModel):
x: pathlib.Path
class E:
pass
class F(E):
x: pyproj.Transformer
class G(BaseModel):
x: array

View File

@@ -1,28 +0,0 @@
from __future__ import annotations
import geopandas as gpd # TCH002
import pydantic
import pyproj # TCH002
from pydantic import BaseModel
import numpy
class A(BaseModel):
x: pandas.DataFrame
class B(pydantic.BaseModel):
x: numpy.ndarray
class C:
pass
class D(C):
x: pyproj.Transformer
class E(C):
x: gpd.GeoDataFrame

View File

@@ -1,24 +0,0 @@
from __future__ import annotations
import datetime
import pathlib
from uuid import UUID # TCH003
import pydantic
from pydantic import BaseModel
class A(pydantic.BaseModel):
x: datetime.datetime
class B(BaseModel):
x: pathlib.Path
class C:
pass
class D(C):
x: UUID

View File

@@ -1,46 +0,0 @@
from __future__ import annotations
import pathlib
from typing import TYPE_CHECKING
import attrs
from attrs import frozen
import numpy
if TYPE_CHECKING:
import datetime # TCH004
from array import array # TCH004
import pandas # TCH004
import pyproj
@attrs.define(auto_attribs=True)
class A:
x: datetime.datetime
@attrs.define
class B:
x: pandas.DataFrame
@frozen(auto_attribs=True)
class C:
x: pathlib.Path
@frozen
class D:
x: array
@dataclass
class E:
x: pyproj.Transformer
@attrs.define
class F:
x: numpy.ndarray

View File

@@ -1,30 +0,0 @@
from __future__ import annotations
from dataclasses import dataclass
import attrs
import pandas
import pyproj
from attrs import frozen
import numpy # TCH002
@attrs.define(auto_attribs=True)
class A:
x: pyproj.Transformer
@attrs.define
class B:
x: pandas.DataFrame
@frozen
class C:
x: pandas.DataFrame
@dataclass
class D:
x: numpy.ndarray

View File

@@ -1,24 +0,0 @@
from __future__ import annotations
import datetime
from array import array
from dataclasses import dataclass
from uuid import UUID # TCH003
import attrs
from attrs import frozen
@attrs.define(auto_attribs=True)
class A:
x: datetime.datetime
@frozen
class B:
x: array
@dataclass
class C:
x: UUID

View File

@@ -1,10 +1,5 @@
import sys, math
from os import path, uname
from json import detect_encoding
from json import dump
from json import dumps as json_dumps
from json import load
from json import loads as json_loads
from logging.handlers import StreamHandler, FileHandler
# comment 1
@@ -15,10 +10,9 @@ from third_party import lib4
from foo import bar # comment 3
from foo2 import bar2 # comment 4
from foo3 import bar3, baz3 # comment 5
# comment 6
# comment 5
from bar import (
a, # comment 7
b, # comment 8
)
a, # comment 6
b, # comment 7
)

View File

@@ -1,10 +0,0 @@
# ruff: isort: skip_file
import e
import f
# isort: split
import a
import b
import c
import d

View File

@@ -6,14 +6,6 @@ def f():
# isort: on
def f():
# ruff: isort: off
import sys
import os
import collections
# ruff: isort: on
def f():
import sys
import os # isort: skip

View File

@@ -1,14 +0,0 @@
#: E211
spam (1)
#: E211 E211
dict ['key'] = list [index]
#: E211
dict['key'] ['subkey'] = list[index]
#: Okay
spam(1)
dict['key'] = list[index]
# This is not prohibited by PEP8, but avoid it.
class Foo (Bar, Baz):
pass

View File

@@ -1,15 +0,0 @@
#: E231
a = (1,2)
#: E231
a[b1,:]
#: E231
a = [{'a':''}]
#: Okay
a = (4,)
b = (5, )
c = {'text': text[5:]}
result = {
'key1': 'value',
'key2': 'value',
}

View File

@@ -54,7 +54,3 @@ if type(a) != type(b) or type(a) == type(ccc):
pass
assert type(res) == type(None)
types = StrEnum
if x == types.X:
pass

View File

@@ -1,3 +0,0 @@
def lorem():
"""lorem ipsum dolor sit amet consectetur adipiscing elit
sed do eiusmod tempor incididunt ut labore et dolore magna aliqua"""

View File

@@ -2,8 +2,6 @@
from functools import cached_property
from gi.repository import GObject
# Bad examples
def bad_liouiwnlkjl():
@@ -78,11 +76,6 @@ class Thingy:
"""This property method docstring does not need to be written in imperative mood."""
return self._beep
@GObject.Property
def good_custom_property(self):
"""This property method docstring does not need to be written in imperative mood."""
return self._beep
@cached_property
def good_cached_property(self):
"""This property method docstring does not need to be written in imperative mood."""

View File

@@ -1 +0,0 @@
''' SAM macro definitions '''

View File

@@ -58,15 +58,6 @@ def no_underline_and_no_description(): # noqa: D416
"""
@expect(_D213)
@expect("D407: Missing dashed underline after section ('Returns')")
@expect("D414: Section has no content ('Returns')")
def no_underline_and_no_newline(): # noqa: D416
"""Toggle the gizmo.
Returns"""
@expect(_D213)
@expect("D410: Missing blank line after section ('Returns')")
@expect("D414: Section has no content ('Returns')")

View File

@@ -1,23 +0,0 @@
"""Test case: strings used within calls within type annotations."""
from typing import Callable
import bpy
from mypy_extensions import VarArg
from foo import Bar
class LightShow(bpy.types.Operator):
label = "Create Character"
name = "lightshow.letter_creation"
filepath: bpy.props.StringProperty(subtype="FILE_PATH") # OK
def f(x: Callable[[VarArg("os")], None]): # F821
pass
f(Callable[["Bar"], None])
f(Callable[["Baz"], None])

View File

@@ -1,25 +0,0 @@
"""Test case: strings used within calls within type annotations."""
from __future__ import annotations
from typing import Callable
import bpy
from mypy_extensions import VarArg
from foo import Bar
class LightShow(bpy.types.Operator):
label = "Create Character"
name = "lightshow.letter_creation"
filepath: bpy.props.StringProperty(subtype="FILE_PATH") # OK
def f(x: Callable[[VarArg("os")], None]): # F821
pass
f(Callable[["Bar"], None])
f(Callable[["Baz"], None])

View File

@@ -1,23 +0,0 @@
x = "a string"
y = "another string"
z = ""
def errors():
if x is "" or x == "":
print("x is an empty string")
if y is not "" or y != "":
print("y is not an empty string")
if "" != z:
print("z is an empty string")
def ok():
if x and not y:
print("x is not an empty string, but y is an empty string")
data.loc[data["a"] != ""]
data.loc[data["a"] != "", :]

View File

@@ -1,95 +0,0 @@
while True:
try:
pass
finally:
continue # [continue-in-finally]
while True:
try:
pass
except Exception:
continue
finally:
try:
pass
finally:
continue # [continue-in-finally]
pass
while True:
try:
pass
finally:
test = "aa"
match test:
case "aa":
continue # [continue-in-finally]
while True:
try:
pass
finally:
with "aa" as f:
continue # [continue-in-finally]
while True:
try:
pass
finally:
if True:
continue # [continue-in-finally]
continue # [continue-in-finally]
def test():
while True:
continue
try:
pass
finally:
continue # [continue-in-finally]
while True:
try:
pass
finally:
continue # [continue-in-finally]
def test():
while True:
continue
while True:
try:
pass
finally:
for i in range(12):
continue
continue # [continue-in-finally]
while True:
pass
else:
continue # [continue-in-finally]
def test():
continue
while True:
continue
while True:
try:
pass
finally:
if True:
pass
elif False:
continue # [continue-in-finally]
else:
continue # [continue-in-finally]
for i in range(10):
pass
else:
continue # [continue-in-finally]

View File

@@ -1,12 +0,0 @@
import os
tempVar = os.getenv("TEST", 12) # [invalid-envvar-default]
goodVar = os.getenv("TESTING", None)
dictVarBad = os.getenv("AAA", {"a", 7}) # [invalid-envvar-default]
print(os.getenv("TEST", False)) # [invalid-envvar-default]
os.getenv("AA", "GOOD")
os.getenv("AA", f"GOOD")
os.getenv("AA", "GOOD" + "BAD")
os.getenv("AA", "GOOD" + 1)
os.getenv("AA", "GOOD %s" % "BAD")
os.getenv("B", Z)

View File

@@ -1,15 +0,0 @@
import os
os.getenv(1) # [invalid-envvar-value]
os.getenv("a")
os.getenv("test")
os.getenv(key="testingAgain")
os.getenv(key=11) # [invalid-envvar-value]
os.getenv(["hello"]) # [invalid-envvar-value]
os.getenv(key="foo", default="bar")
os.getenv(key=f"foo", default="bar")
os.getenv(key="foo" + "bar", default=1)
os.getenv(key=1 + "bar", default=1) # [invalid-envvar-value]
AA = "aa"
os.getenv(AA)

View File

@@ -11,9 +11,6 @@ if 10 == 100: # [comparison-of-constants] R0133
if 1 == 3: # [comparison-of-constants] R0133
pass
if 1 == -3: # [comparison-of-constants] R0133
pass
x = 0
if 4 == 3 == x: # [comparison-of-constants] R0133
pass
@@ -38,12 +35,6 @@ if argc != 1: # correct
if argc != 2: # [magic-value-comparison]
pass
if argc != -2: # [magic-value-comparison]
pass
if argc != +2: # [magic-value-comparison]
pass
if __name__ == "__main__": # correct
pass

View File

@@ -1,39 +0,0 @@
import sys
def print_python_version():
print(sys.version)
return None # [useless-return]
def print_python_version():
print(sys.version)
return None # [useless-return]
def print_python_version():
print(sys.version)
return None # [useless-return]
class SomeClass:
def print_python_version(self):
print(sys.version)
return None # [useless-return]
def print_python_version():
if 2 * 2 == 4:
return
print(sys.version)
def print_python_version():
if 2 * 2 == 4:
return None
return
def print_python_version():
if 2 * 2 == 4:
return None

View File

@@ -25,7 +25,22 @@ avoid-escape = true
max-complexity = 10
[tool.ruff.pep8-naming]
classmethod-decorators = ["pydantic.validator"]
ignore-names = [
"setUp",
"tearDown",
"setUpClass",
"tearDownClass",
"setUpModule",
"tearDownModule",
"asyncSetUp",
"asyncTearDown",
"setUpTestData",
"failureException",
"longMessage",
"maxDiff",
]
classmethod-decorators = ["classmethod", "pydantic.validator"]
staticmethod-decorators = ["staticmethod"]
[tool.ruff.flake8-tidy-imports]
ban-relative-imports = "parents"

View File

@@ -12,17 +12,11 @@ Ipsum
""".encode(
"utf-8"
)
(
"Lorem "
"Ipsum".encode()
)
(
"Lorem " # Comment
"Ipsum".encode() # Comment
)
(
"Lorem " "Ipsum".encode()
)
# b"""
# Lorem
#
# Ipsum
# """
# `encode` on variables should not be processed.
string = "hello there"

View File

@@ -1,10 +0,0 @@
import socket
from kombu import Connection, exceptions
try:
conn = Connection(settings.CELERY_BROKER_URL)
conn.ensure_connection(max_retries=2)
conn._close()
except (socket.error, exceptions.OperationalError):
return HttpResponseServerError("cache: cannot connect to broker.")

View File

@@ -1,29 +1,24 @@
# Error (`from unittest import mock`)
# These should be changed
if True:
import mock
# Error (`from unittest import mock`)
if True:
import mock, sys
# Error (`from unittest.mock import *`)
if True:
from mock import *
# Error (`from unittest import mock`)
# This goes to `from unittest import mock`
import mock.mock
# Error (`from unittest import mock`)
# Mock should go on a new line as `from unittest import mock`
import contextlib, mock, sys
# Error (`from unittest import mock`)
# Mock should go on a new line as `from unittest import mock`
import mock, sys
x = "This code should be preserved one line below the mock"
# Error (`from unittest import mock`)
# Mock should go on a new line as `from unittest import mock`
from mock import mock
# Error (keep trailing comma)
# Should keep trailing comma
from mock import (
mock,
a,
@@ -37,7 +32,7 @@ from mock import (
mock,
)
# Error (avoid trailing comma)
# Should not get a trailing comma
from mock import (
mock,
a,
@@ -62,16 +57,16 @@ if True:
c
)
# OK
# These should not change:
import os, io
# Error (`from unittest import mock`)
# Mock should go on a new line as `from unittest import mock`
import mock, mock
# Error (`from unittest import mock as foo`)
# Mock should go on a new line as `from unittest import mock as foo`
import mock as foo
# Error (`from unittest import mock as foo`)
# Mock should go on a new line as `from unittest import mock as foo`
from mock import mock as foo
if True:
@@ -86,8 +81,8 @@ if True:
from mock import mock as foo, mock as bar, mock
# OK.
# This should be unchanged.
x = mock.Mock()
# Error (`mock.Mock()`).
# This should change to `mock.Mock()`.
x = mock.mock.Mock()

View File

@@ -41,7 +41,7 @@ if True:
Good,
)
from typing import Callable, Match, Pattern, List, OrderedDict, AbstractSet, ContextManager
from typing import Callable, Match, Pattern, List
if True: from collections import (
Mapping, Counter)

View File

@@ -5,5 +5,3 @@ isinstance(1, int) # OK
issubclass("yes", int) # OK
isinstance(1, int | float) # OK
issubclass("yes", int | str) # OK
isinstance(1, ()) # OK
isinstance(1, (int, *(str, bytes))) # OK
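The two trailing `# OK` lines pin down when the tuple form must be left alone: an empty tuple and a tuple containing a starred element have no `int | str` union spelling. A minimal sketch of the tuple-to-union rewrite over a hand-rolled stand-in AST (the real rule operates on `rustpython_parser` expressions, not these toy types):

/// Hand-rolled stand-in for the parser's expression type (illustration only).
enum Expr {
    Name(&'static str),
    Starred,
}

/// Render a tuple second argument to `isinstance`/`issubclass` as a PEP 604
/// union, or `None` when the tuple must be preserved.
fn to_union(elts: &[Expr]) -> Option<String> {
    if elts.is_empty() {
        return None; // `()` has no union equivalent
    }
    let mut parts = Vec::new();
    for elt in elts {
        match elt {
            Expr::Name(name) => parts.push(*name),
            Expr::Starred => return None, // `*(...)` can't be spelled with `|`
        }
    }
    Some(parts.join(" | "))
}

fn main() {
    assert_eq!(
        to_union(&[Expr::Name("int"), Expr::Name("str")]),
        Some("int | str".to_string())
    );
    assert_eq!(to_union(&[]), None); // isinstance(1, ())
    assert_eq!(to_union(&[Expr::Name("int"), Expr::Starred]), None); // *(str, bytes)
    println!("union rewriting matches the fixture's OK cases");
}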

View File

@@ -1,19 +0,0 @@
input = [1, 2, 3]
otherInput = [2, 3, 4]
# OK
zip(input, otherInput) # different inputs
zip(input, otherInput[1:]) # different inputs
zip(input, input[2:]) # not successive
zip(input[:-1], input[2:]) # not successive
list(zip(input, otherInput)) # nested call
zip(input, input[1::2]) # not successive
# Errors
zip(input, input[1:])
zip(input, input[1::1])
zip(input[:-1], input[1:])
zip(input[1:], input[2:])
zip(input[1:-1], input[2:])
list(zip(input, input[1:]))
list(zip(input[:-1], input[1:]))

View File

@@ -1,13 +0,0 @@
# noqa
# noqa # comment
print() # noqa
print() # noqa # comment
print(a) # noqa
print(a) # noqa # comment
# noqa: E501, F821
# noqa: E501, F821 # comment
print() # noqa: E501, F821
print() # noqa: E501, F821 # comment
print(a) # noqa: E501, F821
print(a) # noqa: E501, F821 # comment

View File

@@ -4,7 +4,7 @@ use rustc_hash::FxHashMap;
use rustpython_parser::ast::ExcepthandlerKind::ExceptHandler;
use rustpython_parser::ast::{Stmt, StmtKind};
use crate::types::RefEquality;
use crate::ast::types::RefEquality;
/// Return the common ancestor of `left` and `right` below `stop`, or `None`.
fn common_ancestor<'a>(

View File

@@ -1,14 +1,13 @@
//! An equivalent object hierarchy to the [`Expr`] hierarchy, but with the
//! ability to compare expressions for equality (via [`Eq`] and [`Hash`]).
use num_bigint::BigInt;
use rustpython_parser::ast::{
Alias, Arg, Arguments, Boolop, Cmpop, Comprehension, Constant, Excepthandler,
ExcepthandlerKind, Expr, ExprContext, ExprKind, Keyword, MatchCase, Operator, Pattern,
PatternKind, Stmt, StmtKind, Unaryop, Withitem,
};
use num_bigint::BigInt;
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum ComparableExprContext {
Load,

View File

@@ -1,8 +1,8 @@
use rustpython_parser::ast::Expr;
use crate::context::Context;
use crate::helpers::{map_callable, to_call_path};
use crate::scope::{Scope, ScopeKind};
use crate::ast::helpers::{map_callable, to_call_path};
use crate::ast::types::{Scope, ScopeKind};
use crate::checkers::ast::Checker;
const CLASS_METHODS: [&str; 3] = ["__new__", "__init_subclass__", "__class_getitem__"];
const METACLASS_BASES: [(&str, &str); 2] = [("", "type"), ("abc", "ABCMeta")];
@@ -16,7 +16,7 @@ pub enum FunctionType {
/// Classify a function based on its scope, name, and decorators.
pub fn classify(
ctx: &Context,
checker: &Checker,
scope: &Scope,
name: &str,
decorator_list: &[Expr],
@@ -29,12 +29,12 @@ pub fn classify(
if decorator_list.iter().any(|expr| {
// The method is decorated with a static method decorator (like
// `@staticmethod`).
ctx.resolve_call_path(map_callable(expr))
checker
.resolve_call_path(map_callable(expr))
.map_or(false, |call_path| {
call_path.as_slice() == ["", "staticmethod"]
|| staticmethod_decorators
.iter()
.any(|decorator| call_path == to_call_path(decorator))
staticmethod_decorators
.iter()
.any(|decorator| call_path == to_call_path(decorator))
})
}) {
FunctionType::StaticMethod
@@ -42,7 +42,7 @@ pub fn classify(
// Special-case class method, like `__new__`.
|| scope.bases.iter().any(|expr| {
// The class itself extends a known metaclass, so all methods are class methods.
ctx.resolve_call_path(map_callable(expr)).map_or(false, |call_path| {
checker.resolve_call_path(map_callable(expr)).map_or(false, |call_path| {
METACLASS_BASES
.iter()
.any(|(module, member)| call_path.as_slice() == [*module, *member])
@@ -50,8 +50,7 @@ pub fn classify(
})
|| decorator_list.iter().any(|expr| {
// The method is decorated with a class method decorator (like `@classmethod`).
ctx.resolve_call_path(map_callable(expr)).map_or(false, |call_path| {
call_path.as_slice() == ["", "classmethod"] ||
checker.resolve_call_path(map_callable(expr)).map_or(false, |call_path| {
classmethod_decorators
.iter()
.any(|decorator| call_path == to_call_path(decorator))
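The rewritten checks above classify a method by resolving each decorator to a call path and comparing it against the builtin name or the user-configured decorator list. A string-level sketch of that comparison (the `configured` parameter stands in for the `staticmethod_decorators` setting; the example values are hypothetical):

/// Decide whether a resolved decorator call path marks a static method,
/// mirroring the builtin-or-configured test in `classify` above.
fn is_staticmethod_decorator(call_path: &[&str], configured: &[Vec<&str>]) -> bool {
    call_path == ["", "staticmethod"]
        || configured
            .iter()
            .any(|decorator| call_path == decorator.as_slice())
}

fn main() {
    let configured = vec![vec!["myframework", "static"]]; // hypothetical setting
    assert!(is_staticmethod_decorator(&["", "staticmethod"], &configured));
    assert!(is_staticmethod_decorator(&["myframework", "static"], &configured));
    assert!(!is_staticmethod_decorator(&["", "classmethod"], &configured));
    println!("decorator classification matches");
}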

View File

@@ -2,7 +2,7 @@ use std::hash::Hash;
use rustpython_parser::ast::Expr;
use crate::comparable::ComparableExpr;
use crate::ast::comparable::ComparableExpr;
/// Wrapper around `Expr` that implements `Hash` and `PartialEq`.
pub struct HashableExpr<'a>(&'a Expr);
@@ -30,11 +30,11 @@ impl<'a> From<&'a Expr> for HashableExpr<'a> {
}
impl<'a> HashableExpr<'a> {
pub const fn from_expr(expr: &'a Expr) -> Self {
pub(crate) const fn from_expr(expr: &'a Expr) -> Self {
Self(expr)
}
pub const fn as_expr(&self) -> &'a Expr {
pub(crate) const fn as_expr(&self) -> &'a Expr {
self.0
}
}

View File

@@ -13,13 +13,11 @@ use rustpython_parser::ast::{
use rustpython_parser::{lexer, Mode, StringKind, Tok};
use smallvec::{smallvec, SmallVec};
use crate::context::Context;
use crate::newlines::StrExt;
use crate::scope::{Binding, BindingKind};
use crate::ast::types::{Binding, BindingKind, CallPath, Range};
use crate::ast::visitor;
use crate::ast::visitor::Visitor;
use crate::checkers::ast::Checker;
use crate::source_code::{Generator, Indexer, Locator, Stylist};
use crate::types::{CallPath, Range};
use crate::visitor;
use crate::visitor::Visitor;
/// Create an `Expr` with default location from an `ExprKind`.
pub fn create_expr(node: ExprKind) -> Expr {
@@ -101,16 +99,17 @@ pub fn format_call_path(call_path: &[&str]) -> String {
}
/// Return `true` if the `Expr` contains a reference to `${module}.${target}`.
pub fn contains_call_path(ctx: &Context, expr: &Expr, target: &[&str]) -> bool {
pub fn contains_call_path(checker: &Checker, expr: &Expr, target: &[&str]) -> bool {
any_over_expr(expr, &|expr| {
ctx.resolve_call_path(expr)
checker
.resolve_call_path(expr)
.map_or(false, |call_path| call_path.as_slice() == target)
})
}
/// Return `true` if the `Expr` contains an expression that appears to include a
/// side-effect (like a function call).
pub fn contains_effect(ctx: &Context, expr: &Expr) -> bool {
pub fn contains_effect(checker: &Checker, expr: &Expr) -> bool {
any_over_expr(expr, &|expr| {
// Accept empty initializers.
if let ExprKind::Call {
@@ -126,7 +125,7 @@ pub fn contains_effect(ctx: &Context, expr: &Expr) -> bool {
|| id == "tuple"
|| id == "dict"
|| id == "frozenset")
&& ctx.is_builtin(id);
&& checker.is_builtin(id);
return !is_empty_initializer;
}
}
@@ -654,7 +653,7 @@ pub fn has_comments<T>(located: &Located<T>, locator: &Locator) -> bool {
/// Returns `true` if a [`Range`] includes at least one comment.
pub fn has_comments_in(range: Range, locator: &Locator) -> bool {
for tok in lexer::lex_located(locator.slice(range), Mode::Module, range.location) {
for tok in lexer::lex_located(locator.slice(&range), Mode::Module, range.location) {
match tok {
Ok((_, tok, _)) => {
if matches!(tok, Tok::Comment(..)) {
@@ -670,10 +669,10 @@ pub fn has_comments_in(range: Range, locator: &Locator) -> bool {
}
/// Return `true` if the body uses `locals()`, `globals()`, `vars()`, `eval()`.
pub fn uses_magic_variable_access(ctx: &Context, body: &[Stmt]) -> bool {
pub fn uses_magic_variable_access(checker: &Checker, body: &[Stmt]) -> bool {
any_over_body(body, &|expr| {
if let ExprKind::Call { func, .. } = &expr.node {
ctx.resolve_call_path(func).map_or(false, |call_path| {
checker.resolve_call_path(func).map_or(false, |call_path| {
call_path.as_slice() == ["", "locals"]
|| call_path.as_slice() == ["", "globals"]
|| call_path.as_slice() == ["", "vars"]
@@ -743,7 +742,7 @@ pub fn to_module_path(package: &Path, path: &Path) -> Option<Vec<String>> {
.ok()?
.iter()
.map(Path::new)
.map(Path::file_stem)
.map(std::path::Path::file_stem)
.map(|path| path.and_then(|path| path.to_os_string().into_string().ok()))
.collect::<Option<Vec<String>>>()
}
@@ -801,7 +800,7 @@ where
match &stmt.node {
StmtKind::Raise { exc, cause } => {
self.raises
.push((Range::from(stmt), exc.as_deref(), cause.as_deref()));
.push((Range::from_located(stmt), exc.as_deref(), cause.as_deref()));
}
StmtKind::ClassDef { .. }
| StmtKind::FunctionDef { .. }
@@ -856,7 +855,7 @@ pub fn to_relative(absolute: Location, base: Location) -> Location {
/// Return `true` if a [`Located`] has leading content.
pub fn match_leading_content<T>(located: &Located<T>, locator: &Locator) -> bool {
let range = Range::new(Location::new(located.location.row(), 0), located.location);
let prefix = locator.slice(range);
let prefix = locator.slice(&range);
prefix.chars().any(|char| !char.is_whitespace())
}
@@ -866,7 +865,7 @@ pub fn match_trailing_content<T>(located: &Located<T>, locator: &Locator) -> boo
located.end_location.unwrap(),
Location::new(located.end_location.unwrap().row() + 1, 0),
);
let suffix = locator.slice(range);
let suffix = locator.slice(&range);
for char in suffix.chars() {
if char == '#' {
return false;
@@ -884,7 +883,7 @@ pub fn match_trailing_comment<T>(located: &Located<T>, locator: &Locator) -> Opt
located.end_location.unwrap(),
Location::new(located.end_location.unwrap().row() + 1, 0),
);
let suffix = locator.slice(range);
let suffix = locator.slice(&range);
for (i, char) in suffix.chars().enumerate() {
if char == '#' {
return Some(i);
@@ -942,7 +941,7 @@ pub fn identifier_range(stmt: &Stmt, locator: &Locator) -> Range {
| StmtKind::FunctionDef { .. }
| StmtKind::AsyncFunctionDef { .. }
) {
let contents = locator.slice(stmt);
let contents = locator.slice(&Range::from_located(stmt));
for (start, tok, end) in lexer::lex_located(contents, Mode::Module, stmt.location).flatten()
{
if matches!(tok, Tok::Name { .. }) {
@@ -951,7 +950,7 @@ pub fn identifier_range(stmt: &Stmt, locator: &Locator) -> Range {
}
error!("Failed to find identifier for {:?}", stmt);
}
Range::from(stmt)
Range::from_located(stmt)
}
/// Like `identifier_range`, but accepts a `Binding`.
@@ -969,12 +968,12 @@ pub fn binding_range(binding: &Binding, locator: &Locator) -> Range {
}
}
/// Return the ranges of [`Tok::Name`] tokens within a specified node.
pub fn find_names<'a, T>(
located: &'a Located<T>,
// Return the ranges of `Name` tokens within a specified node.
pub fn find_names<'a, T, U>(
located: &'a Located<T, U>,
locator: &'a Locator,
) -> impl Iterator<Item = Range> + 'a {
let contents = locator.slice(located);
let contents = locator.slice(&Range::from_located(located));
lexer::lex_located(contents, Mode::Module, located.location)
.flatten()
.filter(|(_, tok, _)| matches!(tok, Tok::Name { .. }))
@@ -992,7 +991,7 @@ pub fn excepthandler_name_range(handler: &Excepthandler, locator: &Locator) -> O
match (name, type_) {
(Some(_), Some(type_)) => {
let type_end_location = type_.end_location.unwrap();
let contents = locator.slice(Range::new(type_end_location, body[0].location));
let contents = locator.slice(&Range::new(type_end_location, body[0].location));
let range = lexer::lex_located(contents, Mode::Module, type_end_location)
.flatten()
.tuple_windows()
@@ -1016,7 +1015,7 @@ pub fn except_range(handler: &Excepthandler, locator: &Locator) -> Range {
.expect("Expected body to be non-empty")
.location
};
let contents = locator.slice(Range {
let contents = locator.slice(&Range {
location: handler.location,
end_location: end,
});
@@ -1033,7 +1032,7 @@ pub fn except_range(handler: &Excepthandler, locator: &Locator) -> Range {
/// Find f-strings that don't contain any formatted values in a `JoinedStr`.
pub fn find_useless_f_strings(expr: &Expr, locator: &Locator) -> Vec<(Range, Range)> {
let contents = locator.slice(expr);
let contents = locator.slice(&Range::from_located(expr));
lexer::lex_located(contents, Mode::Module, expr.location)
.flatten()
.filter_map(|(location, tok, end_location)| match tok {
@@ -1041,7 +1040,7 @@ pub fn find_useless_f_strings(expr: &Expr, locator: &Locator) -> Vec<(Range, Ran
kind: StringKind::FString | StringKind::RawFString,
..
} => {
let first_char = locator.slice(Range {
let first_char = locator.slice(&Range {
location,
end_location: Location::new(location.row(), location.column() + 1),
});
@@ -1081,7 +1080,7 @@ pub fn else_range(stmt: &Stmt, locator: &Locator) -> Option<Range> {
.expect("Expected body to be non-empty")
.end_location
.unwrap();
let contents = locator.slice(Range {
let contents = locator.slice(&Range {
location: body_end,
end_location: orelse
.first()
@@ -1103,7 +1102,7 @@ pub fn else_range(stmt: &Stmt, locator: &Locator) -> Option<Range> {
/// Return the `Range` of the first `Tok::Colon` token in a `Range`.
pub fn first_colon_range(range: Range, locator: &Locator) -> Option<Range> {
let contents = locator.slice(range);
let contents = locator.slice(&range);
let range = lexer::lex_located(contents, Mode::Module, range.location)
.flatten()
.find(|(_, kind, _)| matches!(kind, Tok::Colon))
@@ -1114,32 +1113,6 @@ pub fn first_colon_range(range: Range, locator: &Locator) -> Option<Range> {
range
}
/// Given a statement, find its "logical end".
///
/// For example: the statement could be followed by a trailing semicolon, by an end-of-line
/// comment, or by any number of continuation lines (and then by a comment, and so on).
pub fn end_of_statement(stmt: &Stmt, locator: &Locator) -> Location {
let contents = locator.skip(stmt.end_location.unwrap());
// End-of-file, so just return the end of the statement.
if contents.is_empty() {
return stmt.end_location.unwrap();
}
// Otherwise, find the end of the last line that's "part of" the statement.
for (lineno, line) in contents.universal_newlines().enumerate() {
if line.ends_with('\\') {
continue;
}
return to_absolute(
Location::new(lineno + 1, line.chars().count()),
stmt.end_location.unwrap(),
);
}
unreachable!("Expected to find end-of-statement")
}
/// Return the `Range` of the first `Elif` or `Else` token in an `If` statement.
pub fn elif_else_range(stmt: &Stmt, locator: &Locator) -> Option<Range> {
let StmtKind::If { body, orelse, .. } = &stmt.node else {
@@ -1159,7 +1132,7 @@ pub fn elif_else_range(stmt: &Stmt, locator: &Locator) -> Option<Range> {
[stmt, ..] => stmt.location,
_ => return None,
};
let contents = locator.slice(Range::new(start, end));
let contents = locator.slice(&Range::new(start, end));
let range = lexer::lex_located(contents, Mode::Module, start)
.flatten()
.find(|(_, kind, _)| matches!(kind, Tok::Elif | Tok::Else))
@@ -1214,50 +1187,48 @@ pub struct SimpleCallArgs<'a> {
}
impl<'a> SimpleCallArgs<'a> {
pub fn new(
args: impl IntoIterator<Item = &'a Expr>,
keywords: impl IntoIterator<Item = &'a Keyword>,
) -> Self {
let args = args
.into_iter()
.take_while(|arg| !matches!(arg.node, ExprKind::Starred { .. }))
.collect();
pub fn new(args: &'a [Expr], keywords: &'a [Keyword]) -> Self {
let mut result = SimpleCallArgs::default();
let kwargs = keywords
.into_iter()
.filter_map(|keyword| {
let node = &keyword.node;
node.arg.as_ref().map(|arg| (arg.as_ref(), &node.value))
})
.collect();
for arg in args {
match &arg.node {
ExprKind::Starred { .. } => {
break;
}
_ => {
result.args.push(arg);
}
}
}
SimpleCallArgs { args, kwargs }
}
for keyword in keywords {
if let Some(arg) = &keyword.node.arg {
result.kwargs.insert(arg, &keyword.node.value);
}
}
/// Get the argument with the given name.
/// If the argument is not found by name, return
/// `None`.
pub fn keyword_argument(&self, name: &str) -> Option<&'a Expr> {
self.kwargs.get(name).copied()
result
}
/// Get the argument with the given name or position.
/// If the argument is not found with either name or position, return
/// `None`.
pub fn argument(&self, name: &str, position: usize) -> Option<&'a Expr> {
self.keyword_argument(name)
.or_else(|| self.args.get(position).copied())
pub fn get_argument(&self, name: &'a str, position: Option<usize>) -> Option<&'a Expr> {
if let Some(kwarg) = self.kwargs.get(name) {
return Some(kwarg);
}
if let Some(position) = position {
if position < self.args.len() {
return Some(self.args[position]);
}
}
None
}
/// Return the number of positional and keyword arguments.
/// Get the number of positional and keyword arguments used.
pub fn len(&self) -> usize {
self.args.len() + self.kwargs.len()
}
/// Return `true` if there are no positional or keyword arguments.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
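The reworked `SimpleCallArgs` above truncates the positional list at the first `*args` splat and lets `argument` fall back from keyword lookup to position. The same lookup semantics over plain strings (a sketch only; the real type stores `&Expr` values keyed by keyword name):

use std::collections::HashMap;

/// String-based stand-in for `SimpleCallArgs` (illustration only).
struct CallArgs<'a> {
    args: Vec<&'a str>,
    kwargs: HashMap<&'a str, &'a str>,
}

impl<'a> CallArgs<'a> {
    /// Positional arguments stop at the first splat, mirroring the
    /// `take_while` over `ExprKind::Starred` in the new constructor.
    fn new(args: &[&'a str], kwargs: &[(&'a str, &'a str)]) -> Self {
        Self {
            args: args
                .iter()
                .copied()
                .take_while(|arg| !arg.starts_with('*'))
                .collect(),
            kwargs: kwargs.iter().copied().collect(),
        }
    }

    /// Keyword lookup wins; otherwise fall back to the position.
    fn argument(&self, name: &str, position: usize) -> Option<&'a str> {
        self.kwargs
            .get(name)
            .copied()
            .or_else(|| self.args.get(position).copied())
    }
}

fn main() {
    let call = CallArgs::new(&["1", "*rest", "2"], &[("timeout", "30")]);
    assert_eq!(call.argument("timeout", 9), Some("30")); // found by keyword
    assert_eq!(call.argument("url", 0), Some("1")); // found by position
    assert_eq!(call.argument("verify", 1), None); // truncated at "*rest"
    println!("lookup semantics match");
}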
/// Return `true` if the given `Expr` is a potential logging call. Matches
@@ -1281,11 +1252,11 @@ mod tests {
use rustpython_parser as parser;
use rustpython_parser::ast::Location;
use crate::helpers::{
use crate::ast::helpers::{
elif_else_range, else_range, first_colon_range, identifier_range, match_trailing_content,
};
use crate::ast::types::Range;
use crate::source_code::Locator;
use crate::types::Range;
#[test]
fn trailing_content() -> Result<()> {

View File

@@ -9,7 +9,7 @@ pub enum LoggingLevel {
}
impl LoggingLevel {
pub fn from_attribute(level: &str) -> Option<Self> {
pub fn from_str(level: &str) -> Option<Self> {
match level {
"debug" => Some(LoggingLevel::Debug),
"critical" => Some(LoggingLevel::Critical),

View File

@@ -1,19 +1,13 @@
pub mod branch_detection;
pub mod cast;
pub mod comparable;
pub mod context;
pub mod function_type;
pub mod hashable;
pub mod helpers;
pub mod logging;
pub mod newlines;
pub mod operations;
pub mod relocate;
pub mod scope;
pub mod source_code;
pub mod str;
pub mod types;
pub mod typing;
pub mod visibility;
pub mod visitor;
pub mod whitespace;

View File

@@ -3,11 +3,11 @@ use rustc_hash::FxHashMap;
use rustpython_parser::ast::{Cmpop, Constant, Expr, ExprKind, Located, Stmt, StmtKind};
use rustpython_parser::{lexer, Mode, Tok};
use crate::context::Context;
use crate::helpers::any_over_expr;
use crate::scope::{BindingKind, Scope};
use crate::visitor;
use crate::visitor::Visitor;
use crate::ast::helpers::any_over_expr;
use crate::ast::types::{BindingKind, Scope};
use crate::ast::visitor;
use crate::ast::visitor::Visitor;
use crate::checkers::ast::Checker;
bitflags! {
#[derive(Default)]
@@ -19,7 +19,7 @@ bitflags! {
/// Extract the names bound to a given __all__ assignment.
pub fn extract_all_names(
ctx: &Context,
checker: &Checker,
stmt: &Stmt,
scope: &Scope,
) -> (Vec<String>, AllNamesFlags) {
@@ -38,7 +38,7 @@ pub fn extract_all_names(
}
fn extract_elts<'a>(
ctx: &'a Context,
checker: &'a Checker,
expr: &'a Expr,
) -> (Option<&'a Vec<Expr>>, AllNamesFlags) {
match &expr.node {
@@ -60,7 +60,7 @@ pub fn extract_all_names(
} => {
// Allow `tuple()` and `list()` calls.
if keywords.is_empty() && args.len() <= 1 {
if ctx.resolve_call_path(func).map_or(false, |call_path| {
if checker.resolve_call_path(func).map_or(false, |call_path| {
call_path.as_slice() == ["", "tuple"]
|| call_path.as_slice() == ["", "list"]
}) {
@@ -95,8 +95,8 @@ pub fn extract_all_names(
// Grab the existing bound __all__ values.
if let StmtKind::AugAssign { .. } = &stmt.node {
if let Some(index) = scope.get("__all__") {
if let BindingKind::Export(existing) = &ctx.bindings[*index].kind {
if let Some(index) = scope.bindings.get("__all__") {
if let BindingKind::Export(existing) = &checker.bindings[*index].kind {
names.extend_from_slice(existing);
}
}
@@ -113,7 +113,7 @@ pub fn extract_all_names(
let mut current_right = right;
loop {
// Process the right side, which should be a "real" value.
let (elts, new_flags) = extract_elts(ctx, current_right);
let (elts, new_flags) = extract_elts(checker, current_right);
flags |= new_flags;
if let Some(elts) = elts {
add_to_names(&mut names, elts, &mut flags);
@@ -125,7 +125,7 @@ pub fn extract_all_names(
current_left = left;
current_right = right;
} else {
let (elts, new_flags) = extract_elts(ctx, current_left);
let (elts, new_flags) = extract_elts(checker, current_left);
flags |= new_flags;
if let Some(elts) = elts {
add_to_names(&mut names, elts, &mut flags);
@@ -134,7 +134,7 @@ pub fn extract_all_names(
}
}
} else {
let (elts, new_flags) = extract_elts(ctx, value);
let (elts, new_flags) = extract_elts(checker, value);
flags |= new_flags;
if let Some(elts) = elts {
add_to_names(&mut names, elts, &mut flags);
@@ -348,7 +348,7 @@ pub fn locate_cmpops(contents: &str) -> Vec<LocatedCmpop> {
mod tests {
use rustpython_parser::ast::{Cmpop, Location};
use crate::operations::{locate_cmpops, LocatedCmpop};
use crate::ast::operations::{locate_cmpops, LocatedCmpop};
#[test]
fn locates_cmpops() {

View File

@@ -1,6 +1,6 @@
use rustpython_parser::ast::{Expr, ExprKind, Keyword};
use crate::types::Range;
use crate::ast::types::Range;
fn relocate_keyword(keyword: &mut Keyword, location: Range) {
keyword.location = location.location;

View File

@@ -0,0 +1,279 @@
use std::ops::Deref;
use std::sync::atomic::{AtomicUsize, Ordering};
use rustc_hash::FxHashMap;
use rustpython_parser::ast::{Arguments, Expr, Keyword, Located, Location, Stmt};
fn id() -> usize {
static COUNTER: AtomicUsize = AtomicUsize::new(1);
COUNTER.fetch_add(1, Ordering::Relaxed)
}
#[derive(Clone)]
pub enum Node<'a> {
Stmt(&'a Stmt),
Expr(&'a Expr),
}
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct Range {
pub location: Location,
pub end_location: Location,
}
impl Range {
pub const fn new(location: Location, end_location: Location) -> Self {
Self {
location,
end_location,
}
}
pub fn from_located<T, U>(located: &Located<T, U>) -> Self {
Range::new(located.location, located.end_location.unwrap())
}
pub fn contains(&self, other: &Range) -> bool {
self.location <= other.location && self.end_location >= other.end_location
}
}
#[derive(Debug)]
pub struct FunctionDef<'a> {
// Properties derived from StmtKind::FunctionDef.
pub name: &'a str,
pub args: &'a Arguments,
pub body: &'a [Stmt],
pub decorator_list: &'a [Expr],
// pub returns: Option<&'a Expr>,
// pub type_comment: Option<&'a str>,
// Scope-specific properties.
// TODO(charlie): Create AsyncFunctionDef to mirror the AST.
pub async_: bool,
pub globals: FxHashMap<&'a str, &'a Stmt>,
}
#[derive(Debug)]
pub struct ClassDef<'a> {
// Properties derived from StmtKind::ClassDef.
pub name: &'a str,
pub bases: &'a [Expr],
pub keywords: &'a [Keyword],
// pub body: &'a [Stmt],
pub decorator_list: &'a [Expr],
// Scope-specific properties.
pub globals: FxHashMap<&'a str, &'a Stmt>,
}
#[derive(Debug)]
pub struct Lambda<'a> {
pub args: &'a Arguments,
pub body: &'a Expr,
}
#[derive(Debug)]
pub enum ScopeKind<'a> {
Class(ClassDef<'a>),
Function(FunctionDef<'a>),
Generator,
Module,
Lambda(Lambda<'a>),
}
#[derive(Debug)]
pub struct Scope<'a> {
pub id: usize,
pub kind: ScopeKind<'a>,
pub import_starred: bool,
pub uses_locals: bool,
/// A map from bound name to binding index, for live bindings.
pub bindings: FxHashMap<&'a str, usize>,
/// A map from bound name to binding index, for bindings that were created
/// in the scope but rebound (and thus overridden) later on in the same
/// scope.
pub rebounds: FxHashMap<&'a str, Vec<usize>>,
}
impl<'a> Scope<'a> {
pub fn new(kind: ScopeKind<'a>) -> Self {
Scope {
id: id(),
kind,
import_starred: false,
uses_locals: false,
bindings: FxHashMap::default(),
rebounds: FxHashMap::default(),
}
}
}
// Pyflakes defines the following binding hierarchy (via inheritance):
// Binding
// ExportBinding
// Annotation
// Argument
// Assignment
// NamedExprAssignment
// Definition
// FunctionDefinition
// ClassDefinition
// Builtin
// Importation
// SubmoduleImportation
// ImportationFrom
// StarImportation
// FutureImportation
#[derive(Clone, Debug, is_macro::Is)]
pub enum BindingKind<'a> {
Annotation,
Argument,
Assignment,
Binding,
LoopVar,
Global,
Nonlocal,
Builtin,
ClassDefinition,
FunctionDefinition,
Export(Vec<String>),
FutureImportation,
StarImportation(Option<usize>, Option<String>),
Importation(&'a str, &'a str),
FromImportation(&'a str, String),
SubmoduleImportation(&'a str, &'a str),
}
#[derive(Debug, Clone)]
pub struct Binding<'a> {
pub kind: BindingKind<'a>,
pub range: Range,
/// The context in which the binding was created.
pub context: ExecutionContext,
/// The statement in which the [`Binding`] was defined.
pub source: Option<RefEquality<'a, Stmt>>,
/// Tuple of (scope index, range) indicating the scope and range at which
/// the binding was last used in a runtime context.
pub runtime_usage: Option<(usize, Range)>,
/// Tuple of (scope index, range) indicating the scope and range at which
/// the binding was last used in a typing-time context.
pub typing_usage: Option<(usize, Range)>,
/// Tuple of (scope index, range) indicating the scope and range at which
/// the binding was last used in a synthetic context. This is used for
/// (e.g.) `__future__` imports, explicit re-exports, and other bindings
/// that should be considered used even if they're never referenced.
pub synthetic_usage: Option<(usize, Range)>,
}
#[derive(Copy, Debug, Clone)]
pub enum ExecutionContext {
Runtime,
Typing,
}
impl<'a> Binding<'a> {
pub fn mark_used(&mut self, scope: usize, range: Range, context: ExecutionContext) {
match context {
ExecutionContext::Runtime => self.runtime_usage = Some((scope, range)),
ExecutionContext::Typing => self.typing_usage = Some((scope, range)),
}
}
pub const fn used(&self) -> bool {
self.runtime_usage.is_some()
|| self.synthetic_usage.is_some()
|| self.typing_usage.is_some()
}
pub const fn is_definition(&self) -> bool {
matches!(
self.kind,
BindingKind::ClassDefinition
| BindingKind::FunctionDefinition
| BindingKind::Builtin
| BindingKind::FutureImportation
| BindingKind::StarImportation(..)
| BindingKind::Importation(..)
| BindingKind::FromImportation(..)
| BindingKind::SubmoduleImportation(..)
)
}
pub fn redefines(&self, existing: &'a Binding) -> bool {
match &self.kind {
BindingKind::Importation(.., full_name) => {
if let BindingKind::SubmoduleImportation(.., existing) = &existing.kind {
return full_name == existing;
}
}
BindingKind::FromImportation(.., full_name) => {
if let BindingKind::SubmoduleImportation(.., existing) = &existing.kind {
return full_name == existing;
}
}
BindingKind::SubmoduleImportation(.., full_name) => match &existing.kind {
BindingKind::Importation(.., existing)
| BindingKind::SubmoduleImportation(.., existing) => {
return full_name == existing;
}
BindingKind::FromImportation(.., existing) => {
return full_name == existing;
}
_ => {}
},
BindingKind::Annotation => {
return false;
}
BindingKind::FutureImportation => {
return false;
}
BindingKind::StarImportation(..) => {
return false;
}
_ => {}
}
existing.is_definition()
}
}
#[derive(Debug, Copy, Clone)]
pub struct RefEquality<'a, T>(pub &'a T);
impl<'a, T> std::hash::Hash for RefEquality<'a, T> {
fn hash<H>(&self, state: &mut H)
where
H: std::hash::Hasher,
{
(self.0 as *const T).hash(state);
}
}
impl<'a, 'b, T> PartialEq<RefEquality<'b, T>> for RefEquality<'a, T> {
fn eq(&self, other: &RefEquality<'b, T>) -> bool {
std::ptr::eq(self.0, other.0)
}
}
impl<'a, T> Eq for RefEquality<'a, T> {}
impl<'a, T> Deref for RefEquality<'a, T> {
type Target = T;
fn deref(&self) -> &T {
self.0
}
}
impl<'a> From<&RefEquality<'a, Stmt>> for &'a Stmt {
fn from(r: &RefEquality<'a, Stmt>) -> Self {
r.0
}
}
impl<'a> From<&RefEquality<'a, Expr>> for &'a Expr {
fn from(r: &RefEquality<'a, Expr>) -> Self {
r.0
}
}
pub type CallPath<'a> = smallvec::SmallVec<[&'a str; 8]>;
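Two details of this new `types` module are easy to get wrong: `Range::contains` is inclusive at both endpoints, and `RefEquality` compares by pointer identity rather than by value. A stand-alone sketch of both behaviors, with a simplified `(row, column)` location in place of the parser's `Location`:

/// Simplified (row, column) stand-in for the parser's `Location`.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct Loc(usize, usize);

struct Range {
    location: Loc,
    end_location: Loc,
}

impl Range {
    /// Containment is inclusive at both endpoints, as in the module above.
    fn contains(&self, other: &Range) -> bool {
        self.location <= other.location && self.end_location >= other.end_location
    }
}

/// Condensed copy of `RefEquality`: equality is pointer identity.
struct RefEquality<'a, T>(&'a T);

impl<'a, T> PartialEq for RefEquality<'a, T> {
    fn eq(&self, other: &Self) -> bool {
        std::ptr::eq(self.0, other.0)
    }
}

fn main() {
    let outer = Range { location: Loc(1, 0), end_location: Loc(3, 10) };
    let inner = Range { location: Loc(1, 0), end_location: Loc(2, 4) };
    assert!(outer.contains(&inner)); // sharing a start point still counts

    let a = String::from("x");
    let b = String::from("x");
    assert!(RefEquality(&a) == RefEquality(&a)); // same allocation
    assert!(RefEquality(&a) != RefEquality(&b)); // equal values, distinct nodes
    println!("containment and identity semantics hold");
}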

View File

@@ -1,7 +1,7 @@
use ruff_python_stdlib::typing::{PEP_585_BUILTINS_ELIGIBLE, PEP_593_SUBSCRIPTS, SUBSCRIPTS};
use ruff_python::typing::{PEP_585_BUILTINS_ELIGIBLE, PEP_593_SUBSCRIPTS, SUBSCRIPTS};
use rustpython_parser::ast::{Expr, ExprKind};
use crate::types::CallPath;
use crate::ast::types::CallPath;
pub enum Callable {
ForwardRef,

View File

@@ -1,12 +1,14 @@
use std::str::Lines;
use rustpython_parser::ast::{Located, Location};
use crate::ast::types::Range;
use crate::source_code::Locator;
use crate::types::Range;
/// Extract the leading indentation from a line.
pub fn indentation<'a, T>(locator: &'a Locator, located: &'a Located<T>) -> Option<&'a str> {
let range = Range::from(located);
let indentation = locator.slice(Range::new(
let range = Range::from_located(located);
let indentation = locator.slice(&Range::new(
Location::new(range.location.row(), 0),
Location::new(range.location.row(), range.location.column()),
));
@@ -37,3 +39,38 @@ pub fn clean(indentation: &str) -> String {
.map(|char| if char.is_whitespace() { char } else { ' ' })
.collect()
}
/// Like `str#lines`, but includes a trailing newline as an empty line.
pub struct LinesWithTrailingNewline<'a> {
trailing: Option<&'a str>,
underlying: Lines<'a>,
}
impl<'a> LinesWithTrailingNewline<'a> {
pub fn from(input: &'a str) -> LinesWithTrailingNewline<'a> {
LinesWithTrailingNewline {
underlying: input.lines(),
trailing: if input.ends_with('\n') {
Some("")
} else {
None
},
}
}
}
impl<'a> Iterator for LinesWithTrailingNewline<'a> {
type Item = &'a str;
#[inline]
fn next(&mut self) -> Option<&'a str> {
let mut next = self.underlying.next();
if next.is_none() {
if self.trailing.is_some() {
next = self.trailing;
self.trailing = None;
}
}
next
}
}
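The difference from `str::lines` that this iterator exists for is easy to miss: the standard iterator swallows a final newline, so the trailing empty line is lost. A condensed, runnable copy of the type above making that visible:

use std::str::Lines;

/// Condensed copy of the iterator defined above: like `str::lines`,
/// but a trailing newline contributes one final empty line.
struct LinesWithTrailingNewline<'a> {
    trailing: Option<&'a str>,
    underlying: Lines<'a>,
}

impl<'a> LinesWithTrailingNewline<'a> {
    fn from(input: &'a str) -> Self {
        Self {
            underlying: input.lines(),
            trailing: input.ends_with('\n').then_some(""),
        }
    }
}

impl<'a> Iterator for LinesWithTrailingNewline<'a> {
    type Item = &'a str;
    fn next(&mut self) -> Option<&'a str> {
        // Yield the underlying lines, then the synthetic empty line (once).
        self.underlying.next().or_else(|| self.trailing.take())
    }
}

fn main() {
    let text = "a\nb\n";
    let plain: Vec<_> = text.lines().collect();
    let with_trailing: Vec<_> = LinesWithTrailingNewline::from(text).collect();
    assert_eq!(plain, ["a", "b"]);
    assert_eq!(with_trailing, ["a", "b", ""]); // the extra empty line
    println!("trailing newline preserved as an empty line");
}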

View File

@@ -6,14 +6,14 @@ use libcst_native::{
use rustpython_parser::ast::{ExcepthandlerKind, Expr, Keyword, Location, Stmt, StmtKind};
use rustpython_parser::{lexer, Mode, Tok};
use ruff_diagnostics::Fix;
use ruff_python_ast::helpers;
use ruff_python_ast::helpers::to_absolute;
use ruff_python_ast::newlines::NewlineWithTrailingNewline;
use ruff_python_ast::source_code::{Indexer, Locator, Stylist};
use crate::ast::helpers;
use crate::ast::helpers::to_absolute;
use crate::ast::types::Range;
use crate::ast::whitespace::LinesWithTrailingNewline;
use crate::cst::helpers::compose_module_path;
use crate::cst::matchers::match_module;
use crate::fix::Fix;
use crate::source_code::{Indexer, Locator, Stylist};
/// Determine if a body contains only a single statement, taking into account
/// any `deleted` statements.
@@ -100,7 +100,7 @@ fn is_lone_child(child: &Stmt, parent: &Stmt, deleted: &[&Stmt]) -> Result<bool>
/// of a multi-statement line.
fn trailing_semicolon(stmt: &Stmt, locator: &Locator) -> Option<Location> {
let contents = locator.skip(stmt.end_location.unwrap());
for (row, line) in NewlineWithTrailingNewline::from(contents).enumerate() {
for (row, line) in LinesWithTrailingNewline::from(contents).enumerate() {
let trimmed = line.trim();
if trimmed.starts_with(';') {
let column = line
@@ -123,7 +123,7 @@ fn trailing_semicolon(stmt: &Stmt, locator: &Locator) -> Option<Location> {
fn next_stmt_break(semicolon: Location, locator: &Locator) -> Location {
let start_location = Location::new(semicolon.row(), semicolon.column() + 1);
let contents = locator.skip(start_location);
for (row, line) in NewlineWithTrailingNewline::from(contents).enumerate() {
for (row, line) in LinesWithTrailingNewline::from(contents).enumerate() {
let trimmed = line.trim();
// Skip past any continuations.
if trimmed.starts_with('\\') {
@@ -227,7 +227,7 @@ pub fn remove_unused_imports<'a>(
indexer: &Indexer,
stylist: &Stylist,
) -> Result<Fix> {
let module_text = locator.slice(stmt);
let module_text = locator.slice(&Range::from_located(stmt));
let mut tree = match_module(module_text)?;
let Some(Statement::Simple(body)) = tree.body.first_mut() else {
@@ -450,9 +450,8 @@ mod tests {
use rustpython_parser as parser;
use rustpython_parser::ast::Location;
use ruff_python_ast::source_code::Locator;
use crate::autofix::helpers::{next_stmt_break, trailing_semicolon};
use crate::source_code::Locator;
#[test]
fn find_semicolon() -> Result<()> {

View File

@@ -4,12 +4,11 @@ use itertools::Itertools;
use rustc_hash::FxHashMap;
use rustpython_parser::ast::Location;
use ruff_diagnostics::{Diagnostic, Fix};
use ruff_python_ast::source_code::Locator;
use ruff_python_ast::types::Range;
use crate::ast::types::Range;
use crate::fix::Fix;
use crate::linter::FixTable;
use crate::registry::{AsRule, Rule};
use crate::registry::Diagnostic;
use crate::source_code::Locator;
pub mod helpers;
@@ -39,7 +38,7 @@ fn apply_fixes<'a>(
.as_ref()
.map(|fix| (diagnostic.kind.rule(), fix))
})
.sorted_by(|(rule1, fix1), (rule2, fix2)| cmp_fix(*rule1, *rule2, fix1, fix2))
.sorted_by_key(|(.., fix)| fix.location)
{
// If we already applied an identical fix as part of another correction, skip
// any re-application.
@@ -55,7 +54,7 @@ fn apply_fixes<'a>(
}
// Add all contents from `last_pos` to `fix.location`.
let slice = locator.slice(Range::new(last_pos, fix.location));
let slice = locator.slice(&Range::new(last_pos, fix.location));
output.push_str(slice);
// Add the patch itself.
@@ -79,7 +78,7 @@ pub(crate) fn apply_fix(fix: &Fix, locator: &Locator) -> String {
let mut output = String::with_capacity(locator.len());
// Add all contents from `last_pos` to `fix.location`.
let slice = locator.slice(Range::new(Location::new(1, 0), fix.location));
let slice = locator.slice(&Range::new(Location::new(1, 0), fix.location));
output.push_str(slice);
// Add the patch itself.
@@ -92,54 +91,47 @@ pub(crate) fn apply_fix(fix: &Fix, locator: &Locator) -> String {
output
}
/// Compare two fixes.
fn cmp_fix(rule1: Rule, rule2: Rule, fix1: &Fix, fix2: &Fix) -> std::cmp::Ordering {
fix1.location
.cmp(&fix2.location)
.then_with(|| match (&rule1, &rule2) {
// Apply `EndsInPeriod` fixes before `NewLineAfterLastParagraph` fixes.
(Rule::EndsInPeriod, Rule::NewLineAfterLastParagraph) => std::cmp::Ordering::Less,
(Rule::NewLineAfterLastParagraph, Rule::EndsInPeriod) => std::cmp::Ordering::Greater,
_ => std::cmp::Ordering::Equal,
})
}
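`cmp_fix` sorts primarily by source location and only consults the rule pair to break ties, so the special case stays local: `EndsInPeriod` must apply before `NewLineAfterLastParagraph` when both edit the same spot. The same shape with plain values (hypothetical rule enum and `(row, column)` tuples for illustration):

use std::cmp::Ordering;

#[derive(Clone, Copy)]
enum Rule {
    EndsInPeriod,
    NewLineAfterLastParagraph,
    Other,
}

/// Location-first comparison with a rule-based tie-break, mirroring `cmp_fix`.
fn cmp_fix(rule1: Rule, rule2: Rule, loc1: (usize, usize), loc2: (usize, usize)) -> Ordering {
    loc1.cmp(&loc2).then_with(|| match (rule1, rule2) {
        (Rule::EndsInPeriod, Rule::NewLineAfterLastParagraph) => Ordering::Less,
        (Rule::NewLineAfterLastParagraph, Rule::EndsInPeriod) => Ordering::Greater,
        _ => Ordering::Equal,
    })
}

fn main() {
    // Same location: the period fix is ordered first.
    assert_eq!(
        cmp_fix(Rule::EndsInPeriod, Rule::NewLineAfterLastParagraph, (3, 0), (3, 0)),
        Ordering::Less
    );
    // Different locations: position wins regardless of rule.
    assert_eq!(
        cmp_fix(Rule::NewLineAfterLastParagraph, Rule::Other, (1, 0), (2, 0)),
        Ordering::Less
    );
    println!("ordering matches cmp_fix");
}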
#[cfg(test)]
mod tests {
use rustpython_parser::ast::Location;
use ruff_diagnostics::Diagnostic;
use ruff_diagnostics::Fix;
use ruff_python_ast::source_code::Locator;
use crate::autofix::{apply_fix, apply_fixes};
use crate::fix::Fix;
use crate::registry::Diagnostic;
use crate::rules::pycodestyle::rules::NoNewLineAtEndOfFile;
fn create_diagnostics(fixes: impl IntoIterator<Item = Fix>) -> Vec<Diagnostic> {
fixes
.into_iter()
.map(|fix| Diagnostic {
use crate::source_code::Locator;
#[test]
fn empty_file() {
let fixes: Vec<Diagnostic> = vec![];
let locator = Locator::new(r#""#);
let (contents, fixed) = apply_fixes(fixes.iter(), &locator);
assert_eq!(contents, "");
assert_eq!(fixed.values().sum::<usize>(), 0);
}
impl From<Fix> for Diagnostic {
fn from(fix: Fix) -> Self {
Diagnostic {
// The choice of rule here is arbitrary.
kind: NoNewLineAtEndOfFile.into(),
location: fix.location,
end_location: fix.end_location,
fix: Some(fix),
parent: None,
})
.collect()
}
#[test]
fn empty_file() {
let locator = Locator::new(r#""#);
let diagnostics = create_diagnostics([]);
let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
assert_eq!(contents, "");
assert_eq!(fixed.values().sum::<usize>(), 0);
}
}
}
#[test]
fn apply_one_replacement() {
let fixes: Vec<Diagnostic> = vec![Fix {
content: "Bar".to_string(),
location: Location::new(1, 8),
end_location: Location::new(1, 14),
}
.into()];
let locator = Locator::new(
r#"
class A(object):
@@ -147,12 +139,7 @@ class A(object):
"#
.trim(),
);
let diagnostics = create_diagnostics([Fix {
content: "Bar".to_string(),
location: Location::new(1, 8),
end_location: Location::new(1, 14),
}]);
let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
let (contents, fixed) = apply_fixes(fixes.iter(), &locator);
assert_eq!(
contents,
r#"
@@ -166,6 +153,12 @@ class A(Bar):
#[test]
fn apply_one_removal() {
let fixes: Vec<Diagnostic> = vec![Fix {
content: String::new(),
location: Location::new(1, 7),
end_location: Location::new(1, 15),
}
.into()];
let locator = Locator::new(
r#"
class A(object):
@@ -173,12 +166,7 @@ class A(object):
"#
.trim(),
);
let diagnostics = create_diagnostics([Fix {
content: String::new(),
location: Location::new(1, 7),
end_location: Location::new(1, 15),
}]);
let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
let (contents, fixed) = apply_fixes(fixes.iter(), &locator);
assert_eq!(
contents,
r#"
@@ -192,6 +180,20 @@ class A:
#[test]
fn apply_two_removals() {
let fixes: Vec<Diagnostic> = vec![
Fix {
content: String::new(),
location: Location::new(1, 7),
end_location: Location::new(1, 16),
}
.into(),
Fix {
content: String::new(),
location: Location::new(1, 16),
end_location: Location::new(1, 23),
}
.into(),
];
let locator = Locator::new(
r#"
class A(object, object):
@@ -199,19 +201,7 @@ class A(object, object):
"#
.trim(),
);
let diagnostics = create_diagnostics([
Fix {
content: String::new(),
location: Location::new(1, 7),
end_location: Location::new(1, 16),
},
Fix {
content: String::new(),
location: Location::new(1, 16),
end_location: Location::new(1, 23),
},
]);
let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
let (contents, fixed) = apply_fixes(fixes.iter(), &locator);
assert_eq!(
contents,
@@ -226,6 +216,20 @@ class A:
#[test]
fn ignore_overlapping_fixes() {
let fixes: Vec<Diagnostic> = vec![
Fix {
content: String::new(),
location: Location::new(1, 7),
end_location: Location::new(1, 15),
}
.into(),
Fix {
content: "ignored".to_string(),
location: Location::new(1, 9),
end_location: Location::new(1, 11),
}
.into(),
];
let locator = Locator::new(
r#"
class A(object):
@@ -233,19 +237,7 @@ class A(object):
"#
.trim(),
);
let diagnostics = create_diagnostics([
Fix {
content: String::new(),
location: Location::new(1, 7),
end_location: Location::new(1, 15),
},
Fix {
content: "ignored".to_string(),
location: Location::new(1, 9),
end_location: Location::new(1, 11),
},
]);
let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
let (contents, fixed) = apply_fixes(fixes.iter(), &locator);
assert_eq!(
contents,
r#"

View File

@@ -1,9 +1,3 @@
mod cache_key;
pub mod filetime;
pub mod globset;
pub use cache_key::{CacheKey, CacheKeyHasher};
use std::path::{Path, PathBuf};
pub const CACHE_DIR_NAME: &str = ".ruff_cache";

View File

@@ -1,25 +0,0 @@
use ruff_python_ast::scope::ScopeStack;
use rustpython_parser::ast::{Expr, Stmt};
use ruff_python_ast::types::Range;
use ruff_python_ast::types::RefEquality;
use ruff_python_ast::visibility::{Visibility, VisibleScope};
use crate::checkers::ast::AnnotationContext;
use crate::docstrings::definition::Definition;
type Context<'a> = (ScopeStack, Vec<RefEquality<'a, Stmt>>);
/// A collection of AST nodes that are deferred for later analysis.
/// Used to, e.g., store functions whose bodies shouldn't be analyzed until all
/// module-level definitions have been analyzed.
#[derive(Default)]
pub struct Deferred<'a> {
pub definitions: Vec<(Definition<'a>, Visibility, Context<'a>)>,
pub string_type_definitions: Vec<(Range, &'a str, AnnotationContext, Context<'a>)>,
pub type_definitions: Vec<(&'a Expr, AnnotationContext, Context<'a>)>,
pub functions: Vec<(&'a Stmt, Context<'a>, VisibleScope)>,
pub lambdas: Vec<(&'a Expr, Context<'a>)>,
pub for_loops: Vec<(&'a Stmt, Context<'a>)>,
pub assignments: Vec<Context<'a>>,
}

View File

@@ -1,8 +1,6 @@
use std::path::Path;
use ruff_diagnostics::Diagnostic;
use crate::registry::Rule;
use crate::registry::{Diagnostic, Rule};
use crate::rules::flake8_no_pep420::rules::implicit_namespace_package;
use crate::rules::pep8_naming::rules::invalid_module_name;
use crate::settings::Settings;
@@ -15,7 +13,7 @@ pub fn check_file_path(
let mut diagnostics: Vec<Diagnostic> = vec![];
// flake8-no-pep420
if settings.rules.enabled(Rule::ImplicitNamespacePackage) {
if settings.rules.enabled(&Rule::ImplicitNamespacePackage) {
if let Some(diagnostic) =
implicit_namespace_package(path, package, &settings.project_root, &settings.src)
{
@@ -24,7 +22,7 @@ pub fn check_file_path(
}
// pep8-naming
if settings.rules.enabled(Rule::InvalidModuleName) {
if settings.rules.enabled(&Rule::InvalidModuleName) {
if let Some(diagnostic) = invalid_module_name(path, package) {
diagnostics.push(diagnostic);
}

View File

@@ -4,15 +4,13 @@ use std::path::Path;
use rustpython_parser::ast::Suite;
use ruff_diagnostics::Diagnostic;
use ruff_python_ast::source_code::{Indexer, Locator, Stylist};
use ruff_python_ast::visitor::Visitor;
use crate::ast::visitor::Visitor;
use crate::directives::IsortDirectives;
use crate::registry::Rule;
use crate::registry::{Diagnostic, Rule};
use crate::rules::isort;
use crate::rules::isort::track::{Block, ImportTracker};
use crate::settings::{flags, Settings};
use crate::source_code::{Indexer, Locator, Stylist};
#[allow(clippy::too_many_arguments)]
pub fn check_imports(
@@ -38,7 +36,7 @@ pub fn check_imports(
// Enforce import rules.
let mut diagnostics = vec![];
if settings.rules.enabled(Rule::UnsortedImports) {
if settings.rules.enabled(&Rule::UnsortedImports) {
for block in &blocks {
if !block.imports.is_empty() {
if let Some(diagnostic) = isort::rules::organize_imports(
@@ -49,7 +47,7 @@ pub fn check_imports(
}
}
}
if settings.rules.enabled(Rule::MissingRequiredImport) {
if settings.rules.enabled(&Rule::MissingRequiredImport) {
diagnostics.extend(isort::rules::add_required_imports(
&blocks, python_ast, locator, stylist, settings, autofix,
));

View File

@@ -1,23 +1,18 @@
#![allow(dead_code, unused_imports, unused_variables)]
use bisection::bisect_left;
use itertools::Itertools;
use rustpython_parser::ast::Location;
use rustpython_parser::lexer::LexResult;
use ruff_diagnostics::Diagnostic;
use ruff_python_ast::source_code::{Locator, Stylist};
use ruff_python_ast::types::Range;
use crate::registry::{AsRule, Rule};
use crate::ast::types::Range;
use crate::registry::Diagnostic;
use crate::rules::pycodestyle::logical_lines::{iter_logical_lines, TokenFlags};
use crate::rules::pycodestyle::rules::{
extraneous_whitespace, indentation, missing_whitespace, missing_whitespace_after_keyword,
missing_whitespace_around_operator, space_around_operator, whitespace_around_keywords,
whitespace_around_named_parameter_equals, whitespace_before_comment,
whitespace_before_parameters,
extraneous_whitespace, indentation, missing_whitespace_after_keyword, space_around_operator,
whitespace_around_keywords, whitespace_around_named_parameter_equals,
whitespace_before_comment,
};
use crate::settings::{flags, Settings};
use crate::settings::Settings;
use crate::source_code::{Locator, Stylist};
/// Return the amount of indentation, expanding tabs to the next multiple of 8.
fn expand_indent(mut line: &str) -> usize {
@@ -45,7 +40,6 @@ pub fn check_logical_lines(
locator: &Locator,
stylist: &Stylist,
settings: &Settings,
autofix: flags::Autofix,
) -> Vec<Diagnostic> {
let mut diagnostics = vec![];
@@ -59,7 +53,7 @@ pub fn check_logical_lines(
// Extract the indentation level.
let start_loc = line.mapping[0].1;
let start_line = locator.slice(Range::new(Location::new(start_loc.row(), 0), start_loc));
let start_line = locator.slice(&Range::new(Location::new(start_loc.row(), 0), start_loc));
let indent_level = expand_indent(start_line);
let indent_size = 4;
@@ -153,44 +147,6 @@ pub fn check_logical_lines(
});
}
}
for (location, kind) in missing_whitespace_around_operator(&line.tokens) {
if settings.rules.enabled(kind.rule()) {
diagnostics.push(Diagnostic {
kind,
location,
end_location: location,
fix: None,
parent: None,
});
}
}
#[cfg(feature = "logical_lines")]
let should_fix = autofix.into() && settings.rules.should_fix(Rule::MissingWhitespace);
#[cfg(not(feature = "logical_lines"))]
let should_fix = false;
for diagnostic in missing_whitespace(&line.text, start_loc.row(), should_fix) {
if settings.rules.enabled(diagnostic.kind.rule()) {
diagnostics.push(diagnostic);
}
}
}
if line.flags.contains(TokenFlags::BRACKET) {
#[cfg(feature = "logical_lines")]
let should_fix =
autofix.into() && settings.rules.should_fix(Rule::WhitespaceBeforeParameters);
#[cfg(not(feature = "logical_lines"))]
let should_fix = false;
for diagnostic in whitespace_before_parameters(&line.tokens, should_fix) {
if settings.rules.enabled(diagnostic.kind.rule()) {
diagnostics.push(diagnostic);
}
}
}
for (index, kind) in indentation(
@@ -227,9 +183,8 @@ mod tests {
use rustpython_parser::lexer::LexResult;
use rustpython_parser::{lexer, Mode};
use ruff_python_ast::source_code::Locator;
use crate::checkers::logical_lines::iter_logical_lines;
use crate::source_code::Locator;
#[test]
fn split_logical_lines() {
@@ -306,7 +261,7 @@ f()"#;
.into_iter()
.map(|line| line.text)
.collect();
let expected = vec!["def f():", "\"xxxxxxxxxxxxxxxxxxxx\"", "", "x = 1", "f()"];
let expected = vec!["def f():", "\"xxx\"", "", "x = 1", "f()"];
assert_eq!(actual, expected);
}
}
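Near the top of this file, `expand_indent` is documented as returning "the amount of indentation, expanding tabs to the next multiple of 8", but the hunk elides its body. One plausible implementation of that contract (a sketch of the documented behavior, not necessarily the elided code):

/// Indentation width with tabs expanded to the next multiple of 8.
fn expand_indent(line: &str) -> usize {
    let mut indent = 0;
    for ch in line.chars() {
        match ch {
            '\t' => indent = indent / 8 * 8 + 8, // jump to the next tab stop
            ' ' => indent += 1,
            _ => break, // indentation ends at the first non-whitespace char
        }
    }
    indent
}

fn main() {
    assert_eq!(expand_indent("    x"), 4);
    assert_eq!(expand_indent("\tx"), 8);
    assert_eq!(expand_indent("  \tx"), 8); // two spaces, then tab -> stop 8
    assert_eq!(expand_indent("\t\tx"), 16);
    println!("tab stops expand correctly");
}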

View File

@@ -4,14 +4,12 @@ use log::warn;
use nohash_hasher::IntMap;
use rustpython_parser::ast::Location;
use ruff_diagnostics::{Diagnostic, Fix};
use ruff_python_ast::newlines::StrExt;
use ruff_python_ast::types::Range;
use crate::ast::types::Range;
use crate::codes::NoqaCode;
use crate::fix::Fix;
use crate::noqa;
use crate::noqa::{extract_file_exemption, Directive, Exemption};
use crate::registry::{AsRule, Rule};
use crate::registry::{Diagnostic, DiagnosticKind, Rule};
use crate::rule_redirects::get_redirect_target;
use crate::rules::ruff::rules::{UnusedCodes, UnusedNOQA};
use crate::settings::{flags, Settings};
@@ -23,8 +21,8 @@ pub fn check_noqa(
noqa_line_for: &IntMap<usize, usize>,
settings: &Settings,
autofix: flags::Autofix,
) -> Vec<usize> {
let enforce_noqa = settings.rules.enabled(Rule::UnusedNOQA);
) {
let enforce_noqa = settings.rules.enabled(&Rule::UnusedNOQA);
// Whether the file is exempted from all checks.
let mut file_exempted = false;
@@ -39,7 +37,7 @@ pub fn check_noqa(
// Indices of diagnostics that were ignored by a `noqa` directive.
let mut ignored_diagnostics = vec![];
let lines: Vec<&str> = contents.universal_newlines().collect();
let lines: Vec<&str> = contents.lines().collect();
for lineno in commented_lines {
match extract_file_exemption(lines[lineno - 1]) {
Exemption::All => {
@@ -67,7 +65,7 @@ pub fn check_noqa(
// Remove any ignored diagnostics.
for (index, diagnostic) in diagnostics.iter().enumerate() {
if matches!(diagnostic.kind.rule(), Rule::BlanketNOQA) {
if matches!(diagnostic.kind, DiagnosticKind::BlanketNOQA(..)) {
continue;
}
@@ -98,7 +96,7 @@ pub fn check_noqa(
ignored_diagnostics.push(index);
continue;
}
(Directive::Codes(.., codes, _), matches) => {
(Directive::Codes(.., codes), matches) => {
if noqa::includes(diagnostic.kind.rule(), codes) {
matches.push(diagnostic.kind.rule().noqa_code());
ignored_diagnostics.push(index);
@@ -125,7 +123,7 @@ pub fn check_noqa(
ignored_diagnostics.push(index);
continue;
}
(Directive::Codes(.., codes, _), matches) => {
(Directive::Codes(.., codes), matches) => {
if noqa::includes(diagnostic.kind.rule(), codes) {
matches.push(diagnostic.kind.rule().noqa_code());
ignored_diagnostics.push(index);
@@ -141,7 +139,7 @@ pub fn check_noqa(
if enforce_noqa {
for (row, (directive, matches)) in noqa_directives {
match directive {
Directive::All(leading_spaces, start_byte, end_byte, trailing_spaces) => {
Directive::All(spaces, start_byte, end_byte) => {
if matches.is_empty() {
let start = lines[row][..start_byte].chars().count();
let end = start + lines[row][start_byte..end_byte].chars().count();
@@ -151,27 +149,15 @@ pub fn check_noqa(
Range::new(Location::new(row + 1, start), Location::new(row + 1, end)),
);
if autofix.into() && settings.rules.should_fix(diagnostic.kind.rule()) {
if start - leading_spaces == 0 && end == lines[row].chars().count() {
diagnostic.amend(Fix::deletion(
Location::new(row + 1, 0),
Location::new(row + 2, 0),
));
} else if end == lines[row].chars().count() {
diagnostic.amend(Fix::deletion(
Location::new(row + 1, start - leading_spaces),
Location::new(row + 1, end + trailing_spaces),
));
} else {
diagnostic.amend(Fix::deletion(
Location::new(row + 1, start),
Location::new(row + 1, end + trailing_spaces),
));
}
diagnostic.amend(Fix::deletion(
Location::new(row + 1, start - spaces),
Location::new(row + 1, lines[row].chars().count()),
));
}
diagnostics.push(diagnostic);
}
}
Directive::Codes(leading_spaces, start_byte, end_byte, codes, trailing_spaces) => {
Directive::Codes(spaces, start_byte, end_byte, codes) => {
let mut disabled_codes = vec![];
let mut unknown_codes = vec![];
let mut unmatched_codes = vec![];
@@ -188,7 +174,7 @@ pub fn check_noqa(
valid_codes.push(code);
} else {
if let Ok(rule) = Rule::from_code(code) {
if settings.rules.enabled(rule) {
if settings.rules.enabled(&rule) {
unmatched_codes.push(code);
} else {
disabled_codes.push(code);
@@ -231,28 +217,15 @@ pub fn check_noqa(
);
if autofix.into() && settings.rules.should_fix(diagnostic.kind.rule()) {
if valid_codes.is_empty() {
if start - leading_spaces == 0 && end == lines[row].chars().count()
{
diagnostic.amend(Fix::deletion(
Location::new(row + 1, 0),
Location::new(row + 2, 0),
));
} else if end == lines[row].chars().count() {
diagnostic.amend(Fix::deletion(
Location::new(row + 1, start - leading_spaces),
Location::new(row + 1, end + trailing_spaces),
));
} else {
diagnostic.amend(Fix::deletion(
Location::new(row + 1, start),
Location::new(row + 1, end + trailing_spaces),
));
}
diagnostic.amend(Fix::deletion(
Location::new(row + 1, start - spaces),
Location::new(row + 1, lines[row].chars().count()),
));
} else {
diagnostic.amend(Fix::replacement(
format!("# noqa: {}", valid_codes.join(", ")),
Location::new(row + 1, start),
Location::new(row + 1, end),
Location::new(row + 1, lines[row].chars().count()),
));
}
}
@@ -265,5 +238,7 @@ pub fn check_noqa(
}
ignored_diagnostics.sort_unstable();
ignored_diagnostics
for index in ignored_diagnostics.iter().rev() {
diagnostics.swap_remove(*index);
}
}
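The three-way branch introduced above decides how much text to delete around an unused `# noqa` directive: the whole line when the directive (plus its `leading_spaces`) is all there is, the directive and its leading padding when it ends the line, and otherwise just the directive plus `trailing_spaces`, leaving preceding code intact. The same decision as a pure function over column indices (the `Deletion` type is hypothetical, for illustration only):

/// Illustrative result type: what to remove for an unused `# noqa`.
#[derive(Debug, PartialEq)]
enum Deletion {
    WholeLine,
    Columns(usize, usize), // [start, end) within the line
}

/// Mirror of the branch above. `start`/`end` delimit the directive itself;
/// `leading`/`trailing` count the spaces around it; `line_len` is the full
/// line width in characters.
fn noqa_deletion(start: usize, end: usize, leading: usize, trailing: usize, line_len: usize) -> Deletion {
    if start - leading == 0 && end == line_len {
        Deletion::WholeLine // the directive is the only thing on the line
    } else if end == line_len {
        Deletion::Columns(start - leading, end + trailing) // eat the padding too
    } else {
        Deletion::Columns(start, end + trailing) // keep the code before the comment
    }
}

fn main() {
    // "# noqa" alone on the line: drop the whole line.
    assert_eq!(noqa_deletion(0, 6, 0, 0, 6), Deletion::WholeLine);
    // "x = 1  # noqa": strip "  # noqa" back to the code.
    assert_eq!(noqa_deletion(7, 13, 2, 0, 13), Deletion::Columns(5, 13));
    println!("deletion ranges match the three branches");
}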

View File

@@ -2,11 +2,7 @@
use std::path::Path;
use ruff_diagnostics::Diagnostic;
use ruff_python_ast::newlines::StrExt;
use ruff_python_ast::source_code::{Locator, Stylist};
use crate::registry::Rule;
use crate::registry::{Diagnostic, Rule};
use crate::rules::flake8_executable::helpers::{extract_shebang, ShebangDirective};
use crate::rules::flake8_executable::rules::{
shebang_missing, shebang_newline, shebang_not_executable, shebang_python, shebang_whitespace,
@@ -19,11 +15,12 @@ use crate::rules::pygrep_hooks::rules::{blanket_noqa, blanket_type_ignore};
use crate::rules::pylint;
use crate::rules::pyupgrade::rules::unnecessary_coding_comment;
use crate::settings::{flags, Settings};
use crate::source_code::Stylist;
pub fn check_physical_lines(
path: &Path,
locator: &Locator,
stylist: &Stylist,
contents: &str,
commented_lines: &[usize],
doc_lines: &[usize],
settings: &Settings,
@@ -32,32 +29,32 @@ pub fn check_physical_lines(
let mut diagnostics: Vec<Diagnostic> = vec![];
let mut has_any_shebang = false;
let enforce_blanket_noqa = settings.rules.enabled(Rule::BlanketNOQA);
let enforce_shebang_not_executable = settings.rules.enabled(Rule::ShebangNotExecutable);
let enforce_shebang_missing = settings.rules.enabled(Rule::ShebangMissingExecutableFile);
let enforce_shebang_whitespace = settings.rules.enabled(Rule::ShebangWhitespace);
let enforce_shebang_newline = settings.rules.enabled(Rule::ShebangNewline);
let enforce_shebang_python = settings.rules.enabled(Rule::ShebangPython);
let enforce_blanket_type_ignore = settings.rules.enabled(Rule::BlanketTypeIgnore);
let enforce_doc_line_too_long = settings.rules.enabled(Rule::DocLineTooLong);
let enforce_line_too_long = settings.rules.enabled(Rule::LineTooLong);
let enforce_no_newline_at_end_of_file = settings.rules.enabled(Rule::NoNewLineAtEndOfFile);
let enforce_unnecessary_coding_comment = settings.rules.enabled(Rule::UTF8EncodingDeclaration);
let enforce_mixed_spaces_and_tabs = settings.rules.enabled(Rule::MixedSpacesAndTabs);
let enforce_bidirectional_unicode = settings.rules.enabled(Rule::BidirectionalUnicode);
let enforce_trailing_whitespace = settings.rules.enabled(Rule::TrailingWhitespace);
let enforce_blanket_noqa = settings.rules.enabled(&Rule::BlanketNOQA);
let enforce_shebang_not_executable = settings.rules.enabled(&Rule::ShebangNotExecutable);
let enforce_shebang_missing = settings.rules.enabled(&Rule::ShebangMissingExecutableFile);
let enforce_shebang_whitespace = settings.rules.enabled(&Rule::ShebangWhitespace);
let enforce_shebang_newline = settings.rules.enabled(&Rule::ShebangNewline);
let enforce_shebang_python = settings.rules.enabled(&Rule::ShebangPython);
let enforce_blanket_type_ignore = settings.rules.enabled(&Rule::BlanketTypeIgnore);
let enforce_doc_line_too_long = settings.rules.enabled(&Rule::DocLineTooLong);
let enforce_line_too_long = settings.rules.enabled(&Rule::LineTooLong);
let enforce_no_newline_at_end_of_file = settings.rules.enabled(&Rule::NoNewLineAtEndOfFile);
let enforce_unnecessary_coding_comment = settings.rules.enabled(&Rule::UTF8EncodingDeclaration);
let enforce_mixed_spaces_and_tabs = settings.rules.enabled(&Rule::MixedSpacesAndTabs);
let enforce_bidirectional_unicode = settings.rules.enabled(&Rule::BidirectionalUnicode);
let enforce_trailing_whitespace = settings.rules.enabled(&Rule::TrailingWhitespace);
let enforce_blank_line_contains_whitespace =
settings.rules.enabled(Rule::BlankLineContainsWhitespace);
let enforce_indentation_contains_tabs = settings.rules.enabled(Rule::IndentationContainsTabs);
settings.rules.enabled(&Rule::BlankLineContainsWhitespace);
let enforce_indentation_contains_tabs = settings.rules.enabled(&Rule::IndentationContainsTabs);
let fix_unnecessary_coding_comment =
autofix.into() && settings.rules.should_fix(Rule::UTF8EncodingDeclaration);
autofix.into() && settings.rules.should_fix(&Rule::UTF8EncodingDeclaration);
let fix_shebang_whitespace =
autofix.into() && settings.rules.should_fix(Rule::ShebangWhitespace);
autofix.into() && settings.rules.should_fix(&Rule::ShebangWhitespace);
let mut commented_lines_iter = commented_lines.iter().peekable();
let mut doc_lines_iter = doc_lines.iter().peekable();
for (index, line) in locator.contents().universal_newlines().enumerate() {
for (index, line) in contents.lines().enumerate() {
while commented_lines_iter
.next_if(|lineno| &(index + 1) == *lineno)
.is_some()
@@ -163,9 +160,9 @@ pub fn check_physical_lines(
if enforce_no_newline_at_end_of_file {
if let Some(diagnostic) = no_newline_at_end_of_file(
locator,
stylist,
autofix.into() && settings.rules.should_fix(Rule::NoNewLineAtEndOfFile),
contents,
autofix.into() && settings.rules.should_fix(&Rule::NoNewLineAtEndOfFile),
) {
diagnostics.push(diagnostic);
}
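One side of this file hoists every `settings.rules.enabled(...)` lookup into a local `enforce_*` flag before the per-line loop, so the rule set is consulted once per file rather than once per physical line. A minimal std-only sketch of the same hoisting, with a stand-in `Rule` enum and a `HashSet` in place of the real rule table:

```rust
use std::collections::HashSet;

// Hypothetical stand-ins: the real code checks `settings.rules.enabled(...)`.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
enum Rule {
    LineTooLong,
    TrailingWhitespace,
}

fn main() {
    let enabled: HashSet<Rule> = [Rule::LineTooLong].into_iter().collect();

    // Hoist the lookups out of the hot loop, as the checker above does.
    let enforce_line_too_long = enabled.contains(&Rule::LineTooLong);
    let enforce_trailing_whitespace = enabled.contains(&Rule::TrailingWhitespace);

    let contents = "short\nthis line is definitely too long";
    for (index, line) in contents.lines().enumerate() {
        if enforce_line_too_long && line.len() > 10 {
            println!("line {}: too long", index + 1);
        }
        if enforce_trailing_whitespace && line.ends_with(' ') {
            println!("line {}: trailing whitespace", index + 1);
        }
    }
}
```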
@@ -182,14 +179,13 @@ pub fn check_physical_lines(
#[cfg(test)]
mod tests {
use std::path::Path;
use ruff_python_ast::source_code::{Locator, Stylist};
use super::check_physical_lines;
use crate::registry::Rule;
use crate::settings::{flags, Settings};
use super::check_physical_lines;
use crate::source_code::{Locator, Stylist};
#[test]
fn e501_non_ascii_char() {
@@ -200,8 +196,8 @@ mod tests {
let check_with_max_line_length = |line_length: usize| {
check_physical_lines(
Path::new("foo.py"),
&locator,
&stylist,
line,
&[],
&[],
&Settings {

View File

@@ -4,62 +4,57 @@ use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use crate::lex::docstring_detection::StateMachine;
use crate::registry::{AsRule, Rule};
use crate::registry::{Diagnostic, Rule};
use crate::rules::ruff::rules::Context;
use crate::rules::{
eradicate, flake8_commas, flake8_implicit_str_concat, flake8_pyi, flake8_quotes, pycodestyle,
pylint, pyupgrade, ruff,
eradicate, flake8_commas, flake8_implicit_str_concat, flake8_quotes, pycodestyle, pyupgrade,
ruff,
};
use crate::settings::{flags, Settings};
use ruff_diagnostics::Diagnostic;
use ruff_python_ast::source_code::Locator;
use crate::source_code::Locator;
pub fn check_tokens(
locator: &Locator,
tokens: &[LexResult],
settings: &Settings,
autofix: flags::Autofix,
is_stub: bool,
) -> Vec<Diagnostic> {
let mut diagnostics: Vec<Diagnostic> = vec![];
let enforce_ambiguous_unicode_character = settings.rules.any_enabled(&[
Rule::AmbiguousUnicodeCharacterString,
Rule::AmbiguousUnicodeCharacterDocstring,
Rule::AmbiguousUnicodeCharacterComment,
]);
let enforce_invalid_string_character = settings.rules.any_enabled(&[
Rule::InvalidCharacterBackspace,
Rule::InvalidCharacterSub,
Rule::InvalidCharacterEsc,
Rule::InvalidCharacterNul,
Rule::InvalidCharacterZeroWidthSpace,
]);
let enforce_quotes = settings.rules.any_enabled(&[
Rule::BadQuotesInlineString,
Rule::BadQuotesMultilineString,
Rule::BadQuotesDocstring,
Rule::AvoidableEscapedQuote,
]);
let enforce_commented_out_code = settings.rules.enabled(Rule::CommentedOutCode);
let enforce_compound_statements = settings.rules.any_enabled(&[
Rule::MultipleStatementsOnOneLineColon,
Rule::MultipleStatementsOnOneLineSemicolon,
Rule::UselessSemicolon,
]);
let enforce_invalid_escape_sequence = settings.rules.enabled(Rule::InvalidEscapeSequence);
let enforce_implicit_string_concatenation = settings.rules.any_enabled(&[
Rule::SingleLineImplicitStringConcatenation,
Rule::MultiLineImplicitStringConcatenation,
]);
let enforce_trailing_comma = settings.rules.any_enabled(&[
Rule::TrailingCommaMissing,
Rule::TrailingCommaOnBareTupleProhibited,
Rule::TrailingCommaProhibited,
]);
let enforce_extraneous_parenthesis = settings.rules.enabled(Rule::ExtraneousParentheses);
let enforce_type_comment_in_stub = settings.rules.enabled(Rule::TypeCommentInStub);
let enforce_ambiguous_unicode_character = settings
.rules
.enabled(&Rule::AmbiguousUnicodeCharacterString)
|| settings
.rules
.enabled(&Rule::AmbiguousUnicodeCharacterDocstring)
|| settings
.rules
.enabled(&Rule::AmbiguousUnicodeCharacterComment);
let enforce_quotes = settings.rules.enabled(&Rule::BadQuotesInlineString)
|| settings.rules.enabled(&Rule::BadQuotesMultilineString)
|| settings.rules.enabled(&Rule::BadQuotesDocstring)
|| settings.rules.enabled(&Rule::AvoidableEscapedQuote);
let enforce_commented_out_code = settings.rules.enabled(&Rule::CommentedOutCode);
let enforce_compound_statements = settings
.rules
.enabled(&Rule::MultipleStatementsOnOneLineColon)
|| settings
.rules
.enabled(&Rule::MultipleStatementsOnOneLineSemicolon)
|| settings.rules.enabled(&Rule::UselessSemicolon);
let enforce_invalid_escape_sequence = settings.rules.enabled(&Rule::InvalidEscapeSequence);
let enforce_implicit_string_concatenation = settings
.rules
.enabled(&Rule::SingleLineImplicitStringConcatenation)
|| settings
.rules
.enabled(&Rule::MultiLineImplicitStringConcatenation);
let enforce_trailing_comma = settings.rules.enabled(&Rule::TrailingCommaMissing)
|| settings
.rules
.enabled(&Rule::TrailingCommaOnBareTupleProhibited)
|| settings.rules.enabled(&Rule::TrailingCommaProhibited);
let enforce_extraneous_parenthesis = settings.rules.enabled(&Rule::ExtraneousParentheses);
// RUF001, RUF002, RUF003
if enforce_ambiguous_unicode_character {
@@ -113,23 +108,11 @@ pub fn check_tokens(
locator,
*start,
*end,
autofix.into() && settings.rules.should_fix(Rule::InvalidEscapeSequence),
autofix.into() && settings.rules.should_fix(&Rule::InvalidEscapeSequence),
));
}
}
}
// PLE2510, PLE2512, PLE2513
if enforce_invalid_string_character {
for (start, tok, end) in tokens.iter().flatten() {
if matches!(tok, Tok::String { .. }) {
diagnostics.extend(
pylint::rules::invalid_string_characters(locator, *start, *end, autofix.into())
.into_iter()
.filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())),
);
}
}
}
// E701, E702, E703
if enforce_compound_statements {
@@ -178,10 +161,5 @@ pub fn check_tokens(
);
}
// PYI033
if enforce_type_comment_in_stub && is_stub {
diagnostics.extend(flake8_pyi::rules::type_comment_in_stub(tokens));
}
diagnostics
}
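The `any_enabled(&[...])` form on one side of this hunk collapses chains of `enabled(...) || enabled(...)` into a single slice lookup. A sketch of such a helper, assuming a simple set-backed `RuleSet`:

```rust
use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
enum Rule {
    BadQuotesInlineString,
    BadQuotesMultilineString,
    AvoidableEscapedQuote,
}

struct RuleSet(HashSet<Rule>);

impl RuleSet {
    fn enabled(&self, rule: Rule) -> bool {
        self.0.contains(&rule)
    }

    /// Equivalent to chaining `enabled(a) || enabled(b) || ...`.
    fn any_enabled(&self, rules: &[Rule]) -> bool {
        rules.iter().any(|rule| self.enabled(*rule))
    }
}

fn main() {
    let rules = RuleSet([Rule::AvoidableEscapedQuote].into_iter().collect());
    let enforce_quotes = rules.any_enabled(&[
        Rule::BadQuotesInlineString,
        Rule::BadQuotesMultilineString,
        Rule::AvoidableEscapedQuote,
    ]);
    assert!(enforce_quotes);
}
```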

View File

@@ -1,23 +1,5 @@
use crate::registry::{Linter, Rule};
#[derive(PartialEq, Eq, PartialOrd, Ord)]
pub struct NoqaCode(&'static str, &'static str);
impl std::fmt::Display for NoqaCode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
write!(f, "{}{}", self.0, self.1)
}
}
impl PartialEq<&str> for NoqaCode {
fn eq(&self, other: &&str) -> bool {
match other.strip_prefix(self.0) {
Some(suffix) => suffix == self.1,
None => false,
}
}
}
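`NoqaCode` above stores a rule code as a prefix/number pair so that comparing it against a user-supplied string needs no allocation: strip the prefix, then compare the remainder. The same pattern as a self-contained demo:

```rust
#[derive(PartialEq, Eq, PartialOrd, Ord)]
pub struct NoqaCode(&'static str, &'static str);

impl PartialEq<&str> for NoqaCode {
    fn eq(&self, other: &&str) -> bool {
        // "E501" matches NoqaCode("E", "501"): strip "E", compare "501".
        match other.strip_prefix(self.0) {
            Some(suffix) => suffix == self.1,
            None => false,
        }
    }
}

fn main() {
    let code = NoqaCode("E", "501");
    assert!(code == "E501");
    assert!(!(code == "W501"));
    assert!(!(code == "E50"));
}
```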
#[ruff_macros::map_codes]
pub fn code_to_rule(linter: Linter, code: &str) -> Option<Rule> {
#[allow(clippy::enum_glob_use)]
@@ -47,8 +29,6 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<Rule> {
#[cfg(feature = "logical_lines")]
(Pycodestyle, "E203") => Rule::WhitespaceBeforePunctuation,
#[cfg(feature = "logical_lines")]
(Pycodestyle, "E211") => Rule::WhitespaceBeforeParameters,
#[cfg(feature = "logical_lines")]
(Pycodestyle, "E221") => Rule::MultipleSpacesBeforeOperator,
#[cfg(feature = "logical_lines")]
(Pycodestyle, "E222") => Rule::MultipleSpacesAfterOperator,
@@ -57,16 +37,6 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<Rule> {
#[cfg(feature = "logical_lines")]
(Pycodestyle, "E224") => Rule::TabAfterOperator,
#[cfg(feature = "logical_lines")]
(Pycodestyle, "E225") => Rule::MissingWhitespaceAroundOperator,
#[cfg(feature = "logical_lines")]
(Pycodestyle, "E226") => Rule::MissingWhitespaceAroundArithmeticOperator,
#[cfg(feature = "logical_lines")]
(Pycodestyle, "E227") => Rule::MissingWhitespaceAroundBitwiseOrShiftOperator,
#[cfg(feature = "logical_lines")]
(Pycodestyle, "E228") => Rule::MissingWhitespaceAroundModuloOperator,
#[cfg(feature = "logical_lines")]
(Pycodestyle, "E231") => Rule::MissingWhitespace,
#[cfg(feature = "logical_lines")]
(Pycodestyle, "E251") => Rule::UnexpectedSpacesAroundKeywordParameterEquals,
#[cfg(feature = "logical_lines")]
(Pycodestyle, "E252") => Rule::MissingWhitespaceAroundParameterEquals,
@@ -161,44 +131,34 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<Rule> {
(Pyflakes, "901") => Rule::RaiseNotImplemented,
// pylint
(Pylint, "C0414") => Rule::UselessImportAlias,
(Pylint, "C1901") => Rule::CompareToEmptyString,
(Pylint, "C3002") => Rule::UnnecessaryDirectLambdaCall,
(Pylint, "E0100") => Rule::YieldInInit,
(Pylint, "E0101") => Rule::ReturnInInit,
(Pylint, "E0116") => Rule::ContinueInFinally,
(Pylint, "E0117") => Rule::NonlocalWithoutBinding,
(Pylint, "E0118") => Rule::UsedPriorGlobalDeclaration,
(Pylint, "E0604") => Rule::InvalidAllObject,
(Pylint, "E0605") => Rule::InvalidAllFormat,
(Pylint, "E1142") => Rule::AwaitOutsideAsync,
(Pylint, "E1205") => Rule::LoggingTooManyArgs,
(Pylint, "E1206") => Rule::LoggingTooFewArgs,
(Pylint, "E1307") => Rule::BadStringFormatType,
(Pylint, "E1310") => Rule::BadStrStripCall,
(Pylint, "E1507") => Rule::InvalidEnvvarValue,
(Pylint, "E2502") => Rule::BidirectionalUnicode,
(Pylint, "E2510") => Rule::InvalidCharacterBackspace,
(Pylint, "E2512") => Rule::InvalidCharacterSub,
(Pylint, "E2513") => Rule::InvalidCharacterEsc,
(Pylint, "E2514") => Rule::InvalidCharacterNul,
(Pylint, "E2515") => Rule::InvalidCharacterZeroWidthSpace,
(Pylint, "R0133") => Rule::ComparisonOfConstant,
(Pylint, "E1310") => Rule::BadStrStripCall,
(Pylint, "C0414") => Rule::UselessImportAlias,
(Pylint, "C3002") => Rule::UnnecessaryDirectLambdaCall,
(Pylint, "E0117") => Rule::NonlocalWithoutBinding,
(Pylint, "E0118") => Rule::UsedPriorGlobalDeclaration,
(Pylint, "E1142") => Rule::AwaitOutsideAsync,
(Pylint, "R0206") => Rule::PropertyWithParameters,
(Pylint, "R0402") => Rule::ConsiderUsingFromImport,
(Pylint, "R0911") => Rule::TooManyReturnStatements,
(Pylint, "R0912") => Rule::TooManyBranches,
(Pylint, "R0913") => Rule::TooManyArguments,
(Pylint, "R0915") => Rule::TooManyStatements,
(Pylint, "R0133") => Rule::ComparisonOfConstant,
(Pylint, "R1701") => Rule::ConsiderMergingIsinstance,
(Pylint, "R1711") => Rule::UselessReturn,
(Pylint, "R1722") => Rule::ConsiderUsingSysExit,
(Pylint, "R2004") => Rule::MagicValueComparison,
(Pylint, "R5501") => Rule::CollapsibleElseIf,
(Pylint, "R2004") => Rule::MagicValueComparison,
(Pylint, "W0120") => Rule::UselessElseOnLoop,
(Pylint, "W0602") => Rule::GlobalVariableNotAssigned,
(Pylint, "W0603") => Rule::GlobalStatement,
(Pylint, "W1508") => Rule::InvalidEnvvarDefault,
(Pylint, "R0911") => Rule::TooManyReturnStatements,
(Pylint, "R0913") => Rule::TooManyArguments,
(Pylint, "R0912") => Rule::TooManyBranches,
(Pylint, "R0915") => Rule::TooManyStatements,
(Pylint, "W2901") => Rule::RedefinedLoopName,
// flake8-builtins
@@ -233,9 +193,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<Rule> {
(Flake8Bugbear, "025") => Rule::DuplicateTryBlockException,
(Flake8Bugbear, "026") => Rule::StarArgUnpackingAfterKeywordArg,
(Flake8Bugbear, "027") => Rule::EmptyMethodWithoutAbstractDecorator,
(Flake8Bugbear, "028") => Rule::NoExplicitStacklevel,
(Flake8Bugbear, "029") => Rule::ExceptWithEmptyTuple,
(Flake8Bugbear, "030") => Rule::ExceptWithNonExceptionClasses,
(Flake8Bugbear, "032") => Rule::UnintentionalTypeAnnotation,
(Flake8Bugbear, "904") => Rule::RaiseWithoutFromInsideExcept,
(Flake8Bugbear, "905") => Rule::ZipWithoutExplicitStrict,
@@ -383,7 +341,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<Rule> {
(Pyupgrade, "032") => Rule::FString,
(Pyupgrade, "033") => Rule::FunctoolsCache,
(Pyupgrade, "034") => Rule::ExtraneousParentheses,
(Pyupgrade, "035") => Rule::DeprecatedImport,
(Pyupgrade, "035") => Rule::ImportReplacements,
(Pyupgrade, "036") => Rule::OutdatedVersionBlock,
(Pyupgrade, "037") => Rule::QuotedAnnotation,
(Pyupgrade, "038") => Rule::IsinstanceWithTuple,
@@ -543,7 +501,6 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<Rule> {
(Flake8Pyi, "011") => Rule::TypedArgumentSimpleDefaults,
(Flake8Pyi, "014") => Rule::ArgumentSimpleDefaults,
(Flake8Pyi, "021") => Rule::DocstringInStub,
(Flake8Pyi, "033") => Rule::TypeCommentInStub,
// flake8-pytest-style
(Flake8PytestStyle, "001") => Rule::IncorrectFixtureParenthesesStyle,
@@ -668,7 +625,6 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<Rule> {
(Ruff, "003") => Rule::AmbiguousUnicodeCharacterComment,
(Ruff, "005") => Rule::UnpackInsteadOfConcatenatingToCollectionLiteral,
(Ruff, "006") => Rule::AsyncioDanglingTask,
(Ruff, "007") => Rule::PairwiseOverZipped,
(Ruff, "100") => Rule::UnusedNOQA,
// flake8-django

View File

@@ -107,21 +107,15 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
// omit a space after the colon. The remaining action comments are
// required to include the space, and must appear on their own lines.
let comment_text = comment_text.trim_end();
if matches!(comment_text, "# isort: split" | "# ruff: isort: split") {
if comment_text == "# isort: split" {
splits.push(start.row());
} else if matches!(
comment_text,
"# isort: skip_file"
| "# isort:skip_file"
| "# ruff: isort: skip_file"
| "# ruff: isort:skip_file"
) {
} else if comment_text == "# isort: skip_file" || comment_text == "# isort:skip_file" {
return IsortDirectives {
skip_file: true,
..IsortDirectives::default()
};
} else if off.is_some() {
if comment_text == "# isort: on" || comment_text == "# ruff: isort: on" {
if comment_text == "# isort: on" {
if let Some(start) = off {
for row in start.row() + 1..=end.row() {
exclusions.insert(row);
@@ -132,7 +126,7 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
} else {
if comment_text.contains("isort: skip") || comment_text.contains("isort:skip") {
exclusions.insert(start.row());
} else if comment_text == "# isort: off" || comment_text == "# ruff: isort: off" {
} else if comment_text == "# isort: off" {
off = Some(start);
}
}
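Both spellings of each isort action comment (plain and `# ruff:`-prefixed) appear in this hunk. A standalone sketch of the classification using the literals from the diff; the `IsortAction` enum is invented for the demo:

```rust
#[derive(Debug, PartialEq)]
enum IsortAction {
    Split,
    SkipFile,
    On,
    Off,
    None,
}

/// Classify an isort action comment the way the directive extractor does,
/// accepting both the plain and `# ruff:`-prefixed spellings via `matches!`.
fn classify(comment: &str) -> IsortAction {
    let comment = comment.trim_end();
    if matches!(comment, "# isort: split" | "# ruff: isort: split") {
        IsortAction::Split
    } else if matches!(
        comment,
        "# isort: skip_file"
            | "# isort:skip_file"
            | "# ruff: isort: skip_file"
            | "# ruff: isort:skip_file"
    ) {
        IsortAction::SkipFile
    } else if matches!(comment, "# isort: on" | "# ruff: isort: on") {
        IsortAction::On
    } else if matches!(comment, "# isort: off" | "# ruff: isort: off") {
        IsortAction::Off
    } else {
        IsortAction::None
    }
}

fn main() {
    assert_eq!(classify("# ruff: isort: skip_file"), IsortAction::SkipFile);
    assert_eq!(classify("# isort: off  "), IsortAction::Off);
    assert_eq!(classify("# just a comment"), IsortAction::None);
}
```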

View File

@@ -7,8 +7,8 @@ use rustpython_parser::ast::{Constant, ExprKind, Stmt, StmtKind, Suite};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use ruff_python_ast::visitor;
use ruff_python_ast::visitor::Visitor;
use crate::ast::visitor;
use crate::ast::visitor::Visitor;
/// Extract doc lines (standalone comments) from a token sequence.
pub fn doc_lines_from_tokens(lxr: &[LexResult]) -> DocLines {

View File

@@ -1,6 +1,3 @@
use ruff_python_ast::visibility::{
class_visibility, function_visibility, method_visibility, Modifier, Visibility, VisibleScope,
};
use rustpython_parser::ast::{Expr, Stmt};
#[derive(Debug, Clone)]
@@ -33,36 +30,3 @@ pub enum Documentable {
Class,
Function,
}
pub fn transition_scope(scope: &VisibleScope, stmt: &Stmt, kind: &Documentable) -> VisibleScope {
match kind {
Documentable::Function => VisibleScope {
modifier: Modifier::Function,
visibility: match scope {
VisibleScope {
modifier: Modifier::Module,
visibility: Visibility::Public,
} => function_visibility(stmt),
VisibleScope {
modifier: Modifier::Class,
visibility: Visibility::Public,
} => method_visibility(stmt),
_ => Visibility::Private,
},
},
Documentable::Class => VisibleScope {
modifier: Modifier::Class,
visibility: match scope {
VisibleScope {
modifier: Modifier::Module,
visibility: Visibility::Public,
} => class_visibility(stmt),
VisibleScope {
modifier: Modifier::Class,
visibility: Visibility::Public,
} => class_visibility(stmt),
_ => Visibility::Private,
},
},
}
}
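`transition_scope` above is a small state machine: a child scope is only as visible as its parent allows, and the modifier records whether we are inside a module, class, or function. A trimmed-down model, with a name-based `is_private_name` standing in for the AST visibility helpers (`function_visibility`, `method_visibility`, `class_visibility`):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Modifier {
    Module,
    Class,
    Function,
}

#[derive(Clone, Copy, Debug, PartialEq)]
enum Visibility {
    Public,
    Private,
}

#[derive(Clone, Copy, Debug, PartialEq)]
struct VisibleScope {
    modifier: Modifier,
    visibility: Visibility,
}

// Stand-in for the AST-based helpers: leading underscore means private.
fn is_private_name(name: &str) -> Visibility {
    if name.starts_with('_') {
        Visibility::Private
    } else {
        Visibility::Public
    }
}

fn transition_scope(scope: VisibleScope, name: &str, into: Modifier) -> VisibleScope {
    VisibleScope {
        modifier: into,
        // Anything nested inside a private scope stays private.
        visibility: match scope.visibility {
            Visibility::Public => is_private_name(name),
            Visibility::Private => Visibility::Private,
        },
    }
}

fn main() {
    let module = VisibleScope { modifier: Modifier::Module, visibility: Visibility::Public };
    let class = transition_scope(module, "Foo", Modifier::Class);
    assert_eq!(class.visibility, Visibility::Public);
    let helper = transition_scope(class, "_helper", Modifier::Function);
    assert_eq!(helper.visibility, Visibility::Private);
}
```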

View File

@@ -3,7 +3,7 @@
use rustpython_parser::ast::{Constant, Expr, ExprKind, Stmt, StmtKind};
use crate::docstrings::definition::{Definition, DefinitionKind, Documentable};
use ruff_python_ast::visibility::{Modifier, VisibleScope};
use crate::visibility::{Modifier, VisibleScope};
/// Extract a docstring from a function or class body.
pub fn docstring_from(suite: &[Stmt]) -> Option<&Expr> {

View File

@@ -1,36 +1,70 @@
//! Abstractions for Google-style docstrings.
use crate::docstrings::sections::SectionKind;
use once_cell::sync::Lazy;
use rustc_hash::FxHashSet;
pub(crate) static GOOGLE_SECTIONS: &[SectionKind] = &[
SectionKind::Attributes,
SectionKind::Examples,
SectionKind::Methods,
SectionKind::Notes,
SectionKind::Raises,
SectionKind::References,
SectionKind::Returns,
SectionKind::SeeAlso,
SectionKind::Yields,
// Google-only
SectionKind::Args,
SectionKind::Arguments,
SectionKind::Attention,
SectionKind::Caution,
SectionKind::Danger,
SectionKind::Error,
SectionKind::Example,
SectionKind::Hint,
SectionKind::Important,
SectionKind::KeywordArgs,
SectionKind::KeywordArguments,
SectionKind::Note,
SectionKind::Notes,
SectionKind::Return,
SectionKind::Tip,
SectionKind::Todo,
SectionKind::Warning,
SectionKind::Warnings,
SectionKind::Warns,
SectionKind::Yield,
];
pub(crate) static GOOGLE_SECTION_NAMES: Lazy<FxHashSet<&'static str>> = Lazy::new(|| {
FxHashSet::from_iter([
"Args",
"Arguments",
"Attention",
"Attributes",
"Caution",
"Danger",
"Error",
"Example",
"Examples",
"Hint",
"Important",
"Keyword Args",
"Keyword Arguments",
"Methods",
"Note",
"Notes",
"Return",
"Returns",
"Raises",
"References",
"See Also",
"Tip",
"Todo",
"Warning",
"Warnings",
"Warns",
"Yield",
"Yields",
])
});
pub(crate) static LOWERCASE_GOOGLE_SECTION_NAMES: Lazy<FxHashSet<&'static str>> = Lazy::new(|| {
FxHashSet::from_iter([
"args",
"arguments",
"attention",
"attributes",
"caution",
"danger",
"error",
"example",
"examples",
"hint",
"important",
"keyword args",
"keyword arguments",
"methods",
"note",
"notes",
"return",
"returns",
"raises",
"references",
"see also",
"tip",
"todo",
"warning",
"warnings",
"warns",
"yield",
"yields",
])
});
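One side of this file keeps a second, lowercased copy of the section-name set so header detection can be case-insensitive without lowercasing every canonical name at lookup time. A std-only sketch with a plain `HashSet` built in `main` in place of the `once_cell::sync::Lazy` statics, using a subset of the names above:

```rust
use std::collections::HashSet;

fn main() {
    let lowercase_google_sections: HashSet<&str> =
        ["args", "returns", "raises", "yields"].into_iter().collect();

    for header in ["Args", "ARGS", "Fields"] {
        // Lowercase the candidate once; the set is already lowercased.
        let known = lowercase_google_sections.contains(header.to_lowercase().as_str());
        println!("{header}: {}", if known { "section" } else { "not a section" });
    }
}
```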

View File

@@ -1,20 +1,40 @@
//! Abstractions for NumPy-style docstrings.
use crate::docstrings::sections::SectionKind;
use once_cell::sync::Lazy;
use rustc_hash::FxHashSet;
pub(crate) static NUMPY_SECTIONS: &[SectionKind] = &[
SectionKind::Attributes,
SectionKind::Examples,
SectionKind::Methods,
SectionKind::Notes,
SectionKind::Raises,
SectionKind::References,
SectionKind::Returns,
SectionKind::SeeAlso,
SectionKind::Yields,
// NumPy-only
SectionKind::ExtendedSummary,
SectionKind::OtherParameters,
SectionKind::Parameters,
SectionKind::ShortSummary,
];
pub(crate) static LOWERCASE_NUMPY_SECTION_NAMES: Lazy<FxHashSet<&'static str>> = Lazy::new(|| {
FxHashSet::from_iter([
"short summary",
"extended summary",
"parameters",
"returns",
"yields",
"other parameters",
"raises",
"see also",
"notes",
"references",
"examples",
"attributes",
"methods",
])
});
pub(crate) static NUMPY_SECTION_NAMES: Lazy<FxHashSet<&'static str>> = Lazy::new(|| {
FxHashSet::from_iter([
"Short Summary",
"Extended Summary",
"Parameters",
"Returns",
"Yields",
"Other Parameters",
"Raises",
"See Also",
"Notes",
"References",
"Examples",
"Attributes",
"Methods",
])
});

View File

@@ -1,126 +1,8 @@
use strum_macros::EnumIter;
use crate::ast::whitespace;
use crate::docstrings::styles::SectionStyle;
use ruff_python_ast::whitespace;
#[derive(EnumIter, PartialEq, Eq, Debug, Clone, Copy)]
pub enum SectionKind {
Args,
Arguments,
Attention,
Attributes,
Caution,
Danger,
Error,
Example,
Examples,
ExtendedSummary,
Hint,
Important,
KeywordArgs,
KeywordArguments,
Methods,
Note,
Notes,
OtherParameters,
Parameters,
Raises,
References,
Return,
Returns,
SeeAlso,
ShortSummary,
Tip,
Todo,
Warning,
Warnings,
Warns,
Yield,
Yields,
}
impl SectionKind {
pub fn from_str(s: &str) -> Option<Self> {
match s.to_ascii_lowercase().as_str() {
"args" => Some(Self::Args),
"arguments" => Some(Self::Arguments),
"attention" => Some(Self::Attention),
"attributes" => Some(Self::Attributes),
"caution" => Some(Self::Caution),
"danger" => Some(Self::Danger),
"error" => Some(Self::Error),
"example" => Some(Self::Example),
"examples" => Some(Self::Examples),
"extended summary" => Some(Self::ExtendedSummary),
"hint" => Some(Self::Hint),
"important" => Some(Self::Important),
"keyword args" => Some(Self::KeywordArgs),
"keyword arguments" => Some(Self::KeywordArguments),
"methods" => Some(Self::Methods),
"note" => Some(Self::Note),
"notes" => Some(Self::Notes),
"other parameters" => Some(Self::OtherParameters),
"parameters" => Some(Self::Parameters),
"raises" => Some(Self::Raises),
"references" => Some(Self::References),
"return" => Some(Self::Return),
"returns" => Some(Self::Returns),
"see also" => Some(Self::SeeAlso),
"short summary" => Some(Self::ShortSummary),
"tip" => Some(Self::Tip),
"todo" => Some(Self::Todo),
"warning" => Some(Self::Warning),
"warnings" => Some(Self::Warnings),
"warns" => Some(Self::Warns),
"yield" => Some(Self::Yield),
"yields" => Some(Self::Yields),
_ => None,
}
}
pub fn as_str(self) -> &'static str {
match self {
Self::Args => "Args",
Self::Arguments => "Arguments",
Self::Attention => "Attention",
Self::Attributes => "Attributes",
Self::Caution => "Caution",
Self::Danger => "Danger",
Self::Error => "Error",
Self::Example => "Example",
Self::Examples => "Examples",
Self::ExtendedSummary => "Extended Summary",
Self::Hint => "Hint",
Self::Important => "Important",
Self::KeywordArgs => "Keyword Args",
Self::KeywordArguments => "Keyword Arguments",
Self::Methods => "Methods",
Self::Note => "Note",
Self::Notes => "Notes",
Self::OtherParameters => "Other Parameters",
Self::Parameters => "Parameters",
Self::Raises => "Raises",
Self::References => "References",
Self::Return => "Return",
Self::Returns => "Returns",
Self::SeeAlso => "See Also",
Self::ShortSummary => "Short Summary",
Self::Tip => "Tip",
Self::Todo => "Todo",
Self::Warning => "Warning",
Self::Warnings => "Warnings",
Self::Warns => "Warns",
Self::Yield => "Yield",
Self::Yields => "Yields",
}
}
}
#[derive(Debug)]
pub(crate) struct SectionContext<'a> {
/// The "kind" of the section, e.g. "SectionKind::Args" or "SectionKind::Returns".
pub(crate) kind: SectionKind,
/// The name of the section as it appears in the docstring, e.g. "Args" or "Returns".
pub(crate) section_name: &'a str,
pub(crate) previous_line: &'a str,
pub(crate) line: &'a str,
@@ -129,13 +11,10 @@ pub(crate) struct SectionContext<'a> {
pub(crate) original_index: usize,
}
fn suspected_as_section(line: &str, style: &SectionStyle) -> Option<SectionKind> {
if let Some(kind) = SectionKind::from_str(whitespace::leading_words(line)) {
if style.sections().contains(&kind) {
return Some(kind);
}
}
None
fn suspected_as_section(line: &str, style: &SectionStyle) -> bool {
style
.lowercase_section_names()
.contains(&whitespace::leading_words(line).to_lowercase().as_str())
}
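The two versions of `suspected_as_section` above differ in mechanism: one maps the line's leading words to a `SectionKind` and filters by the active style, the other checks membership in a lowercase name set. A compact sketch of the kind-based variant, with a simplified stand-in for `whitespace::leading_words` and a reduced enum:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum SectionKind {
    Args,
    Returns,
    Parameters,
}

impl SectionKind {
    fn from_str(s: &str) -> Option<Self> {
        match s.to_ascii_lowercase().as_str() {
            "args" => Some(Self::Args),
            "returns" => Some(Self::Returns),
            "parameters" => Some(Self::Parameters),
            _ => None,
        }
    }
}

fn suspected_as_section(line: &str, allowed: &[SectionKind]) -> Option<SectionKind> {
    // Stand-in for `whitespace::leading_words`: take everything up to the
    // first character that is neither alphanumeric nor a space.
    let leading_words = line
        .trim_start()
        .split(|c: char| !c.is_alphanumeric() && c != ' ')
        .next()
        .unwrap_or("")
        .trim();
    // A header only counts if the active docstring style allows that kind.
    SectionKind::from_str(leading_words).filter(|kind| allowed.contains(kind))
}

fn main() {
    let google = [SectionKind::Args, SectionKind::Returns];
    assert_eq!(suspected_as_section("Args:", &google), Some(SectionKind::Args));
    // "Parameters" is NumPy-style, so it is rejected under the Google style.
    assert_eq!(suspected_as_section("Parameters", &google), None);
}
```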
/// Check if the suspected context is really a section header.
@@ -170,15 +49,21 @@ pub(crate) fn section_contexts<'a>(
lines: &'a [&'a str],
style: &SectionStyle,
) -> Vec<SectionContext<'a>> {
let mut contexts = vec![];
for (kind, lineno) in lines
let suspected_section_indices: Vec<usize> = lines
.iter()
.enumerate()
.skip(1)
.filter_map(|(lineno, line)| suspected_as_section(line, style).map(|kind| (kind, lineno)))
{
.filter_map(|(lineno, line)| {
if lineno > 0 && suspected_as_section(line, style) {
Some(lineno)
} else {
None
}
})
.collect();
let mut contexts = vec![];
for lineno in suspected_section_indices {
let context = SectionContext {
kind,
section_name: whitespace::leading_words(lines[lineno]),
previous_line: lines[lineno - 1],
line: lines[lineno],
@@ -191,12 +76,11 @@ pub(crate) fn section_contexts<'a>(
}
}
let mut truncated_contexts = Vec::with_capacity(contexts.len());
let mut truncated_contexts = vec![];
let mut end: Option<usize> = None;
for context in contexts.into_iter().rev() {
let next_end = context.original_index;
truncated_contexts.push(SectionContext {
kind: context.kind,
section_name: context.section_name,
previous_line: context.previous_line,
line: context.line,

View File

@@ -1,6 +1,8 @@
use crate::docstrings::google::GOOGLE_SECTIONS;
use crate::docstrings::numpy::NUMPY_SECTIONS;
use crate::docstrings::sections::SectionKind;
use once_cell::sync::Lazy;
use rustc_hash::FxHashSet;
use crate::docstrings::google::{GOOGLE_SECTION_NAMES, LOWERCASE_GOOGLE_SECTION_NAMES};
use crate::docstrings::numpy::{LOWERCASE_NUMPY_SECTION_NAMES, NUMPY_SECTION_NAMES};
pub(crate) enum SectionStyle {
Numpy,
@@ -8,10 +10,17 @@ pub(crate) enum SectionStyle {
}
impl SectionStyle {
pub(crate) fn sections(&self) -> &[SectionKind] {
pub(crate) fn section_names(&self) -> &Lazy<FxHashSet<&'static str>> {
match self {
SectionStyle::Numpy => NUMPY_SECTIONS,
SectionStyle::Google => GOOGLE_SECTIONS,
SectionStyle::Numpy => &NUMPY_SECTION_NAMES,
SectionStyle::Google => &GOOGLE_SECTION_NAMES,
}
}
pub(crate) fn lowercase_section_names(&self) -> &Lazy<FxHashSet<&'static str>> {
match self {
SectionStyle::Numpy => &LOWERCASE_NUMPY_SECTION_NAMES,
SectionStyle::Google => &LOWERCASE_GOOGLE_SECTION_NAMES,
}
}
}
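The style dispatch above simply maps each variant to its static set. Sketched with `const` slices in place of the `Lazy<FxHashSet>` statics:

```rust
enum SectionStyle {
    Numpy,
    Google,
}

const NUMPY_SECTIONS: &[&str] = &["Parameters", "Returns"];
const GOOGLE_SECTIONS: &[&str] = &["Args", "Returns"];

impl SectionStyle {
    // Each style borrows its own static table; no per-call allocation.
    fn section_names(&self) -> &'static [&'static str] {
        match self {
            SectionStyle::Numpy => NUMPY_SECTIONS,
            SectionStyle::Google => GOOGLE_SECTIONS,
        }
    }
}

fn main() {
    assert!(SectionStyle::Google.section_names().contains(&"Args"));
}
```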

View File

@@ -1,3 +1,6 @@
use rustpython_parser::ast::Location;
use serde::{Deserialize, Serialize};
#[derive(Debug, Copy, Clone, Hash)]
pub enum FixMode {
Generate,
@@ -15,3 +18,40 @@ impl From<bool> for FixMode {
}
}
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct Fix {
pub content: String,
pub location: Location,
pub end_location: Location,
}
impl Fix {
pub const fn deletion(start: Location, end: Location) -> Self {
Self {
content: String::new(),
location: start,
end_location: end,
}
}
pub fn replacement(content: String, start: Location, end: Location) -> Self {
debug_assert!(!content.is_empty(), "Prefer `Fix::deletion`");
Self {
content,
location: start,
end_location: end,
}
}
pub fn insertion(content: String, at: Location) -> Self {
debug_assert!(!content.is_empty(), "Insert content is empty");
Self {
content,
location: at,
end_location: at,
}
}
}
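`Fix` above encodes a deletion as an empty-content replacement and an insertion as a zero-width range (`location == end_location`). A self-contained version of the same API; a local `Location` stands in for `rustpython_parser::ast::Location`, and the serde derives are dropped:

```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Location {
    row: usize,
    column: usize,
}

impl Location {
    const fn new(row: usize, column: usize) -> Self {
        Self { row, column }
    }
}

#[derive(Clone, Debug, PartialEq, Eq)]
struct Fix {
    content: String,
    location: Location,
    end_location: Location,
}

impl Fix {
    // An empty `content` over a non-empty range deletes that range.
    const fn deletion(start: Location, end: Location) -> Self {
        Self { content: String::new(), location: start, end_location: end }
    }

    fn replacement(content: String, start: Location, end: Location) -> Self {
        debug_assert!(!content.is_empty(), "Prefer `Fix::deletion`");
        Self { content, location: start, end_location: end }
    }

    // A zero-width range (start == end) inserts without removing anything.
    fn insertion(content: String, at: Location) -> Self {
        debug_assert!(!content.is_empty(), "Insert content is empty");
        Self { content, location: at, end_location: at }
    }
}

fn main() {
    let delete = Fix::deletion(Location::new(1, 5), Location::new(1, 19));
    assert!(delete.content.is_empty());

    let insert = Fix::insertion("import os\n".to_string(), Location::new(1, 0));
    assert_eq!(insert.location, insert.end_location);
}
```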

View File

@@ -20,7 +20,6 @@ use crate::rules::{
};
use crate::settings::options::Options;
use crate::settings::pyproject::Pyproject;
use crate::settings::types::PythonVersion;
use crate::warn_user;
const DEFAULT_SELECTORS: &[RuleSelector] = &[
@@ -425,15 +424,6 @@ pub fn convert(
}
}
if let Some(project) = &external_config.project {
if let Some(requires_python) = &project.requires_python {
if options.target_version.is_none() {
options.target_version =
PythonVersion::get_minimum_supported_version(requires_python);
}
}
}
// Create the pyproject.toml.
Ok(Pyproject::new(options))
}
@@ -449,17 +439,13 @@ fn resolve_select(plugins: &[Plugin]) -> HashSet<RuleSelector> {
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use std::str::FromStr;
use anyhow::Result;
use itertools::Itertools;
use pep440_rs::VersionSpecifiers;
use pretty_assertions::assert_eq;
use super::super::plugin::Plugin;
use super::convert;
use crate::flake8_to_ruff::converter::DEFAULT_SELECTORS;
use crate::flake8_to_ruff::pep621::Project;
use crate::flake8_to_ruff::ExternalConfig;
use crate::registry::Linter;
use crate::rule_selector::RuleSelector;
@@ -467,7 +453,6 @@ mod tests {
use crate::rules::{flake8_quotes, pydocstyle};
use crate::settings::options::Options;
use crate::settings::pyproject::Pyproject;
use crate::settings::types::PythonVersion;
fn default_options(plugins: impl IntoIterator<Item = RuleSelector>) -> Options {
Options {
@@ -592,7 +577,6 @@ mod tests {
pydocstyle: Some(pydocstyle::settings::Options {
convention: Some(Convention::Numpy),
ignore_decorators: None,
property_decorators: None,
}),
..default_options([Linter::Pydocstyle.into()])
});
@@ -624,25 +608,4 @@ mod tests {
Ok(())
}
#[test]
fn it_converts_project_requires_python() -> Result<()> {
let actual = convert(
&HashMap::from([("flake8".to_string(), HashMap::default())]),
&ExternalConfig {
project: Some(&Project {
requires_python: Some(VersionSpecifiers::from_str(">=3.8.16, <3.11")?),
}),
..ExternalConfig::default()
},
Some(vec![]),
)?;
let expected = Pyproject::new(Options {
target_version: Some(PythonVersion::Py38),
..default_options([])
});
assert_eq!(actual, expected);
Ok(())
}
}
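The deleted test exercises `PythonVersion::get_minimum_supported_version`, which maps a `requires-python` specifier to the oldest matching target version. A hypothetical string-based stand-in that only looks at the `>=` bound; the real code parses `pep440_rs::VersionSpecifiers` instead:

```rust
/// Hypothetical stand-in: find the `>=` bound in a `requires-python` string
/// and map its major.minor to a ruff `target-version` value.
fn minimum_target_version(requires_python: &str) -> Option<&'static str> {
    let lower = requires_python
        .split(',')
        .map(str::trim)
        .find_map(|spec| spec.strip_prefix(">="))?;
    let mut parts = lower.split('.');
    let major: u32 = parts.next()?.parse().ok()?;
    let minor: u32 = parts.next()?.parse().ok()?;
    match (major, minor) {
        (3, 7) => Some("py37"),
        (3, 8) => Some("py38"),
        (3, 9) => Some("py39"),
        (3, 10) => Some("py310"),
        (3, 11) => Some("py311"),
        _ => None,
    }
}

fn main() {
    // Mirrors the deleted test: ">=3.8.16, <3.11" should select Python 3.8.
    assert_eq!(minimum_target_version(">=3.8.16, <3.11"), Some("py38"));
}
```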

Some files were not shown because too many files have changed in this diff.