Compare commits

..

3 Commits

Author SHA1 Message Date
Brent Westbrook
cabdd969ec convert to serializable diagnostics 2025-07-21 16:09:52 -04:00
Brent Westbrook
2e5c8b9799 Revert "custom serializers"
This reverts commit e1219bc27c.
2025-07-21 16:09:43 -04:00
Brent Westbrook
e1219bc27c custom serializers 2025-07-21 16:09:27 -04:00
582 changed files with 12528 additions and 28699 deletions

View File

@@ -143,12 +143,12 @@ jobs:
env:
MERGE_BASE: ${{ steps.merge_base.outputs.sha }}
run: |
# NOTE: Do not exclude all Markdown files here, but rather use
# specific exclude patterns like 'docs/**', because tests for
# 'ty' are written in Markdown.
if git diff --quiet "${MERGE_BASE}...HEAD" -- \
if git diff --quiet "${MERGE_BASE}...HEAD" -- ':**' \
':!**/*.md' \
':crates/ty_python_semantic/resources/mdtest/**/*.md' \
':!docs/**' \
':!assets/**' \
':.github/workflows/ci.yaml' \
; then
echo "changed=false" >> "$GITHUB_OUTPUT"
else
@@ -429,7 +429,7 @@ jobs:
- name: "Install Rust toolchain"
run: rustup show
- name: "Install cargo-binstall"
uses: cargo-bins/cargo-binstall@808dcb1b503398677d089d3216c51ac7cc11e7ab # v1.14.2
uses: cargo-bins/cargo-binstall@8aac5aa2bf0dfaa2863eccad9f43c68fe40e5ec8 # v1.14.1
with:
tool: cargo-fuzz@0.11.2
- name: "Install cargo-fuzz"
@@ -451,7 +451,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
- uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v6.3.1
- uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
name: Download Ruff binary to test
id: download-cached-binary
@@ -652,7 +652,7 @@ jobs:
branch: ${{ github.event.pull_request.base.ref }}
workflow: "ci.yaml"
check_artifacts: true
- uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
- uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v6.3.1
- name: Fuzz
env:
FORCE_COLOR: 1
@@ -682,7 +682,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: cargo-bins/cargo-binstall@808dcb1b503398677d089d3216c51ac7cc11e7ab # v1.14.2
- uses: cargo-bins/cargo-binstall@8aac5aa2bf0dfaa2863eccad9f43c68fe40e5ec8 # v1.14.1
- run: cargo binstall --no-confirm cargo-shear
- run: cargo shear
@@ -722,7 +722,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
- uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v6.3.1
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
- uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
with:
@@ -765,7 +765,7 @@ jobs:
- name: "Install Rust toolchain"
run: rustup show
- name: Install uv
uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v6.3.1
- name: "Install Insiders dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: uv pip install -r docs/requirements-insiders.txt --system
@@ -897,7 +897,7 @@ jobs:
persist-credentials: false
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
- uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
- uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v6.3.1
- name: "Install Rust toolchain"
run: rustup show
@@ -911,7 +911,7 @@ jobs:
run: cargo codspeed build --features "codspeed,instrumented" --no-default-features -p ruff_benchmark
- name: "Run benchmarks"
uses: CodSpeedHQ/action@0b6e7a3d96c9d2a6057e7bcea6b45aaf2f7ce60b # v3.8.0
uses: CodSpeedHQ/action@c28fe9fbe7d57a3da1b7834ae3761c1d8217612d # v3.7.0
with:
run: cargo codspeed run
token: ${{ secrets.CODSPEED_TOKEN }}
@@ -930,7 +930,7 @@ jobs:
persist-credentials: false
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
- uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
- uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v6.3.1
- name: "Install Rust toolchain"
run: rustup show
@@ -944,7 +944,7 @@ jobs:
run: cargo codspeed build --features "codspeed,walltime" --no-default-features -p ruff_benchmark
- name: "Run benchmarks"
uses: CodSpeedHQ/action@0b6e7a3d96c9d2a6057e7bcea6b45aaf2f7ce60b # v3.8.0
uses: CodSpeedHQ/action@c28fe9fbe7d57a3da1b7834ae3761c1d8217612d # v3.7.0
with:
run: cargo codspeed run
token: ${{ secrets.CODSPEED_TOKEN }}

View File

@@ -34,7 +34,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
- uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v6.3.1
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"

View File

@@ -38,7 +38,7 @@ jobs:
persist-credentials: false
- name: Install the latest version of uv
uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v6.3.1
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
with:
@@ -81,7 +81,7 @@ jobs:
persist-credentials: false
- name: Install the latest version of uv
uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v6.3.1
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
with:

View File

@@ -22,7 +22,7 @@ jobs:
id-token: write
steps:
- name: "Install uv"
uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v6.3.1
- uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
pattern: wheels-*

View File

@@ -65,7 +65,7 @@ jobs:
run: |
git config --global user.name typeshedbot
git config --global user.email '<>'
- uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
- uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v6.3.1
- name: Sync typeshed stubs
run: |
rm -rf "ruff/${VENDORED_TYPESHED}"
@@ -117,7 +117,7 @@ jobs:
with:
persist-credentials: true
ref: ${{ env.UPSTREAM_BRANCH}}
- uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
- uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v6.3.1
- name: Setup git
run: |
git config --global user.name typeshedbot
@@ -155,7 +155,7 @@ jobs:
with:
persist-credentials: true
ref: ${{ env.UPSTREAM_BRANCH}}
- uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
- uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v6.3.1
- name: Setup git
run: |
git config --global user.name typeshedbot

View File

@@ -33,7 +33,7 @@ jobs:
persist-credentials: false
- name: Install the latest version of uv
uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v6.3.1
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
with:
@@ -64,7 +64,7 @@ jobs:
cd ..
uv tool install "git+https://github.com/astral-sh/ecosystem-analyzer@27dd66d9e397d986ef9c631119ee09556eab8af9"
uv tool install "git+https://github.com/astral-sh/ecosystem-analyzer@f0eec0e549684d8e1d7b8bc3e351202124b63bda"
ecosystem-analyzer \
--repository ruff \

View File

@@ -29,7 +29,7 @@ jobs:
persist-credentials: false
- name: Install the latest version of uv
uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v6.3.1
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
with:
@@ -49,7 +49,7 @@ jobs:
cd ..
uv tool install "git+https://github.com/astral-sh/ecosystem-analyzer@27dd66d9e397d986ef9c631119ee09556eab8af9"
uv tool install "git+https://github.com/astral-sh/ecosystem-analyzer@f0eec0e549684d8e1d7b8bc3e351202124b63bda"
ecosystem-analyzer \
--verbose \

View File

@@ -1,109 +0,0 @@
name: Run typing conformance
permissions: {}
on:
pull_request:
paths:
- "crates/ty*/**"
- "crates/ruff_db"
- "crates/ruff_python_ast"
- "crates/ruff_python_parser"
- ".github/workflows/typing_conformance.yaml"
- ".github/workflows/typing_conformance_comment.yaml"
- "Cargo.lock"
- "!**.md"
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
RUST_BACKTRACE: 1
jobs:
typing_conformance:
name: Compute diagnostic diff
runs-on: depot-ubuntu-22.04-32
timeout-minutes: 10
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: ruff
fetch-depth: 0
persist-credentials: false
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
repository: python/typing
ref: d4f39b27a4a47aac8b6d4019e1b0b5b3156fabdc
path: typing
persist-credentials: false
- name: Install the latest version of uv
uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v6.4.3
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
with:
workspaces: "ruff"
- name: Install Rust toolchain
run: rustup show
- name: Compute diagnostic diff
shell: bash
run: |
RUFF_DIR="$GITHUB_WORKSPACE/ruff"
# Build the executable for the old and new commit
(
cd ruff
echo "new commit"
git checkout -b new_commit "${{ github.event.pull_request.head.sha }}"
git rev-list --format=%s --max-count=1 new_commit
cargo build --release --bin ty
mv target/release/ty ty-new
echo "old commit (merge base)"
MERGE_BASE="$(git merge-base "$GITHUB_SHA" "origin/$GITHUB_BASE_REF")"
git checkout -b old_commit "$MERGE_BASE"
git rev-list --format=%s --max-count=1 old_commit
cargo build --release --bin ty
mv target/release/ty ty-old
)
(
cd typing/conformance/tests
echo "Running ty on old commit (merge base)"
"$RUFF_DIR/ty-old" check --color=never --output-format=concise . > "$GITHUB_WORKSPACE/old-output.txt" 2>&1 || true
echo "Running ty on new commit"
"$RUFF_DIR/ty-new" check --color=never --output-format=concise . > "$GITHUB_WORKSPACE/new-output.txt" 2>&1 || true
)
if ! diff -u old-output.txt new-output.txt > typing_conformance_diagnostics.diff; then
echo "Differences found between base and PR"
else
echo "No differences found"
touch typing_conformance_diagnostics.diff
fi
echo ${{ github.event.number }} > pr-number
- name: Upload diff
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: typing_conformance_diagnostics_diff
path: typing_conformance_diagnostics.diff
- name: Upload pr-number
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: pr-number
path: pr-number

View File

@@ -1,97 +0,0 @@
name: PR comment (typing_conformance)
on: # zizmor: ignore[dangerous-triggers]
workflow_run:
workflows: [Run typing conformance]
types: [completed]
workflow_dispatch:
inputs:
workflow_run_id:
description: The typing_conformance workflow that triggers the workflow run
required: true
jobs:
comment:
runs-on: ubuntu-24.04
permissions:
pull-requests: write
steps:
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: Download PR number
with:
name: pr-number
run_id: ${{ github.event.workflow_run.id || github.event.inputs.workflow_run_id }}
if_no_artifact_found: ignore
allow_forks: true
- name: Parse pull request number
id: pr-number
run: |
if [[ -f pr-number ]]
then
echo "pr-number=$(<pr-number)" >> "$GITHUB_OUTPUT"
fi
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: "Download typing_conformance results"
id: download-typing_conformance_diff
if: steps.pr-number.outputs.pr-number
with:
name: typing_conformance_diagnostics_diff
workflow: typing_conformance.yaml
pr: ${{ steps.pr-number.outputs.pr-number }}
path: pr/typing_conformance_diagnostics_diff
workflow_conclusion: completed
if_no_artifact_found: ignore
allow_forks: true
- name: Generate comment content
id: generate-comment
if: ${{ steps.download-typing_conformance_diff.outputs.found_artifact == 'true' }}
run: |
# Guard against malicious typing_conformance results that symlink to a secret
# file on this runner
if [[ -L pr/typing_conformance_diagnostics_diff/typing_conformance_diagnostics.diff ]]
then
echo "Error: typing_conformance_diagnostics.diff cannot be a symlink"
exit 1
fi
# Note this identifier is used to find the comment to update on
# subsequent runs
echo '<!-- generated-comment typing_conformance_diagnostics_diff -->' >> comment.txt
echo '## Diagnostic diff on typing conformance tests' >> comment.txt
if [ -s "pr/typing_conformance_diagnostics_diff/typing_conformance_diagnostics.diff" ]; then
echo '<details>' >> comment.txt
echo '<summary>Changes were detected when running ty on typing conformance tests</summary>' >> comment.txt
echo '' >> comment.txt
echo '```diff' >> comment.txt
cat pr/typing_conformance_diagnostics_diff/typing_conformance_diagnostics.diff >> comment.txt
echo '```' >> comment.txt
echo '</details>' >> comment.txt
else
echo 'No changes detected when running ty on typing conformance tests ✅' >> comment.txt
fi
echo 'comment<<EOF' >> "$GITHUB_OUTPUT"
cat comment.txt >> "$GITHUB_OUTPUT"
echo 'EOF' >> "$GITHUB_OUTPUT"
- name: Find existing comment
uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.1.0
if: steps.generate-comment.outcome == 'success'
id: find-comment
with:
issue-number: ${{ steps.pr-number.outputs.pr-number }}
comment-author: "github-actions[bot]"
body-includes: "<!-- generated-comment typing_conformance_diagnostics_diff -->"
- name: Create or update comment
if: steps.find-comment.outcome == 'success'
uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4
with:
comment-id: ${{ steps.find-comment.outputs.comment-id }}
issue-number: ${{ steps.pr-number.outputs.pr-number }}
body-path: comment.txt
edit-mode: replace

View File

@@ -81,10 +81,10 @@ repos:
pass_filenames: false # This makes it a lot faster
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.12.5
rev: v0.12.4
hooks:
- id: ruff-format
- id: ruff-check
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
types_or: [python, pyi]
require_serial: true

View File

@@ -1,23 +1,5 @@
# Changelog
## 0.12.5
### Preview features
- \[`flake8-use-pathlib`\] Add autofix for `PTH101`, `PTH104`, `PTH105`, `PTH121` ([#19404](https://github.com/astral-sh/ruff/pull/19404))
- \[`ruff`\] Support byte strings (`RUF055`) ([#18926](https://github.com/astral-sh/ruff/pull/18926))
### Bug fixes
- Fix `unreachable` panic in parser ([#19183](https://github.com/astral-sh/ruff/pull/19183))
- \[`flake8-pyi`\] Skip fix if all `Union` members are `None` (`PYI016`) ([#19416](https://github.com/astral-sh/ruff/pull/19416))
- \[`perflint`\] Parenthesize generator expressions (`PERF401`) ([#19325](https://github.com/astral-sh/ruff/pull/19325))
- \[`pylint`\] Handle empty comments after line continuation (`PLR2044`) ([#19405](https://github.com/astral-sh/ruff/pull/19405))
### Rule changes
- \[`pep8-naming`\] Fix `N802` false positives for `CGIHTTPRequestHandler` and `SimpleHTTPRequestHandler` ([#19432](https://github.com/astral-sh/ruff/pull/19432))
## 0.12.4
### Preview features

Cargo.lock (generated)
View File

@@ -261,18 +261,6 @@ version = "2.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967"
[[package]]
name = "bitvec"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c"
dependencies = [
"funty",
"radium",
"tap",
"wyz",
]
[[package]]
name = "block-buffer"
version = "0.10.4"
@@ -530,7 +518,7 @@ dependencies = [
"ciborium",
"clap",
"codspeed",
"criterion-plot 0.5.0",
"criterion-plot",
"is-terminal",
"itertools 0.10.5",
"num-traits",
@@ -713,15 +701,15 @@ dependencies = [
[[package]]
name = "criterion"
version = "0.7.0"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1c047a62b0cc3e145fa84415a3191f628e980b194c2755aa12300a4e6cbd928"
checksum = "3bf7af66b0989381bd0be551bd7cc91912a655a58c6918420c9527b1fd8b4679"
dependencies = [
"anes",
"cast",
"ciborium",
"clap",
"criterion-plot 0.6.0",
"criterion-plot",
"itertools 0.13.0",
"num-traits",
"oorandom",
@@ -742,16 +730,6 @@ dependencies = [
"itertools 0.10.5",
]
[[package]]
name = "criterion-plot"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b1bcc0dc7dfae599d84ad0b1a55f80cde8af3725da8313b528da95ef783e338"
dependencies = [
"cast",
"itertools 0.13.0",
]
[[package]]
name = "crossbeam"
version = "0.8.4"
@@ -1143,12 +1121,6 @@ dependencies = [
"libc",
]
[[package]]
name = "funty"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
[[package]]
name = "generic-array"
version = "0.14.7"
@@ -1161,9 +1133,9 @@ dependencies = [
[[package]]
name = "get-size-derive2"
version = "0.6.1"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca171f9f8ed2f416ac044de2dc4acde3e356662a14ac990345639653bdc7fc28"
checksum = "028f3cfad7c3e3b1d8d04ef0a1c03576f2d62800803fe1301a4cd262849f2dea"
dependencies = [
"attribute-derive",
"quote",
@@ -1172,9 +1144,9 @@ dependencies = [
[[package]]
name = "get-size2"
version = "0.6.1"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "965bc5c1c5fe05c5bbd398bb9b3f0f14d750261ebdd1af959f2c8a603fedb5ad"
checksum = "3a09c2043819a3def7bfbb4927e7df96aab0da4cfd8824484b22d0c94e84458e"
dependencies = [
"compact_str",
"get-size-derive2",
@@ -2576,12 +2548,6 @@ version = "5.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5"
[[package]]
name = "radium"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09"
[[package]]
name = "rand"
version = "0.8.5"
@@ -2744,7 +2710,7 @@ dependencies = [
[[package]]
name = "ruff"
version = "0.12.5"
version = "0.12.4"
dependencies = [
"anyhow",
"argfile",
@@ -2861,6 +2827,7 @@ dependencies = [
"anstyle",
"arc-swap",
"camino",
"countme",
"dashmap",
"dunce",
"etcetera",
@@ -2891,7 +2858,6 @@ dependencies = [
"tracing",
"tracing-subscriber",
"ty_static",
"unicode-width 0.2.1",
"web-time",
"zip",
]
@@ -2971,7 +2937,6 @@ version = "0.1.0"
dependencies = [
"anyhow",
"clap",
"memchr",
"ruff_cache",
"ruff_db",
"ruff_linter",
@@ -2997,7 +2962,7 @@ dependencies = [
[[package]]
name = "ruff_linter"
version = "0.12.5"
version = "0.12.4"
dependencies = [
"aho-corasick",
"anyhow",
@@ -3329,7 +3294,7 @@ dependencies = [
[[package]]
name = "ruff_wasm"
version = "0.12.5"
version = "0.12.4"
dependencies = [
"console_error_panic_hook",
"console_log",
@@ -3803,12 +3768,6 @@ dependencies = [
"syn",
]
[[package]]
name = "tap"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
[[package]]
name = "tempfile"
version = "3.20.0"
@@ -4223,7 +4182,6 @@ version = "0.0.0"
dependencies = [
"bitflags 2.9.1",
"insta",
"itertools 0.14.0",
"regex",
"ruff_db",
"ruff_python_ast",
@@ -4232,10 +4190,11 @@ dependencies = [
"ruff_source_file",
"ruff_text_size",
"rustc-hash",
"salsa",
"smallvec",
"tracing",
"ty_project",
"ty_python_semantic",
"ty_vendored",
]
[[package]]
@@ -4269,6 +4228,7 @@ dependencies = [
"thiserror 2.0.12",
"toml 0.9.2",
"tracing",
"ty_ide",
"ty_python_semantic",
"ty_vendored",
]
@@ -4279,7 +4239,6 @@ version = "0.0.0"
dependencies = [
"anyhow",
"bitflags 2.9.1",
"bitvec",
"camino",
"colored 3.0.0",
"compact_str",
@@ -4331,13 +4290,10 @@ dependencies = [
"anyhow",
"bitflags 2.9.1",
"crossbeam",
"dunce",
"insta",
"jod-thread",
"libc",
"lsp-server",
"lsp-types",
"regex",
"ruff_db",
"ruff_notebook",
"ruff_python_ast",
@@ -4348,7 +4304,6 @@ dependencies = [
"serde",
"serde_json",
"shellexpand",
"tempfile",
"thiserror 2.0.12",
"tracing",
"tracing-subscriber",
@@ -5137,15 +5092,6 @@ version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb"
[[package]]
name = "wyz"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed"
dependencies = [
"tap",
]
[[package]]
name = "yansi"
version = "1.0.1"

View File

@@ -57,9 +57,6 @@ assert_fs = { version = "1.1.0" }
argfile = { version = "0.2.0" }
bincode = { version = "2.0.0" }
bitflags = { version = "2.5.0" }
bitvec = { version = "1.0.1", default-features = false, features = [
"alloc",
] }
bstr = { version = "1.9.1" }
cachedir = { version = "0.3.1" }
camino = { version = "1.1.7" }
@@ -73,7 +70,7 @@ console_error_panic_hook = { version = "0.1.7" }
console_log = { version = "1.0.0" }
countme = { version = "3.0.1" }
compact_str = "0.9.0"
criterion = { version = "0.7.0", default-features = false }
criterion = { version = "0.6.0", default-features = false }
crossbeam = { version = "0.8.4" }
dashmap = { version = "6.0.1" }
dir-test = { version = "0.4.0" }
@@ -83,7 +80,7 @@ etcetera = { version = "0.10.0" }
fern = { version = "0.7.0" }
filetime = { version = "0.2.23" }
getrandom = { version = "0.3.1" }
get-size2 = { version = "0.6.0", features = [
get-size2 = { version = "0.5.0", features = [
"derive",
"smallvec",
"hashbrown",

View File

@@ -148,8 +148,8 @@ curl -LsSf https://astral.sh/ruff/install.sh | sh
powershell -c "irm https://astral.sh/ruff/install.ps1 | iex"
# For a specific version.
curl -LsSf https://astral.sh/ruff/0.12.5/install.sh | sh
powershell -c "irm https://astral.sh/ruff/0.12.5/install.ps1 | iex"
curl -LsSf https://astral.sh/ruff/0.12.4/install.sh | sh
powershell -c "irm https://astral.sh/ruff/0.12.4/install.ps1 | iex"
```
You can also install Ruff via [Homebrew](https://formulae.brew.sh/formula/ruff), [Conda](https://anaconda.org/conda-forge/ruff),
@@ -182,7 +182,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com/) hook via [`ruff
```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.12.5
rev: v0.12.4
hooks:
# Run the linter.
- id: ruff-check

View File

@@ -1,6 +1,6 @@
[package]
name = "ruff"
version = "0.12.5"
version = "0.12.4"
publish = true
authors = { workspace = true }
edition = { workspace = true }

View File

@@ -169,9 +169,6 @@ pub struct AnalyzeGraphCommand {
/// Attempt to detect imports from string literals.
#[clap(long)]
detect_string_imports: bool,
/// The minimum number of dots in a string import to consider it a valid import.
#[clap(long)]
min_dots: Option<usize>,
/// Enable preview mode. Use `--no-preview` to disable.
#[arg(long, overrides_with("no_preview"))]
preview: bool,
@@ -811,7 +808,6 @@ impl AnalyzeGraphCommand {
} else {
None
},
string_imports_min_dots: self.min_dots,
preview: resolve_bool_arg(self.preview, self.no_preview).map(PreviewMode::from),
target_version: self.target_version.map(ast::PythonVersion::from),
..ExplicitConfigOverrides::default()
@@ -1309,7 +1305,6 @@ struct ExplicitConfigOverrides {
show_fixes: Option<bool>,
extension: Option<Vec<ExtensionPair>>,
detect_string_imports: Option<bool>,
string_imports_min_dots: Option<usize>,
}
impl ConfigurationTransformer for ExplicitConfigOverrides {
@@ -1397,9 +1392,6 @@ impl ConfigurationTransformer for ExplicitConfigOverrides {
if let Some(detect_string_imports) = &self.detect_string_imports {
config.analyze.detect_string_imports = Some(*detect_string_imports);
}
if let Some(string_imports_min_dots) = &self.string_imports_min_dots {
config.analyze.string_imports_min_dots = Some(*string_imports_min_dots);
}
config
}

View File

@@ -18,14 +18,12 @@ use rustc_hash::FxHashMap;
use tempfile::NamedTempFile;
use ruff_cache::{CacheKey, CacheKeyHasher};
use ruff_db::diagnostic::Diagnostic;
use ruff_db::diagnostic::{Diagnostic, SerializableDiagnostics};
use ruff_diagnostics::Fix;
use ruff_linter::message::create_lint_diagnostic;
use ruff_linter::package::PackageRoot;
use ruff_linter::{VERSION, warn_user};
use ruff_macros::CacheKey;
use ruff_notebook::NotebookIndex;
use ruff_source_file::SourceFileBuilder;
use ruff_text_size::{TextRange, TextSize};
use ruff_workspace::Settings;
use ruff_workspace::resolver::Resolver;
@@ -345,22 +343,7 @@ impl FileCache {
let diagnostics = if lint.messages.is_empty() {
Vec::new()
} else {
let file = SourceFileBuilder::new(path.to_string_lossy(), &*lint.source).finish();
lint.messages
.iter()
.map(|msg| {
create_lint_diagnostic(
&msg.body,
msg.suggestion.as_ref(),
msg.range,
msg.fix.clone(),
msg.parent,
file.clone(),
msg.noqa_offset,
msg.rule,
)
})
.collect()
lint.messages.to_diagnostics()
};
let notebook_indexes = if let Some(notebook_index) = lint.notebook_index.as_ref() {
FxHashMap::from_iter([(path.to_string_lossy().to_string(), notebook_index.clone())])
@@ -415,7 +398,8 @@ pub(crate) struct LintCacheData {
/// Imports made.
// pub(super) imports: ImportMap,
/// Diagnostic messages.
pub(super) messages: Vec<CacheMessage>,
#[bincode(with_serde)]
pub(super) messages: SerializableDiagnostics,
/// Source code of the file.
///
/// # Notes
@@ -438,30 +422,7 @@ impl LintCacheData {
String::new() // No messages, no need to keep the source!
};
let messages = diagnostics
.iter()
// Parse the kebab-case rule name into a `Rule`. This will fail for syntax errors, so
// this also serves to filter them out, but we shouldn't be caching files with syntax
// errors anyway.
.filter_map(|msg| Some((msg.name().parse().ok()?, msg)))
.map(|(rule, msg)| {
// Make sure that all message use the same source file.
assert_eq!(
msg.expect_ruff_source_file(),
diagnostics.first().unwrap().expect_ruff_source_file(),
"message uses a different source file"
);
CacheMessage {
rule,
body: msg.body().to_string(),
suggestion: msg.first_help_text().map(ToString::to_string),
range: msg.expect_range(),
parent: msg.parent(),
fix: msg.fix().cloned(),
noqa_offset: msg.noqa_offset(),
}
})
.collect();
let messages = SerializableDiagnostics::new(diagnostics);
Self {
messages,
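
The hunk above replaces the hand-rolled `CacheMessage` list with a single `SerializableDiagnostics` field stored through bincode's serde bridge (`#[bincode(with_serde)]`). Below is a minimal sketch of that bridging pattern, assuming bincode 2 with its `derive` and `serde` features enabled; `Payload` and `CacheEntry` are hypothetical stand-ins, not types from this diff:

```rust
use serde::{Deserialize, Serialize};

// Hypothetical stand-in for `SerializableDiagnostics`: a type that only
// implements the serde traits, not bincode's `Encode`/`Decode`.
#[derive(Debug, Serialize, Deserialize)]
struct Payload {
    message: String,
    offset: u32,
}

// Hypothetical stand-in for `LintCacheData`: a bincode-encoded struct that
// carries the serde-only field via the `with_serde` bridge.
#[derive(bincode::Encode, bincode::Decode)]
struct CacheEntry {
    key: u64,
    #[bincode(with_serde)]
    payload: Payload,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let entry = CacheEntry {
        key: 42,
        payload: Payload { message: "unused import".into(), offset: 10 },
    };
    // Round-trip through bincode, as the lint cache does for its diagnostics.
    let bytes = bincode::encode_to_vec(entry, bincode::config::standard())?;
    let (decoded, _len): (CacheEntry, usize) =
        bincode::decode_from_slice(&bytes, bincode::config::standard())?;
    assert_eq!(decoded.key, 42);
    assert_eq!(decoded.payload.message, "unused import");
    Ok(())
}
```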

View File

@@ -102,7 +102,7 @@ pub(crate) fn analyze_graph(
// Resolve the per-file settings.
let settings = resolver.resolve(path);
let string_imports = settings.analyze.string_imports;
let string_imports = settings.analyze.detect_string_imports;
let include_dependencies = settings.analyze.include_dependencies.get(path).cloned();
// Skip excluded files.

View File

@@ -279,7 +279,6 @@ mod test {
TextEmitter::default()
.with_show_fix_status(true)
.with_color(false)
.emit(
&mut output,
&diagnostics.inner,

View File

@@ -264,7 +264,6 @@ impl Printer {
.with_show_fix_diff(self.flags.intersects(Flags::SHOW_FIX_DIFF))
.with_show_source(self.format == OutputFormat::Full)
.with_unsafe_fixes(self.unsafe_fixes)
.with_preview(preview)
.emit(writer, &diagnostics.inner, &context)?;
if self.flags.intersects(Flags::SHOW_FIX_SUMMARY) {

View File

@@ -57,40 +57,33 @@ fn dependencies() -> Result<()> {
.write_str(indoc::indoc! {r#"
def f(): pass
"#})?;
root.child("ruff")
.child("e.pyi")
.write_str(indoc::indoc! {r#"
def f() -> None: ...
"#})?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r#"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [
"ruff/c.py"
],
"ruff/c.py": [
"ruff/d.py"
],
"ruff/d.py": [
"ruff/e.py",
"ruff/e.pyi"
],
"ruff/e.py": [],
"ruff/e.pyi": []
}
assert_cmd_snapshot!(command().current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [
"ruff/c.py"
],
"ruff/c.py": [
"ruff/d.py"
],
"ruff/d.py": [
"ruff/e.py"
],
"ruff/e.py": []
}
----- stderr -----
"#);
----- stderr -----
"###);
});
Ok(())
@@ -204,43 +197,23 @@ fn string_detection() -> Result<()> {
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().arg("--detect-string-imports").current_dir(&root), @r#"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [],
"ruff/c.py": []
}
assert_cmd_snapshot!(command().arg("--detect-string-imports").current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [
"ruff/c.py"
],
"ruff/c.py": []
}
----- stderr -----
"#);
});
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().arg("--detect-string-imports").arg("--min-dots").arg("1").current_dir(&root), @r#"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [
"ruff/c.py"
],
"ruff/c.py": []
}
----- stderr -----
"#);
----- stderr -----
"###);
});
Ok(())

View File

@@ -2422,7 +2422,7 @@ requires-python = ">= 3.11"
analyze.exclude = []
analyze.preview = disabled
analyze.target_version = 3.11
analyze.string_imports = disabled
analyze.detect_string_imports = false
analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {}
@@ -2734,7 +2734,7 @@ requires-python = ">= 3.11"
analyze.exclude = []
analyze.preview = disabled
analyze.target_version = 3.10
analyze.string_imports = disabled
analyze.detect_string_imports = false
analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {}
@@ -3098,7 +3098,7 @@ from typing import Union;foo: Union[int, str] = 1
analyze.exclude = []
analyze.preview = disabled
analyze.target_version = 3.11
analyze.string_imports = disabled
analyze.detect_string_imports = false
analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {}
@@ -3478,7 +3478,7 @@ from typing import Union;foo: Union[int, str] = 1
analyze.exclude = []
analyze.preview = disabled
analyze.target_version = 3.11
analyze.string_imports = disabled
analyze.detect_string_imports = false
analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {}
@@ -3806,7 +3806,7 @@ from typing import Union;foo: Union[int, str] = 1
analyze.exclude = []
analyze.preview = disabled
analyze.target_version = 3.10
analyze.string_imports = disabled
analyze.detect_string_imports = false
analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {}
@@ -4134,7 +4134,7 @@ from typing import Union;foo: Union[int, str] = 1
analyze.exclude = []
analyze.preview = disabled
analyze.target_version = 3.9
analyze.string_imports = disabled
analyze.detect_string_imports = false
analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {}
@@ -4419,7 +4419,7 @@ from typing import Union;foo: Union[int, str] = 1
analyze.exclude = []
analyze.preview = disabled
analyze.target_version = 3.9
analyze.string_imports = disabled
analyze.detect_string_imports = false
analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {}
@@ -4757,7 +4757,7 @@ from typing import Union;foo: Union[int, str] = 1
analyze.exclude = []
analyze.preview = disabled
analyze.target_version = 3.10
analyze.string_imports = disabled
analyze.detect_string_imports = false
analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {}

View File

@@ -392,7 +392,7 @@ formatter.docstring_code_line_width = dynamic
analyze.exclude = []
analyze.preview = disabled
analyze.target_version = 3.7
analyze.string_imports = disabled
analyze.detect_string_imports = false
analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {}

View File

@@ -25,6 +25,7 @@ ty_static = { workspace = true }
anstyle = { workspace = true }
arc-swap = { workspace = true }
camino = { workspace = true }
countme = { workspace = true }
dashmap = { workspace = true }
dunce = { workspace = true }
filetime = { workspace = true }
@@ -42,7 +43,6 @@ serde_json = { workspace = true, optional = true }
thiserror = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true, optional = true }
unicode-width = { workspace = true }
zip = { workspace = true }
[target.'cfg(target_arch="wasm32")'.dependencies]
@@ -59,11 +59,6 @@ tempfile = { workspace = true }
cache = ["ruff_cache"]
junit = ["dep:quick-junit"]
os = ["ignore", "dep:etcetera"]
serde = [
"camino/serde1",
"dep:serde",
"dep:serde_json",
"ruff_diagnostics/serde",
]
serde = ["camino/serde1", "dep:serde", "dep:serde_json", "ruff_diagnostics/serde"]
# Exposes testing utilities.
testing = ["tracing-subscriber"]

View File

@@ -1,16 +1,17 @@
use std::{fmt::Formatter, path::Path, sync::Arc};
use ruff_diagnostics::{Applicability, Fix};
use ruff_diagnostics::Fix;
use ruff_source_file::{LineColumn, SourceCode, SourceFile};
use ruff_annotate_snippets::Level as AnnotateLevel;
use ruff_text_size::{Ranged, TextRange, TextSize};
pub use self::render::{
DisplayDiagnostic, DisplayDiagnostics, FileResolver, Input, ceil_char_boundary,
};
pub use self::render::{DisplayDiagnostic, DisplayDiagnostics, FileResolver, Input};
use crate::{Db, files::File};
#[cfg(feature = "serde")]
pub use serde_diagnostics::SerializableDiagnostics;
mod render;
mod stylesheet;
@@ -124,14 +125,7 @@ impl Diagnostic {
/// directly. If callers want or need to avoid cloning the diagnostic
/// message, then they can also pass a `DiagnosticMessage` directly.
pub fn info<'a>(&mut self, message: impl IntoDiagnosticMessage + 'a) {
self.sub(SubDiagnostic::new(SubDiagnosticSeverity::Info, message));
}
/// Adds a "help" sub-diagnostic with the given message.
///
/// See the closely related [`Diagnostic::info`] method for more details.
pub fn help<'a>(&mut self, message: impl IntoDiagnosticMessage + 'a) {
self.sub(SubDiagnostic::new(SubDiagnosticSeverity::Help, message));
self.sub(SubDiagnostic::new(Severity::Info, message));
}
/// Adds a "sub" diagnostic to this diagnostic.
@@ -386,15 +380,9 @@ impl Diagnostic {
self.primary_message()
}
/// Returns the message of the first sub-diagnostic with a `Help` severity.
///
/// Note that this is used as the fix title/suggestion for some of Ruff's output formats, but in
/// general this is not the guaranteed meaning of such a message.
pub fn first_help_text(&self) -> Option<&str> {
self.sub_diagnostics()
.iter()
.find(|sub| matches!(sub.inner.severity, SubDiagnosticSeverity::Help))
.map(|sub| sub.inner.message.as_str())
/// Returns the fix suggestion for the violation.
pub fn suggestion(&self) -> Option<&str> {
self.primary_annotation()?.get_message()
}
/// Returns the URL for the rule documentation, if it exists.
@@ -580,10 +568,7 @@ impl SubDiagnostic {
/// Callers can pass anything that implements `std::fmt::Display`
/// directly. If callers want or need to avoid cloning the diagnostic
/// message, then they can also pass a `DiagnosticMessage` directly.
pub fn new<'a>(
severity: SubDiagnosticSeverity,
message: impl IntoDiagnosticMessage + 'a,
) -> SubDiagnostic {
pub fn new<'a>(severity: Severity, message: impl IntoDiagnosticMessage + 'a) -> SubDiagnostic {
let inner = Box::new(SubDiagnosticInner {
severity,
message: message.into_diagnostic_message(),
@@ -661,7 +646,7 @@ impl SubDiagnostic {
#[derive(Debug, Clone, Eq, PartialEq, get_size2::GetSize)]
struct SubDiagnosticInner {
severity: SubDiagnosticSeverity,
severity: Severity,
message: DiagnosticMessage,
annotations: Vec<Annotation>,
}
@@ -808,6 +793,7 @@ impl Annotation {
/// These tags are used to provide additional information about the annotation
/// and are passed through to the language server protocol.
#[derive(Debug, Clone, Eq, PartialEq, get_size2::GetSize)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum DiagnosticTag {
/// Unused or unnecessary code. Used for unused parameters, unreachable code, etc.
Unnecessary,
@@ -822,6 +808,7 @@ pub enum DiagnosticTag {
///
/// Rules use kebab case, e.g. `no-foo`.
#[derive(Debug, Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Hash, get_size2::GetSize)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
pub struct LintName(&'static str);
impl LintName {
@@ -862,6 +849,7 @@ impl PartialEq<&str> for LintName {
/// Uniquely identifies the kind of a diagnostic.
#[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash, get_size2::GetSize)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum DiagnosticId {
Panic,
@@ -1159,6 +1147,7 @@ impl From<crate::files::FileRange> for Span {
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, get_size2::GetSize)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum Severity {
Info,
Warning,
@@ -1188,32 +1177,6 @@ impl Severity {
}
}
/// Like [`Severity`] but exclusively for sub-diagnostics.
///
/// This type only exists to add an additional `Help` severity that isn't present in `Severity` or
/// used for main diagnostics. If we want to add `Severity::Help` in the future, this type could be
/// deleted and the two combined again.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, get_size2::GetSize)]
pub enum SubDiagnosticSeverity {
Help,
Info,
Warning,
Error,
Fatal,
}
impl SubDiagnosticSeverity {
fn to_annotate(self) -> AnnotateLevel {
match self {
SubDiagnosticSeverity::Help => AnnotateLevel::Help,
SubDiagnosticSeverity::Info => AnnotateLevel::Info,
SubDiagnosticSeverity::Warning => AnnotateLevel::Warning,
SubDiagnosticSeverity::Error => AnnotateLevel::Error,
SubDiagnosticSeverity::Fatal => AnnotateLevel::Error,
}
}
}
/// Configuration for rendering diagnostics.
#[derive(Clone, Debug)]
pub struct DisplayDiagnosticConfig {
@@ -1240,15 +1203,6 @@ pub struct DisplayDiagnosticConfig {
reason = "This is currently only used for JSON but will be needed soon for other formats"
)]
preview: bool,
/// Whether to hide the real `Severity` of diagnostics.
///
/// This is intended for temporary use by Ruff, which only has a single `error` severity at the
/// moment. We should be able to remove this option when Ruff gets more severities.
hide_severity: bool,
/// Whether to show the availability of a fix in a diagnostic.
show_fix_status: bool,
/// The lowest applicability that should be shown when reporting diagnostics.
fix_applicability: Applicability,
}
impl DisplayDiagnosticConfig {
@@ -1277,35 +1231,6 @@ impl DisplayDiagnosticConfig {
..self
}
}
/// Whether to hide a diagnostic's severity or not.
pub fn hide_severity(self, yes: bool) -> DisplayDiagnosticConfig {
DisplayDiagnosticConfig {
hide_severity: yes,
..self
}
}
/// Whether to show a fix's availability or not.
pub fn show_fix_status(self, yes: bool) -> DisplayDiagnosticConfig {
DisplayDiagnosticConfig {
show_fix_status: yes,
..self
}
}
/// Set the lowest fix applicability that should be shown.
///
/// In other words, an applicability of `Safe` (the default) would suppress showing fixes or fix
/// availability for unsafe or display-only fixes.
///
/// Note that this option is currently ignored when `hide_severity` is false.
pub fn fix_applicability(self, applicability: Applicability) -> DisplayDiagnosticConfig {
DisplayDiagnosticConfig {
fix_applicability: applicability,
..self
}
}
}
impl Default for DisplayDiagnosticConfig {
@@ -1315,9 +1240,6 @@ impl Default for DisplayDiagnosticConfig {
color: false,
context: 2,
preview: false,
hide_severity: false,
show_fix_status: false,
fix_applicability: Applicability::Safe,
}
}
}
@@ -1429,6 +1351,7 @@ impl std::fmt::Display for ConciseMessage<'_> {
/// a blanket trait implementation for `IntoDiagnosticMessage` for
/// anything that implements `std::fmt::Display`.
#[derive(Clone, Debug, Eq, PartialEq, get_size2::GetSize)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct DiagnosticMessage(Box<str>);
impl DiagnosticMessage {
@@ -1492,7 +1415,11 @@ impl<T: std::fmt::Display> IntoDiagnosticMessage for T {
///
/// For Ruff rules this means the noqa code.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash, get_size2::GetSize)]
#[cfg_attr(feature = "serde", derive(serde::Serialize), serde(transparent))]
#[cfg_attr(
feature = "serde",
derive(serde::Serialize, serde::Deserialize),
serde(transparent)
)]
pub struct SecondaryCode(String);
impl SecondaryCode {
@@ -1537,3 +1464,205 @@ impl From<&SecondaryCode> for SecondaryCode {
value.clone()
}
}
#[cfg(feature = "serde")]
mod serde_diagnostics {
use std::sync::Arc;
use ruff_diagnostics::Fix;
use ruff_source_file::{SourceFile, SourceFileBuilder};
use ruff_text_size::{TextRange, TextSize};
use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};
use super::{
Annotation, Diagnostic, DiagnosticId, DiagnosticInner, DiagnosticMessage, DiagnosticTag,
LintName, SecondaryCode, Severity, Span, SubDiagnostic, SubDiagnosticInner, UnifiedFile,
};
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct SerializableDiagnostics {
source_files: FxHashMap<String, String>,
diagnostics: Vec<SerializableDiagnostic>,
}
impl SerializableDiagnostics {
pub fn new(diagnostics: &[Diagnostic]) -> Self {
let mut source_files = FxHashMap::default();
let mut serializable_diagnostics = Vec::with_capacity(diagnostics.len());
for diagnostic in diagnostics {
let mut subs = Vec::with_capacity(diagnostic.inner.subs.len());
for sub in &diagnostic.inner.subs {
subs.push(SerializableSubDiagnostic {
severity: sub.inner.severity,
message: sub.inner.message.clone(),
annotations: serializable_annotations(
&mut source_files,
&sub.inner.annotations,
),
});
}
serializable_diagnostics.push(SerializableDiagnostic {
id: diagnostic.inner.id,
severity: diagnostic.inner.severity,
message: diagnostic.inner.message.clone(),
annotations: serializable_annotations(
&mut source_files,
&diagnostic.inner.annotations,
),
subs,
fix: diagnostic.inner.fix.clone(),
parent: diagnostic.inner.parent,
noqa_offset: diagnostic.inner.noqa_offset,
secondary_code: diagnostic.inner.secondary_code.clone(),
});
}
Self {
source_files,
diagnostics: serializable_diagnostics,
}
}
pub fn is_empty(&self) -> bool {
self.diagnostics.is_empty()
}
pub fn to_diagnostics(&self) -> Vec<Diagnostic> {
let source_files: FxHashMap<&str, SourceFile> = self
.source_files
.iter()
.map(|(name, contents)| {
(
name.as_str(),
SourceFileBuilder::new(name.clone(), contents.clone()).finish(),
)
})
.collect();
self.diagnostics
.iter()
.map(|diag| Diagnostic {
inner: Arc::new(DiagnosticInner {
id: diag.id,
severity: diag.severity,
message: diag.message.clone(),
annotations: annotations(&source_files, &diag.annotations),
subs: subdiagnostics(&source_files, &diag.subs),
fix: diag.fix.clone(),
parent: diag.parent,
noqa_offset: diag.noqa_offset,
secondary_code: diag.secondary_code.clone(),
}),
})
.collect()
}
}
fn serializable_annotations(
source_files: &mut FxHashMap<String, String>,
annotations: &[Annotation],
) -> Vec<SerializableAnnotation> {
let mut serializable_annotations = Vec::with_capacity(annotations.len());
for annotation in annotations {
let file = annotation.span.expect_ruff_file();
source_files.insert(file.name().to_string(), file.source_text().to_string());
serializable_annotations.push(SerializableAnnotation {
span: SerializableSpan {
name: file.name().to_string(),
range: annotation.span.range,
},
message: annotation.message.clone(),
is_primary: annotation.is_primary,
tags: annotation.tags.clone(),
});
}
serializable_annotations
}
fn annotations(
source_files: &FxHashMap<&str, SourceFile>,
serializable_annotations: &[SerializableAnnotation],
) -> Vec<Annotation> {
serializable_annotations
.iter()
.map(|ann| {
let span = Span {
file: UnifiedFile::Ruff(
source_files
.get(ann.span.name.as_str())
.expect("Expected source file in cache")
.clone(),
),
range: ann.span.range,
};
Annotation {
span,
message: ann.message.clone(),
is_primary: ann.is_primary,
tags: ann.tags.clone(),
}
})
.collect()
}
fn subdiagnostics(
source_files: &FxHashMap<&str, SourceFile>,
serializable_annotations: &[SerializableSubDiagnostic],
) -> Vec<SubDiagnostic> {
serializable_annotations
.iter()
.map(|sub| SubDiagnostic {
inner: Box::new(SubDiagnosticInner {
severity: sub.severity,
message: sub.message.clone(),
annotations: annotations(source_files, &sub.annotations),
}),
})
.collect()
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct SerializableDiagnostic {
id: DiagnosticId,
severity: Severity,
message: DiagnosticMessage,
annotations: Vec<SerializableAnnotation>,
subs: Vec<SerializableSubDiagnostic>,
fix: Option<Fix>,
parent: Option<TextSize>,
noqa_offset: Option<TextSize>,
secondary_code: Option<SecondaryCode>,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct SerializableAnnotation {
span: SerializableSpan,
message: Option<DiagnosticMessage>,
is_primary: bool,
tags: Vec<DiagnosticTag>,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct SerializableSubDiagnostic {
severity: Severity,
message: DiagnosticMessage,
annotations: Vec<SerializableAnnotation>,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct SerializableSpan {
name: String,
range: Option<TextRange>,
}
impl<'de> Deserialize<'de> for LintName {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let s = String::deserialize(deserializer)?.into_boxed_str();
Ok(LintName::of(Box::leak(s)))
}
}
}

View File

@@ -1,4 +1,3 @@
use std::borrow::Cow;
use std::collections::BTreeMap;
use std::path::Path;
@@ -8,9 +7,9 @@ use ruff_annotate_snippets::{
};
use ruff_notebook::{Notebook, NotebookIndex};
use ruff_source_file::{LineIndex, OneIndexed, SourceCode};
use ruff_text_size::{TextLen, TextRange, TextSize};
use ruff_text_size::{TextRange, TextSize};
use crate::diagnostic::stylesheet::DiagnosticStylesheet;
use crate::diagnostic::stylesheet::{DiagnosticStylesheet, fmt_styled};
use crate::{
Db,
files::File,
@@ -19,17 +18,14 @@ use crate::{
};
use super::{
Annotation, Diagnostic, DiagnosticFormat, DiagnosticSource, DisplayDiagnosticConfig,
Annotation, Diagnostic, DiagnosticFormat, DiagnosticSource, DisplayDiagnosticConfig, Severity,
SubDiagnostic, UnifiedFile,
};
use azure::AzureRenderer;
use concise::ConciseRenderer;
use pylint::PylintRenderer;
mod azure;
mod concise;
mod full;
#[cfg(feature = "serde")]
mod json;
#[cfg(feature = "serde")]
@@ -108,7 +104,48 @@ impl std::fmt::Display for DisplayDiagnostics<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self.config.format {
DiagnosticFormat::Concise => {
ConciseRenderer::new(self.resolver, self.config).render(f, self.diagnostics)?;
let stylesheet = if self.config.color {
DiagnosticStylesheet::styled()
} else {
DiagnosticStylesheet::plain()
};
for diag in self.diagnostics {
let (severity, severity_style) = match diag.severity() {
Severity::Info => ("info", stylesheet.info),
Severity::Warning => ("warning", stylesheet.warning),
Severity::Error => ("error", stylesheet.error),
Severity::Fatal => ("fatal", stylesheet.error),
};
write!(
f,
"{severity}[{id}]",
severity = fmt_styled(severity, severity_style),
id = fmt_styled(diag.id(), stylesheet.emphasis)
)?;
if let Some(span) = diag.primary_span() {
write!(
f,
" {path}",
path = fmt_styled(span.file().path(self.resolver), stylesheet.emphasis)
)?;
if let Some(range) = span.range() {
let diagnostic_source = span.file().diagnostic_source(self.resolver);
let start = diagnostic_source
.as_source_code()
.line_column(range.start());
write!(
f,
":{line}:{col}",
line = fmt_styled(start.line, stylesheet.emphasis),
col = fmt_styled(start.column, stylesheet.emphasis),
)?;
}
write!(f, ":")?;
}
writeln!(f, " {message}", message = diag.concise_message())?;
}
}
DiagnosticFormat::Full => {
let stylesheet = if self.config.color {
@@ -219,7 +256,7 @@ impl<'a> Resolved<'a> {
/// both.)
#[derive(Debug)]
struct ResolvedDiagnostic<'a> {
level: AnnotateLevel,
severity: Severity,
id: Option<String>,
message: String,
annotations: Vec<ResolvedAnnotation<'a>>,
@@ -244,7 +281,7 @@ impl<'a> ResolvedDiagnostic<'a> {
let id = Some(diag.inner.id.to_string());
let message = diag.inner.message.as_str().to_string();
ResolvedDiagnostic {
level: diag.inner.severity.to_annotate(),
severity: diag.inner.severity,
id,
message,
annotations,
@@ -267,7 +304,7 @@ impl<'a> ResolvedDiagnostic<'a> {
})
.collect();
ResolvedDiagnostic {
level: diag.inner.severity.to_annotate(),
severity: diag.inner.severity,
id: None,
message: diag.inner.message.as_str().to_string(),
annotations,
@@ -334,7 +371,7 @@ impl<'a> ResolvedDiagnostic<'a> {
snippets_by_input
.sort_by(|snips1, snips2| snips1.has_primary.cmp(&snips2.has_primary).reverse());
RenderableDiagnostic {
level: self.level,
severity: self.severity,
id: self.id.as_deref(),
message: &self.message,
snippets_by_input,
@@ -422,7 +459,7 @@ struct Renderable<'r> {
#[derive(Debug)]
struct RenderableDiagnostic<'r> {
/// The severity of the diagnostic.
level: AnnotateLevel,
severity: Severity,
/// The ID of the diagnostic. The ID can usually be used on the CLI or in a
/// config file to change the severity of a lint.
///
@@ -441,6 +478,7 @@ struct RenderableDiagnostic<'r> {
impl RenderableDiagnostic<'_> {
/// Convert this to an "annotate" snippet.
fn to_annotate(&self) -> AnnotateMessage<'_> {
let level = self.severity.to_annotate();
let snippets = self.snippets_by_input.iter().flat_map(|snippets| {
let path = snippets.path;
snippets
@@ -448,7 +486,7 @@ impl RenderableDiagnostic<'_> {
.iter()
.map(|snippet| snippet.to_annotate(path))
});
let mut message = self.level.title(self.message);
let mut message = level.title(self.message);
if let Some(id) = self.id {
message = message.id(id);
}
@@ -521,7 +559,7 @@ impl<'r> RenderableSnippets<'r> {
#[derive(Debug)]
struct RenderableSnippet<'r> {
/// The actual snippet text.
snippet: Cow<'r, str>,
snippet: &'r str,
/// The absolute line number corresponding to where this
/// snippet begins.
line_start: OneIndexed,
@@ -581,13 +619,6 @@ impl<'r> RenderableSnippet<'r> {
.iter()
.map(|ann| RenderableAnnotation::new(snippet_start, ann))
.collect();
let EscapedSourceCode {
text: snippet,
annotations,
} = replace_whitespace_and_unprintable(snippet, annotations)
.fix_up_empty_spans_after_line_terminator();
RenderableSnippet {
snippet,
line_start,
@@ -598,7 +629,7 @@ impl<'r> RenderableSnippet<'r> {
/// Convert this to an "annotate" snippet.
fn to_annotate<'a>(&'a self, path: &'a str) -> AnnotateSnippet<'a> {
AnnotateSnippet::source(&self.snippet)
AnnotateSnippet::source(self.snippet)
.origin(path)
.line_start(self.line_start.get())
.annotations(
@@ -828,239 +859,12 @@ fn relativize_path<'p>(cwd: &SystemPath, path: &'p str) -> &'p str {
path
}
/// Given some source code and annotation ranges, this routine replaces tabs
/// with ASCII whitespace, and unprintable characters with printable
/// representations of them.
///
/// The source code and annotations returned are updated to reflect changes made
/// to the source code (if any).
fn replace_whitespace_and_unprintable<'r>(
source: &'r str,
mut annotations: Vec<RenderableAnnotation<'r>>,
) -> EscapedSourceCode<'r> {
// Updates the annotation ranges given by the caller whenever a single byte (at `index` in
// `source`) is replaced with `len` bytes.
//
// When the index occurs before the start of the range, the range is
// offset by `len`. When the index occurs at or after the start but before
// the end, then the end of the range only is offset by `len`.
let mut update_ranges = |index: usize, len: u32| {
for ann in &mut annotations {
if index < usize::from(ann.range.start()) {
ann.range += TextSize::new(len - 1);
} else if index < usize::from(ann.range.end()) {
ann.range = ann.range.add_end(TextSize::new(len - 1));
}
}
};
// If `c` is an unprintable character, then this returns a printable
// representation of it (using a fancier Unicode codepoint).
let unprintable_replacement = |c: char| -> Option<char> {
match c {
'\x07' => Some('␇'),
'\x08' => Some('␈'),
'\x1b' => Some('␛'),
'\x7f' => Some('␡'),
_ => None,
}
};
const TAB_SIZE: usize = 4;
let mut width = 0;
let mut column = 0;
let mut last_end = 0;
let mut result = String::new();
for (index, c) in source.char_indices() {
let old_width = width;
match c {
'\n' | '\r' => {
width = 0;
column = 0;
}
'\t' => {
let tab_offset = TAB_SIZE - (column % TAB_SIZE);
width += tab_offset;
column += tab_offset;
let tab_width =
u32::try_from(width - old_width).expect("small width because of tab size");
result.push_str(&source[last_end..index]);
update_ranges(result.text_len().to_usize(), tab_width);
for _ in 0..tab_width {
result.push(' ');
}
last_end = index + 1;
}
_ => {
width += unicode_width::UnicodeWidthChar::width(c).unwrap_or(0);
column += 1;
if let Some(printable) = unprintable_replacement(c) {
result.push_str(&source[last_end..index]);
let len = printable.text_len().to_u32();
update_ranges(result.text_len().to_usize(), len);
result.push(printable);
last_end = index + 1;
}
}
}
}
// No tabs or unprintable chars
if result.is_empty() {
EscapedSourceCode {
annotations,
text: Cow::Borrowed(source),
}
} else {
result.push_str(&source[last_end..]);
EscapedSourceCode {
annotations,
text: Cow::Owned(result),
}
}
}
struct EscapedSourceCode<'r> {
text: Cow<'r, str>,
annotations: Vec<RenderableAnnotation<'r>>,
}
impl<'r> EscapedSourceCode<'r> {
// This attempts to "fix up" the spans on each annotation in the case where
// it's an empty span immediately following a line terminator.
//
// At present, `annotate-snippets` (both upstream and our vendored copy)
// will render annotations of such spans to point to the space immediately
// following the previous line. But ideally, this should point to the space
// immediately preceding the next line.
//
// After attempting to fix `annotate-snippets` and giving up after a couple
// hours, this routine takes a different tack: it adjusts the span to be
// non-empty and it will cover the first codepoint of the following line.
// This forces `annotate-snippets` to point to the right place.
//
// See also: <https://github.com/astral-sh/ruff/issues/15509> and
// `ruff_linter::message::text::SourceCode::fix_up_empty_spans_after_line_terminator`,
// from which this was adapted.
fn fix_up_empty_spans_after_line_terminator(mut self) -> EscapedSourceCode<'r> {
for ann in &mut self.annotations {
let range = ann.range;
if !range.is_empty()
|| range.start() == TextSize::from(0)
|| range.start() >= self.text.text_len()
{
continue;
}
if !matches!(
self.text.as_bytes()[range.start().to_usize() - 1],
b'\n' | b'\r'
) {
continue;
}
let start = range.start();
let end = ceil_char_boundary(&self.text, start + TextSize::from(1));
ann.range = TextRange::new(start, end);
}
self
}
}
/// Finds the closest [`TextSize`] not less than the offset given for which
/// `is_char_boundary` is `true`. Unless the offset given is greater than
/// the length of the underlying contents, in which case, the length of the
/// contents is returned.
///
/// Can be replaced with `str::ceil_char_boundary` once it's stable.
///
/// # Examples
///
/// From `std`:
///
/// ```
/// use ruff_db::diagnostic::ceil_char_boundary;
/// use ruff_text_size::{Ranged, TextLen, TextSize};
///
/// let source = "❤️🧡💛💚💙💜";
/// assert_eq!(source.text_len(), TextSize::from(26));
/// assert!(!source.is_char_boundary(13));
///
/// let closest = ceil_char_boundary(source, TextSize::from(13));
/// assert_eq!(closest, TextSize::from(14));
/// assert_eq!(&source[..closest.to_usize()], "❤️🧡💛");
/// ```
///
/// Additional examples:
///
/// ```
/// use ruff_db::diagnostic::ceil_char_boundary;
/// use ruff_text_size::{Ranged, TextRange, TextSize};
///
/// let source = "Hello";
///
/// assert_eq!(
/// ceil_char_boundary(source, TextSize::from(0)),
/// TextSize::from(0)
/// );
///
/// assert_eq!(
/// ceil_char_boundary(source, TextSize::from(5)),
/// TextSize::from(5)
/// );
///
/// assert_eq!(
/// ceil_char_boundary(source, TextSize::from(6)),
/// TextSize::from(5)
/// );
///
/// let source = "α";
///
/// assert_eq!(
/// ceil_char_boundary(source, TextSize::from(0)),
/// TextSize::from(0)
/// );
///
/// assert_eq!(
/// ceil_char_boundary(source, TextSize::from(1)),
/// TextSize::from(2)
/// );
///
/// assert_eq!(
/// ceil_char_boundary(source, TextSize::from(2)),
/// TextSize::from(2)
/// );
///
/// assert_eq!(
/// ceil_char_boundary(source, TextSize::from(3)),
/// TextSize::from(2)
/// );
/// ```
pub fn ceil_char_boundary(text: &str, offset: TextSize) -> TextSize {
let upper_bound = offset
.to_u32()
.saturating_add(4)
.min(text.text_len().to_u32());
(offset.to_u32()..upper_bound)
.map(TextSize::from)
.find(|offset| text.is_char_boundary(offset.to_usize()))
.unwrap_or_else(|| TextSize::from(upper_bound))
}
#[cfg(test)]
mod tests {
use ruff_diagnostics::{Applicability, Edit, Fix};
use ruff_diagnostics::{Edit, Fix};
use crate::diagnostic::{
Annotation, DiagnosticId, IntoDiagnosticMessage, SecondaryCode, Severity, Span,
SubDiagnosticSeverity,
};
use crate::diagnostic::{Annotation, DiagnosticId, SecondaryCode, Severity, Span};
use crate::files::system_path_to_file;
use crate::system::{DbWithWritableSystem, SystemPath};
use crate::tests::TestDb;
@@ -1744,7 +1548,7 @@ watermelon
let mut diag = env.err().primary("animals", "3", "3", "").build();
diag.sub(
env.sub_builder(SubDiagnosticSeverity::Info, "this is a helpful note")
env.sub_builder(Severity::Info, "this is a helpful note")
.build(),
);
insta::assert_snapshot!(
@@ -1773,15 +1577,15 @@ watermelon
let mut diag = env.err().primary("animals", "3", "3", "").build();
diag.sub(
env.sub_builder(SubDiagnosticSeverity::Info, "this is a helpful note")
env.sub_builder(Severity::Info, "this is a helpful note")
.build(),
);
diag.sub(
env.sub_builder(SubDiagnosticSeverity::Info, "another helpful note")
env.sub_builder(Severity::Info, "another helpful note")
.build(),
);
diag.sub(
env.sub_builder(SubDiagnosticSeverity::Info, "and another helpful note")
env.sub_builder(Severity::Info, "and another helpful note")
.build(),
);
insta::assert_snapshot!(
@@ -2503,27 +2307,6 @@ watermelon
self.config = config;
}
/// Hide diagnostic severity when rendering.
pub(super) fn hide_severity(&mut self, yes: bool) {
let mut config = std::mem::take(&mut self.config);
config = config.hide_severity(yes);
self.config = config;
}
/// Show fix availability when rendering.
pub(super) fn show_fix_status(&mut self, yes: bool) {
let mut config = std::mem::take(&mut self.config);
config = config.show_fix_status(yes);
self.config = config;
}
/// The lowest fix applicability to show when rendering.
pub(super) fn fix_applicability(&mut self, applicability: Applicability) {
let mut config = std::mem::take(&mut self.config);
config = config.fix_applicability(applicability);
self.config = config;
}
/// Add a file with the given path and contents to this environment.
pub(super) fn add(&mut self, path: &str, contents: &str) {
let path = SystemPath::new(path);
@@ -2587,11 +2370,11 @@ watermelon
/// sub-diagnostic with "error" severity and canned values for
/// its identifier and message.
fn sub_warn(&mut self) -> SubDiagnosticBuilder<'_> {
self.sub_builder(SubDiagnosticSeverity::Warning, "sub-diagnostic message")
self.sub_builder(Severity::Warning, "sub-diagnostic message")
}
/// Returns a builder for tersely constructing diagnostics.
pub(super) fn builder(
fn builder(
&mut self,
identifier: &'static str,
severity: Severity,
@@ -2608,11 +2391,7 @@ watermelon
}
/// Returns a builder for tersely constructing sub-diagnostics.
fn sub_builder(
&mut self,
severity: SubDiagnosticSeverity,
message: &str,
) -> SubDiagnosticBuilder<'_> {
fn sub_builder(&mut self, severity: Severity, message: &str) -> SubDiagnosticBuilder<'_> {
let subdiag = SubDiagnostic::new(severity, message);
SubDiagnosticBuilder { env: self, subdiag }
}
@@ -2658,7 +2437,7 @@ watermelon
///
/// See the docs on `TestEnvironment::span` for the meaning of
/// `path`, `line_offset_start` and `line_offset_end`.
pub(super) fn primary(
fn primary(
mut self,
path: &str,
line_offset_start: &str,
@@ -2715,12 +2494,6 @@ watermelon
self.diag.set_noqa_offset(noqa_offset);
self
}
/// Adds a "help" sub-diagnostic with the given message.
fn help(mut self, message: impl IntoDiagnosticMessage) -> DiagnosticBuilder<'e> {
self.diag.help(message);
self
}
}
/// A helper builder for tersely populating a `SubDiagnostic`.
@@ -2827,8 +2600,7 @@ def fibonacci(n):
let diagnostics = vec![
env.builder("unused-import", Severity::Error, "`os` imported but unused")
.primary("fib.py", "1:7", "1:9", "")
.help("Remove unused import: `os`")
.primary("fib.py", "1:7", "1:9", "Remove unused import: `os`")
.secondary_code("F401")
.fix(Fix::unsafe_edit(Edit::range_deletion(TextRange::new(
TextSize::from(0),
@@ -2841,8 +2613,12 @@ def fibonacci(n):
Severity::Error,
"Local variable `x` is assigned to but never used",
)
.primary("fib.py", "6:4", "6:5", "")
.help("Remove assignment to unused variable `x`")
.primary(
"fib.py",
"6:4",
"6:5",
"Remove assignment to unused variable `x`",
)
.secondary_code("F841")
.fix(Fix::unsafe_edit(Edit::deletion(
TextSize::from(94),
@@ -2889,25 +2665,6 @@ if call(foo
}
/// Create Ruff-style diagnostics for testing the various output formats for a notebook.
///
/// The concatenated cells look like this:
///
/// ```python
/// # cell 1
/// import os
/// # cell 2
/// import math
///
/// print('hello world')
/// # cell 3
/// def foo():
/// print()
/// x = 1
/// ```
///
/// The first diagnostic is on the unused `os` import with location cell 1, row 2, column 8
/// (`cell 1:2:8`). The second diagnostic is the unused `math` import at `cell 2:2:8`, and the
/// third diagnostic is an unfixable unused variable at `cell 3:4:5`.
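/// In the concatenated view above, those cell-relative positions correspond to concatenated
/// lines 2, 4, and 10 respectively: cell 1 spans lines 1-2, cell 2 spans lines 3-6, and
/// cell 3 spans lines 7-10, so e.g. cell 2, row 2 is concatenated line 4.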
#[allow(
dead_code,
reason = "This is currently only used for JSON but will be needed soon for other formats"
@@ -2963,8 +2720,7 @@ if call(foo
let diagnostics = vec![
env.builder("unused-import", Severity::Error, "`os` imported but unused")
.primary("notebook.ipynb", "2:7", "2:9", "")
.help("Remove unused import: `os`")
.primary("notebook.ipynb", "2:7", "2:9", "Remove unused import: `os`")
.secondary_code("F401")
.fix(Fix::safe_edit(Edit::range_deletion(TextRange::new(
TextSize::from(9),
@@ -2977,8 +2733,12 @@ if call(foo
Severity::Error,
"`math` imported but unused",
)
.primary("notebook.ipynb", "4:7", "4:11", "")
.help("Remove unused import: `math`")
.primary(
"notebook.ipynb",
"4:7",
"4:11",
"Remove unused import: `math`",
)
.secondary_code("F401")
.fix(Fix::safe_edit(Edit::range_deletion(TextRange::new(
TextSize::from(28),
@@ -2991,8 +2751,12 @@ if call(foo
Severity::Error,
"Local variable `x` is assigned to but never used",
)
.primary("notebook.ipynb", "10:4", "10:5", "")
.help("Remove assignment to unused variable `x`")
.primary(
"notebook.ipynb",
"10:4",
"10:5",
"Remove assignment to unused variable `x`",
)
.secondary_code("F841")
.fix(Fix::unsafe_edit(Edit::range_deletion(TextRange::new(
TextSize::from(94),

View File

@@ -1,195 +0,0 @@
use crate::diagnostic::{
Diagnostic, DisplayDiagnosticConfig, Severity,
stylesheet::{DiagnosticStylesheet, fmt_styled},
};
use super::FileResolver;
pub(super) struct ConciseRenderer<'a> {
resolver: &'a dyn FileResolver,
config: &'a DisplayDiagnosticConfig,
}
impl<'a> ConciseRenderer<'a> {
pub(super) fn new(resolver: &'a dyn FileResolver, config: &'a DisplayDiagnosticConfig) -> Self {
Self { resolver, config }
}
pub(super) fn render(
&self,
f: &mut std::fmt::Formatter,
diagnostics: &[Diagnostic],
) -> std::fmt::Result {
let stylesheet = if self.config.color {
DiagnosticStylesheet::styled()
} else {
DiagnosticStylesheet::plain()
};
let sep = fmt_styled(":", stylesheet.separator);
for diag in diagnostics {
if let Some(span) = diag.primary_span() {
write!(
f,
"{path}",
path = fmt_styled(
span.file().relative_path(self.resolver).to_string_lossy(),
stylesheet.emphasis
)
)?;
if let Some(range) = span.range() {
let diagnostic_source = span.file().diagnostic_source(self.resolver);
let start = diagnostic_source
.as_source_code()
.line_column(range.start());
if let Some(notebook_index) = self.resolver.notebook_index(span.file()) {
write!(
f,
"{sep}cell {cell}{sep}{line}{sep}{col}",
cell = notebook_index.cell(start.line).unwrap_or_default(),
line = notebook_index.cell_row(start.line).unwrap_or_default(),
col = start.column,
)?;
} else {
write!(
f,
"{sep}{line}{sep}{col}",
line = start.line,
col = start.column,
)?;
}
}
write!(f, "{sep} ")?;
}
if self.config.hide_severity {
if let Some(code) = diag.secondary_code() {
write!(
f,
"{code} ",
code = fmt_styled(code, stylesheet.secondary_code)
)?;
}
if self.config.show_fix_status {
if let Some(fix) = diag.fix() {
// Do not display an indicator for inapplicable fixes
if fix.applies(self.config.fix_applicability) {
write!(f, "[{fix}] ", fix = fmt_styled("*", stylesheet.separator))?;
}
}
}
} else {
let (severity, severity_style) = match diag.severity() {
Severity::Info => ("info", stylesheet.info),
Severity::Warning => ("warning", stylesheet.warning),
Severity::Error => ("error", stylesheet.error),
Severity::Fatal => ("fatal", stylesheet.error),
};
write!(
f,
"{severity}[{id}] ",
severity = fmt_styled(severity, severity_style),
id = fmt_styled(diag.id(), stylesheet.emphasis)
)?;
}
writeln!(f, "{message}", message = diag.concise_message())?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use ruff_diagnostics::Applicability;
use crate::diagnostic::{
DiagnosticFormat,
render::tests::{
TestEnvironment, create_diagnostics, create_notebook_diagnostics,
create_syntax_error_diagnostics,
},
};
#[test]
fn output() {
let (env, diagnostics) = create_diagnostics(DiagnosticFormat::Concise);
insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r"
fib.py:1:8: error[unused-import] `os` imported but unused
fib.py:6:5: error[unused-variable] Local variable `x` is assigned to but never used
undef.py:1:4: error[undefined-name] Undefined name `a`
");
}
#[test]
fn show_fixes() {
let (mut env, diagnostics) = create_diagnostics(DiagnosticFormat::Concise);
env.hide_severity(true);
env.show_fix_status(true);
env.fix_applicability(Applicability::DisplayOnly);
insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r"
fib.py:1:8: F401 [*] `os` imported but unused
fib.py:6:5: F841 [*] Local variable `x` is assigned to but never used
undef.py:1:4: F821 Undefined name `a`
");
}
#[test]
fn show_fixes_preview() {
let (mut env, diagnostics) = create_diagnostics(DiagnosticFormat::Concise);
env.hide_severity(true);
env.show_fix_status(true);
env.fix_applicability(Applicability::DisplayOnly);
env.preview(true);
insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r"
fib.py:1:8: F401 [*] `os` imported but unused
fib.py:6:5: F841 [*] Local variable `x` is assigned to but never used
undef.py:1:4: F821 Undefined name `a`
");
}
#[test]
fn show_fixes_syntax_errors() {
let (mut env, diagnostics) = create_syntax_error_diagnostics(DiagnosticFormat::Concise);
env.hide_severity(true);
env.show_fix_status(true);
env.fix_applicability(Applicability::DisplayOnly);
insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r"
syntax_errors.py:1:15: SyntaxError: Expected one or more symbol names after import
syntax_errors.py:3:12: SyntaxError: Expected ')', found newline
");
}
#[test]
fn syntax_errors() {
let (env, diagnostics) = create_syntax_error_diagnostics(DiagnosticFormat::Concise);
insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r"
syntax_errors.py:1:15: error[invalid-syntax] SyntaxError: Expected one or more symbol names after import
syntax_errors.py:3:12: error[invalid-syntax] SyntaxError: Expected ')', found newline
");
}
#[test]
fn notebook_output() {
let (env, diagnostics) = create_notebook_diagnostics(DiagnosticFormat::Concise);
insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r"
notebook.ipynb:cell 1:2:8: error[unused-import] `os` imported but unused
notebook.ipynb:cell 2:2:8: error[unused-import] `math` imported but unused
notebook.ipynb:cell 3:4:5: error[unused-variable] Local variable `x` is assigned to but never used
");
}
#[test]
fn missing_file() {
let mut env = TestEnvironment::new();
env.format(DiagnosticFormat::Concise);
let diag = env.err().build();
insta::assert_snapshot!(
env.render(&diag),
@"error[test-diagnostic] main diagnostic message",
);
}
}

View File

@@ -1,180 +0,0 @@
#[cfg(test)]
mod tests {
use crate::diagnostic::{
DiagnosticFormat, Severity,
render::tests::{TestEnvironment, create_diagnostics, create_syntax_error_diagnostics},
};
#[test]
fn output() {
let (env, diagnostics) = create_diagnostics(DiagnosticFormat::Full);
insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r#"
error[unused-import]: `os` imported but unused
--> fib.py:1:8
|
1 | import os
| ^^
|
help: Remove unused import: `os`
error[unused-variable]: Local variable `x` is assigned to but never used
--> fib.py:6:5
|
4 | def fibonacci(n):
5 | """Compute the nth number in the Fibonacci sequence."""
6 | x = 1
| ^
7 | if n == 0:
8 | return 0
|
help: Remove assignment to unused variable `x`
error[undefined-name]: Undefined name `a`
--> undef.py:1:4
|
1 | if a == 1: pass
| ^
|
"#);
}
#[test]
fn syntax_errors() {
let (env, diagnostics) = create_syntax_error_diagnostics(DiagnosticFormat::Full);
insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r"
error[invalid-syntax]: SyntaxError: Expected one or more symbol names after import
--> syntax_errors.py:1:15
|
1 | from os import
| ^
2 |
3 | if call(foo
|
error[invalid-syntax]: SyntaxError: Expected ')', found newline
--> syntax_errors.py:3:12
|
1 | from os import
2 |
3 | if call(foo
| ^
4 | def bar():
5 | pass
|
");
}
/// Check that the new `full` rendering code in `ruff_db` handles cases fixed by commit c9b99e4.
///
/// For example, without the fix, we get diagnostics like this:
///
/// ```
/// error[no-indented-block]: Expected an indented block
/// --> example.py:3:1
/// |
/// 2 | if False:
/// | ^
/// 3 | print()
/// |
/// ```
///
/// where the caret points to the end of the previous line instead of the start of the next.
#[test]
fn empty_span_after_line_terminator() {
let mut env = TestEnvironment::new();
env.add(
"example.py",
r#"
if False:
print()
"#,
);
env.format(DiagnosticFormat::Full);
let diagnostic = env
.builder(
"no-indented-block",
Severity::Error,
"Expected an indented block",
)
.primary("example.py", "3:0", "3:0", "")
.build();
insta::assert_snapshot!(env.render(&diagnostic), @r"
error[no-indented-block]: Expected an indented block
--> example.py:3:1
|
2 | if False:
3 | print()
| ^
|
");
}
/// Check that the new `full` rendering code in `ruff_db` handles cases fixed by commit 2922490.
///
/// For example, without the fix, we get diagnostics like this:
///
/// ```
/// error[invalid-character-sub]: Invalid unescaped character SUB, use "\x1A" instead
/// --> example.py:1:25
/// |
/// 1 | nested_fstrings = f'␈{f'{f'␛'}'}'
/// | ^
/// |
/// ```
///
/// where the caret points to the `f` in the f-string instead of the start of the invalid
/// character (`^Z`).
#[test]
fn unprintable_characters() {
let mut env = TestEnvironment::new();
env.add("example.py", "nested_fstrings = f'{f'{f''}'}'");
env.format(DiagnosticFormat::Full);
let diagnostic = env
.builder(
"invalid-character-sub",
Severity::Error,
r#"Invalid unescaped character SUB, use "\x1A" instead"#,
)
.primary("example.py", "1:24", "1:24", "")
.build();
insta::assert_snapshot!(env.render(&diagnostic), @r#"
error[invalid-character-sub]: Invalid unescaped character SUB, use "\x1A" instead
--> example.py:1:25
|
1 | nested_fstrings = f'␈{f'{f'␛'}'}'
| ^
|
"#);
}
#[test]
fn multiple_unprintable_characters() -> std::io::Result<()> {
let mut env = TestEnvironment::new();
env.add("example.py", "");
env.format(DiagnosticFormat::Full);
let diagnostic = env
.builder(
"invalid-character-sub",
Severity::Error,
r#"Invalid unescaped character SUB, use "\x1A" instead"#,
)
.primary("example.py", "1:1", "1:1", "")
.build();
insta::assert_snapshot!(env.render(&diagnostic), @r#"
error[invalid-character-sub]: Invalid unescaped character SUB, use "\x1A" instead
--> example.py:1:2
|
1 | ␈
| ^
|
"#);
Ok(())
}
}

View File

@@ -87,7 +87,7 @@ pub(super) fn diagnostic_to_json<'a>(
let fix = diagnostic.fix().map(|fix| JsonFix {
applicability: fix.applicability(),
message: diagnostic.first_help_text(),
message: diagnostic.suggestion(),
edits: ExpandedEdits {
edits: fix.edits(),
notebook_index,

View File

@@ -41,8 +41,6 @@ pub struct DiagnosticStylesheet {
pub(crate) line_no: Style,
pub(crate) emphasis: Style,
pub(crate) none: Style,
pub(crate) separator: Style,
pub(crate) secondary_code: Style,
}
impl Default for DiagnosticStylesheet {
@@ -64,8 +62,6 @@ impl DiagnosticStylesheet {
line_no: bright_blue.effects(Effects::BOLD),
emphasis: Style::new().effects(Effects::BOLD),
none: Style::new(),
separator: AnsiColor::Cyan.on_default(),
secondary_code: AnsiColor::Red.on_default().effects(Effects::BOLD),
}
}
@@ -79,8 +75,6 @@ impl DiagnosticStylesheet {
line_no: Style::new(),
emphasis: Style::new(),
none: Style::new(),
separator: Style::new(),
secondary_code: Style::new(),
}
}
}

View File

@@ -1,6 +1,7 @@
use std::fmt;
use std::sync::Arc;
use countme::Count;
use dashmap::mapref::entry::Entry;
pub use file_root::{FileRoot, FileRootKind};
pub use path::FilePath;
@@ -311,6 +312,11 @@ pub struct File {
/// the file has been deleted is to change the status to `Deleted`.
#[default]
status: FileStatus,
/// Counter that counts the number of created file instances and active file instances.
/// Only enabled in debug builds.
#[default]
count: Count<File>,
}
// The Salsa heap is tracked separately.

View File

@@ -1,6 +1,8 @@
use std::ops::Deref;
use std::sync::Arc;
use countme::Count;
use ruff_notebook::Notebook;
use ruff_python_ast::PySourceType;
use ruff_source_file::LineIndex;
@@ -36,7 +38,11 @@ pub fn source_text(db: &dyn Db, file: File) -> SourceText {
};
SourceText {
inner: Arc::new(SourceTextInner { kind, read_error }),
inner: Arc::new(SourceTextInner {
kind,
read_error,
count: Count::new(),
}),
}
}
@@ -119,6 +125,8 @@ impl std::fmt::Debug for SourceText {
#[derive(Eq, PartialEq, get_size2::GetSize)]
struct SourceTextInner {
#[get_size(ignore)]
count: Count<SourceText>,
kind: SourceTextKind,
read_error: Option<SourceTextError>,
}

View File

@@ -20,7 +20,6 @@ ty_python_semantic = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true, optional = true }
memchr = { workspace = true }
salsa = { workspace = true }
schemars = { workspace = true, optional = true }
serde = { workspace = true, optional = true }

View File

@@ -1,4 +1,3 @@
use crate::StringImports;
use ruff_python_ast::visitor::source_order::{
SourceOrderVisitor, walk_expr, walk_module, walk_stmt,
};
@@ -11,13 +10,13 @@ pub(crate) struct Collector<'a> {
/// The path to the current module.
module_path: Option<&'a [String]>,
/// Whether to detect imports from string literals.
string_imports: StringImports,
string_imports: bool,
/// The collected imports from the Python AST.
imports: Vec<CollectedImport>,
}
impl<'a> Collector<'a> {
pub(crate) fn new(module_path: Option<&'a [String]>, string_imports: StringImports) -> Self {
pub(crate) fn new(module_path: Option<&'a [String]>, string_imports: bool) -> Self {
Self {
module_path,
string_imports,
@@ -119,7 +118,7 @@ impl<'ast> SourceOrderVisitor<'ast> for Collector<'_> {
| Stmt::Continue(_)
| Stmt::IpyEscapeCommand(_) => {
// Only traverse simple statements when string imports is enabled.
if self.string_imports.enabled {
if self.string_imports {
walk_stmt(self, stmt);
}
}
@@ -127,26 +126,20 @@ impl<'ast> SourceOrderVisitor<'ast> for Collector<'_> {
}
fn visit_expr(&mut self, expr: &'ast Expr) {
if self.string_imports.enabled {
if self.string_imports {
if let Expr::StringLiteral(ast::ExprStringLiteral {
value,
range: _,
node_index: _,
}) = expr
{
let value = value.to_str();
// Determine whether the string literal "looks like" an import statement: contains
// the requisite number of dots, and consists solely of valid Python identifiers.
if self.string_imports.min_dots == 0
|| memchr::memchr_iter(b'.', value.as_bytes()).count()
>= self.string_imports.min_dots
{
if let Some(module_name) = ModuleName::new(value) {
self.imports.push(CollectedImport::Import(module_name));
}
// a dot, and consists solely of valid Python identifiers.
let value = value.to_str();
if let Some(module_name) = ModuleName::new(value) {
self.imports.push(CollectedImport::Import(module_name));
}
}
walk_expr(self, expr);
}
}
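// A small sketch of the removed `min_dots` heuristic (assuming `min_dots = 2`, with an
// arbitrary example string): "airflow.providers.common" contains two dots and parses as a
// valid dotted module name, so it would be collected, while "os.path" (one dot) and
// "pandas" (no dots) would be skipped.
//
//     let value = "airflow.providers.common";
//     let dots = memchr::memchr_iter(b'.', value.as_bytes()).count();
//     assert!(dots >= 2 && ModuleName::new(value).is_some());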

View File

@@ -9,7 +9,7 @@ use ruff_python_parser::{Mode, ParseOptions, parse};
use crate::collector::Collector;
pub use crate::db::ModuleDb;
use crate::resolver::Resolver;
pub use crate::settings::{AnalyzeSettings, Direction, StringImports};
pub use crate::settings::{AnalyzeSettings, Direction};
mod collector;
mod db;
@@ -26,7 +26,7 @@ impl ModuleImports {
db: &ModuleDb,
path: &SystemPath,
package: Option<&SystemPath>,
string_imports: StringImports,
string_imports: bool,
) -> Result<Self> {
// Read and parse the source code.
let source = std::fs::read_to_string(path)?;
@@ -42,11 +42,13 @@ impl ModuleImports {
// Resolve the imports.
let mut resolved_imports = ModuleImports::default();
for import in imports {
for resolved in Resolver::new(db).resolve(import) {
if let Some(path) = resolved.as_system_path() {
resolved_imports.insert(path.to_path_buf());
}
}
let Some(resolved) = Resolver::new(db).resolve(import) else {
continue;
};
let Some(path) = resolved.as_system_path() else {
continue;
};
resolved_imports.insert(path.to_path_buf());
}
Ok(resolved_imports)

View File

@@ -1,5 +1,5 @@
use ruff_db::files::FilePath;
use ty_python_semantic::{ModuleName, resolve_module, resolve_real_module};
use ty_python_semantic::resolve_module;
use crate::ModuleDb;
use crate::collector::CollectedImport;
@@ -16,67 +16,24 @@ impl<'a> Resolver<'a> {
}
/// Resolve the [`CollectedImport`] into a [`FilePath`].
pub(crate) fn resolve(&self, import: CollectedImport) -> impl Iterator<Item = &'a FilePath> {
pub(crate) fn resolve(&self, import: CollectedImport) -> Option<&'a FilePath> {
match import {
CollectedImport::Import(import) => {
// Attempt to resolve the module (e.g., given `import foo`, look for `foo`).
let file = self.resolve_module(&import);
// If the file is a stub, look for the corresponding source file.
let source_file = file
.is_some_and(|file| file.extension() == Some("pyi"))
.then(|| self.resolve_real_module(&import))
.flatten();
std::iter::once(file)
.chain(std::iter::once(source_file))
.flatten()
let module = resolve_module(self.db, &import)?;
Some(module.file()?.path(self.db))
}
CollectedImport::ImportFrom(import) => {
// Attempt to resolve the member (e.g., given `from foo import bar`, look for `foo.bar`).
if let Some(file) = self.resolve_module(&import) {
// If the file is a stub, look for the corresponding source file.
let source_file = (file.extension() == Some("pyi"))
.then(|| self.resolve_real_module(&import))
.flatten();
return std::iter::once(Some(file))
.chain(std::iter::once(source_file))
.flatten();
}
// Attempt to resolve the module (e.g., given `from foo import bar`, look for `foo`).
let parent = import.parent();
let file = parent
.as_ref()
.and_then(|parent| self.resolve_module(parent));
// If the file is a stub, look for the corresponding source file.
let source_file = file
.is_some_and(|file| file.extension() == Some("pyi"))
.then(|| {
parent
.as_ref()
.and_then(|parent| self.resolve_real_module(parent))
})
.flatten();
let module = resolve_module(self.db, &import).or_else(|| {
// Attempt to resolve the module (e.g., given `from foo import bar`, look for `foo`).
std::iter::once(file)
.chain(std::iter::once(source_file))
.flatten()
resolve_module(self.db, &parent?)
})?;
Some(module.file()?.path(self.db))
}
}
}
/// Resolves a module name to a module.
fn resolve_module(&self, module_name: &ModuleName) -> Option<&'a FilePath> {
let module = resolve_module(self.db, module_name)?;
Some(module.file(self.db)?.path(self.db))
}
/// Resolves a module name to a module (stubs not allowed).
fn resolve_real_module(&self, module_name: &ModuleName) -> Option<&'a FilePath> {
let module = resolve_real_module(self.db, module_name)?;
Some(module.file(self.db)?.path(self.db))
}
}

View File

@@ -11,7 +11,7 @@ pub struct AnalyzeSettings {
pub exclude: FilePatternSet,
pub preview: PreviewMode,
pub target_version: PythonVersion,
pub string_imports: StringImports,
pub detect_string_imports: bool,
pub include_dependencies: BTreeMap<PathBuf, (PathBuf, Vec<String>)>,
pub extension: ExtensionMapping,
}
@@ -26,7 +26,7 @@ impl fmt::Display for AnalyzeSettings {
self.exclude,
self.preview,
self.target_version,
self.string_imports,
self.detect_string_imports,
self.extension | debug,
self.include_dependencies | debug,
]
@@ -35,31 +35,6 @@ impl fmt::Display for AnalyzeSettings {
}
}
#[derive(Debug, Copy, Clone, CacheKey)]
pub struct StringImports {
pub enabled: bool,
pub min_dots: usize,
}
impl Default for StringImports {
fn default() -> Self {
Self {
enabled: false,
min_dots: 2,
}
}
}
impl fmt::Display for StringImports {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.enabled {
write!(f, "enabled (min_dots: {})", self.min_dots)
} else {
write!(f, "disabled")
}
}
}
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, CacheKey)]
#[cfg_attr(
feature = "serde",

View File

@@ -1,6 +1,6 @@
[package]
name = "ruff_linter"
version = "0.12.5"
version = "0.12.4"
publish = false
authors = { workspace = true }
edition = { workspace = true }

View File

@@ -25,5 +25,5 @@ def my_func():
# t-strings - all ok
t"0.0.0.0"
t"0.0.0.0" t"0.0.0.0{expr}0.0.0.0"
t"0.0.0.0" t"0.0.0.0{expr}0.0.0.0" t"0.0.0.0{expr}0.0.0.0"
"0.0.0.0" t"0.0.0.0{expr}0.0.0.0"
"0.0.0.0" f"0.0.0.0{expr}0.0.0.0" t"0.0.0.0{expr}0.0.0.0"

View File

@@ -94,7 +94,7 @@ except Exception:
logging.error("...", exc_info=True)
from logging import critical, error, exception
from logging import error, exception
try:
pass
@@ -114,23 +114,6 @@ except Exception:
error("...", exc_info=None)
try:
pass
except Exception:
critical("...")
try:
pass
except Exception:
critical("...", exc_info=False)
try:
pass
except Exception:
critical("...", exc_info=None)
try:
pass
except Exception:
@@ -142,13 +125,6 @@ try:
except Exception:
error("...", exc_info=True)
try:
pass
except Exception:
critical("...", exc_info=True)
try:
...
except Exception as e:

View File

@@ -650,17 +650,3 @@ f"""This is a test. {
if True else
"Don't add a trailing comma here ->"
}"""
type X[
T
] = T
def f[
T
](): pass
class C[
T
]: pass
type X[T,] = T
def f[T,](): pass
class C[T,]: pass

View File

@@ -142,7 +142,3 @@ field47: typing.Optional[int] | typing.Optional[dict]
# avoid reporting twice
field48: typing.Union[typing.Optional[typing.Union[complex, complex]], complex]
field49: typing.Optional[complex | complex] | complex
# Regression test for https://github.com/astral-sh/ruff/issues/19403
# Should throw duplicate union member but not fix
isinstance(None, typing.Union[None, None])

View File

@@ -47,19 +47,3 @@ def _():
from builtin import open
with open(p) as _: ... # No error
file = "file_1.py"
rename(file, "file_2.py")
rename(
# commment 1
file, # comment 2
"file_2.py"
,
# comment 3
)
rename(file, "file_2.py", src_dir_fd=None, dst_dir_fd=None)
rename(file, "file_2.py", src_dir_fd=1)

View File

@@ -84,25 +84,3 @@ class MyRequestHandler(BaseHTTPRequestHandler):
def dont_GET(self):
pass
from http.server import CGIHTTPRequestHandler
class MyCGIRequestHandler(CGIHTTPRequestHandler):
def do_OPTIONS(self):
pass
def dont_OPTIONS(self):
pass
from http.server import SimpleHTTPRequestHandler
class MySimpleRequestHandler(SimpleHTTPRequestHandler):
def do_OPTIONS(self):
pass
def dont_OPTIONS(self):
pass

View File

@@ -278,30 +278,3 @@ def f():
for i in src:
if lambda: 0:
dst.append(i)
def f():
i = "xyz"
result = []
for i in range(3):
result.append(x for x in [i])
def f():
i = "xyz"
result = []
for i in range(3):
result.append((x for x in [i]))
G_INDEX = None
def f():
global G_INDEX
result = []
for G_INDEX in range(3):
result.append(G_INDEX)
def f():
NL_INDEX = None
def x():
nonlocal NL_INDEX
result = []
for NL_INDEX in range(3):
result.append(NL_INDEX)

View File

@@ -1,5 +0,0 @@
#
x = 0 \
#
+1
print(x)

View File

@@ -143,23 +143,3 @@ class NotAMethodButHardToDetect:
# without risking false positives elsewhere or introducing complex heuristics
# that users would find surprising and confusing
FOO = sorted([x for x in BAR], key=lambda x: x.baz)
# https://github.com/astral-sh/ruff/issues/19305
import pytest
@pytest.fixture
def my_fixture_with_param(request):
return request.param
@pytest.fixture()
def my_fixture_with_param2(request):
return request.param
# Decorated function (should be ignored)
def custom_decorator(func):
return func
@custom_decorator
def add(x, y):
return x + y

View File

@@ -55,12 +55,3 @@ _ = Decimal(0.1)
_ = Decimal(-0.5)
_ = Decimal(5.0)
_ = decimal.Decimal(4.2)
# Cases with int and bool - should produce safe fixes
_ = Decimal.from_float(1)
_ = Decimal.from_float(True)
# Cases with non-finite floats - should produce safe fixes
_ = Decimal.from_float(float("-nan"))
_ = Decimal.from_float(float("\x2dnan"))
_ = Decimal.from_float(float("\N{HYPHEN-MINUS}nan"))

View File

@@ -65,62 +65,3 @@ class Foo:
bar = "should've used attrs"
def __post_init__(self, bar: str = "ahhh", baz: str = "hmm") -> None: ...
# https://github.com/astral-sh/ruff/issues/18950
@dataclass
class Foo:
def __post_init__(self, bar: int = (x := 1)) -> None:
pass
@dataclass
class Foo:
def __post_init__(
self,
bar: int = (x := 1) # comment
,
baz: int = (y := 2), # comment
foo = (a := 1) # comment
,
faz = (b := 2), # comment
) -> None:
pass
@dataclass
class Foo:
def __post_init__(
self,
bar: int = 1, # comment
baz: int = 2, # comment
) -> None:
pass
@dataclass
class Foo:
def __post_init__(
self,
arg1: int = (1) # comment
,
arg2: int = ((1)) # comment
,
arg2: int = (i for i in range(10)) # comment
,
) -> None:
pass
# makes little sense, but is valid syntax
def fun_with_python_syntax():
@dataclass
class Foo:
def __post_init__(
self,
bar: (int) = (yield from range(5)) # comment
,
) -> None:
...
return Foo

View File

@@ -53,16 +53,3 @@ regex.subn(br"""eak your machine with rm -""", rf"""/""")
regex.splititer(both, non_literal)
regex.subf(f, lambda _: r'means', '"format"')
regex.subfn(fn, f'''a$1n't''', lambda: "'function'")
# https://github.com/astral-sh/ruff/issues/16713
re.compile("\a\f\n\r\t\u27F2\U0001F0A1\v\x41") # with unsafe fix
re.compile("\b") # without fix
re.compile("\"") # without fix
re.compile("\'") # without fix
re.compile('\"') # without fix
re.compile('\'') # without fix
re.compile("\\") # without fix
re.compile("\101") # without fix
re.compile("a\
b") # without fix

View File

@@ -91,20 +91,3 @@ regex.subf(
br''br""br''
)
regex.subfn(br'I\s\nee*d\s[O0o]me\x20\Qoffe\E, ' br'b')
# https://github.com/astral-sh/ruff/issues/16713
re.compile(
"["
"\U0001F600-\U0001F64F" # emoticons
"\U0001F300-\U0001F5FF" # symbols & pictographs
"\U0001F680-\U0001F6FF" # transport & map symbols
"\U0001F1E0-\U0001F1FF" # flags (iOS)
"\U00002702-\U000027B0"
"\U000024C2-\U0001F251"
"\u200d" # zero width joiner
"\u200c" # zero width non-joiner
"\\u200c" # must not be escaped in a raw string
"]+",
flags=re.UNICODE,
)

View File

@@ -1,3 +0,0 @@
import re
re.compile("\N{Partial Differential}") # with unsafe fix if python target is 3.8 or higher, else without fix

View File

@@ -1039,10 +1039,14 @@ pub(crate) fn expression(expr: &Expr, checker: &Checker) {
flake8_simplify::rules::zip_dict_keys_and_values(checker, call);
}
if checker.any_rule_enabled(&[
Rule::OsChmod,
Rule::OsMkdir,
Rule::OsMakedirs,
Rule::OsRename,
Rule::OsReplace,
Rule::OsStat,
Rule::OsPathJoin,
Rule::OsPathSamefile,
Rule::OsPathSplitext,
Rule::BuiltinOpen,
Rule::PyPath,
@@ -1108,18 +1112,6 @@ pub(crate) fn expression(expr: &Expr, checker: &Checker) {
if checker.is_rule_enabled(Rule::OsGetcwd) {
flake8_use_pathlib::rules::os_getcwd(checker, call, segments);
}
if checker.is_rule_enabled(Rule::OsChmod) {
flake8_use_pathlib::rules::os_chmod(checker, call, segments);
}
if checker.is_rule_enabled(Rule::OsRename) {
flake8_use_pathlib::rules::os_rename(checker, call, segments);
}
if checker.is_rule_enabled(Rule::OsReplace) {
flake8_use_pathlib::rules::os_replace(checker, call, segments);
}
if checker.is_rule_enabled(Rule::OsPathSamefile) {
flake8_use_pathlib::rules::os_path_samefile(checker, call, segments);
}
if checker.is_rule_enabled(Rule::PathConstructorCurrentDirectory) {
flake8_use_pathlib::rules::path_constructor_current_directory(
checker, call, segments,

View File

@@ -3216,11 +3216,6 @@ impl<'a> LintContext<'a> {
pub(crate) fn iter(&mut self) -> impl Iterator<Item = &Diagnostic> {
self.diagnostics.get_mut().iter()
}
/// The [`LinterSettings`] for the current analysis, including the enabled rules.
pub(crate) const fn settings(&self) -> &LinterSettings {
self.settings
}
}
/// An abstraction for mutating a diagnostic.

View File

@@ -16,6 +16,7 @@ use crate::rules::{
eradicate, flake8_commas, flake8_executable, flake8_fixme, flake8_implicit_str_concat,
flake8_pyi, flake8_todos, pycodestyle, pygrep_hooks, pylint, pyupgrade, ruff,
};
use crate::settings::LinterSettings;
use super::ast::LintContext;
@@ -26,6 +27,7 @@ pub(crate) fn check_tokens(
locator: &Locator,
indexer: &Indexer,
stylist: &Stylist,
settings: &LinterSettings,
source_type: PySourceType,
cell_offsets: Option<&CellOffsets>,
context: &mut LintContext,
@@ -40,8 +42,15 @@ pub(crate) fn check_tokens(
Rule::BlankLinesAfterFunctionOrClass,
Rule::BlankLinesBeforeNestedDefinition,
]) {
BlankLinesChecker::new(locator, stylist, source_type, cell_offsets, context)
.check_lines(tokens);
BlankLinesChecker::new(
locator,
stylist,
settings,
source_type,
cell_offsets,
context,
)
.check_lines(tokens);
}
if context.is_rule_enabled(Rule::BlanketTypeIgnore) {
@@ -49,17 +58,17 @@ pub(crate) fn check_tokens(
}
if context.is_rule_enabled(Rule::EmptyComment) {
pylint::rules::empty_comments(context, comment_ranges, locator, indexer);
pylint::rules::empty_comments(context, comment_ranges, locator);
}
if context.is_rule_enabled(Rule::AmbiguousUnicodeCharacterComment) {
for range in comment_ranges {
ruff::rules::ambiguous_unicode_character_comment(context, locator, range);
ruff::rules::ambiguous_unicode_character_comment(context, locator, range, settings);
}
}
if context.is_rule_enabled(Rule::CommentedOutCode) {
eradicate::rules::commented_out_code(context, locator, comment_ranges);
eradicate::rules::commented_out_code(context, locator, comment_ranges, settings);
}
if context.is_rule_enabled(Rule::UTF8EncodingDeclaration) {
@@ -101,7 +110,7 @@ pub(crate) fn check_tokens(
Rule::SingleLineImplicitStringConcatenation,
Rule::MultiLineImplicitStringConcatenation,
]) {
flake8_implicit_str_concat::rules::implicit(context, tokens, locator, indexer);
flake8_implicit_str_concat::rules::implicit(context, tokens, locator, indexer, settings);
}
if context.any_rule_enabled(&[

View File

@@ -920,11 +920,11 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
// flake8-use-pathlib
(Flake8UsePathlib, "100") => (RuleGroup::Stable, rules::flake8_use_pathlib::rules::OsPathAbspath),
(Flake8UsePathlib, "101") => (RuleGroup::Stable, rules::flake8_use_pathlib::rules::OsChmod),
(Flake8UsePathlib, "101") => (RuleGroup::Stable, rules::flake8_use_pathlib::violations::OsChmod),
(Flake8UsePathlib, "102") => (RuleGroup::Stable, rules::flake8_use_pathlib::violations::OsMkdir),
(Flake8UsePathlib, "103") => (RuleGroup::Stable, rules::flake8_use_pathlib::violations::OsMakedirs),
(Flake8UsePathlib, "104") => (RuleGroup::Stable, rules::flake8_use_pathlib::rules::OsRename),
(Flake8UsePathlib, "105") => (RuleGroup::Stable, rules::flake8_use_pathlib::rules::OsReplace),
(Flake8UsePathlib, "104") => (RuleGroup::Stable, rules::flake8_use_pathlib::violations::OsRename),
(Flake8UsePathlib, "105") => (RuleGroup::Stable, rules::flake8_use_pathlib::violations::OsReplace),
(Flake8UsePathlib, "106") => (RuleGroup::Stable, rules::flake8_use_pathlib::rules::OsRmdir),
(Flake8UsePathlib, "107") => (RuleGroup::Stable, rules::flake8_use_pathlib::rules::OsRemove),
(Flake8UsePathlib, "108") => (RuleGroup::Stable, rules::flake8_use_pathlib::rules::OsUnlink),
@@ -940,7 +940,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Flake8UsePathlib, "118") => (RuleGroup::Stable, rules::flake8_use_pathlib::violations::OsPathJoin),
(Flake8UsePathlib, "119") => (RuleGroup::Stable, rules::flake8_use_pathlib::rules::OsPathBasename),
(Flake8UsePathlib, "120") => (RuleGroup::Stable, rules::flake8_use_pathlib::rules::OsPathDirname),
(Flake8UsePathlib, "121") => (RuleGroup::Stable, rules::flake8_use_pathlib::rules::OsPathSamefile),
(Flake8UsePathlib, "121") => (RuleGroup::Stable, rules::flake8_use_pathlib::violations::OsPathSamefile),
(Flake8UsePathlib, "122") => (RuleGroup::Stable, rules::flake8_use_pathlib::violations::OsPathSplitext),
(Flake8UsePathlib, "123") => (RuleGroup::Stable, rules::flake8_use_pathlib::violations::BuiltinOpen),
(Flake8UsePathlib, "124") => (RuleGroup::Stable, rules::flake8_use_pathlib::violations::PyPath),

View File

@@ -188,6 +188,7 @@ pub fn check_path(
locator,
indexer,
stylist,
settings,
source_type,
source_kind.as_ipy_notebook().map(Notebook::cell_offsets),
&mut context,
@@ -472,7 +473,7 @@ pub fn lint_only(
&& !is_py314_support_enabled(settings)
{
warn_user_once!(
"Support for Python 3.14 is in preview and may undergo breaking changes. Enable `preview` to remove this warning."
"Support for Python 3.14 is under development and may be unstable. Enable `preview` to remove this warning."
);
}
@@ -583,7 +584,7 @@ pub fn lint_fix<'a>(
&& !is_py314_support_enabled(settings)
{
warn_user_once!(
"Support for Python 3.14 is in preview and may undergo breaking changes. Enable `preview` to remove this warning."
"Support for Python 3.14 is under development and may be unstable. Enable `preview` to remove this warning."
);
}

View File

@@ -118,6 +118,86 @@ impl<'a> Locator<'a> {
}
}
/// Finds the closest [`TextSize`] not less than the offset given for which
/// `is_char_boundary` is `true`. Unless the offset given is greater than
/// the length of the underlying contents, in which case, the length of the
/// contents is returned.
///
/// Can be replaced with `str::ceil_char_boundary` once it's stable.
///
/// # Examples
///
/// From `std`:
///
/// ```
/// use ruff_text_size::{Ranged, TextSize};
/// use ruff_linter::Locator;
///
/// let locator = Locator::new("❤️🧡💛💚💙💜");
/// assert_eq!(locator.text_len(), TextSize::from(26));
/// assert!(!locator.contents().is_char_boundary(13));
///
/// let closest = locator.ceil_char_boundary(TextSize::from(13));
/// assert_eq!(closest, TextSize::from(14));
/// assert_eq!(&locator.contents()[..closest.to_usize()], "❤️🧡💛");
/// ```
///
/// Additional examples:
///
/// ```
/// use ruff_text_size::{Ranged, TextRange, TextSize};
/// use ruff_linter::Locator;
///
/// let locator = Locator::new("Hello");
///
/// assert_eq!(
/// locator.ceil_char_boundary(TextSize::from(0)),
/// TextSize::from(0)
/// );
///
/// assert_eq!(
/// locator.ceil_char_boundary(TextSize::from(5)),
/// TextSize::from(5)
/// );
///
/// assert_eq!(
/// locator.ceil_char_boundary(TextSize::from(6)),
/// TextSize::from(5)
/// );
///
/// let locator = Locator::new("α");
///
/// assert_eq!(
/// locator.ceil_char_boundary(TextSize::from(0)),
/// TextSize::from(0)
/// );
///
/// assert_eq!(
/// locator.ceil_char_boundary(TextSize::from(1)),
/// TextSize::from(2)
/// );
///
/// assert_eq!(
/// locator.ceil_char_boundary(TextSize::from(2)),
/// TextSize::from(2)
/// );
///
/// assert_eq!(
/// locator.ceil_char_boundary(TextSize::from(3)),
/// TextSize::from(2)
/// );
/// ```
pub fn ceil_char_boundary(&self, offset: TextSize) -> TextSize {
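// 4 is the maximum number of bytes in a UTF-8 code point, so this window always
// contains the next char boundary when one exists before the end of the contents.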
let upper_bound = offset
.to_u32()
.saturating_add(4)
.min(self.text_len().to_u32());
(offset.to_u32()..upper_bound)
.map(TextSize::from)
.find(|offset| self.contents.is_char_boundary(offset.to_usize()))
.unwrap_or_else(|| TextSize::from(upper_bound))
}
/// Take the source code between the given [`TextRange`].
#[inline]
pub fn slice<T: Ranged>(&self, ranged: T) -> &'a str {

View File

@@ -75,12 +75,11 @@ where
);
let span = Span::from(file).with_range(range);
let annotation = Annotation::primary(span);
diagnostic.annotate(annotation);
let mut annotation = Annotation::primary(span);
if let Some(suggestion) = suggestion {
diagnostic.help(suggestion);
annotation = annotation.message(suggestion);
}
diagnostic.annotate(annotation);
if let Some(fix) = fix {
diagnostic.set_fix(fix);

View File

@@ -6,13 +6,13 @@ use bitflags::bitflags;
use colored::Colorize;
use ruff_annotate_snippets::{Level, Renderer, Snippet};
use ruff_db::diagnostic::{
Diagnostic, DiagnosticFormat, DisplayDiagnosticConfig, SecondaryCode, ceil_char_boundary,
};
use ruff_db::diagnostic::{Diagnostic, SecondaryCode};
use ruff_notebook::NotebookIndex;
use ruff_source_file::OneIndexed;
use ruff_source_file::{LineColumn, OneIndexed};
use ruff_text_size::{TextLen, TextRange, TextSize};
use crate::Locator;
use crate::fs::relativize_path;
use crate::line_width::{IndentWidth, LineWidthBuilder};
use crate::message::diff::Diff;
use crate::message::{Emitter, EmitterContext};
@@ -21,6 +21,8 @@ use crate::settings::types::UnsafeFixes;
bitflags! {
#[derive(Default)]
struct EmitterFlags: u8 {
/// Whether to show the fix status of a diagnostic.
const SHOW_FIX_STATUS = 1 << 0;
/// Whether to show the diff of a fix, for diagnostics that have a fix.
const SHOW_FIX_DIFF = 1 << 1;
/// Whether to show the source code of a diagnostic.
@@ -28,27 +30,17 @@ bitflags! {
}
}
#[derive(Default)]
pub struct TextEmitter {
flags: EmitterFlags,
config: DisplayDiagnosticConfig,
}
impl Default for TextEmitter {
fn default() -> Self {
Self {
flags: EmitterFlags::default(),
config: DisplayDiagnosticConfig::default()
.format(DiagnosticFormat::Concise)
.hide_severity(true)
.color(!cfg!(test) && colored::control::SHOULD_COLORIZE.should_colorize()),
}
}
unsafe_fixes: UnsafeFixes,
}
impl TextEmitter {
#[must_use]
pub fn with_show_fix_status(mut self, show_fix_status: bool) -> Self {
self.config = self.config.show_fix_status(show_fix_status);
self.flags
.set(EmitterFlags::SHOW_FIX_STATUS, show_fix_status);
self
}
@@ -66,21 +58,7 @@ impl TextEmitter {
#[must_use]
pub fn with_unsafe_fixes(mut self, unsafe_fixes: UnsafeFixes) -> Self {
self.config = self
.config
.fix_applicability(unsafe_fixes.required_applicability());
self
}
#[must_use]
pub fn with_preview(mut self, preview: bool) -> Self {
self.config = self.config.preview(preview);
self
}
#[must_use]
pub fn with_color(mut self, color: bool) -> Self {
self.config = self.config.color(color);
self.unsafe_fixes = unsafe_fixes;
self
}
}
@@ -93,10 +71,51 @@ impl Emitter for TextEmitter {
context: &EmitterContext,
) -> anyhow::Result<()> {
for message in diagnostics {
write!(writer, "{}", message.display(context, &self.config))?;
let filename = message.expect_ruff_filename();
write!(
writer,
"{path}{sep}",
path = relativize_path(&filename).bold(),
sep = ":".cyan(),
)?;
let start_location = message.expect_ruff_start_location();
let notebook_index = context.notebook_index(&filename);
// Check if we're working on a Jupyter notebook and, if so, translate positions to cell-relative coordinates
let diagnostic_location = if let Some(notebook_index) = notebook_index {
write!(
writer,
"cell {cell}{sep}",
cell = notebook_index
.cell(start_location.line)
.unwrap_or(OneIndexed::MIN),
sep = ":".cyan(),
)?;
LineColumn {
line: notebook_index
.cell_row(start_location.line)
.unwrap_or(OneIndexed::MIN),
column: start_location.column,
}
} else {
start_location
};
writeln!(
writer,
"{row}{sep}{col}{sep} {code_and_body}",
row = diagnostic_location.line,
col = diagnostic_location.column,
sep = ":".cyan(),
code_and_body = RuleCodeAndBody {
message,
show_fix_status: self.flags.intersects(EmitterFlags::SHOW_FIX_STATUS),
unsafe_fixes: self.unsafe_fixes,
}
)?;
if self.flags.intersects(EmitterFlags::SHOW_SOURCE) {
// The `0..0` range is used to highlight file-level diagnostics.
if message.expect_range() != TextRange::default() {
@@ -167,7 +186,7 @@ pub(super) struct MessageCodeFrame<'a> {
impl Display for MessageCodeFrame<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let suggestion = self.message.first_help_text();
let suggestion = self.message.suggestion();
let footers = if let Some(suggestion) = suggestion {
vec![Level::Help.title(suggestion)]
} else {
@@ -377,8 +396,9 @@ impl<'a> SourceCode<'a> {
if self.text.as_bytes()[self.annotation_range.start().to_usize() - 1] != b'\n' {
return self;
}
let locator = Locator::new(&self.text);
let start = self.annotation_range.start();
let end = ceil_char_boundary(&self.text, start + TextSize::from(1));
let end = locator.ceil_char_boundary(start + TextSize::from(1));
SourceCode {
annotation_range: TextRange::new(start, end),
..self

View File

@@ -134,26 +134,6 @@ pub(crate) const fn is_fix_os_path_dirname_enabled(settings: &LinterSettings) ->
settings.preview.is_enabled()
}
// https://github.com/astral-sh/ruff/pull/19404
pub(crate) const fn is_fix_os_chmod_enabled(settings: &LinterSettings) -> bool {
settings.preview.is_enabled()
}
// https://github.com/astral-sh/ruff/pull/19404
pub(crate) const fn is_fix_os_rename_enabled(settings: &LinterSettings) -> bool {
settings.preview.is_enabled()
}
// https://github.com/astral-sh/ruff/pull/19404
pub(crate) const fn is_fix_os_replace_enabled(settings: &LinterSettings) -> bool {
settings.preview.is_enabled()
}
// https://github.com/astral-sh/ruff/pull/19404
pub(crate) const fn is_fix_os_path_samefile_enabled(settings: &LinterSettings) -> bool {
settings.preview.is_enabled()
}
// https://github.com/astral-sh/ruff/pull/19245
pub(crate) const fn is_fix_os_getcwd_enabled(settings: &LinterSettings) -> bool {
settings.preview.is_enabled()
@@ -225,8 +205,3 @@ pub(crate) const fn is_assert_raises_exception_call_enabled(settings: &LinterSet
pub(crate) const fn is_add_future_annotations_imports_enabled(settings: &LinterSettings) -> bool {
settings.preview.is_enabled()
}
// https://github.com/astral-sh/ruff/pull/19390
pub(crate) const fn is_trailing_comma_type_params_enabled(settings: &LinterSettings) -> bool {
settings.preview.is_enabled()
}

View File

@@ -5,6 +5,7 @@ use ruff_text_size::TextRange;
use crate::Locator;
use crate::checkers::ast::LintContext;
use crate::settings::LinterSettings;
use crate::{Edit, Fix, FixAvailability, Violation};
use crate::rules::eradicate::detection::comment_contains_code;
@@ -50,6 +51,7 @@ pub(crate) fn commented_out_code(
context: &LintContext,
locator: &Locator,
comment_ranges: &CommentRanges,
settings: &LinterSettings,
) {
let mut comments = comment_ranges.into_iter().peekable();
// Iterate over all comments in the document.
@@ -63,9 +65,7 @@ pub(crate) fn commented_out_code(
}
// Verify that the comment is on its own line, and that it contains code.
if is_own_line_comment(line)
&& comment_contains_code(line, &context.settings().task_tags[..])
{
if is_own_line_comment(line) && comment_contains_code(line, &settings.task_tags[..]) {
if let Some(mut diagnostic) =
context.report_diagnostic_if_enabled(CommentedOutCode, range)
{

View File

@@ -47,10 +47,9 @@ use crate::checkers::ast::Checker;
/// raise
/// ```
///
/// Exceptions that are logged via `logging.exception()` or are logged via
/// `logging.error()` or `logging.critical()` with `exc_info` enabled will
/// _not_ be flagged, as this is a common pattern for propagating exception
/// traces:
/// Exceptions that are logged via `logging.exception()` or `logging.error()`
/// with `exc_info` enabled will _not_ be flagged, as this is a common pattern
/// for propagating exception traces:
/// ```python
/// try:
/// foo()
@@ -202,7 +201,7 @@ impl<'a> StatementVisitor<'a> for LogExceptionVisitor<'a> {
) {
if match attr.as_str() {
"exception" => true,
"error" | "critical" => arguments
"error" => arguments
.find_keyword("exc_info")
.is_some_and(|keyword| is_const_true(&keyword.value)),
_ => false,
@@ -215,7 +214,7 @@ impl<'a> StatementVisitor<'a> for LogExceptionVisitor<'a> {
if self.semantic.resolve_qualified_name(func).is_some_and(
|qualified_name| match qualified_name.segments() {
["logging", "exception"] => true,
["logging", "error" | "critical"] => arguments
["logging", "error"] => arguments
.find_keyword("exc_info")
.is_some_and(|keyword| is_const_true(&keyword.value)),
_ => false,

View File

@@ -1,5 +1,6 @@
---
source: crates/ruff_linter/src/rules/flake8_blind_except/mod.rs
snapshot_kind: text
---
BLE.py:25:8: BLE001 Do not catch blind exception: `BaseException`
|
@@ -120,30 +121,3 @@ BLE.py:113:8: BLE001 Do not catch blind exception: `Exception`
| ^^^^^^^^^ BLE001
114 | error("...", exc_info=None)
|
BLE.py:119:8: BLE001 Do not catch blind exception: `Exception`
|
117 | try:
118 | pass
119 | except Exception:
| ^^^^^^^^^ BLE001
120 | critical("...")
|
BLE.py:125:8: BLE001 Do not catch blind exception: `Exception`
|
123 | try:
124 | pass
125 | except Exception:
| ^^^^^^^^^ BLE001
126 | critical("...", exc_info=False)
|
BLE.py:131:8: BLE001 Do not catch blind exception: `Exception`
|
129 | try:
130 | pass
131 | except Exception:
| ^^^^^^^^^ BLE001
132 | critical("...", exc_info=None)
|

View File

@@ -27,23 +27,4 @@ mod tests {
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test_case(Path::new("COM81.py"))]
#[test_case(Path::new("COM81_syntax_error.py"))]
fn preview_rules(path: &Path) -> Result<()> {
let snapshot = format!("preview__{}", path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_commas").join(path).as_path(),
&settings::LinterSettings {
preview: crate::settings::types::PreviewMode::Enabled,
..settings::LinterSettings::for_rules(vec![
Rule::MissingTrailingComma,
Rule::TrailingCommaOnBareTuple,
Rule::ProhibitedTrailingComma,
])
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
}

View File

@@ -5,8 +5,6 @@ use ruff_text_size::{Ranged, TextRange};
use crate::Locator;
use crate::checkers::ast::LintContext;
use crate::preview::is_trailing_comma_type_params_enabled;
use crate::settings::LinterSettings;
use crate::{AlwaysFixableViolation, Violation};
use crate::{Edit, Fix};
@@ -26,8 +24,6 @@ enum TokenType {
Def,
For,
Lambda,
Class,
Type,
Irrelevant,
}
@@ -73,8 +69,6 @@ impl From<(TokenKind, TextRange)> for SimpleToken {
TokenKind::Lbrace => TokenType::OpeningCurlyBracket,
TokenKind::Rbrace => TokenType::ClosingBracket,
TokenKind::Def => TokenType::Def,
TokenKind::Class => TokenType::Class,
TokenKind::Type => TokenType::Type,
TokenKind::For => TokenType::For,
TokenKind::Lambda => TokenType::Lambda,
// Import treated like a function.
@@ -104,8 +98,6 @@ enum ContextType {
Dict,
/// Lambda parameter list, e.g. `lambda a, b`.
LambdaParameters,
/// Type parameter list, e.g. `def foo[T, U](): ...`
TypeParameters,
}
/// Comma context - described a comma-delimited "situation".
@@ -298,7 +290,7 @@ pub(crate) fn trailing_commas(
}
// Update the comma context stack.
let context = update_context(token, prev, prev_prev, &mut stack, lint_context.settings());
let context = update_context(token, prev, prev_prev, &mut stack);
check_token(token, prev, prev_prev, context, locator, lint_context);
@@ -334,7 +326,6 @@ fn check_token(
ContextType::No => false,
ContextType::FunctionParameters => true,
ContextType::CallArguments => true,
ContextType::TypeParameters => true,
// `(1)` is not equivalent to `(1,)`.
ContextType::Tuple => context.num_commas != 0,
// `x[1]` is not equivalent to `x[1,]`.
@@ -417,7 +408,6 @@ fn update_context(
prev: SimpleToken,
prev_prev: SimpleToken,
stack: &mut Vec<Context>,
settings: &LinterSettings,
) -> Context {
let new_context = match token.ty {
TokenType::OpeningBracket => match (prev.ty, prev_prev.ty) {
@@ -427,17 +417,6 @@ fn update_context(
}
_ => Context::new(ContextType::Tuple),
},
TokenType::OpeningSquareBracket if is_trailing_comma_type_params_enabled(settings) => {
match (prev.ty, prev_prev.ty) {
(TokenType::Named, TokenType::Def | TokenType::Class | TokenType::Type) => {
Context::new(ContextType::TypeParameters)
}
(TokenType::ClosingBracket | TokenType::Named | TokenType::String, _) => {
Context::new(ContextType::Subscript)
}
_ => Context::new(ContextType::List),
}
}
TokenType::OpeningSquareBracket => match prev.ty {
TokenType::ClosingBracket | TokenType::Named | TokenType::String => {
Context::new(ContextType::Subscript)

View File

@@ -1,30 +0,0 @@
---
source: crates/ruff_linter/src/rules/flake8_commas/mod.rs
---
COM81_syntax_error.py:3:5: SyntaxError: Starred expression cannot be used here
|
1 | # Check for `flake8-commas` violation for a file containing syntax errors.
2 | (
3 | *args
| ^^^^^
4 | )
|
COM81_syntax_error.py:6:9: SyntaxError: Type parameter list cannot be empty
|
4 | )
5 |
6 | def foo[(param1='test', param2='test',):
| ^
7 | pass
|
COM81_syntax_error.py:6:38: COM819 Trailing comma prohibited
|
4 | )
5 |
6 | def foo[(param1='test', param2='test',):
| ^ COM819
7 | pass
|
= help: Remove trailing comma

View File

@@ -11,6 +11,7 @@ use ruff_text_size::{Ranged, TextRange};
use crate::Locator;
use crate::checkers::ast::LintContext;
use crate::settings::LinterSettings;
use crate::{Edit, Fix, FixAvailability, Violation};
/// ## What it does
@@ -107,15 +108,13 @@ pub(crate) fn implicit(
tokens: &Tokens,
locator: &Locator,
indexer: &Indexer,
settings: &LinterSettings,
) {
for (a_token, b_token) in tokens
.iter()
.filter(|token| {
token.kind() != TokenKind::Comment
&& (context
.settings()
.flake8_implicit_str_concat
.allow_multiline
&& (settings.flake8_implicit_str_concat.allow_multiline
|| token.kind() != TokenKind::NonLogicalNewline)
})
.tuple_windows()

View File

@@ -64,7 +64,6 @@ pub(crate) fn duplicate_union_member<'a>(checker: &Checker, expr: &'a Expr) {
let mut diagnostics = Vec::new();
let mut union_type = UnionKind::TypingUnion;
let mut optional_present = false;
// Adds a member to `literal_exprs` if it is a `Literal` annotation
let mut check_for_duplicate_members = |expr: &'a Expr, parent: &'a Expr| {
if matches!(parent, Expr::BinOp(_)) {
@@ -75,7 +74,6 @@ pub(crate) fn duplicate_union_member<'a>(checker: &Checker, expr: &'a Expr) {
&& is_optional_type(checker, expr)
{
// If the union member is an `Optional`, add a virtual `None` literal.
optional_present = true;
&VIRTUAL_NONE_LITERAL
} else {
expr
@@ -89,7 +87,7 @@ pub(crate) fn duplicate_union_member<'a>(checker: &Checker, expr: &'a Expr) {
DuplicateUnionMember {
duplicate_name: checker.generator().expr(virtual_expr),
},
// Use the real expression's range for diagnostics.
// Use the real expression's range for diagnostics,
expr.range(),
));
}
@@ -106,13 +104,6 @@ pub(crate) fn duplicate_union_member<'a>(checker: &Checker, expr: &'a Expr) {
return;
}
// Do not reduce `Union[None, ... None]` to avoid introducing a `TypeError` unintentionally
// e.g. `isinstance(None, Union[None, None])`, if reduced to `isinstance(None, None)`, raises
// `TypeError: isinstance() arg 2 must be a type, a tuple of types, or a union`.
if unique_nodes.iter().all(|expr| expr.is_none_literal_expr()) && !optional_present {
return;
}
// Mark [`Fix`] as unsafe when comments are in range.
let applicability = if checker.comment_ranges().intersects(expr.range()) {
Applicability::Unsafe

View File

@@ -974,8 +974,6 @@ PYI016.py:143:61: PYI016 [*] Duplicate union member `complex`
143 |-field48: typing.Union[typing.Optional[typing.Union[complex, complex]], complex]
143 |+field48: typing.Union[typing.Optional[complex], complex]
144 144 | field49: typing.Optional[complex | complex] | complex
145 145 |
146 146 | # Regression test for https://github.com/astral-sh/ruff/issues/19403
PYI016.py:144:36: PYI016 [*] Duplicate union member `complex`
|
@@ -983,8 +981,6 @@ PYI016.py:144:36: PYI016 [*] Duplicate union member `complex`
143 | field48: typing.Union[typing.Optional[typing.Union[complex, complex]], complex]
144 | field49: typing.Optional[complex | complex] | complex
| ^^^^^^^ PYI016
145 |
146 | # Regression test for https://github.com/astral-sh/ruff/issues/19403
|
= help: Remove duplicate union member `complex`
@@ -994,15 +990,3 @@ PYI016.py:144:36: PYI016 [*] Duplicate union member `complex`
143 143 | field48: typing.Union[typing.Optional[typing.Union[complex, complex]], complex]
144 |-field49: typing.Optional[complex | complex] | complex
144 |+field49: typing.Optional[complex] | complex
145 145 |
146 146 | # Regression test for https://github.com/astral-sh/ruff/issues/19403
147 147 | # Should throw duplicate union member but not fix
PYI016.py:148:37: PYI016 Duplicate union member `None`
|
146 | # Regression test for https://github.com/astral-sh/ruff/issues/19403
147 | # Should throw duplicate union member but not fix
148 | isinstance(None, typing.Union[None, None])
| ^^^^ PYI016
|
= help: Remove duplicate union member `None`

View File

@@ -1162,8 +1162,6 @@ PYI016.py:143:61: PYI016 [*] Duplicate union member `complex`
143 |-field48: typing.Union[typing.Optional[typing.Union[complex, complex]], complex]
143 |+field48: typing.Union[None, complex]
144 144 | field49: typing.Optional[complex | complex] | complex
145 145 |
146 146 | # Regression test for https://github.com/astral-sh/ruff/issues/19403
PYI016.py:143:72: PYI016 [*] Duplicate union member `complex`
|
@@ -1181,8 +1179,6 @@ PYI016.py:143:72: PYI016 [*] Duplicate union member `complex`
143 |-field48: typing.Union[typing.Optional[typing.Union[complex, complex]], complex]
143 |+field48: typing.Union[None, complex]
144 144 | field49: typing.Optional[complex | complex] | complex
145 145 |
146 146 | # Regression test for https://github.com/astral-sh/ruff/issues/19403
PYI016.py:144:36: PYI016 [*] Duplicate union member `complex`
|
@@ -1190,8 +1186,6 @@ PYI016.py:144:36: PYI016 [*] Duplicate union member `complex`
143 | field48: typing.Union[typing.Optional[typing.Union[complex, complex]], complex]
144 | field49: typing.Optional[complex | complex] | complex
| ^^^^^^^ PYI016
145 |
146 | # Regression test for https://github.com/astral-sh/ruff/issues/19403
|
= help: Remove duplicate union member `complex`
@@ -1201,9 +1195,6 @@ PYI016.py:144:36: PYI016 [*] Duplicate union member `complex`
143 143 | field48: typing.Union[typing.Optional[typing.Union[complex, complex]], complex]
144 |-field49: typing.Optional[complex | complex] | complex
144 |+field49: None | complex
145 145 |
146 146 | # Regression test for https://github.com/astral-sh/ruff/issues/19403
147 147 | # Should throw duplicate union member but not fix
PYI016.py:144:47: PYI016 [*] Duplicate union member `complex`
|
@@ -1211,8 +1202,6 @@ PYI016.py:144:47: PYI016 [*] Duplicate union member `complex`
143 | field48: typing.Union[typing.Optional[typing.Union[complex, complex]], complex]
144 | field49: typing.Optional[complex | complex] | complex
| ^^^^^^^ PYI016
145 |
146 | # Regression test for https://github.com/astral-sh/ruff/issues/19403
|
= help: Remove duplicate union member `complex`
@@ -1222,15 +1211,3 @@ PYI016.py:144:47: PYI016 [*] Duplicate union member `complex`
143 143 | field48: typing.Union[typing.Optional[typing.Union[complex, complex]], complex]
144 |-field49: typing.Optional[complex | complex] | complex
144 |+field49: None | complex
145 145 |
146 146 | # Regression test for https://github.com/astral-sh/ruff/issues/19403
147 147 | # Should throw duplicate union member but not fix
PYI016.py:148:37: PYI016 Duplicate union member `None`
|
146 | # Regression test for https://github.com/astral-sh/ruff/issues/19403
147 | # Should throw duplicate union member but not fix
148 | isinstance(None, typing.Union[None, None])
| ^^^^ PYI016
|
= help: Remove duplicate union member `None`

View File

@@ -1,8 +1,8 @@
use crate::checkers::ast::Checker;
use crate::importer::ImportRequest;
use crate::{Applicability, Edit, Fix, Violation};
use ruff_python_ast::{self as ast, Expr, ExprCall};
use ruff_python_semantic::{SemanticModel, analyze::typing};
use ruff_python_ast::{self as ast};
use ruff_python_ast::{Expr, ExprCall};
use ruff_text_size::Ranged;
pub(crate) fn is_keyword_only_argument_non_default(arguments: &ast::Arguments, name: &str) -> bool {
@@ -72,85 +72,3 @@ pub(crate) fn check_os_pathlib_single_arg_calls(
});
}
}
pub(crate) fn get_name_expr(expr: &Expr) -> Option<&ast::ExprName> {
match expr {
Expr::Name(name) => Some(name),
Expr::Call(ExprCall { func, .. }) => get_name_expr(func),
_ => None,
}
}
/// Returns `true` if the given expression looks like a file descriptor, i.e., if it is an integer.
pub(crate) fn is_file_descriptor(expr: &Expr, semantic: &SemanticModel) -> bool {
if matches!(
expr,
Expr::NumberLiteral(ast::ExprNumberLiteral {
value: ast::Number::Int(_),
..
})
) {
return true;
}
let Some(name) = get_name_expr(expr) else {
return false;
};
let Some(binding) = semantic.only_binding(name).map(|id| semantic.binding(id)) else {
return false;
};
typing::is_int(binding, semantic)
}
pub(crate) fn check_os_pathlib_two_arg_calls(
checker: &Checker,
call: &ExprCall,
attr: &str,
path_arg: &str,
second_arg: &str,
fix_enabled: bool,
violation: impl Violation,
) {
let range = call.range();
let mut diagnostic = checker.report_diagnostic(violation, call.func.range());
let (Some(path_expr), Some(second_expr)) = (
call.arguments.find_argument_value(path_arg, 0),
call.arguments.find_argument_value(second_arg, 1),
) else {
return;
};
let path_code = checker.locator().slice(path_expr.range());
let second_code = checker.locator().slice(second_expr.range());
if fix_enabled {
diagnostic.try_set_fix(|| {
let (import_edit, binding) = checker.importer().get_or_import_symbol(
&ImportRequest::import("pathlib", "Path"),
call.start(),
checker.semantic(),
)?;
let replacement = if is_pathlib_path_call(checker, path_expr) {
format!("{path_code}.{attr}({second_code})")
} else {
format!("{binding}({path_code}).{attr}({second_code})")
};
let applicability = if checker.comment_ranges().intersects(range) {
Applicability::Unsafe
} else {
Applicability::Safe
};
Ok(Fix::applicable_edits(
Edit::range_replacement(replacement, range),
[import_edit],
applicability,
))
});
}
}
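
A fixture-style sketch of what the `check_os_pathlib_two_arg_calls` and `is_file_descriptor` helpers removed above handled for the two-argument PTH rules. The paths and modes are illustrative; the real fix also imports `Path` (or `pathlib`) through the importer, as the PTH104 snapshots later in this diff show:

```python
import os
from pathlib import Path

os.chmod("file.py", 0o444)        # PTH101; preview fix: Path("file.py").chmod(0o444)
os.chmod(Path("file.py"), 0o444)  # PTH101; the helper reuses the existing Path(...) call as the receiver
os.chmod(0, 0o444)                # skipped: an integer argument looks like a file descriptor,
                                  # and pathlib offers no fd-based chmod()
```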

View File

@@ -1,6 +1,5 @@
pub(crate) use glob_rule::*;
pub(crate) use invalid_pathlib_with_suffix::*;
pub(crate) use os_chmod::*;
pub(crate) use os_getcwd::*;
pub(crate) use os_path_abspath::*;
pub(crate) use os_path_basename::*;
@@ -15,11 +14,8 @@ pub(crate) use os_path_isabs::*;
pub(crate) use os_path_isdir::*;
pub(crate) use os_path_isfile::*;
pub(crate) use os_path_islink::*;
pub(crate) use os_path_samefile::*;
pub(crate) use os_readlink::*;
pub(crate) use os_remove::*;
pub(crate) use os_rename::*;
pub(crate) use os_replace::*;
pub(crate) use os_rmdir::*;
pub(crate) use os_sep_split::*;
pub(crate) use os_unlink::*;
@@ -28,7 +24,6 @@ pub(crate) use replaceable_by_pathlib::*;
mod glob_rule;
mod invalid_pathlib_with_suffix;
mod os_chmod;
mod os_getcwd;
mod os_path_abspath;
mod os_path_basename;
@@ -43,11 +38,8 @@ mod os_path_isabs;
mod os_path_isdir;
mod os_path_isfile;
mod os_path_islink;
mod os_path_samefile;
mod os_readlink;
mod os_remove;
mod os_rename;
mod os_replace;
mod os_rmdir;
mod os_sep_split;
mod os_unlink;

View File

@@ -1,94 +0,0 @@
use crate::checkers::ast::Checker;
use crate::preview::is_fix_os_chmod_enabled;
use crate::rules::flake8_use_pathlib::helpers::{
check_os_pathlib_two_arg_calls, is_file_descriptor, is_keyword_only_argument_non_default,
};
use crate::{FixAvailability, Violation};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::ExprCall;
/// ## What it does
/// Checks for uses of `os.chmod`.
///
/// ## Why is this bad?
/// `pathlib` offers a high-level API for path manipulation, as compared to
/// the lower-level API offered by `os`. When possible, using `Path` object
/// methods such as `Path.chmod()` can improve readability over the `os`
/// module's counterparts (e.g., `os.chmod()`).
///
/// ## Examples
/// ```python
/// import os
///
/// os.chmod("file.py", 0o444)
/// ```
///
/// Use instead:
/// ```python
/// from pathlib import Path
///
/// Path("file.py").chmod(0o444)
/// ```
///
/// ## Known issues
/// While using `pathlib` can improve the readability and type safety of your code,
/// it can be less performant than the lower-level alternatives that work directly with strings,
/// especially on older versions of Python.
///
/// ## Fix Safety
/// This rule's fix is marked as unsafe if the replacement would remove comments attached to the original expression.
///
/// ## References
/// - [Python documentation: `Path.chmod`](https://docs.python.org/3/library/pathlib.html#pathlib.Path.chmod)
/// - [Python documentation: `os.chmod`](https://docs.python.org/3/library/os.html#os.chmod)
/// - [PEP 428 – The pathlib module – object-oriented filesystem paths](https://peps.python.org/pep-0428/)
/// - [Correspondence between `os` and `pathlib`](https://docs.python.org/3/library/pathlib.html#correspondence-to-tools-in-the-os-module)
/// - [Why you should be using pathlib](https://treyhunner.com/2018/12/why-you-should-be-using-pathlib/)
/// - [No really, pathlib is great](https://treyhunner.com/2019/01/no-really-pathlib-is-great/)
#[derive(ViolationMetadata)]
pub(crate) struct OsChmod;
impl Violation for OsChmod {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
"`os.chmod()` should be replaced by `Path.chmod()`".to_string()
}
fn fix_title(&self) -> Option<String> {
Some("Replace with `Path(...).chmod(...)`".to_string())
}
}
/// PTH101
pub(crate) fn os_chmod(checker: &Checker, call: &ExprCall, segments: &[&str]) {
if segments != ["os", "chmod"] {
return;
}
// `dir_fd` is not supported by pathlib, so check if it's set to non-default values.
// Signature as of Python 3.13 (https://docs.python.org/3/library/os.html#os.chmod)
// ```text
// 0 1 2 3
// os.chmod(path, mode, *, dir_fd=None, follow_symlinks=True)
// ```
if call
.arguments
.find_argument_value("path", 0)
.is_some_and(|expr| is_file_descriptor(expr, checker.semantic()))
|| is_keyword_only_argument_non_default(&call.arguments, "dir_fd")
{
return;
}
check_os_pathlib_two_arg_calls(
checker,
call,
"chmod",
"path",
"mode",
is_fix_os_chmod_enabled(checker.settings()),
OsChmod,
);
}
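
To make the `dir_fd` guard in `os_chmod` concrete, a fixture-style sketch (the `dfd` handle is illustrative; the keyword form is skipped because `Path.chmod()` has no `dir_fd` parameter):

```python
import os

dfd = os.open("/tmp", os.O_RDONLY)

os.chmod("file.py", 0o444)              # PTH101: replaceable by Path("file.py").chmod(0o444)
os.chmod("file.py", 0o444, dir_fd=dfd)  # skipped: dir_fd has a non-default value
```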

View File

@@ -1,77 +0,0 @@
use crate::checkers::ast::Checker;
use crate::preview::is_fix_os_path_samefile_enabled;
use crate::rules::flake8_use_pathlib::helpers::check_os_pathlib_two_arg_calls;
use crate::{FixAvailability, Violation};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::ExprCall;
/// ## What it does
/// Checks for uses of `os.path.samefile`.
///
/// ## Why is this bad?
/// `pathlib` offers a high-level API for path manipulation, as compared to
/// the lower-level API offered by `os.path`. When possible, using `Path` object
/// methods such as `Path.samefile()` can improve readability over the `os.path`
/// module's counterparts (e.g., `os.path.samefile()`).
///
/// ## Examples
/// ```python
/// import os
///
/// os.path.samefile("f1.py", "f2.py")
/// ```
///
/// Use instead:
/// ```python
/// from pathlib import Path
///
/// Path("f1.py").samefile("f2.py")
/// ```
///
/// ## Known issues
/// While using `pathlib` can improve the readability and type safety of your code,
/// it can be less performant than the lower-level alternatives that work directly with strings,
/// especially on older versions of Python.
///
/// ## Fix Safety
/// This rule's fix is marked as unsafe if the replacement would remove comments attached to the original expression.
///
/// ## References
/// - [Python documentation: `Path.samefile`](https://docs.python.org/3/library/pathlib.html#pathlib.Path.samefile)
/// - [Python documentation: `os.path.samefile`](https://docs.python.org/3/library/os.path.html#os.path.samefile)
/// - [PEP 428 – The pathlib module – object-oriented filesystem paths](https://peps.python.org/pep-0428/)
/// - [Correspondence between `os` and `pathlib`](https://docs.python.org/3/library/pathlib.html#correspondence-to-tools-in-the-os-module)
/// - [Why you should be using pathlib](https://treyhunner.com/2018/12/why-you-should-be-using-pathlib/)
/// - [No really, pathlib is great](https://treyhunner.com/2019/01/no-really-pathlib-is-great/)
#[derive(ViolationMetadata)]
pub(crate) struct OsPathSamefile;
impl Violation for OsPathSamefile {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
"`os.path.samefile()` should be replaced by `Path.samefile()`".to_string()
}
fn fix_title(&self) -> Option<String> {
Some("Replace with `Path(...).samefile()`".to_string())
}
}
/// PTH121
pub(crate) fn os_path_samefile(checker: &Checker, call: &ExprCall, segments: &[&str]) {
if segments != ["os", "path", "samefile"] {
return;
}
check_os_pathlib_two_arg_calls(
checker,
call,
"samefile",
"f1",
"f2",
is_fix_os_path_samefile_enabled(checker.settings()),
OsPathSamefile,
);
}

View File

@@ -1,91 +0,0 @@
use crate::checkers::ast::Checker;
use crate::preview::is_fix_os_rename_enabled;
use crate::rules::flake8_use_pathlib::helpers::{
check_os_pathlib_two_arg_calls, is_keyword_only_argument_non_default,
};
use crate::{FixAvailability, Violation};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::ExprCall;
/// ## What it does
/// Checks for uses of `os.rename`.
///
/// ## Why is this bad?
/// `pathlib` offers a high-level API for path manipulation, as compared to
/// the lower-level API offered by `os`. When possible, using `Path` object
/// methods such as `Path.rename()` can improve readability over the `os`
/// module's counterparts (e.g., `os.rename()`).
///
/// ## Examples
/// ```python
/// import os
///
/// os.rename("old.py", "new.py")
/// ```
///
/// Use instead:
/// ```python
/// from pathlib import Path
///
/// Path("old.py").rename("new.py")
/// ```
///
/// ## Known issues
/// While using `pathlib` can improve the readability and type safety of your code,
/// it can be less performant than the lower-level alternatives that work directly with strings,
/// especially on older versions of Python.
///
/// ## Fix Safety
/// This rule's fix is marked as unsafe if the replacement would remove comments attached to the original expression.
///
/// ## References
/// - [Python documentation: `Path.rename`](https://docs.python.org/3/library/pathlib.html#pathlib.Path.rename)
/// - [Python documentation: `os.rename`](https://docs.python.org/3/library/os.html#os.rename)
/// - [PEP 428 – The pathlib module – object-oriented filesystem paths](https://peps.python.org/pep-0428/)
/// - [Correspondence between `os` and `pathlib`](https://docs.python.org/3/library/pathlib.html#correspondence-to-tools-in-the-os-module)
/// - [Why you should be using pathlib](https://treyhunner.com/2018/12/why-you-should-be-using-pathlib/)
/// - [No really, pathlib is great](https://treyhunner.com/2019/01/no-really-pathlib-is-great/)
#[derive(ViolationMetadata)]
pub(crate) struct OsRename;
impl Violation for OsRename {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
"`os.rename()` should be replaced by `Path.rename()`".to_string()
}
fn fix_title(&self) -> Option<String> {
Some("Replace with `Path(...).rename(...)`".to_string())
}
}
/// PTH104
pub(crate) fn os_rename(checker: &Checker, call: &ExprCall, segments: &[&str]) {
if segments != ["os", "rename"] {
return;
}
// `src_dir_fd` and `dst_dir_fd` are not supported by pathlib, so check if they are
// set to non-default values.
// Signature as of Python 3.13 (https://docs.python.org/3/library/os.html#os.rename)
// ```text
// 0 1 2 3
// os.rename(src, dst, *, src_dir_fd=None, dst_dir_fd=None)
// ```
if is_keyword_only_argument_non_default(&call.arguments, "src_dir_fd")
|| is_keyword_only_argument_non_default(&call.arguments, "dst_dir_fd")
{
return;
}
check_os_pathlib_two_arg_calls(
checker,
call,
"rename",
"src",
"dst",
is_fix_os_rename_enabled(checker.settings()),
OsRename,
);
}

View File

@@ -1,94 +0,0 @@
use crate::checkers::ast::Checker;
use crate::preview::is_fix_os_replace_enabled;
use crate::rules::flake8_use_pathlib::helpers::{
check_os_pathlib_two_arg_calls, is_keyword_only_argument_non_default,
};
use crate::{FixAvailability, Violation};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::ExprCall;
/// ## What it does
/// Checks for uses of `os.replace`.
///
/// ## Why is this bad?
/// `pathlib` offers a high-level API for path manipulation, as compared to
/// the lower-level API offered by `os`. When possible, using `Path` object
/// methods such as `Path.replace()` can improve readability over the `os`
/// module's counterparts (e.g., `os.replace()`).
///
/// Note that `os` functions may be preferable if performance is a concern,
/// e.g., in hot loops.
///
/// ## Examples
/// ```python
/// import os
///
/// os.replace("old.py", "new.py")
/// ```
///
/// Use instead:
/// ```python
/// from pathlib import Path
///
/// Path("old.py").replace("new.py")
/// ```
///
/// ## Known issues
/// While using `pathlib` can improve the readability and type safety of your code,
/// it can be less performant than the lower-level alternatives that work directly with strings,
/// especially on older versions of Python.
///
/// ## Fix Safety
/// This rule's fix is marked as unsafe if the replacement would remove comments attached to the original expression.
///
/// ## References
/// - [Python documentation: `Path.replace`](https://docs.python.org/3/library/pathlib.html#pathlib.Path.replace)
/// - [Python documentation: `os.replace`](https://docs.python.org/3/library/os.html#os.replace)
/// - [PEP 428 – The pathlib module – object-oriented filesystem paths](https://peps.python.org/pep-0428/)
/// - [Correspondence between `os` and `pathlib`](https://docs.python.org/3/library/pathlib.html#correspondence-to-tools-in-the-os-module)
/// - [Why you should be using pathlib](https://treyhunner.com/2018/12/why-you-should-be-using-pathlib/)
/// - [No really, pathlib is great](https://treyhunner.com/2019/01/no-really-pathlib-is-great/)
#[derive(ViolationMetadata)]
pub(crate) struct OsReplace;
impl Violation for OsReplace {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
"`os.replace()` should be replaced by `Path.replace()`".to_string()
}
fn fix_title(&self) -> Option<String> {
Some("Replace with `Path(...).replace(...)`".to_string())
}
}
/// PTH105
pub(crate) fn os_replace(checker: &Checker, call: &ExprCall, segments: &[&str]) {
if segments != ["os", "replace"] {
return;
}
// `src_dir_fd` and `dst_dir_fd` are not supported by pathlib, so check if they are
// set to non-default values.
// Signature as of Python 3.13 (https://docs.python.org/3/library/os.html#os.replace)
// ```text
// 0 1 2 3
// os.replace(src, dst, *, src_dir_fd=None, dst_dir_fd=None)
// ```
if is_keyword_only_argument_non_default(&call.arguments, "src_dir_fd")
|| is_keyword_only_argument_non_default(&call.arguments, "dst_dir_fd")
{
return;
}
check_os_pathlib_two_arg_calls(
checker,
call,
"replace",
"src",
"dst",
is_fix_os_replace_enabled(checker.settings()),
OsReplace,
);
}

View File

@@ -1,16 +1,14 @@
use ruff_python_ast::{self as ast, Expr, ExprBooleanLiteral, ExprCall};
use ruff_python_semantic::SemanticModel;
use ruff_python_semantic::analyze::typing;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::rules::flake8_use_pathlib::helpers::{
is_file_descriptor, is_keyword_only_argument_non_default,
};
use crate::rules::flake8_use_pathlib::{
rules::Glob,
violations::{
BuiltinOpen, Joiner, OsListdir, OsMakedirs, OsMkdir, OsPathJoin, OsPathSplitext, OsStat,
OsSymlink, PyPath,
},
use crate::rules::flake8_use_pathlib::helpers::is_keyword_only_argument_non_default;
use crate::rules::flake8_use_pathlib::rules::Glob;
use crate::rules::flake8_use_pathlib::violations::{
BuiltinOpen, Joiner, OsChmod, OsListdir, OsMakedirs, OsMkdir, OsPathJoin, OsPathSamefile,
OsPathSplitext, OsRename, OsReplace, OsStat, OsSymlink, PyPath,
};
pub(crate) fn replaceable_by_pathlib(checker: &Checker, call: &ExprCall) {
@@ -20,6 +18,24 @@ pub(crate) fn replaceable_by_pathlib(checker: &Checker, call: &ExprCall) {
let range = call.func.range();
match qualified_name.segments() {
// PTH101
["os", "chmod"] => {
// `dir_fd` is not supported by pathlib, so check if it's set to non-default values.
// Signature as of Python 3.13 (https://docs.python.org/3/library/os.html#os.chmod)
// ```text
// 0 1 2 3
// os.chmod(path, mode, *, dir_fd=None, follow_symlinks=True)
// ```
if call
.arguments
.find_argument_value("path", 0)
.is_some_and(|expr| is_file_descriptor(expr, checker.semantic()))
|| is_keyword_only_argument_non_default(&call.arguments, "dir_fd")
{
return;
}
checker.report_diagnostic_if_enabled(OsChmod, range)
}
// PTH102
["os", "makedirs"] => checker.report_diagnostic_if_enabled(OsMakedirs, range),
// PTH103
@@ -35,6 +51,38 @@ pub(crate) fn replaceable_by_pathlib(checker: &Checker, call: &ExprCall) {
}
checker.report_diagnostic_if_enabled(OsMkdir, range)
}
// PTH104
["os", "rename"] => {
// `src_dir_fd` and `dst_dir_fd` are not supported by pathlib, so check if they are
// set to non-default values.
// Signature as of Python 3.13 (https://docs.python.org/3/library/os.html#os.rename)
// ```text
// 0 1 2 3
// os.rename(src, dst, *, src_dir_fd=None, dst_dir_fd=None)
// ```
if is_keyword_only_argument_non_default(&call.arguments, "src_dir_fd")
|| is_keyword_only_argument_non_default(&call.arguments, "dst_dir_fd")
{
return;
}
checker.report_diagnostic_if_enabled(OsRename, range)
}
// PTH105
["os", "replace"] => {
// `src_dir_fd` and `dst_dir_fd` are not supported by pathlib, so check if they are
// set to non-default values.
// Signature as of Python 3.13 (https://docs.python.org/3/library/os.html#os.replace)
// ```text
// 0 1 2 3
// os.replace(src, dst, *, src_dir_fd=None, dst_dir_fd=None)
// ```
if is_keyword_only_argument_non_default(&call.arguments, "src_dir_fd")
|| is_keyword_only_argument_non_default(&call.arguments, "dst_dir_fd")
{
return;
}
checker.report_diagnostic_if_enabled(OsReplace, range)
}
// PTH116
["os", "stat"] => {
// `dir_fd` is not supported by pathlib, so check if it's set to non-default values.
@@ -76,6 +124,8 @@ pub(crate) fn replaceable_by_pathlib(checker: &Checker, call: &ExprCall) {
},
range,
),
// PTH121
["os", "path", "samefile"] => checker.report_diagnostic_if_enabled(OsPathSamefile, range),
// PTH122
["os", "path", "splitext"] => checker.report_diagnostic_if_enabled(OsPathSplitext, range),
// PTH211
@@ -184,6 +234,37 @@ pub(crate) fn replaceable_by_pathlib(checker: &Checker, call: &ExprCall) {
};
}
/// Returns `true` if the given expression looks like a file descriptor, i.e., if it is an integer.
fn is_file_descriptor(expr: &Expr, semantic: &SemanticModel) -> bool {
if matches!(
expr,
Expr::NumberLiteral(ast::ExprNumberLiteral {
value: ast::Number::Int(_),
..
})
) {
return true;
}
let Some(name) = get_name_expr(expr) else {
return false;
};
let Some(binding) = semantic.only_binding(name).map(|id| semantic.binding(id)) else {
return false;
};
typing::is_int(binding, semantic)
}
fn get_name_expr(expr: &Expr) -> Option<&ast::ExprName> {
match expr {
Expr::Name(name) => Some(name),
Expr::Call(ExprCall { func, .. }) => get_name_expr(func),
_ => None,
}
}
/// Returns `true` if argument `name` is set to a non-default `None` value.
fn is_argument_non_default(arguments: &ast::Arguments, name: &str, position: usize) -> bool {
arguments

View File

@@ -20,7 +20,6 @@ full_name.py:8:6: PTH101 `os.chmod()` should be replaced by `Path.chmod()`
9 | aaa = os.mkdir(p)
10 | os.makedirs(p)
|
= help: Replace with `Path(...).chmod(...)`
full_name.py:9:7: PTH102 `os.mkdir()` should be replaced by `Path.mkdir()`
|
@@ -51,7 +50,6 @@ full_name.py:11:1: PTH104 `os.rename()` should be replaced by `Path.rename()`
12 | os.replace(p)
13 | os.rmdir(p)
|
= help: Replace with `Path(...).rename(...)`
full_name.py:12:1: PTH105 `os.replace()` should be replaced by `Path.replace()`
|
@@ -62,7 +60,6 @@ full_name.py:12:1: PTH105 `os.replace()` should be replaced by `Path.replace()`
13 | os.rmdir(p)
14 | os.remove(p)
|
= help: Replace with `Path(...).replace(...)`
full_name.py:13:1: PTH106 `os.rmdir()` should be replaced by `Path.rmdir()`
|
@@ -256,7 +253,6 @@ full_name.py:30:1: PTH121 `os.path.samefile()` should be replaced by `Path.samef
31 | os.path.splitext(p)
32 | with open(p) as fp:
|
= help: Replace with `Path(...).samefile()`
full_name.py:31:1: PTH122 `os.path.splitext()` should be replaced by `Path.suffix`, `Path.stem`, and `Path.parent`
|

View File

@@ -20,7 +20,6 @@ import_as.py:8:6: PTH101 `os.chmod()` should be replaced by `Path.chmod()`
9 | aaa = foo.mkdir(p)
10 | foo.makedirs(p)
|
= help: Replace with `Path(...).chmod(...)`
import_as.py:9:7: PTH102 `os.mkdir()` should be replaced by `Path.mkdir()`
|
@@ -51,7 +50,6 @@ import_as.py:11:1: PTH104 `os.rename()` should be replaced by `Path.rename()`
12 | foo.replace(p)
13 | foo.rmdir(p)
|
= help: Replace with `Path(...).rename(...)`
import_as.py:12:1: PTH105 `os.replace()` should be replaced by `Path.replace()`
|
@@ -62,7 +60,6 @@ import_as.py:12:1: PTH105 `os.replace()` should be replaced by `Path.replace()`
13 | foo.rmdir(p)
14 | foo.remove(p)
|
= help: Replace with `Path(...).replace(...)`
import_as.py:13:1: PTH106 `os.rmdir()` should be replaced by `Path.rmdir()`
|
@@ -255,7 +252,6 @@ import_as.py:30:1: PTH121 `os.path.samefile()` should be replaced by `Path.samef
| ^^^^^^^^^^^^^^ PTH121
31 | foo_p.splitext(p)
|
= help: Replace with `Path(...).samefile()`
import_as.py:31:1: PTH122 `os.path.splitext()` should be replaced by `Path.suffix`, `Path.stem`, and `Path.parent`
|

View File

@@ -20,7 +20,6 @@ import_from.py:10:6: PTH101 `os.chmod()` should be replaced by `Path.chmod()`
11 | aaa = mkdir(p)
12 | makedirs(p)
|
= help: Replace with `Path(...).chmod(...)`
import_from.py:11:7: PTH102 `os.mkdir()` should be replaced by `Path.mkdir()`
|
@@ -51,7 +50,6 @@ import_from.py:13:1: PTH104 `os.rename()` should be replaced by `Path.rename()`
14 | replace(p)
15 | rmdir(p)
|
= help: Replace with `Path(...).rename(...)`
import_from.py:14:1: PTH105 `os.replace()` should be replaced by `Path.replace()`
|
@@ -62,7 +60,6 @@ import_from.py:14:1: PTH105 `os.replace()` should be replaced by `Path.replace()
15 | rmdir(p)
16 | remove(p)
|
= help: Replace with `Path(...).replace(...)`
import_from.py:15:1: PTH106 `os.rmdir()` should be replaced by `Path.rmdir()`
|
@@ -256,7 +253,6 @@ import_from.py:32:1: PTH121 `os.path.samefile()` should be replaced by `Path.sam
33 | splitext(p)
34 | with open(p) as fp:
|
= help: Replace with `Path(...).samefile()`
import_from.py:33:1: PTH122 `os.path.splitext()` should be replaced by `Path.suffix`, `Path.stem`, and `Path.parent`
|
@@ -293,36 +289,3 @@ import_from.py:43:10: PTH123 `open()` should be replaced by `Path.open()`
43 | with open(p) as _: ... # Error
| ^^^^ PTH123
|
import_from.py:53:1: PTH104 `os.rename()` should be replaced by `Path.rename()`
|
51 | file = "file_1.py"
52 |
53 | rename(file, "file_2.py")
| ^^^^^^ PTH104
54 |
55 | rename(
|
= help: Replace with `Path(...).rename(...)`
import_from.py:55:1: PTH104 `os.rename()` should be replaced by `Path.rename()`
|
53 | rename(file, "file_2.py")
54 |
55 | rename(
| ^^^^^^ PTH104
56 | # commment 1
57 | file, # comment 2
|
= help: Replace with `Path(...).rename(...)`
import_from.py:63:1: PTH104 `os.rename()` should be replaced by `Path.rename()`
|
61 | )
62 |
63 | rename(file, "file_2.py", src_dir_fd=None, dst_dir_fd=None)
| ^^^^^^ PTH104
64 |
65 | rename(file, "file_2.py", src_dir_fd=1)
|
= help: Replace with `Path(...).rename(...)`

View File

@@ -20,7 +20,6 @@ import_from_as.py:15:6: PTH101 `os.chmod()` should be replaced by `Path.chmod()`
16 | aaa = xmkdir(p)
17 | xmakedirs(p)
|
= help: Replace with `Path(...).chmod(...)`
import_from_as.py:16:7: PTH102 `os.mkdir()` should be replaced by `Path.mkdir()`
|
@@ -51,7 +50,6 @@ import_from_as.py:18:1: PTH104 `os.rename()` should be replaced by `Path.rename(
19 | xreplace(p)
20 | xrmdir(p)
|
= help: Replace with `Path(...).rename(...)`
import_from_as.py:19:1: PTH105 `os.replace()` should be replaced by `Path.replace()`
|
@@ -62,7 +60,6 @@ import_from_as.py:19:1: PTH105 `os.replace()` should be replaced by `Path.replac
20 | xrmdir(p)
21 | xremove(p)
|
= help: Replace with `Path(...).replace(...)`
import_from_as.py:20:1: PTH106 `os.rmdir()` should be replaced by `Path.rmdir()`
|
@@ -255,7 +252,6 @@ import_from_as.py:37:1: PTH121 `os.path.samefile()` should be replaced by `Path.
| ^^^^^^^^^ PTH121
38 | xsplitext(p)
|
= help: Replace with `Path(...).samefile()`
import_from_as.py:38:1: PTH122 `os.path.splitext()` should be replaced by `Path.suffix`, `Path.stem`, and `Path.parent`
|

View File

@@ -34,7 +34,6 @@ full_name.py:8:6: PTH101 `os.chmod()` should be replaced by `Path.chmod()`
9 | aaa = os.mkdir(p)
10 | os.makedirs(p)
|
= help: Replace with `Path(...).chmod(...)`
full_name.py:9:7: PTH102 `os.mkdir()` should be replaced by `Path.mkdir()`
|
@@ -65,7 +64,6 @@ full_name.py:11:1: PTH104 `os.rename()` should be replaced by `Path.rename()`
12 | os.replace(p)
13 | os.rmdir(p)
|
= help: Replace with `Path(...).rename(...)`
full_name.py:12:1: PTH105 `os.replace()` should be replaced by `Path.replace()`
|
@@ -76,7 +74,6 @@ full_name.py:12:1: PTH105 `os.replace()` should be replaced by `Path.replace()`
13 | os.rmdir(p)
14 | os.remove(p)
|
= help: Replace with `Path(...).replace(...)`
full_name.py:13:1: PTH106 [*] `os.rmdir()` should be replaced by `Path.rmdir()`
|
@@ -474,7 +471,6 @@ full_name.py:30:1: PTH121 `os.path.samefile()` should be replaced by `Path.samef
31 | os.path.splitext(p)
32 | with open(p) as fp:
|
= help: Replace with `Path(...).samefile()`
full_name.py:31:1: PTH122 `os.path.splitext()` should be replaced by `Path.suffix`, `Path.stem`, and `Path.parent`
|

View File

@@ -34,7 +34,6 @@ import_as.py:8:6: PTH101 `os.chmod()` should be replaced by `Path.chmod()`
9 | aaa = foo.mkdir(p)
10 | foo.makedirs(p)
|
= help: Replace with `Path(...).chmod(...)`
import_as.py:9:7: PTH102 `os.mkdir()` should be replaced by `Path.mkdir()`
|
@@ -65,7 +64,6 @@ import_as.py:11:1: PTH104 `os.rename()` should be replaced by `Path.rename()`
12 | foo.replace(p)
13 | foo.rmdir(p)
|
= help: Replace with `Path(...).rename(...)`
import_as.py:12:1: PTH105 `os.replace()` should be replaced by `Path.replace()`
|
@@ -76,7 +74,6 @@ import_as.py:12:1: PTH105 `os.replace()` should be replaced by `Path.replace()`
13 | foo.rmdir(p)
14 | foo.remove(p)
|
= help: Replace with `Path(...).replace(...)`
import_as.py:13:1: PTH106 [*] `os.rmdir()` should be replaced by `Path.rmdir()`
|
@@ -472,7 +469,6 @@ import_as.py:30:1: PTH121 `os.path.samefile()` should be replaced by `Path.samef
| ^^^^^^^^^^^^^^ PTH121
31 | foo_p.splitext(p)
|
= help: Replace with `Path(...).samefile()`
import_as.py:31:1: PTH122 `os.path.splitext()` should be replaced by `Path.suffix`, `Path.stem`, and `Path.parent`
|

View File

@@ -35,7 +35,6 @@ import_from.py:10:6: PTH101 `os.chmod()` should be replaced by `Path.chmod()`
11 | aaa = mkdir(p)
12 | makedirs(p)
|
= help: Replace with `Path(...).chmod(...)`
import_from.py:11:7: PTH102 `os.mkdir()` should be replaced by `Path.mkdir()`
|
@@ -66,7 +65,6 @@ import_from.py:13:1: PTH104 `os.rename()` should be replaced by `Path.rename()`
14 | replace(p)
15 | rmdir(p)
|
= help: Replace with `Path(...).rename(...)`
import_from.py:14:1: PTH105 `os.replace()` should be replaced by `Path.replace()`
|
@@ -77,7 +75,6 @@ import_from.py:14:1: PTH105 `os.replace()` should be replaced by `Path.replace()
15 | rmdir(p)
16 | remove(p)
|
= help: Replace with `Path(...).replace(...)`
import_from.py:15:1: PTH106 [*] `os.rmdir()` should be replaced by `Path.rmdir()`
|
@@ -487,7 +484,6 @@ import_from.py:32:1: PTH121 `os.path.samefile()` should be replaced by `Path.sam
33 | splitext(p)
34 | with open(p) as fp:
|
= help: Replace with `Path(...).samefile()`
import_from.py:33:1: PTH122 `os.path.splitext()` should be replaced by `Path.suffix`, `Path.stem`, and `Path.parent`
|
@@ -524,95 +520,3 @@ import_from.py:43:10: PTH123 `open()` should be replaced by `Path.open()`
43 | with open(p) as _: ... # Error
| ^^^^ PTH123
|
import_from.py:53:1: PTH104 [*] `os.rename()` should be replaced by `Path.rename()`
|
51 | file = "file_1.py"
52 |
53 | rename(file, "file_2.py")
| ^^^^^^ PTH104
54 |
55 | rename(
|
= help: Replace with `Path(...).rename(...)`
Safe fix
2 2 | from os import remove, unlink, getcwd, readlink, stat
3 3 | from os.path import abspath, exists, expanduser, isdir, isfile, islink
4 4 | from os.path import isabs, join, basename, dirname, samefile, splitext
5 |+import pathlib
5 6 |
6 7 | p = "/foo"
7 8 | q = "bar"
--------------------------------------------------------------------------------
50 51 |
51 52 | file = "file_1.py"
52 53 |
53 |-rename(file, "file_2.py")
54 |+pathlib.Path(file).rename("file_2.py")
54 55 |
55 56 | rename(
56 57 | # commment 1
import_from.py:55:1: PTH104 [*] `os.rename()` should be replaced by `Path.rename()`
|
53 | rename(file, "file_2.py")
54 |
55 | rename(
| ^^^^^^ PTH104
56 | # commment 1
57 | file, # comment 2
|
= help: Replace with `Path(...).rename(...)`
Unsafe fix
2 2 | from os import remove, unlink, getcwd, readlink, stat
3 3 | from os.path import abspath, exists, expanduser, isdir, isfile, islink
4 4 | from os.path import isabs, join, basename, dirname, samefile, splitext
5 |+import pathlib
5 6 |
6 7 | p = "/foo"
7 8 | q = "bar"
--------------------------------------------------------------------------------
52 53 |
53 54 | rename(file, "file_2.py")
54 55 |
55 |-rename(
56 |- # commment 1
57 |- file, # comment 2
58 |- "file_2.py"
59 |- ,
60 |- # comment 3
61 |-)
56 |+pathlib.Path(file).rename("file_2.py")
62 57 |
63 58 | rename(file, "file_2.py", src_dir_fd=None, dst_dir_fd=None)
64 59 |
import_from.py:63:1: PTH104 [*] `os.rename()` should be replaced by `Path.rename()`
|
61 | )
62 |
63 | rename(file, "file_2.py", src_dir_fd=None, dst_dir_fd=None)
| ^^^^^^ PTH104
64 |
65 | rename(file, "file_2.py", src_dir_fd=1)
|
= help: Replace with `Path(...).rename(...)`
Safe fix
2 2 | from os import remove, unlink, getcwd, readlink, stat
3 3 | from os.path import abspath, exists, expanduser, isdir, isfile, islink
4 4 | from os.path import isabs, join, basename, dirname, samefile, splitext
5 |+import pathlib
5 6 |
6 7 | p = "/foo"
7 8 | q = "bar"
--------------------------------------------------------------------------------
60 61 | # comment 3
61 62 | )
62 63 |
63 |-rename(file, "file_2.py", src_dir_fd=None, dst_dir_fd=None)
64 |+pathlib.Path(file).rename("file_2.py")
64 65 |
65 66 | rename(file, "file_2.py", src_dir_fd=1)

View File

@@ -35,7 +35,6 @@ import_from_as.py:15:6: PTH101 `os.chmod()` should be replaced by `Path.chmod()`
16 | aaa = xmkdir(p)
17 | xmakedirs(p)
|
= help: Replace with `Path(...).chmod(...)`
import_from_as.py:16:7: PTH102 `os.mkdir()` should be replaced by `Path.mkdir()`
|
@@ -66,7 +65,6 @@ import_from_as.py:18:1: PTH104 `os.rename()` should be replaced by `Path.rename(
19 | xreplace(p)
20 | xrmdir(p)
|
= help: Replace with `Path(...).rename(...)`
import_from_as.py:19:1: PTH105 `os.replace()` should be replaced by `Path.replace()`
|
@@ -77,7 +75,6 @@ import_from_as.py:19:1: PTH105 `os.replace()` should be replaced by `Path.replac
20 | xrmdir(p)
21 | xremove(p)
|
= help: Replace with `Path(...).replace(...)`
import_from_as.py:20:1: PTH106 [*] `os.rmdir()` should be replaced by `Path.rmdir()`
|
@@ -485,7 +482,6 @@ import_from_as.py:37:1: PTH121 `os.path.samefile()` should be replaced by `Path.
| ^^^^^^^^^ PTH121
38 | xsplitext(p)
|
= help: Replace with `Path(...).samefile()`
import_from_as.py:38:1: PTH122 `os.path.splitext()` should be replaced by `Path.suffix`, `Path.stem`, and `Path.parent`
|

View File

@@ -2,6 +2,51 @@ use ruff_macros::{ViolationMetadata, derive_message_formats};
use crate::Violation;
/// ## What it does
/// Checks for uses of `os.chmod`.
///
/// ## Why is this bad?
/// `pathlib` offers a high-level API for path manipulation, as compared to
/// the lower-level API offered by `os`. When possible, using `Path` object
/// methods such as `Path.chmod()` can improve readability over the `os`
/// module's counterparts (e.g., `os.chmod()`).
///
/// ## Examples
/// ```python
/// import os
///
/// os.chmod("file.py", 0o444)
/// ```
///
/// Use instead:
/// ```python
/// from pathlib import Path
///
/// Path("file.py").chmod(0o444)
/// ```
///
/// ## Known issues
/// While using `pathlib` can improve the readability and type safety of your code,
/// it can be less performant than the lower-level alternatives that work directly with strings,
/// especially on older versions of Python.
///
/// ## References
/// - [Python documentation: `Path.chmod`](https://docs.python.org/3/library/pathlib.html#pathlib.Path.chmod)
/// - [Python documentation: `os.chmod`](https://docs.python.org/3/library/os.html#os.chmod)
/// - [PEP 428 – The pathlib module – object-oriented filesystem paths](https://peps.python.org/pep-0428/)
/// - [Correspondence between `os` and `pathlib`](https://docs.python.org/3/library/pathlib.html#correspondence-to-tools-in-the-os-module)
/// - [Why you should be using pathlib](https://treyhunner.com/2018/12/why-you-should-be-using-pathlib/)
/// - [No really, pathlib is great](https://treyhunner.com/2019/01/no-really-pathlib-is-great/)
#[derive(ViolationMetadata)]
pub(crate) struct OsChmod;
impl Violation for OsChmod {
#[derive_message_formats]
fn message(&self) -> String {
"`os.chmod()` should be replaced by `Path.chmod()`".to_string()
}
}
/// ## What it does
/// Checks for uses of `os.makedirs`.
///
@@ -92,6 +137,99 @@ impl Violation for OsMkdir {
}
}
/// ## What it does
/// Checks for uses of `os.rename`.
///
/// ## Why is this bad?
/// `pathlib` offers a high-level API for path manipulation, as compared to
/// the lower-level API offered by `os`. When possible, using `Path` object
/// methods such as `Path.rename()` can improve readability over the `os`
/// module's counterparts (e.g., `os.rename()`).
///
/// ## Examples
/// ```python
/// import os
///
/// os.rename("old.py", "new.py")
/// ```
///
/// Use instead:
/// ```python
/// from pathlib import Path
///
/// Path("old.py").rename("new.py")
/// ```
///
/// ## Known issues
/// While using `pathlib` can improve the readability and type safety of your code,
/// it can be less performant than the lower-level alternatives that work directly with strings,
/// especially on older versions of Python.
///
/// ## References
/// - [Python documentation: `Path.rename`](https://docs.python.org/3/library/pathlib.html#pathlib.Path.rename)
/// - [Python documentation: `os.rename`](https://docs.python.org/3/library/os.html#os.rename)
/// - [PEP 428 – The pathlib module – object-oriented filesystem paths](https://peps.python.org/pep-0428/)
/// - [Correspondence between `os` and `pathlib`](https://docs.python.org/3/library/pathlib.html#correspondence-to-tools-in-the-os-module)
/// - [Why you should be using pathlib](https://treyhunner.com/2018/12/why-you-should-be-using-pathlib/)
/// - [No really, pathlib is great](https://treyhunner.com/2019/01/no-really-pathlib-is-great/)
#[derive(ViolationMetadata)]
pub(crate) struct OsRename;
impl Violation for OsRename {
#[derive_message_formats]
fn message(&self) -> String {
"`os.rename()` should be replaced by `Path.rename()`".to_string()
}
}
/// ## What it does
/// Checks for uses of `os.replace`.
///
/// ## Why is this bad?
/// `pathlib` offers a high-level API for path manipulation, as compared to
/// the lower-level API offered by `os`. When possible, using `Path` object
/// methods such as `Path.replace()` can improve readability over the `os`
/// module's counterparts (e.g., `os.replace()`).
///
/// Note that `os` functions may be preferable if performance is a concern,
/// e.g., in hot loops.
///
/// ## Examples
/// ```python
/// import os
///
/// os.replace("old.py", "new.py")
/// ```
///
/// Use instead:
/// ```python
/// from pathlib import Path
///
/// Path("old.py").replace("new.py")
/// ```
///
/// ## Known issues
/// While using `pathlib` can improve the readability and type safety of your code,
/// it can be less performant than the lower-level alternatives that work directly with strings,
/// especially on older versions of Python.
///
/// ## References
/// - [Python documentation: `Path.replace`](https://docs.python.org/3/library/pathlib.html#pathlib.Path.replace)
/// - [Python documentation: `os.replace`](https://docs.python.org/3/library/os.html#os.replace)
/// - [PEP 428 – The pathlib module – object-oriented filesystem paths](https://peps.python.org/pep-0428/)
/// - [Correspondence between `os` and `pathlib`](https://docs.python.org/3/library/pathlib.html#correspondence-to-tools-in-the-os-module)
/// - [Why you should be using pathlib](https://treyhunner.com/2018/12/why-you-should-be-using-pathlib/)
/// - [No really, pathlib is great](https://treyhunner.com/2019/01/no-really-pathlib-is-great/)
#[derive(ViolationMetadata)]
pub(crate) struct OsReplace;
impl Violation for OsReplace {
#[derive_message_formats]
fn message(&self) -> String {
"`os.replace()` should be replaced by `Path.replace()`".to_string()
}
}
/// ## What it does
/// Checks for uses of `os.stat`.
///
@@ -209,6 +347,51 @@ pub(crate) enum Joiner {
Joinpath,
}
/// ## What it does
/// Checks for uses of `os.path.samefile`.
///
/// ## Why is this bad?
/// `pathlib` offers a high-level API for path manipulation, as compared to
/// the lower-level API offered by `os.path`. When possible, using `Path` object
/// methods such as `Path.samefile()` can improve readability over the `os.path`
/// module's counterparts (e.g., `os.path.samefile()`).
///
/// ## Examples
/// ```python
/// import os
///
/// os.path.samefile("f1.py", "f2.py")
/// ```
///
/// Use instead:
/// ```python
/// from pathlib import Path
///
/// Path("f1.py").samefile("f2.py")
/// ```
///
/// ## Known issues
/// While using `pathlib` can improve the readability and type safety of your code,
/// it can be less performant than the lower-level alternatives that work directly with strings,
/// especially on older versions of Python.
///
/// ## References
/// - [Python documentation: `Path.samefile`](https://docs.python.org/3/library/pathlib.html#pathlib.Path.samefile)
/// - [Python documentation: `os.path.samefile`](https://docs.python.org/3/library/os.path.html#os.path.samefile)
/// - [PEP 428 – The pathlib module – object-oriented filesystem paths](https://peps.python.org/pep-0428/)
/// - [Correspondence between `os` and `pathlib`](https://docs.python.org/3/library/pathlib.html#correspondence-to-tools-in-the-os-module)
/// - [Why you should be using pathlib](https://treyhunner.com/2018/12/why-you-should-be-using-pathlib/)
/// - [No really, pathlib is great](https://treyhunner.com/2019/01/no-really-pathlib-is-great/)
#[derive(ViolationMetadata)]
pub(crate) struct OsPathSamefile;
impl Violation for OsPathSamefile {
#[derive_message_formats]
fn message(&self) -> String {
"`os.path.samefile()` should be replaced by `Path.samefile()`".to_string()
}
}
/// ## What it does
/// Checks for uses of `os.path.splitext`.
///

View File

@@ -100,7 +100,7 @@ pub(crate) fn invalid_function_name(
return;
}
// Ignore the do_* methods of the http.server.BaseHTTPRequestHandler class and its subclasses
// Ignore the do_* methods of the http.server.BaseHTTPRequestHandler class
if name.starts_with("do_")
&& parent_class.is_some_and(|class| {
any_base_class(class, semantic, &mut |superclass| {
@@ -108,13 +108,7 @@ pub(crate) fn invalid_function_name(
qualified.is_some_and(|name| {
matches!(
name.segments(),
[
"http",
"server",
"BaseHTTPRequestHandler"
| "CGIHTTPRequestHandler"
| "SimpleHTTPRequestHandler"
]
["http", "server", "BaseHTTPRequestHandler"]
)
})
})

View File

@@ -55,21 +55,3 @@ N802.py:84:9: N802 Function name `dont_GET` should be lowercase
| ^^^^^^^^ N802
85 | pass
|
N802.py:95:9: N802 Function name `dont_OPTIONS` should be lowercase
|
93 | pass
94 |
95 | def dont_OPTIONS(self):
| ^^^^^^^^^^^^ N802
96 | pass
|
N802.py:106:9: N802 Function name `dont_OPTIONS` should be lowercase
|
104 | pass
105 |
106 | def dont_OPTIONS(self):
| ^^^^^^^^^^^^ N802
107 | pass
|
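
For the N802 change above, the exemption is keyed to the literal `do_` prefix on `http.server` request handlers, which is why the snapshot keeps flagging `dont_GET` and `dont_OPTIONS`. A minimal sketch of the distinction, assuming the base class resolves to `http.server.BaseHTTPRequestHandler` in the semantic model:

```python
from http.server import BaseHTTPRequestHandler

class Handler(BaseHTTPRequestHandler):
    def do_GET(self):    # allowed: the stdlib dispatches on the do_<METHOD> naming convention
        self.send_response(200)
        self.end_headers()

    def dont_GET(self):  # N802: function name `dont_GET` should be lowercase
        pass
```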

View File

@@ -249,11 +249,6 @@ pub(crate) fn manual_list_comprehension(checker: &Checker, for_stmt: &ast::StmtF
.iter()
.find(|binding| for_stmt.target.range() == binding.range)
.unwrap();
// If the target variable is global (e.g., `global INDEX`) or nonlocal (e.g., `nonlocal INDEX`),
// then it is intended to be used elsewhere outside the for loop.
if target_binding.is_global() || target_binding.is_nonlocal() {
return;
}
// If any references to the loop target variable are after the loop,
// then converting it into a comprehension would cause a NameError
if target_binding
@@ -411,14 +406,7 @@ fn convert_to_list_extend(
};
let target_str = locator.slice(for_stmt.target.range());
let elt_str = locator.slice(to_append);
let generator_str = if to_append
.as_generator_expr()
.is_some_and(|generator| !generator.parenthesized)
{
format!("({elt_str}) {for_type} {target_str} in {for_iter_str}{if_str}")
} else {
format!("{elt_str} {for_type} {target_str} in {for_iter_str}{if_str}")
};
let generator_str = format!("{elt_str} {for_type} {target_str} in {for_iter_str}{if_str}");
let variable_name = locator.slice(binding);
let for_loop_inline_comments = comment_strings_in_range(

View File

@@ -241,29 +241,5 @@ PERF401.py:280:13: PERF401 Use `list.extend` to create a transformed list
279 | if lambda: 0:
280 | dst.append(i)
| ^^^^^^^^^^^^^ PERF401
281 |
282 | def f():
|
= help: Replace for loop with list.extend
PERF401.py:286:9: PERF401 Use a list comprehension to create a transformed list
|
284 | result = []
285 | for i in range(3):
286 | result.append(x for x in [i])
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ PERF401
287 |
288 | def f():
|
= help: Replace for loop with list comprehension
PERF401.py:292:9: PERF401 Use a list comprehension to create a transformed list
|
290 | result = []
291 | for i in range(3):
292 | result.append((x for x in [i]))
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ PERF401
293 |
294 | G_INDEX = None
|
= help: Replace for loop with list comprehension
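
The `PERF401.py:286` and `:292` cases above append a generator expression, so the element has to be re-parenthesized when it is lifted into a comprehension or an `extend(...)` call, which is what the parenthesization branch in `convert_to_list_extend` and the snapshot fixes are about. The two shapes involved, roughly:

```python
result = []
for i in range(3):
    result.append(x for x in [i])  # PERF401: the appended element is itself a generator expression

# The suggested rewrite keeps the element as a generator by re-adding parentheses:
result = [(x for x in [i]) for i in range(3)]
```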

View File

@@ -566,8 +566,6 @@ PERF401.py:280:13: PERF401 [*] Use `list.extend` to create a transformed list
279 | if lambda: 0:
280 | dst.append(i)
| ^^^^^^^^^^^^^ PERF401
281 |
282 | def f():
|
= help: Replace for loop with list.extend
@@ -579,52 +577,3 @@ PERF401.py:280:13: PERF401 [*] Use `list.extend` to create a transformed list
279 |- if lambda: 0:
280 |- dst.append(i)
278 |+ dst.extend(i for i in src if (lambda: 0))
281 279 |
282 280 | def f():
283 281 | i = "xyz"
PERF401.py:286:9: PERF401 [*] Use a list comprehension to create a transformed list
|
284 | result = []
285 | for i in range(3):
286 | result.append(x for x in [i])
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ PERF401
287 |
288 | def f():
|
= help: Replace for loop with list comprehension
Unsafe fix
281 281 |
282 282 | def f():
283 283 | i = "xyz"
284 |- result = []
285 |- for i in range(3):
286 |- result.append(x for x in [i])
284 |+ result = [(x for x in [i]) for i in range(3)]
287 285 |
288 286 | def f():
289 287 | i = "xyz"
PERF401.py:292:9: PERF401 [*] Use a list comprehension to create a transformed list
|
290 | result = []
291 | for i in range(3):
292 | result.append((x for x in [i]))
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ PERF401
293 |
294 | G_INDEX = None
|
= help: Replace for loop with list comprehension
Unsafe fix
287 287 |
288 288 | def f():
289 289 | i = "xyz"
290 |- result = []
291 |- for i in range(3):
292 |- result.append((x for x in [i]))
290 |+ result = [(x for x in [i]) for i in range(3)]
293 291 |
294 292 | G_INDEX = None
295 293 | def f():

View File

@@ -21,6 +21,7 @@ use crate::checkers::ast::{DiagnosticGuard, LintContext};
use crate::checkers::logical_lines::expand_indent;
use crate::line_width::IndentWidth;
use crate::rules::pycodestyle::helpers::is_non_logical_token;
use crate::settings::LinterSettings;
use crate::{AlwaysFixableViolation, Edit, Fix, Locator, Violation};
/// Number of blank lines around top level classes and functions.
@@ -693,12 +694,14 @@ pub(crate) struct BlankLinesChecker<'a, 'b> {
source_type: PySourceType,
cell_offsets: Option<&'a CellOffsets>,
context: &'a LintContext<'b>,
settings: &'a LinterSettings,
}
impl<'a, 'b> BlankLinesChecker<'a, 'b> {
pub(crate) fn new(
locator: &'a Locator<'a>,
stylist: &'a Stylist<'a>,
settings: &'a LinterSettings,
source_type: PySourceType,
cell_offsets: Option<&'a CellOffsets>,
context: &'a LintContext<'b>,
@@ -709,6 +712,7 @@ impl<'a, 'b> BlankLinesChecker<'a, 'b> {
source_type,
cell_offsets,
context,
settings,
}
}
@@ -729,7 +733,7 @@ impl<'a, 'b> BlankLinesChecker<'a, 'b> {
let line_preprocessor = LinePreprocessor::new(
tokens,
self.locator,
self.context.settings().tab_size,
self.settings.tab_size,
self.cell_offsets,
);
@@ -875,8 +879,7 @@ impl<'a, 'b> BlankLinesChecker<'a, 'b> {
// `isort` defaults to 2 if before a class or function definition (except in stubs where it is one) and 1 otherwise.
// Defaulting to 2 (or 1 in stubs) here is correct because the variable is only used when testing the
// blank lines before a class or function definition.
u32::try_from(self.context.settings().isort.lines_after_imports)
.unwrap_or(max_lines_level)
u32::try_from(self.settings.isort.lines_after_imports).unwrap_or(max_lines_level)
} else {
max_lines_level
}
@@ -938,10 +941,8 @@ impl<'a, 'b> BlankLinesChecker<'a, 'b> {
(LogicalLineKind::Import, Follows::FromImport)
| (LogicalLineKind::FromImport, Follows::Import)
) {
max_lines_level.max(
u32::try_from(self.context.settings().isort.lines_between_types)
.unwrap_or(u32::MAX),
)
max_lines_level
.max(u32::try_from(self.settings.isort.lines_between_types).unwrap_or(u32::MAX))
} else {
expected_blank_lines_before_definition
};
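
To make the new `settings` plumbing concrete: the blank-lines checker consults `isort.lines_after_imports` and `isort.lines_between_types` so its limits do not fight an isort configuration. A hedged sketch, assuming something like `lint.isort.lines-after-imports = 3` in the project's Ruff settings:

```python
import os
from collections import deque



def main() -> None:  # three blank lines above: the checker defers to the isort setting here
    print(deque([os.sep]))
```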

View File

@@ -48,7 +48,6 @@ mod tests {
#[test_case(Rule::ComparisonWithItself, Path::new("comparison_with_itself.py"))]
#[test_case(Rule::EqWithoutHash, Path::new("eq_without_hash.py"))]
#[test_case(Rule::EmptyComment, Path::new("empty_comment.py"))]
#[test_case(Rule::EmptyComment, Path::new("empty_comment_line_continuation.py"))]
#[test_case(Rule::ManualFromImport, Path::new("import_aliasing.py"))]
#[test_case(Rule::IfStmtMinMax, Path::new("if_stmt_min_max.py"))]
#[test_case(Rule::SingleStringSlots, Path::new("single_string_slots.py"))]

View File

@@ -1,5 +1,4 @@
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_index::Indexer;
use ruff_python_trivia::{CommentRanges, is_python_whitespace};
use ruff_source_file::LineRanges;
use ruff_text_size::{TextRange, TextSize};
@@ -50,7 +49,6 @@ pub(crate) fn empty_comments(
context: &LintContext,
comment_ranges: &CommentRanges,
locator: &Locator,
indexer: &Indexer,
) {
let block_comments = comment_ranges.block_comments(locator.contents());
@@ -61,12 +59,12 @@ pub(crate) fn empty_comments(
}
// If the line contains an empty comment, add a diagnostic.
empty_comment(context, range, locator, indexer);
empty_comment(context, range, locator);
}
}
/// Return a [`Diagnostic`] if the comment at the given [`TextRange`] is empty.
fn empty_comment(context: &LintContext, range: TextRange, locator: &Locator, indexer: &Indexer) {
fn empty_comment(context: &LintContext, range: TextRange, locator: &Locator) {
// Check: is the comment empty?
if !locator
.slice(range)
@@ -97,20 +95,12 @@ fn empty_comment(context: &LintContext, range: TextRange, locator: &Locator, ind
}
});
// If there is no character preceding the comment, this comment must be on its own physical line.
// If there is a line preceding the empty comment's line, check if it ends in a line continuation character. (`\`)
let is_on_same_logical_line = indexer
.preceded_by_continuations(first_hash_col, locator.contents())
.is_some();
if let Some(mut diagnostic) = context
.report_diagnostic_if_enabled(EmptyComment, TextRange::new(first_hash_col, line.end()))
{
diagnostic.set_fix(Fix::safe_edit(
if let Some(deletion_start_col) = deletion_start_col {
Edit::deletion(line.start() + deletion_start_col, line.end())
} else if is_on_same_logical_line {
Edit::deletion(first_hash_col, line.end())
} else {
Edit::range_deletion(locator.full_line_range(first_hash_col))
},
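
The `preceded_by_continuations` branch and the `empty_comment_line_continuation.py` test removed in this diff handled empty comments on a continued logical line: deleting the whole physical line there would also delete code and leave a dangling backslash, so that path trimmed only from the `#` to the end of the line. A hypothetical fixture of the shape it covered:

```python
# The second physical line carries real code plus an empty trailing comment,
# and is joined to the first line by a backslash continuation.
total = 1 + \
    2  #
```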

View File

@@ -6,9 +6,8 @@ use ruff_python_ast::{
use ruff_python_semantic::{SemanticModel, analyze::typing};
use ruff_text_size::Ranged;
use crate::Violation;
use crate::checkers::ast::Checker;
use crate::fix;
use crate::{AlwaysFixableViolation, Applicability, Edit, Fix};
/// ## What it does
/// Checks for access to the first or last element of `str.split()` or `str.rsplit()` without
@@ -36,14 +35,10 @@ use crate::{AlwaysFixableViolation, Applicability, Edit, Fix};
/// url = "www.example.com"
/// suffix = url.rsplit(".", maxsplit=1)[-1]
/// ```
///
/// ## Fix Safety
/// This rule's fix is marked as unsafe for `split()`/`rsplit()` calls that contain `**kwargs`, as
/// adding a `maxsplit` keyword to such a call may lead to a duplicate keyword argument error.
#[derive(ViolationMetadata)]
pub(crate) struct MissingMaxsplitArg {
index: SliceBoundary,
actual_split_type: String,
suggested_split_type: String,
}
/// Represents the index of the slice used for this rule (which can only be 0 or -1)
@@ -52,27 +47,25 @@ enum SliceBoundary {
Last,
}
impl AlwaysFixableViolation for MissingMaxsplitArg {
impl Violation for MissingMaxsplitArg {
#[derive_message_formats]
fn message(&self) -> String {
let MissingMaxsplitArg {
actual_split_type: _,
suggested_split_type,
} = self;
format!("Replace with `{suggested_split_type}(..., maxsplit=1)`.")
}
fn fix_title(&self) -> String {
let MissingMaxsplitArg {
index,
actual_split_type,
suggested_split_type,
} = self;
let suggested_split_type = match index {
SliceBoundary::First => "split",
SliceBoundary::Last => "rsplit",
};
if actual_split_type == suggested_split_type {
format!("Pass `maxsplit=1` into `str.{actual_split_type}()`")
} else {
format!("Use `str.{suggested_split_type}()` and pass `maxsplit=1`")
format!(
"Instead of `str.{actual_split_type}()`, call `str.{suggested_split_type}()` and pass `maxsplit=1`",
)
}
}
}
@@ -130,8 +123,8 @@ pub(crate) fn missing_maxsplit_arg(checker: &Checker, value: &Expr, slice: &Expr
};
// Check the function is "split" or "rsplit"
let actual_split_type = attr.as_str();
if !matches!(actual_split_type, "split" | "rsplit") {
let attr = attr.as_str();
if !matches!(attr, "split" | "rsplit") {
return;
}
@@ -168,48 +161,11 @@ pub(crate) fn missing_maxsplit_arg(checker: &Checker, value: &Expr, slice: &Expr
}
}
let suggested_split_type = match slice_boundary {
SliceBoundary::First => "split",
SliceBoundary::Last => "rsplit",
};
let maxsplit_argument_edit = fix::edits::add_argument(
"maxsplit=1",
arguments,
checker.comment_ranges(),
checker.locator().contents(),
);
// Only change `actual_split_type` if it doesn't match `suggested_split_type`
let split_type_edit: Option<Edit> = if actual_split_type == suggested_split_type {
None
} else {
Some(Edit::range_replacement(
suggested_split_type.to_string(),
attr.range(),
))
};
let mut diagnostic = checker.report_diagnostic(
checker.report_diagnostic(
MissingMaxsplitArg {
actual_split_type: actual_split_type.to_string(),
suggested_split_type: suggested_split_type.to_string(),
index: slice_boundary,
actual_split_type: attr.to_string(),
},
expr.range(),
);
diagnostic.set_fix(Fix::applicable_edits(
maxsplit_argument_edit,
split_type_edit,
// If keyword.arg is `None` (i.e. if the function call contains `**kwargs`), mark the fix as unsafe
if arguments
.keywords
.iter()
.any(|keyword| keyword.arg.is_none())
{
Applicability::Unsafe
} else {
Applicability::Safe
},
));
}
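
The `Fix Safety` note and the `keyword.arg.is_none()` check removed above covered one edge case: when the call already forwards `**kwargs`, injecting `maxsplit=1` can collide at runtime, so the old fix was downgraded to unsafe there. A sketch (the `opts` dict is illustrative):

```python
opts = {"maxsplit": 2}

first = "1,2,3".split(",", **opts)[0]      # flagged by PLC0207, but auto-adding maxsplit is risky:
# "1,2,3".split(",", maxsplit=1, **opts)   # would raise TypeError: got multiple values for
#                                          # keyword argument 'maxsplit'
```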

View File

@@ -1,7 +1,7 @@
---
source: crates/ruff_linter/src/rules/pylint/mod.rs
---
missing_maxsplit_arg.py:14:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:14:1: PLC0207 Pass `maxsplit=1` into `str.split()`
|
12 | # Errors
13 | ## Test split called directly on string literal
@@ -10,19 +10,8 @@ missing_maxsplit_arg.py:14:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
15 | "1,2,3".split(",")[-1] # [missing-maxsplit-arg]
16 | "1,2,3".rsplit(",")[0] # [missing-maxsplit-arg]
|
= help: Pass `maxsplit=1` into `str.split()`
Safe fix
11 11 |
12 12 | # Errors
13 13 | ## Test split called directly on string literal
14 |-"1,2,3".split(",")[0] # [missing-maxsplit-arg]
14 |+"1,2,3".split(",", maxsplit=1)[0] # [missing-maxsplit-arg]
15 15 | "1,2,3".split(",")[-1] # [missing-maxsplit-arg]
16 16 | "1,2,3".rsplit(",")[0] # [missing-maxsplit-arg]
17 17 | "1,2,3".rsplit(",")[-1] # [missing-maxsplit-arg]
missing_maxsplit_arg.py:15:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`.
missing_maxsplit_arg.py:15:1: PLC0207 Instead of `str.split()`, call `str.rsplit()` and pass `maxsplit=1`
|
13 | ## Test split called directly on string literal
14 | "1,2,3".split(",")[0] # [missing-maxsplit-arg]
@@ -31,19 +20,8 @@ missing_maxsplit_arg.py:15:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`
16 | "1,2,3".rsplit(",")[0] # [missing-maxsplit-arg]
17 | "1,2,3".rsplit(",")[-1] # [missing-maxsplit-arg]
|
= help: Use `str.rsplit()` and pass `maxsplit=1`
Safe fix
12 12 | # Errors
13 13 | ## Test split called directly on string literal
14 14 | "1,2,3".split(",")[0] # [missing-maxsplit-arg]
15 |-"1,2,3".split(",")[-1] # [missing-maxsplit-arg]
15 |+"1,2,3".rsplit(",", maxsplit=1)[-1] # [missing-maxsplit-arg]
16 16 | "1,2,3".rsplit(",")[0] # [missing-maxsplit-arg]
17 17 | "1,2,3".rsplit(",")[-1] # [missing-maxsplit-arg]
18 18 |
missing_maxsplit_arg.py:16:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:16:1: PLC0207 Instead of `str.rsplit()`, call `str.split()` and pass `maxsplit=1`
|
14 | "1,2,3".split(",")[0] # [missing-maxsplit-arg]
15 | "1,2,3".split(",")[-1] # [missing-maxsplit-arg]
@@ -51,19 +29,8 @@ missing_maxsplit_arg.py:16:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
| ^^^^^^^^^^^^^^^^^^^^^^ PLC0207
17 | "1,2,3".rsplit(",")[-1] # [missing-maxsplit-arg]
|
= help: Use `str.split()` and pass `maxsplit=1`
Safe fix
13 13 | ## Test split called directly on string literal
14 14 | "1,2,3".split(",")[0] # [missing-maxsplit-arg]
15 15 | "1,2,3".split(",")[-1] # [missing-maxsplit-arg]
16 |-"1,2,3".rsplit(",")[0] # [missing-maxsplit-arg]
16 |+"1,2,3".split(",", maxsplit=1)[0] # [missing-maxsplit-arg]
17 17 | "1,2,3".rsplit(",")[-1] # [missing-maxsplit-arg]
18 18 |
19 19 | ## Test split called on string variable
missing_maxsplit_arg.py:17:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`.
missing_maxsplit_arg.py:17:1: PLC0207 Pass `maxsplit=1` into `str.rsplit()`
|
15 | "1,2,3".split(",")[-1] # [missing-maxsplit-arg]
16 | "1,2,3".rsplit(",")[0] # [missing-maxsplit-arg]
@@ -72,19 +39,8 @@ missing_maxsplit_arg.py:17:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`
18 |
19 | ## Test split called on string variable
|
= help: Pass `maxsplit=1` into `str.rsplit()`
Safe fix
14 14 | "1,2,3".split(",")[0] # [missing-maxsplit-arg]
15 15 | "1,2,3".split(",")[-1] # [missing-maxsplit-arg]
16 16 | "1,2,3".rsplit(",")[0] # [missing-maxsplit-arg]
17 |-"1,2,3".rsplit(",")[-1] # [missing-maxsplit-arg]
17 |+"1,2,3".rsplit(",", maxsplit=1)[-1] # [missing-maxsplit-arg]
18 18 |
19 19 | ## Test split called on string variable
20 20 | SEQ.split(",")[0] # [missing-maxsplit-arg]
missing_maxsplit_arg.py:20:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:20:1: PLC0207 Pass `maxsplit=1` into `str.split()`
|
19 | ## Test split called on string variable
20 | SEQ.split(",")[0] # [missing-maxsplit-arg]
@@ -92,19 +48,8 @@ missing_maxsplit_arg.py:20:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
21 | SEQ.split(",")[-1] # [missing-maxsplit-arg]
22 | SEQ.rsplit(",")[0] # [missing-maxsplit-arg]
|
= help: Pass `maxsplit=1` into `str.split()`
Safe fix
17 17 | "1,2,3".rsplit(",")[-1] # [missing-maxsplit-arg]
18 18 |
19 19 | ## Test split called on string variable
20 |-SEQ.split(",")[0] # [missing-maxsplit-arg]
20 |+SEQ.split(",", maxsplit=1)[0] # [missing-maxsplit-arg]
21 21 | SEQ.split(",")[-1] # [missing-maxsplit-arg]
22 22 | SEQ.rsplit(",")[0] # [missing-maxsplit-arg]
23 23 | SEQ.rsplit(",")[-1] # [missing-maxsplit-arg]
missing_maxsplit_arg.py:21:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`.
missing_maxsplit_arg.py:21:1: PLC0207 Instead of `str.split()`, call `str.rsplit()` and pass `maxsplit=1`
|
19 | ## Test split called on string variable
20 | SEQ.split(",")[0] # [missing-maxsplit-arg]
@@ -113,19 +58,8 @@ missing_maxsplit_arg.py:21:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`
22 | SEQ.rsplit(",")[0] # [missing-maxsplit-arg]
23 | SEQ.rsplit(",")[-1] # [missing-maxsplit-arg]
|
= help: Use `str.rsplit()` and pass `maxsplit=1`
Safe fix
18 18 |
19 19 | ## Test split called on string variable
20 20 | SEQ.split(",")[0] # [missing-maxsplit-arg]
21 |-SEQ.split(",")[-1] # [missing-maxsplit-arg]
21 |+SEQ.rsplit(",", maxsplit=1)[-1] # [missing-maxsplit-arg]
22 22 | SEQ.rsplit(",")[0] # [missing-maxsplit-arg]
23 23 | SEQ.rsplit(",")[-1] # [missing-maxsplit-arg]
24 24 |
missing_maxsplit_arg.py:22:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:22:1: PLC0207 Instead of `str.rsplit()`, call `str.split()` and pass `maxsplit=1`
|
20 | SEQ.split(",")[0] # [missing-maxsplit-arg]
21 | SEQ.split(",")[-1] # [missing-maxsplit-arg]
@@ -133,19 +67,8 @@ missing_maxsplit_arg.py:22:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
| ^^^^^^^^^^^^^^^^^^ PLC0207
23 | SEQ.rsplit(",")[-1] # [missing-maxsplit-arg]
|
= help: Use `str.split()` and pass `maxsplit=1`
Safe fix
19 19 | ## Test split called on string variable
20 20 | SEQ.split(",")[0] # [missing-maxsplit-arg]
21 21 | SEQ.split(",")[-1] # [missing-maxsplit-arg]
22 |-SEQ.rsplit(",")[0] # [missing-maxsplit-arg]
22 |+SEQ.split(",", maxsplit=1)[0] # [missing-maxsplit-arg]
23 23 | SEQ.rsplit(",")[-1] # [missing-maxsplit-arg]
24 24 |
25 25 | ## Test split called on class attribute
missing_maxsplit_arg.py:23:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`.
missing_maxsplit_arg.py:23:1: PLC0207 Pass `maxsplit=1` into `str.rsplit()`
|
21 | SEQ.split(",")[-1] # [missing-maxsplit-arg]
22 | SEQ.rsplit(",")[0] # [missing-maxsplit-arg]
@@ -154,19 +77,8 @@ missing_maxsplit_arg.py:23:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`
24 |
25 | ## Test split called on class attribute
|
= help: Pass `maxsplit=1` into `str.rsplit()`
Safe fix
20 20 | SEQ.split(",")[0] # [missing-maxsplit-arg]
21 21 | SEQ.split(",")[-1] # [missing-maxsplit-arg]
22 22 | SEQ.rsplit(",")[0] # [missing-maxsplit-arg]
23 |-SEQ.rsplit(",")[-1] # [missing-maxsplit-arg]
23 |+SEQ.rsplit(",", maxsplit=1)[-1] # [missing-maxsplit-arg]
24 24 |
25 25 | ## Test split called on class attribute
26 26 | Foo.class_str.split(",")[0] # [missing-maxsplit-arg]
missing_maxsplit_arg.py:26:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:26:1: PLC0207 Pass `maxsplit=1` into `str.split()`
|
25 | ## Test split called on class attribute
26 | Foo.class_str.split(",")[0] # [missing-maxsplit-arg]
@@ -174,19 +86,8 @@ missing_maxsplit_arg.py:26:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
27 | Foo.class_str.split(",")[-1] # [missing-maxsplit-arg]
28 | Foo.class_str.rsplit(",")[0] # [missing-maxsplit-arg]
|
= help: Pass `maxsplit=1` into `str.split()`
Safe fix
23 23 | SEQ.rsplit(",")[-1] # [missing-maxsplit-arg]
24 24 |
25 25 | ## Test split called on class attribute
26 |-Foo.class_str.split(",")[0] # [missing-maxsplit-arg]
26 |+Foo.class_str.split(",", maxsplit=1)[0] # [missing-maxsplit-arg]
27 27 | Foo.class_str.split(",")[-1] # [missing-maxsplit-arg]
28 28 | Foo.class_str.rsplit(",")[0] # [missing-maxsplit-arg]
29 29 | Foo.class_str.rsplit(",")[-1] # [missing-maxsplit-arg]
missing_maxsplit_arg.py:27:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`.
missing_maxsplit_arg.py:27:1: PLC0207 Instead of `str.split()`, call `str.rsplit()` and pass `maxsplit=1`
|
25 | ## Test split called on class attribute
26 | Foo.class_str.split(",")[0] # [missing-maxsplit-arg]
@@ -195,19 +96,8 @@ missing_maxsplit_arg.py:27:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`
28 | Foo.class_str.rsplit(",")[0] # [missing-maxsplit-arg]
29 | Foo.class_str.rsplit(",")[-1] # [missing-maxsplit-arg]
|
= help: Use `str.rsplit()` and pass `maxsplit=1`
Safe fix
24 24 |
25 25 | ## Test split called on class attribute
26 26 | Foo.class_str.split(",")[0] # [missing-maxsplit-arg]
27 |-Foo.class_str.split(",")[-1] # [missing-maxsplit-arg]
27 |+Foo.class_str.rsplit(",", maxsplit=1)[-1] # [missing-maxsplit-arg]
28 28 | Foo.class_str.rsplit(",")[0] # [missing-maxsplit-arg]
29 29 | Foo.class_str.rsplit(",")[-1] # [missing-maxsplit-arg]
30 30 |
missing_maxsplit_arg.py:28:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:28:1: PLC0207 Instead of `str.rsplit()`, call `str.split()` and pass `maxsplit=1`
|
26 | Foo.class_str.split(",")[0] # [missing-maxsplit-arg]
27 | Foo.class_str.split(",")[-1] # [missing-maxsplit-arg]
@@ -215,19 +105,8 @@ missing_maxsplit_arg.py:28:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ PLC0207
29 | Foo.class_str.rsplit(",")[-1] # [missing-maxsplit-arg]
|
= help: Use `str.split()` and pass `maxsplit=1`
Safe fix
25 25 | ## Test split called on class attribute
26 26 | Foo.class_str.split(",")[0] # [missing-maxsplit-arg]
27 27 | Foo.class_str.split(",")[-1] # [missing-maxsplit-arg]
28 |-Foo.class_str.rsplit(",")[0] # [missing-maxsplit-arg]
28 |+Foo.class_str.split(",", maxsplit=1)[0] # [missing-maxsplit-arg]
29 29 | Foo.class_str.rsplit(",")[-1] # [missing-maxsplit-arg]
30 30 |
31 31 | ## Test split called on sliced string
missing_maxsplit_arg.py:29:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`.
missing_maxsplit_arg.py:29:1: PLC0207 Pass `maxsplit=1` into `str.rsplit()`
|
27 | Foo.class_str.split(",")[-1] # [missing-maxsplit-arg]
28 | Foo.class_str.rsplit(",")[0] # [missing-maxsplit-arg]
@@ -236,19 +115,8 @@ missing_maxsplit_arg.py:29:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`
30 |
31 | ## Test split called on sliced string
|
= help: Pass `maxsplit=1` into `str.rsplit()`
Safe fix
26 26 | Foo.class_str.split(",")[0] # [missing-maxsplit-arg]
27 27 | Foo.class_str.split(",")[-1] # [missing-maxsplit-arg]
28 28 | Foo.class_str.rsplit(",")[0] # [missing-maxsplit-arg]
29 |-Foo.class_str.rsplit(",")[-1] # [missing-maxsplit-arg]
29 |+Foo.class_str.rsplit(",", maxsplit=1)[-1] # [missing-maxsplit-arg]
30 30 |
31 31 | ## Test split called on sliced string
32 32 | "1,2,3"[::-1].split(",")[0] # [missing-maxsplit-arg]
missing_maxsplit_arg.py:32:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:32:1: PLC0207 Pass `maxsplit=1` into `str.split()`
|
31 | ## Test split called on sliced string
32 | "1,2,3"[::-1].split(",")[0] # [missing-maxsplit-arg]
@@ -256,19 +124,8 @@ missing_maxsplit_arg.py:32:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
33 | "1,2,3"[::-1][::-1].split(",")[0] # [missing-maxsplit-arg]
34 | SEQ[:3].split(",")[0] # [missing-maxsplit-arg]
|
= help: Pass `maxsplit=1` into `str.split()`
Safe fix
29 29 | Foo.class_str.rsplit(",")[-1] # [missing-maxsplit-arg]
30 30 |
31 31 | ## Test split called on sliced string
32 |-"1,2,3"[::-1].split(",")[0] # [missing-maxsplit-arg]
32 |+"1,2,3"[::-1].split(",", maxsplit=1)[0] # [missing-maxsplit-arg]
33 33 | "1,2,3"[::-1][::-1].split(",")[0] # [missing-maxsplit-arg]
34 34 | SEQ[:3].split(",")[0] # [missing-maxsplit-arg]
35 35 | Foo.class_str[1:3].split(",")[-1] # [missing-maxsplit-arg]
missing_maxsplit_arg.py:33:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:33:1: PLC0207 Pass `maxsplit=1` into `str.split()`
|
31 | ## Test split called on sliced string
32 | "1,2,3"[::-1].split(",")[0] # [missing-maxsplit-arg]
@@ -277,19 +134,8 @@ missing_maxsplit_arg.py:33:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
34 | SEQ[:3].split(",")[0] # [missing-maxsplit-arg]
35 | Foo.class_str[1:3].split(",")[-1] # [missing-maxsplit-arg]
|
= help: Pass `maxsplit=1` into `str.split()`
Safe fix
30 30 |
31 31 | ## Test split called on sliced string
32 32 | "1,2,3"[::-1].split(",")[0] # [missing-maxsplit-arg]
33 |-"1,2,3"[::-1][::-1].split(",")[0] # [missing-maxsplit-arg]
33 |+"1,2,3"[::-1][::-1].split(",", maxsplit=1)[0] # [missing-maxsplit-arg]
34 34 | SEQ[:3].split(",")[0] # [missing-maxsplit-arg]
35 35 | Foo.class_str[1:3].split(",")[-1] # [missing-maxsplit-arg]
36 36 | "1,2,3"[::-1].rsplit(",")[0] # [missing-maxsplit-arg]
missing_maxsplit_arg.py:34:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:34:1: PLC0207 Pass `maxsplit=1` into `str.split()`
|
32 | "1,2,3"[::-1].split(",")[0] # [missing-maxsplit-arg]
33 | "1,2,3"[::-1][::-1].split(",")[0] # [missing-maxsplit-arg]
@@ -298,19 +144,8 @@ missing_maxsplit_arg.py:34:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
35 | Foo.class_str[1:3].split(",")[-1] # [missing-maxsplit-arg]
36 | "1,2,3"[::-1].rsplit(",")[0] # [missing-maxsplit-arg]
|
= help: Pass `maxsplit=1` into `str.split()`
Safe fix
31 31 | ## Test split called on sliced string
32 32 | "1,2,3"[::-1].split(",")[0] # [missing-maxsplit-arg]
33 33 | "1,2,3"[::-1][::-1].split(",")[0] # [missing-maxsplit-arg]
34 |-SEQ[:3].split(",")[0] # [missing-maxsplit-arg]
34 |+SEQ[:3].split(",", maxsplit=1)[0] # [missing-maxsplit-arg]
35 35 | Foo.class_str[1:3].split(",")[-1] # [missing-maxsplit-arg]
36 36 | "1,2,3"[::-1].rsplit(",")[0] # [missing-maxsplit-arg]
37 37 | SEQ[:3].rsplit(",")[0] # [missing-maxsplit-arg]
missing_maxsplit_arg.py:35:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`.
missing_maxsplit_arg.py:35:1: PLC0207 Instead of `str.split()`, call `str.rsplit()` and pass `maxsplit=1`
|
33 | "1,2,3"[::-1][::-1].split(",")[0] # [missing-maxsplit-arg]
34 | SEQ[:3].split(",")[0] # [missing-maxsplit-arg]
@@ -319,19 +154,8 @@ missing_maxsplit_arg.py:35:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`
36 | "1,2,3"[::-1].rsplit(",")[0] # [missing-maxsplit-arg]
37 | SEQ[:3].rsplit(",")[0] # [missing-maxsplit-arg]
|
= help: Use `str.rsplit()` and pass `maxsplit=1`
Safe fix
32 32 | "1,2,3"[::-1].split(",")[0] # [missing-maxsplit-arg]
33 33 | "1,2,3"[::-1][::-1].split(",")[0] # [missing-maxsplit-arg]
34 34 | SEQ[:3].split(",")[0] # [missing-maxsplit-arg]
35 |-Foo.class_str[1:3].split(",")[-1] # [missing-maxsplit-arg]
35 |+Foo.class_str[1:3].rsplit(",", maxsplit=1)[-1] # [missing-maxsplit-arg]
36 36 | "1,2,3"[::-1].rsplit(",")[0] # [missing-maxsplit-arg]
37 37 | SEQ[:3].rsplit(",")[0] # [missing-maxsplit-arg]
38 38 | Foo.class_str[1:3].rsplit(",")[-1] # [missing-maxsplit-arg]
missing_maxsplit_arg.py:36:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:36:1: PLC0207 Instead of `str.rsplit()`, call `str.split()` and pass `maxsplit=1`
|
34 | SEQ[:3].split(",")[0] # [missing-maxsplit-arg]
35 | Foo.class_str[1:3].split(",")[-1] # [missing-maxsplit-arg]
@@ -340,19 +164,8 @@ missing_maxsplit_arg.py:36:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
37 | SEQ[:3].rsplit(",")[0] # [missing-maxsplit-arg]
38 | Foo.class_str[1:3].rsplit(",")[-1] # [missing-maxsplit-arg]
|
= help: Use `str.split()` and pass `maxsplit=1`
Safe fix
33 33 | "1,2,3"[::-1][::-1].split(",")[0] # [missing-maxsplit-arg]
34 34 | SEQ[:3].split(",")[0] # [missing-maxsplit-arg]
35 35 | Foo.class_str[1:3].split(",")[-1] # [missing-maxsplit-arg]
36 |-"1,2,3"[::-1].rsplit(",")[0] # [missing-maxsplit-arg]
36 |+"1,2,3"[::-1].split(",", maxsplit=1)[0] # [missing-maxsplit-arg]
37 37 | SEQ[:3].rsplit(",")[0] # [missing-maxsplit-arg]
38 38 | Foo.class_str[1:3].rsplit(",")[-1] # [missing-maxsplit-arg]
39 39 |
missing_maxsplit_arg.py:37:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:37:1: PLC0207 Instead of `str.rsplit()`, call `str.split()` and pass `maxsplit=1`
|
35 | Foo.class_str[1:3].split(",")[-1] # [missing-maxsplit-arg]
36 | "1,2,3"[::-1].rsplit(",")[0] # [missing-maxsplit-arg]
@@ -360,19 +173,8 @@ missing_maxsplit_arg.py:37:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
| ^^^^^^^^^^^^^^^^^^^^^^ PLC0207
38 | Foo.class_str[1:3].rsplit(",")[-1] # [missing-maxsplit-arg]
|
= help: Use `str.split()` and pass `maxsplit=1`
Safe fix
34 34 | SEQ[:3].split(",")[0] # [missing-maxsplit-arg]
35 35 | Foo.class_str[1:3].split(",")[-1] # [missing-maxsplit-arg]
36 36 | "1,2,3"[::-1].rsplit(",")[0] # [missing-maxsplit-arg]
37 |-SEQ[:3].rsplit(",")[0] # [missing-maxsplit-arg]
37 |+SEQ[:3].split(",", maxsplit=1)[0] # [missing-maxsplit-arg]
38 38 | Foo.class_str[1:3].rsplit(",")[-1] # [missing-maxsplit-arg]
39 39 |
40 40 | ## Test sep given as named argument
missing_maxsplit_arg.py:38:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`.
missing_maxsplit_arg.py:38:1: PLC0207 Pass `maxsplit=1` into `str.rsplit()`
|
36 | "1,2,3"[::-1].rsplit(",")[0] # [missing-maxsplit-arg]
37 | SEQ[:3].rsplit(",")[0] # [missing-maxsplit-arg]
@@ -381,19 +183,8 @@ missing_maxsplit_arg.py:38:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`
39 |
40 | ## Test sep given as named argument
|
= help: Pass `maxsplit=1` into `str.rsplit()`
Safe fix
35 35 | Foo.class_str[1:3].split(",")[-1] # [missing-maxsplit-arg]
36 36 | "1,2,3"[::-1].rsplit(",")[0] # [missing-maxsplit-arg]
37 37 | SEQ[:3].rsplit(",")[0] # [missing-maxsplit-arg]
38 |-Foo.class_str[1:3].rsplit(",")[-1] # [missing-maxsplit-arg]
38 |+Foo.class_str[1:3].rsplit(",", maxsplit=1)[-1] # [missing-maxsplit-arg]
39 39 |
40 40 | ## Test sep given as named argument
41 41 | "1,2,3".split(sep=",")[0] # [missing-maxsplit-arg]
missing_maxsplit_arg.py:41:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:41:1: PLC0207 Pass `maxsplit=1` into `str.split()`
|
40 | ## Test sep given as named argument
41 | "1,2,3".split(sep=",")[0] # [missing-maxsplit-arg]
@@ -401,19 +192,8 @@ missing_maxsplit_arg.py:41:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
42 | "1,2,3".split(sep=",")[-1] # [missing-maxsplit-arg]
43 | "1,2,3".rsplit(sep=",")[0] # [missing-maxsplit-arg]
|
= help: Pass `maxsplit=1` into `str.split()`
Safe fix
38 38 | Foo.class_str[1:3].rsplit(",")[-1] # [missing-maxsplit-arg]
39 39 |
40 40 | ## Test sep given as named argument
41 |-"1,2,3".split(sep=",")[0] # [missing-maxsplit-arg]
41 |+"1,2,3".split(maxsplit=1, sep=",")[0] # [missing-maxsplit-arg]
42 42 | "1,2,3".split(sep=",")[-1] # [missing-maxsplit-arg]
43 43 | "1,2,3".rsplit(sep=",")[0] # [missing-maxsplit-arg]
44 44 | "1,2,3".rsplit(sep=",")[-1] # [missing-maxsplit-arg]
missing_maxsplit_arg.py:42:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`.
missing_maxsplit_arg.py:42:1: PLC0207 Instead of `str.split()`, call `str.rsplit()` and pass `maxsplit=1`
|
40 | ## Test sep given as named argument
41 | "1,2,3".split(sep=",")[0] # [missing-maxsplit-arg]
@@ -422,19 +202,8 @@ missing_maxsplit_arg.py:42:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`
43 | "1,2,3".rsplit(sep=",")[0] # [missing-maxsplit-arg]
44 | "1,2,3".rsplit(sep=",")[-1] # [missing-maxsplit-arg]
|
= help: Use `str.rsplit()` and pass `maxsplit=1`
Safe fix
39 39 |
40 40 | ## Test sep given as named argument
41 41 | "1,2,3".split(sep=",")[0] # [missing-maxsplit-arg]
42 |-"1,2,3".split(sep=",")[-1] # [missing-maxsplit-arg]
42 |+"1,2,3".rsplit(maxsplit=1, sep=",")[-1] # [missing-maxsplit-arg]
43 43 | "1,2,3".rsplit(sep=",")[0] # [missing-maxsplit-arg]
44 44 | "1,2,3".rsplit(sep=",")[-1] # [missing-maxsplit-arg]
45 45 |
missing_maxsplit_arg.py:43:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:43:1: PLC0207 Instead of `str.rsplit()`, call `str.split()` and pass `maxsplit=1`
|
41 | "1,2,3".split(sep=",")[0] # [missing-maxsplit-arg]
42 | "1,2,3".split(sep=",")[-1] # [missing-maxsplit-arg]
@@ -442,19 +211,8 @@ missing_maxsplit_arg.py:43:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
| ^^^^^^^^^^^^^^^^^^^^^^^^^^ PLC0207
44 | "1,2,3".rsplit(sep=",")[-1] # [missing-maxsplit-arg]
|
= help: Use `str.split()` and pass `maxsplit=1`
Safe fix
40 40 | ## Test sep given as named argument
41 41 | "1,2,3".split(sep=",")[0] # [missing-maxsplit-arg]
42 42 | "1,2,3".split(sep=",")[-1] # [missing-maxsplit-arg]
43 |-"1,2,3".rsplit(sep=",")[0] # [missing-maxsplit-arg]
43 |+"1,2,3".split(maxsplit=1, sep=",")[0] # [missing-maxsplit-arg]
44 44 | "1,2,3".rsplit(sep=",")[-1] # [missing-maxsplit-arg]
45 45 |
46 46 | ## Special cases
missing_maxsplit_arg.py:44:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`.
missing_maxsplit_arg.py:44:1: PLC0207 Pass `maxsplit=1` into `str.rsplit()`
|
42 | "1,2,3".split(sep=",")[-1] # [missing-maxsplit-arg]
43 | "1,2,3".rsplit(sep=",")[0] # [missing-maxsplit-arg]
@@ -463,19 +221,8 @@ missing_maxsplit_arg.py:44:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`
45 |
46 | ## Special cases
|
= help: Pass `maxsplit=1` into `str.rsplit()`
Safe fix
41 41 | "1,2,3".split(sep=",")[0] # [missing-maxsplit-arg]
42 42 | "1,2,3".split(sep=",")[-1] # [missing-maxsplit-arg]
43 43 | "1,2,3".rsplit(sep=",")[0] # [missing-maxsplit-arg]
44 |-"1,2,3".rsplit(sep=",")[-1] # [missing-maxsplit-arg]
44 |+"1,2,3".rsplit(maxsplit=1, sep=",")[-1] # [missing-maxsplit-arg]
45 45 |
46 46 | ## Special cases
47 47 | "1,2,3".split("\n")[0] # [missing-maxsplit-arg]
missing_maxsplit_arg.py:47:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:47:1: PLC0207 Pass `maxsplit=1` into `str.split()`
|
46 | ## Special cases
47 | "1,2,3".split("\n")[0] # [missing-maxsplit-arg]
@@ -483,19 +230,8 @@ missing_maxsplit_arg.py:47:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
48 | "1,2,3".split("split")[-1] # [missing-maxsplit-arg]
49 | "1,2,3".rsplit("rsplit")[0] # [missing-maxsplit-arg]
|
= help: Pass `maxsplit=1` into `str.split()`
Safe fix
44 44 | "1,2,3".rsplit(sep=",")[-1] # [missing-maxsplit-arg]
45 45 |
46 46 | ## Special cases
47 |-"1,2,3".split("\n")[0] # [missing-maxsplit-arg]
47 |+"1,2,3".split("\n", maxsplit=1)[0] # [missing-maxsplit-arg]
48 48 | "1,2,3".split("split")[-1] # [missing-maxsplit-arg]
49 49 | "1,2,3".rsplit("rsplit")[0] # [missing-maxsplit-arg]
50 50 |
missing_maxsplit_arg.py:48:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`.
missing_maxsplit_arg.py:48:1: PLC0207 Instead of `str.split()`, call `str.rsplit()` and pass `maxsplit=1`
|
46 | ## Special cases
47 | "1,2,3".split("\n")[0] # [missing-maxsplit-arg]
@@ -503,19 +239,8 @@ missing_maxsplit_arg.py:48:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`
| ^^^^^^^^^^^^^^^^^^^^^^^^^^ PLC0207
49 | "1,2,3".rsplit("rsplit")[0] # [missing-maxsplit-arg]
|
= help: Use `str.rsplit()` and pass `maxsplit=1`
Safe fix
45 45 |
46 46 | ## Special cases
47 47 | "1,2,3".split("\n")[0] # [missing-maxsplit-arg]
48 |-"1,2,3".split("split")[-1] # [missing-maxsplit-arg]
48 |+"1,2,3".rsplit("split", maxsplit=1)[-1] # [missing-maxsplit-arg]
49 49 | "1,2,3".rsplit("rsplit")[0] # [missing-maxsplit-arg]
50 50 |
51 51 | ## Test class attribute named split
missing_maxsplit_arg.py:49:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:49:1: PLC0207 Instead of `str.rsplit()`, call `str.split()` and pass `maxsplit=1`
|
47 | "1,2,3".split("\n")[0] # [missing-maxsplit-arg]
48 | "1,2,3".split("split")[-1] # [missing-maxsplit-arg]
@@ -524,19 +249,8 @@ missing_maxsplit_arg.py:49:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
50 |
51 | ## Test class attribute named split
|
= help: Use `str.split()` and pass `maxsplit=1`
Safe fix
46 46 | ## Special cases
47 47 | "1,2,3".split("\n")[0] # [missing-maxsplit-arg]
48 48 | "1,2,3".split("split")[-1] # [missing-maxsplit-arg]
49 |-"1,2,3".rsplit("rsplit")[0] # [missing-maxsplit-arg]
49 |+"1,2,3".split("rsplit", maxsplit=1)[0] # [missing-maxsplit-arg]
50 50 |
51 51 | ## Test class attribute named split
52 52 | Bar.split.split(",")[0] # [missing-maxsplit-arg]
missing_maxsplit_arg.py:52:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:52:1: PLC0207 Pass `maxsplit=1` into `str.split()`
|
51 | ## Test class attribute named split
52 | Bar.split.split(",")[0] # [missing-maxsplit-arg]
@@ -544,19 +258,8 @@ missing_maxsplit_arg.py:52:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
53 | Bar.split.split(",")[-1] # [missing-maxsplit-arg]
54 | Bar.split.rsplit(",")[0] # [missing-maxsplit-arg]
|
= help: Pass `maxsplit=1` into `str.split()`
Safe fix
49 49 | "1,2,3".rsplit("rsplit")[0] # [missing-maxsplit-arg]
50 50 |
51 51 | ## Test class attribute named split
52 |-Bar.split.split(",")[0] # [missing-maxsplit-arg]
52 |+Bar.split.split(",", maxsplit=1)[0] # [missing-maxsplit-arg]
53 53 | Bar.split.split(",")[-1] # [missing-maxsplit-arg]
54 54 | Bar.split.rsplit(",")[0] # [missing-maxsplit-arg]
55 55 | Bar.split.rsplit(",")[-1] # [missing-maxsplit-arg]
missing_maxsplit_arg.py:53:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`.
missing_maxsplit_arg.py:53:1: PLC0207 Instead of `str.split()`, call `str.rsplit()` and pass `maxsplit=1`
|
51 | ## Test class attribute named split
52 | Bar.split.split(",")[0] # [missing-maxsplit-arg]
@@ -565,19 +268,8 @@ missing_maxsplit_arg.py:53:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`
54 | Bar.split.rsplit(",")[0] # [missing-maxsplit-arg]
55 | Bar.split.rsplit(",")[-1] # [missing-maxsplit-arg]
|
= help: Use `str.rsplit()` and pass `maxsplit=1`
Safe fix
50 50 |
51 51 | ## Test class attribute named split
52 52 | Bar.split.split(",")[0] # [missing-maxsplit-arg]
53 |-Bar.split.split(",")[-1] # [missing-maxsplit-arg]
53 |+Bar.split.rsplit(",", maxsplit=1)[-1] # [missing-maxsplit-arg]
54 54 | Bar.split.rsplit(",")[0] # [missing-maxsplit-arg]
55 55 | Bar.split.rsplit(",")[-1] # [missing-maxsplit-arg]
56 56 |
missing_maxsplit_arg.py:54:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:54:1: PLC0207 Instead of `str.rsplit()`, call `str.split()` and pass `maxsplit=1`
|
52 | Bar.split.split(",")[0] # [missing-maxsplit-arg]
53 | Bar.split.split(",")[-1] # [missing-maxsplit-arg]
@@ -585,19 +277,8 @@ missing_maxsplit_arg.py:54:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
| ^^^^^^^^^^^^^^^^^^^^^^^^ PLC0207
55 | Bar.split.rsplit(",")[-1] # [missing-maxsplit-arg]
|
= help: Use `str.split()` and pass `maxsplit=1`
Safe fix
51 51 | ## Test class attribute named split
52 52 | Bar.split.split(",")[0] # [missing-maxsplit-arg]
53 53 | Bar.split.split(",")[-1] # [missing-maxsplit-arg]
54 |-Bar.split.rsplit(",")[0] # [missing-maxsplit-arg]
54 |+Bar.split.split(",", maxsplit=1)[0] # [missing-maxsplit-arg]
55 55 | Bar.split.rsplit(",")[-1] # [missing-maxsplit-arg]
56 56 |
57 57 | ## Test unpacked dict literal kwargs
missing_maxsplit_arg.py:55:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`.
missing_maxsplit_arg.py:55:1: PLC0207 Pass `maxsplit=1` into `str.rsplit()`
|
53 | Bar.split.split(",")[-1] # [missing-maxsplit-arg]
54 | Bar.split.rsplit(",")[0] # [missing-maxsplit-arg]
@@ -606,37 +287,15 @@ missing_maxsplit_arg.py:55:1: PLC0207 [*] Replace with `rsplit(..., maxsplit=1)`
56 |
57 | ## Test unpacked dict literal kwargs
|
= help: Pass `maxsplit=1` into `str.rsplit()`
Safe fix
52 52 | Bar.split.split(",")[0] # [missing-maxsplit-arg]
53 53 | Bar.split.split(",")[-1] # [missing-maxsplit-arg]
54 54 | Bar.split.rsplit(",")[0] # [missing-maxsplit-arg]
55 |-Bar.split.rsplit(",")[-1] # [missing-maxsplit-arg]
55 |+Bar.split.rsplit(",", maxsplit=1)[-1] # [missing-maxsplit-arg]
56 56 |
57 57 | ## Test unpacked dict literal kwargs
58 58 | "1,2,3".split(**{"sep": ","})[0] # [missing-maxsplit-arg]
missing_maxsplit_arg.py:58:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:58:1: PLC0207 Pass `maxsplit=1` into `str.split()`
|
57 | ## Test unpacked dict literal kwargs
58 | "1,2,3".split(**{"sep": ","})[0] # [missing-maxsplit-arg]
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ PLC0207
|
= help: Pass `maxsplit=1` into `str.split()`
Unsafe fix
55 55 | Bar.split.rsplit(",")[-1] # [missing-maxsplit-arg]
56 56 |
57 57 | ## Test unpacked dict literal kwargs
58 |-"1,2,3".split(**{"sep": ","})[0] # [missing-maxsplit-arg]
58 |+"1,2,3".split(maxsplit=1, **{"sep": ","})[0] # [missing-maxsplit-arg]
59 59 |
60 60 |
61 61 | # OK
missing_maxsplit_arg.py:179:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:179:1: PLC0207 Pass `maxsplit=1` into `str.split()`
|
177 | # Errors
178 | kwargs_without_maxsplit = {"seq": ","}
@@ -645,19 +304,8 @@ missing_maxsplit_arg.py:179:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`
180 | # OK
181 | kwargs_with_maxsplit = {"maxsplit": 1}
|
= help: Pass `maxsplit=1` into `str.split()`
Unsafe fix
176 176 | ## TODO: These require the ability to resolve a dict variable name to a value
177 177 | # Errors
178 178 | kwargs_without_maxsplit = {"seq": ","}
179 |-"1,2,3".split(**kwargs_without_maxsplit)[0] # TODO: [missing-maxsplit-arg]
179 |+"1,2,3".split(maxsplit=1, **kwargs_without_maxsplit)[0] # TODO: [missing-maxsplit-arg]
180 180 | # OK
181 181 | kwargs_with_maxsplit = {"maxsplit": 1}
182 182 | "1,2,3".split(",", **kwargs_with_maxsplit)[0] # TODO: false positive
missing_maxsplit_arg.py:182:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:182:1: PLC0207 Pass `maxsplit=1` into `str.split()`
|
180 | # OK
181 | kwargs_with_maxsplit = {"maxsplit": 1}
@@ -666,29 +314,11 @@ missing_maxsplit_arg.py:182:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`
183 | kwargs_with_maxsplit = {"sep": ",", "maxsplit": 1}
184 | "1,2,3".split(**kwargs_with_maxsplit)[0] # TODO: false positive
|
= help: Pass `maxsplit=1` into `str.split()`
Unsafe fix
179 179 | "1,2,3".split(**kwargs_without_maxsplit)[0] # TODO: [missing-maxsplit-arg]
180 180 | # OK
181 181 | kwargs_with_maxsplit = {"maxsplit": 1}
182 |-"1,2,3".split(",", **kwargs_with_maxsplit)[0] # TODO: false positive
182 |+"1,2,3".split(",", maxsplit=1, **kwargs_with_maxsplit)[0] # TODO: false positive
183 183 | kwargs_with_maxsplit = {"sep": ",", "maxsplit": 1}
184 184 | "1,2,3".split(**kwargs_with_maxsplit)[0] # TODO: false positive
missing_maxsplit_arg.py:184:1: PLC0207 [*] Replace with `split(..., maxsplit=1)`.
missing_maxsplit_arg.py:184:1: PLC0207 Pass `maxsplit=1` into `str.split()`
|
182 | "1,2,3".split(",", **kwargs_with_maxsplit)[0] # TODO: false positive
183 | kwargs_with_maxsplit = {"sep": ",", "maxsplit": 1}
184 | "1,2,3".split(**kwargs_with_maxsplit)[0] # TODO: false positive
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ PLC0207
|
= help: Pass `maxsplit=1` into `str.split()`
Unsafe fix
181 181 | kwargs_with_maxsplit = {"maxsplit": 1}
182 182 | "1,2,3".split(",", **kwargs_with_maxsplit)[0] # TODO: false positive
183 183 | kwargs_with_maxsplit = {"sep": ",", "maxsplit": 1}
184 |-"1,2,3".split(**kwargs_with_maxsplit)[0] # TODO: false positive
184 |+"1,2,3".split(maxsplit=1, **kwargs_with_maxsplit)[0] # TODO: false positive

View File

@@ -1,36 +0,0 @@
---
source: crates/ruff_linter/src/rules/pylint/mod.rs
---
empty_comment_line_continuation.py:1:1: PLR2044 [*] Line with empty comment
|
1 | #
| ^ PLR2044
2 | x = 0 \
3 | #
|
= help: Delete the empty comment
Safe fix
1 |-#
2 1 | x = 0 \
3 2 | #
4 3 | +1
empty_comment_line_continuation.py:3:1: PLR2044 [*] Line with empty comment
|
1 | #
2 | x = 0 \
3 | #
| ^ PLR2044
4 | +1
5 | print(x)
|
= help: Delete the empty comment
Safe fix
1 1 | #
2 2 | x = 0 \
3 |-#
3 |+
4 4 | +1
5 5 | print(x)

View File

@@ -104,13 +104,6 @@ pub(crate) fn reimplemented_operator(checker: &Checker, target: &FunctionLike) {
return;
}
// Skip decorated functions
if let FunctionLike::Function(func) = target {
if !func.decorator_list.is_empty() {
return;
}
}
let Some(params) = target.parameters() else {
return;
};
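
The hunk above touches the guard in reimplemented_operator that bails out on decorated functions. A hedged Python illustration of why such a guard exists (the decorator and names are chosen only for the example): rewriting a decorated function as a bare operator.* callable would silently drop whatever behavior the decorator adds.

import functools
import operator

def add(x, y):                    # plain reimplementation; operator.add is an equivalent drop-in
    return x + y

assert add(1, 2) == operator.add(1, 2)

@functools.lru_cache(maxsize=None)
def cached_add(x, y):             # decorated: the wrapper memoizes and requires hashable args,
    return x + y                  # so suggesting operator.add here would change behavior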

Some files were not shown because too many files have changed in this diff.