Compare commits

368 Commits: charlie/pa... → string-pre...
The compare view lists the 368 individual commits by SHA only; the Author, Date, and message columns were not captured.
.editorconfig

@@ -10,7 +10,7 @@ indent_style = space
 insert_final_newline = true
 indent_size = 2

-[*.{rs,py}]
+[*.{rs,py,pyi}]
 indent_size = 4

 [*.snap]
.gitattributes (vendored, 4 changed lines)

@@ -1,7 +1,7 @@
 * text=auto eol=lf

-crates/ruff/resources/test/fixtures/isort/line_ending_crlf.py text eol=crlf
-crates/ruff/resources/test/fixtures/pycodestyle/W605_1.py text eol=crlf
+crates/ruff_linter/resources/test/fixtures/isort/line_ending_crlf.py text eol=crlf
+crates/ruff_linter/resources/test/fixtures/pycodestyle/W605_1.py text eol=crlf

 ruff.schema.json linguist-generated=true text=auto eol=lf
 *.md.snap linguist-language=Markdown
.github/CODEOWNERS (vendored, 2 changed lines)

@@ -6,4 +6,4 @@
 # - Order is important. The last matching pattern has the most precedence.

 # Jupyter
-/crates/ruff/src/jupyter/ @dhruvmanila
+/crates/ruff_linter/src/jupyter/ @dhruvmanila
.github/dependabot.yml (vendored, 12 changed lines)

@@ -4,8 +4,10 @@ updates:
     directory: "/"
     schedule:
       interval: "weekly"
-      day: "monday"
-      time: "12:00"
-      timezone: "America/New_York"
-    commit-message:
-      prefix: "ci(deps)"
     labels: ["internal"]
+
+  - package-ecosystem: "cargo"
+    directory: "/"
+    schedule:
+      interval: "daily"
+    labels: ["internal"]
.github/release.yml (vendored, 3 changed lines)

@@ -20,6 +20,9 @@ changelog:
     - title: Bug Fixes
       labels:
         - bug
+    - title: Preview
+      labels:
+        - preview
     - title: Other Changes
       labels:
         - "*"
.github/workflows/benchmark.yaml (vendored, deleted, 136 lines)

@@ -1,136 +0,0 @@
-name: Benchmark
-
-on:
-  pull_request:
-    paths:
-      - "Cargo.toml"
-      - "Cargo.lock"
-      - "rust-toolchain"
-      - "crates/**"
-      - "!crates/ruff_dev"
-      - "!crates/ruff_shrinking"
-
-  workflow_dispatch:
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.event.pull_request.number || github.sha }}
-  cancel-in-progress: true
-
-jobs:
-  run-benchmark:
-    if: github.event_name == 'pull_request'
-    name: "Run | ${{ matrix.os }}"
-    strategy:
-      matrix:
-        os: [ubuntu-latest, windows-latest]
-    runs-on: ${{ matrix.os }}
-
-    steps:
-      - name: "PR - Checkout Branch"
-        uses: actions/checkout@v3
-        with:
-          ref: ${{ github.event.pull_request.head.sha }}
-
-      - name: "PR - Install Rust toolchain"
-        run: rustup show
-
-      - uses: Swatinem/rust-cache@v2
-
-      - name: "PR - Build benchmarks"
-        run: cargo bench -p ruff_benchmark --no-run
-
-      - name: "PR - Run benchmarks"
-        run: cargo benchmark --save-baseline=pr
-
-      - name: "Main - Checkout Branch"
-        uses: actions/checkout@v3
-        with:
-          clean: false
-          ref: main
-
-      - name: "Main - Install Rust toolchain"
-        run: rustup show
-
-      - name: "Main - Build benchmarks"
-        run: cargo bench -p ruff_benchmark --no-run
-
-      - name: "Main - Run benchmarks"
-        run: cargo benchmark --save-baseline=main
-
-      - name: "Upload benchmark results"
-        uses: actions/upload-artifact@v3
-        with:
-          name: benchmark-results-${{ matrix.os }}
-          path: ./target/criterion
-
-      # Cleanup
-      - name: Remove Criterion Artifact
-        uses: JesseTG/rm@v1.0.3
-        with:
-          path: ./target/criterion
-
-  benchmark-compare:
-    if: github.event_name == 'pull_request'
-    runs-on: ubuntu-latest
-    name: Compare
-    needs:
-      - run-benchmark
-
-    steps:
-      - name: "Install Rust toolchain"
-        run: rustup show
-
-      - name: "Install critcmp"
-        uses: taiki-e/install-action@v2
-        with:
-          tool: critcmp
-
-      - name: "Linux | Download PR benchmark results"
-        uses: actions/download-artifact@v3
-        with:
-          name: benchmark-results-ubuntu-latest
-          path: ./target/criterion
-
-      - name: "Linux | Compare benchmark results"
-        shell: bash
-        run: |
-          echo "### Benchmark" >> summary.md
-          echo "#### Linux" >> summary.md
-          echo "\`\`\`" >> summary.md
-          critcmp main pr >> summary.md
-          echo "\`\`\`" >> summary.md
-          echo "" >> summary.md
-
-      - name: "Linux | Cleanup benchmark results"
-        run: rm -rf ./target/criterion
-
-      - name: "Windows | Download PR benchmark results"
-        uses: actions/download-artifact@v3
-        with:
-          name: benchmark-results-windows-latest
-          path: ./target/criterion
-
-      - name: "Windows | Compare benchmark results"
-        shell: bash
-        run: |
-          echo "#### Windows" >> summary.md
-          echo "\`\`\`" >> summary.md
-          critcmp main pr >> summary.md
-          echo "\`\`\`" >> summary.md
-          echo "" >> summary.md
-
-          echo ${{ github.event.pull_request.number }} > pr-number
-
-          cat summary.md > $GITHUB_STEP_SUMMARY
-
-      - uses: actions/upload-artifact@v3
-        name: Upload PR Number
-        with:
-          name: pr-number
-          path: pr-number
-
-      - uses: actions/upload-artifact@v3
-        name: Upload Summary
-        with:
-          name: summary
-          path: summary.md
.github/workflows/ci.yaml (vendored, 53 changed lines)

@@ -26,11 +26,11 @@ jobs:
       linter: ${{ steps.changed.outputs.linter_any_changed }}
       formatter: ${{ steps.changed.outputs.formatter_any_changed }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0

-      - uses: tj-actions/changed-files@v37
+      - uses: tj-actions/changed-files@v39
        id: changed
        with:
          files_yaml: |

@@ -62,7 +62,7 @@ jobs:
     name: "cargo fmt"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: "Install Rust toolchain"
         run: rustup component add rustfmt
       - run: cargo fmt --all --check

@@ -71,7 +71,7 @@ jobs:
     name: "cargo clippy"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: "Install Rust toolchain"
         run: |
           rustup component add clippy

@@ -89,7 +89,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     name: "cargo test | ${{ matrix.os }}"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: "Install Rust toolchain"
         run: rustup show
       - name: "Install cargo insta"

@@ -125,7 +125,7 @@ jobs:
     runs-on: ubuntu-latest
     name: "cargo fuzz"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: "Install Rust toolchain"
         run: rustup show
       - uses: Swatinem/rust-cache@v2

@@ -141,7 +141,7 @@ jobs:
     runs-on: ubuntu-latest
     name: "cargo test (wasm)"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: "Install Rust toolchain"
         run: rustup target add wasm32-unknown-unknown
       - uses: actions/setup-node@v3

@@ -160,7 +160,7 @@ jobs:
     name: "test scripts"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: "Install Rust toolchain"
         run: rustup component add rustfmt
       - uses: Swatinem/rust-cache@v2

@@ -182,7 +182,7 @@ jobs:
     # Only runs on pull requests, since that is the only we way we can find the base version for comparison.
     if: github.event_name == 'pull_request' && needs.determine_changes.outputs.linter == 'true'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_VERSION }}

@@ -227,7 +227,7 @@ jobs:
     name: "cargo udeps"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: "Install nightly Rust toolchain"
         # Only pinned to make caching work, update freely
         run: rustup toolchain install nightly-2023-06-08

@@ -241,7 +241,7 @@ jobs:
     name: "python package"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_VERSION }}

@@ -265,7 +265,7 @@ jobs:
     name: "pre-commit"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_VERSION }}

@@ -295,7 +295,7 @@ jobs:
     env:
       MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
       - name: "Add SSH key"
         if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}

@@ -330,7 +330,7 @@ jobs:
     needs: determine_changes
     if: needs.determine_changes.outputs.formatter == 'true' || github.ref == 'refs/heads/main'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: "Install Rust toolchain"
         run: rustup show
       - name: "Cache rust"

@@ -341,3 +341,28 @@ jobs:
         run: cat target/progress_projects_stats.txt > $GITHUB_STEP_SUMMARY
       - name: "Remove checkouts from cache"
         run: rm -r target/progress_projects
+
+  benchmarks:
+    runs-on: ubuntu-latest
+    steps:
+      - name: "Checkout Branch"
+        uses: actions/checkout@v4
+
+      - name: "Install Rust toolchain"
+        run: rustup show
+
+      - name: "Install codspeed"
+        uses: taiki-e/install-action@v2
+        with:
+          tool: cargo-codspeed
+
+      - uses: Swatinem/rust-cache@v2
+
+      - name: "Build benchmarks"
+        run: cargo codspeed build --features codspeed -p ruff_benchmark
+
+      - name: "Run benchmarks"
+        uses: CodSpeedHQ/action@v1
+        with:
+          run: cargo codspeed run
+          token: ${{ secrets.CODSPEED_TOKEN }}
.github/workflows/docs.yaml (vendored, 14 changed lines)

@@ -2,6 +2,11 @@ name: mkdocs

 on:
   workflow_dispatch:
+    inputs:
+      ref:
+        description: "The commit SHA, tag, or branch to publish. Uses the default branch if not specified."
+        default: ""
+        type: string
   release:
     types: [published]

@@ -12,7 +17,9 @@ jobs:
       CF_API_TOKEN_EXISTS: ${{ secrets.CF_API_TOKEN != '' }}
       MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ inputs.ref }}
       - uses: actions/setup-python@v4
       - name: "Add SSH key"
         if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}

@@ -40,8 +47,9 @@ jobs:
         run: mkdocs build --strict -f mkdocs.generated.yml
       - name: "Deploy to Cloudflare Pages"
         if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
-        uses: cloudflare/wrangler-action@v3.1.0
+        uses: cloudflare/wrangler-action@v3.1.1
         with:
           apiToken: ${{ secrets.CF_API_TOKEN }}
           accountId: ${{ secrets.CF_ACCOUNT_ID }}
-          command: pages publish site --project-name=ruff-docs --branch ${GITHUB_HEAD_REF} --commit-hash ${GITHUB_SHA}
+          # `github.head_ref` is only set during pull requests and for manual runs or tags we use `main` to deploy to production
+          command: pages deploy site --project-name=astral-docs --branch ${{ github.head_ref || 'main' }} --commit-hash ${GITHUB_SHA}
.github/workflows/flake8-to-ruff.yaml (vendored, 14 changed lines)

@@ -19,7 +19,7 @@ jobs:
   macos-x86_64:
     runs-on: macos-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_VERSION }}

@@ -42,7 +42,7 @@ jobs:
   macos-universal:
     runs-on: macos-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_VERSION }}

@@ -68,7 +68,7 @@ jobs:
       matrix:
         target: [x64, x86]
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_VERSION }}

@@ -96,7 +96,7 @@ jobs:
       matrix:
         target: [x86_64, i686]
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_VERSION }}

@@ -123,7 +123,7 @@ jobs:
       matrix:
         target: [aarch64, armv7, s390x, ppc64le, ppc64]
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_VERSION }}

@@ -160,7 +160,7 @@ jobs:
         - x86_64-unknown-linux-musl
         - i686-unknown-linux-musl
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_VERSION }}

@@ -196,7 +196,7 @@ jobs:
         - target: armv7-unknown-linux-musleabihf
           arch: armv7
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_VERSION }}
.github/workflows/playground.yaml (vendored, 6 changed lines)

@@ -17,7 +17,7 @@ jobs:
     env:
       CF_API_TOKEN_EXISTS: ${{ secrets.CF_API_TOKEN != '' }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: "Install Rust toolchain"
         run: rustup target add wasm32-unknown-unknown
       - uses: actions/setup-node@v3

@@ -40,8 +40,8 @@ jobs:
         working-directory: playground
       - name: "Deploy to Cloudflare Pages"
         if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
-        uses: cloudflare/wrangler-action@v3.1.0
+        uses: cloudflare/wrangler-action@v3.1.1
         with:
           apiToken: ${{ secrets.CF_API_TOKEN }}
           accountId: ${{ secrets.CF_ACCOUNT_ID }}
-          command: pages publish playground/dist --project-name=ruff --branch ${GITHUB_HEAD_REF} --commit-hash ${GITHUB_SHA}
+          command: pages deploy playground/dist --project-name=ruff-playground --branch ${GITHUB_HEAD_REF} --commit-hash ${GITHUB_SHA}
.github/workflows/pr-comment.yaml (vendored, 58 changed lines)

@@ -2,7 +2,7 @@ name: PR Check Comment

 on:
   workflow_run:
-    workflows: [CI, Benchmark]
+    workflows: [CI]
     types: [completed]
   workflow_dispatch:
     inputs:

@@ -43,42 +43,34 @@ jobs:
           path: pr/ecosystem
           if_no_artifact_found: ignore

-      - uses: dawidd6/action-download-artifact@v2
-        name: "Download Benchmark Result"
-        id: download-benchmark-result
-        if: steps.pr-number.outputs.pr-number
-        with:
-          name: summary
-          workflow: benchmark.yaml
-          pr: ${{ steps.pr-number.outputs.pr-number }}
-          path: pr/benchmark
-          if_no_artifact_found: ignore
-
       - name: Generate Comment
         id: generate-comment
-        if: steps.download-ecosystem-result.outputs.found_artifact == 'true' || steps.download-benchmark-result.outputs.found_artifact == 'true'
+        if: steps.download-ecosystem-result.outputs.found_artifact == 'true'
         run: |
+          echo '## PR Check Results' >> comment.txt
+
+          echo "### Ecosystem" >> comment.txt
+          cat pr/ecosystem/ecosystem-result >> comment.txt
+          echo "" >> comment.txt
+
           echo 'comment<<EOF' >> $GITHUB_OUTPUT
-          echo '## PR Check Results' >> $GITHUB_OUTPUT
-
-          if [[ -f pr/ecosystem/ecosystem-result ]]
-          then
-            echo "### Ecosystem" >> $GITHUB_OUTPUT
-            cat pr/ecosystem/ecosystem-result >> $GITHUB_OUTPUT
-            echo "" >> $GITHUB_OUTPUT
-          fi
-
-          if [[ -f pr/benchmark/summary.md ]]
-          then
-            cat pr/benchmark/summary.md >> $GITHUB_OUTPUT
-          fi
-
+          cat comment.txt >> $GITHUB_OUTPUT
          echo 'EOF' >> $GITHUB_OUTPUT

-      - name: Create or update comment
-        if: steps.generate-comment.outputs.comment
-        uses: thollander/actions-comment-pull-request@v2
+      - name: Find Comment
+        uses: peter-evans/find-comment@v2
+        if: steps.generate-comment.outcome == 'success'
+        id: find-comment
         with:
-          pr_number: ${{ steps.pr-number.outputs.pr-number }}
-          message: ${{ steps.generate-comment.outputs.comment }}
-          comment_tag: PR Check Results
+          issue-number: ${{ steps.pr-number.outputs.pr-number }}
+          comment-author: "github-actions[bot]"
+          body-includes: PR Check Results
+
+      - name: Create or update comment
+        if: steps.find-comment.outcome == 'success'
+        uses: peter-evans/create-or-update-comment@v3
+        with:
+          comment-id: ${{ steps.find-comment.outputs.comment-id }}
+          issue-number: ${{ steps.pr-number.outputs.pr-number }}
+          body-path: comment.txt
+          edit-mode: replace
.github/workflows/release.yaml (vendored, 68 changed lines)

@@ -7,12 +7,15 @@ on:
         description: "The version to tag, without the leading 'v'. If omitted, will initiate a dry run (no uploads)."
         type: string
       sha:
-        description: "Optionally, the full sha of the commit to be released"
+        description: "The full sha of the commit to be released. If omitted, the latest commit on the default branch will be used."
+        default: ""
         type: string
+  pull_request:
+    paths:
+      # When we change pyproject.toml, we want to ensure that the maturin builds still work
+      - pyproject.toml
+      # And when we change this workflow itself...
+      - .github/workflows/release.yaml

 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}

@@ -30,7 +33,9 @@ jobs:
   sdist:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ inputs.sha }}
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_VERSION }}

@@ -56,7 +61,9 @@ jobs:
   macos-x86_64:
     runs-on: macos-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ inputs.sha }}
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_VERSION }}

@@ -94,7 +101,9 @@ jobs:
   macos-universal:
     runs-on: macos-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ inputs.sha }}
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_VERSION }}

@@ -140,7 +149,9 @@ jobs:
         - target: aarch64-pc-windows-msvc
           arch: x64
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ inputs.sha }}
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_VERSION }}

@@ -186,7 +197,9 @@ jobs:
         - x86_64-unknown-linux-gnu
         - i686-unknown-linux-gnu
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ inputs.sha }}
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_VERSION }}

@@ -243,7 +256,9 @@ jobs:
           arch: ppc64

     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ inputs.sha }}
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_VERSION }}

@@ -296,7 +311,9 @@ jobs:
         - x86_64-unknown-linux-musl
        - i686-unknown-linux-musl
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ inputs.sha }}
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_VERSION }}

@@ -350,7 +367,9 @@ jobs:
           arch: armv7

     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ inputs.sha }}
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_VERSION }}

@@ -398,9 +417,22 @@ jobs:
     # If you don't set an input tag, it's a dry run (no uploads).
     if: ${{ inputs.tag }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
+        with:
+          ref: main # We checkout the main branch to check for the commit
+      - name: Check main branch
+        if: ${{ inputs.sha }}
+        run: |
+          # Fetch the main branch since a shallow checkout is used by default
+          git fetch origin main --unshallow
+          if ! git branch --contains ${{ inputs.sha }} | grep -E '(^|\s)main$'; then
+            echo "The specified sha is not on the main branch" >&2
+            exit 1
+          fi
       - name: Check tag consistency
         run: |
+          # Switch to the commit we want to release
+          git checkout ${{ inputs.sha }}
           version=$(grep "version = " pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g')
           if [ "${{ inputs.tag }}" != "${version}" ]; then
             echo "The input tag does not match the version from pyproject.toml:" >&2

@@ -410,18 +442,6 @@ jobs:
         else
           echo "Releasing ${version}"
         fi
-      - name: Check SHA consistency
-        if: ${{ inputs.sha }}
-        run: |
-          git_sha=$(git rev-parse HEAD)
-          if [ "${{ inputs.sha }}" != "${git_sha}" ]; then
-            echo "The specified sha does not match the git checkout" >&2
-            echo "${{ inputs.sha }}" >&2
-            echo "${git_sha}" >&2
-            exit 1
-          else
-            echo "Releasing ${git_sha}"
-          fi

   upload-release:
     name: Upload to PyPI

@@ -464,7 +484,9 @@ jobs:
       # For git tag
       contents: write
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ inputs.sha }}
       - name: git tag
         run: |
           git config user.email "hey@astral.sh"
.gitignore (vendored, 2 changed lines)

@@ -1,5 +1,5 @@
 # Benchmarking cpython (CONTRIBUTING.md)
-crates/ruff/resources/test/cpython
+crates/ruff_linter/resources/test/cpython
 # generate_mkdocs.py
 mkdocs.generated.yml
 # check_ecosystem.py
.pre-commit-config.yaml

@@ -2,8 +2,8 @@ fail_fast: true

 exclude: |
   (?x)^(
-    crates/ruff/resources/.*|
-    crates/ruff/src/rules/.*/snapshots/.*|
+    crates/ruff_linter/resources/.*|
+    crates/ruff_linter/src/rules/.*/snapshots/.*|
     crates/ruff_cli/resources/.*|
     crates/ruff_python_formatter/resources/.*|
     crates/ruff_python_formatter/tests/snapshots/.*|

@@ -23,8 +23,6 @@ repos:
       - id: mdformat
         additional_dependencies:
           - mdformat-mkdocs
-          - mdformat-black
-          - black==23.1.0 # Must be the latest version of Black

   - repo: https://github.com/igorshubovych/markdownlint-cli
     rev: v0.33.0

@@ -52,7 +50,7 @@ repos:
         require_serial: true
         exclude: |
           (?x)^(
-            crates/ruff/resources/.*|
+            crates/ruff_linter/resources/.*|
             crates/ruff_python_formatter/resources/.*
           )$

BREAKING_CHANGES.md

@@ -1,5 +1,20 @@
 # Breaking Changes

+## 0.0.288
+
+### Remove support for emoji identifiers ([#7212](https://github.com/astral-sh/ruff/pull/7212))
+
+Previously, Ruff supported the non-standard compliant emoji identifiers e.g. `📦 = 1`.
+We decided to remove this non-standard language extension, and Ruff now reports syntax errors for emoji identifiers in your code, the same as CPython.
+
+### Improved GitLab fingerprints ([#7203](https://github.com/astral-sh/ruff/pull/7203))
+
+GitLab uses fingerprints to identify new, existing, or fixed violations. Previously, Ruff included the violation's position in the fingerprint. Using the location has the downside that changing any code before the violation causes the fingerprint to change, resulting in GitLab reporting one fixed and one new violation even though it is a pre-existing violation.
+
+Ruff now uses a more stable location-agnostic fingerprint to minimize that existing violations incorrectly get marked as fixed and re-reported as new violations.
+
+Expect GitLab to report each pre-existing violation in your project as fixed and a new violation in your Ruff upgrade PR.
+
 ## 0.0.283 / 0.284

 ### The target Python version now defaults to 3.8 instead of 3.10 ([#6397](https://github.com/astral-sh/ruff/pull/6397))

@@ -284,4 +299,4 @@ default.
 `pyproject.toml` files are now resolved hierarchically, such that for each Python file, we find
 the first `pyproject.toml` file in its path, and use that to determine its lint settings.

-See the [documentation](https://beta.ruff.rs/docs/configuration/#python-file-discovery) for more.
+See the [documentation](https://docs.astral.sh/ruff/configuration/#python-file-discovery) for more.
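The "Improved GitLab fingerprints" entry above is easiest to picture with a small sketch. A location-agnostic fingerprint simply hashes the violation's stable properties and leaves its position out; the field names and hasher below are illustrative assumptions, not Ruff's actual scheme:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Illustrative view of a violation. The line and column are
/// intentionally *not* stored here, so edits elsewhere in the file
/// cannot change the fingerprint.
struct Violation<'a> {
    rule_code: &'a str, // e.g., "E501"
    message: &'a str,
    filename: &'a str,
}

/// Hash only the properties that survive unrelated edits.
fn fingerprint(violation: &Violation) -> u64 {
    let mut hasher = DefaultHasher::new();
    violation.rule_code.hash(&mut hasher);
    violation.message.hash(&mut hasher);
    violation.filename.hash(&mut hasher);
    hasher.finish()
}
```

Two identical violations in one file would collide under such a scheme, so a real implementation additionally has to disambiguate duplicates, for example with an occurrence index.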
CONTRIBUTING.md (102 changed lines)

@@ -112,11 +112,11 @@ Ruff is structured as a monorepo with a [flat crate structure](https://matklad.g
 such that all crates are contained in a flat `crates` directory.

 The vast majority of the code, including all lint rules, lives in the `ruff` crate (located at
-`crates/ruff`). As a contributor, that's the crate that'll be most relevant to you.
+`crates/ruff_linter`). As a contributor, that's the crate that'll be most relevant to you.

 At time of writing, the repository includes the following crates:

-- `crates/ruff`: library crate containing all lint rules and the core logic for running them.
+- `crates/ruff_linter`: library crate containing all lint rules and the core logic for running them.
   If you're working on a rule, this is the crate for you.
 - `crates/ruff_benchmark`: binary crate for running micro-benchmarks.
 - `crates/ruff_cache`: library crate for caching lint results.

@@ -129,6 +129,7 @@ At time of writing, the repository includes the following crates:
   intermediate representation. The backend for `ruff_python_formatter`.
 - `crates/ruff_index`: library crate inspired by `rustc_index`.
 - `crates/ruff_macros`: proc macro crate containing macros used by Ruff.
+- `crates/ruff_notebook`: library crate for parsing and manipulating Jupyter notebooks.
 - `crates/ruff_python_ast`: library crate containing Python-specific AST types and utilities.
 - `crates/ruff_python_codegen`: library crate containing utilities for generating Python source code.
 - `crates/ruff_python_formatter`: library crate implementing the Python formatter. Emits an

@@ -152,7 +153,7 @@ At a high level, the steps involved in adding a new lint rule are as follows:
 1. Determine a name for the new rule as per our [rule naming convention](#rule-naming-convention)
    (e.g., `AssertFalse`, as in, "allow `assert False`").

-1. Create a file for your rule (e.g., `crates/ruff/src/rules/flake8_bugbear/rules/assert_false.rs`).
+1. Create a file for your rule (e.g., `crates/ruff_linter/src/rules/flake8_bugbear/rules/assert_false.rs`).

 1. In that file, define a violation struct (e.g., `pub struct AssertFalse`). You can grep for
    `#[violation]` to see examples.

@@ -161,21 +162,21 @@ At a high level, the steps involved in adding a new lint rule are as follows:
    (e.g., `pub(crate) fn assert_false`) based on whatever inputs are required for the rule (e.g.,
    an `ast::StmtAssert` node).

-1. Define the logic for invoking the diagnostic in `crates/ruff/src/checkers/ast/analyze` (for
-   AST-based rules), `crates/ruff/src/checkers/tokens.rs` (for token-based rules),
-   `crates/ruff/src/checkers/physical_lines.rs` (for text-based rules),
-   `crates/ruff/src/checkers/filesystem.rs` (for filesystem-based rules), etc. For AST-based rules,
+1. Define the logic for invoking the diagnostic in `crates/ruff_linter/src/checkers/ast/analyze` (for
+   AST-based rules), `crates/ruff_linter/src/checkers/tokens.rs` (for token-based rules),
+   `crates/ruff_linter/src/checkers/physical_lines.rs` (for text-based rules),
+   `crates/ruff_linter/src/checkers/filesystem.rs` (for filesystem-based rules), etc. For AST-based rules,
    you'll likely want to modify `analyze/statement.rs` (if your rule is based on analyzing
    statements, like imports) or `analyze/expression.rs` (if your rule is based on analyzing
    expressions, like function calls).

-1. Map the violation struct to a rule code in `crates/ruff/src/codes.rs` (e.g., `B011`).
+1. Map the violation struct to a rule code in `crates/ruff_linter/src/codes.rs` (e.g., `B011`).

 1. Add proper [testing](#rule-testing-fixtures-and-snapshots) for your rule.

 1. Update the generated files (documentation and generated code).

-To trigger the violation, you'll likely want to augment the logic in `crates/ruff/src/checkers/ast.rs`
+To trigger the violation, you'll likely want to augment the logic in `crates/ruff_linter/src/checkers/ast.rs`
 to call your new function at the appropriate time and with the appropriate inputs. The `Checker`
 defined therein is a Python AST visitor, which iterates over the AST, building up a semantic model,
 and calling out to lint rule analyzer functions as it goes.
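The rule-authoring steps in the hunk above compress into a small sketch. This is illustrative only: the real `#[violation]` machinery, `Violation` trait, and AST types live in the crates named above, and their exact signatures differ.

```rust
/// Step 3: the violation type (in Ruff, annotated with `#[violation]`
/// and implementing a trait that supplies the user-facing message).
struct AssertFalse;

impl AssertFalse {
    fn message(&self) -> &'static str {
        "Do not `assert False`; raise `AssertionError` instead"
    }
}

/// Simplified stand-in for an `ast::StmtAssert` node.
struct StmtAssert {
    test_is_false_literal: bool,
}

/// Step 4: the analyzer function, invoked by the checker as it visits
/// `assert` statements; it reports a violation for `assert False`.
fn assert_false(stmt: &StmtAssert) -> Option<AssertFalse> {
    stmt.test_is_false_literal.then_some(AssertFalse)
}
```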
@@ -220,7 +221,7 @@ Ruff's output for each fixture, which you can then commit alongside your changes

 Once you've completed the code for the rule itself, you can define tests with the following steps:

-1. Add a Python file to `crates/ruff/resources/test/fixtures/[linter]` that contains the code you
+1. Add a Python file to `crates/ruff_linter/resources/test/fixtures/[linter]` that contains the code you
    want to test. The file name should match the rule name (e.g., `E402.py`), and it should include
    examples of both violations and non-violations.

@@ -229,16 +230,16 @@ Once you've completed the code for the rule itself, you can define tests with th
    For example, if you're adding a new rule named `E402`, you would run:

    ```shell
-   cargo run -p ruff_cli -- check crates/ruff/resources/test/fixtures/pycodestyle/E402.py --no-cache --select E402
+   cargo run -p ruff_cli -- check crates/ruff_linter/resources/test/fixtures/pycodestyle/E402.py --no-cache --select E402
    ```

    **Note:** Only a subset of rules are enabled by default. When testing a new rule, ensure that
    you activate it by adding `--select ${rule_code}` to the command.

-1. Add the test to the relevant `crates/ruff/src/rules/[linter]/mod.rs` file. If you're contributing
+1. Add the test to the relevant `crates/ruff_linter/src/rules/[linter]/mod.rs` file. If you're contributing
    a rule to a pre-existing set, you should be able to find a similar example to pattern-match
    against. If you're adding a new linter, you'll need to create a new `mod.rs` file (see,
-   e.g., `crates/ruff/src/rules/flake8_bugbear/mod.rs`)
+   e.g., `crates/ruff_linter/src/rules/flake8_bugbear/mod.rs`)

 1. Run `cargo test`. Your test will fail, but you'll be prompted to follow-up
    with `cargo insta review`. Run `cargo insta review`, review and accept the generated snapshot,

@@ -250,25 +251,24 @@ Once you've completed the code for the rule itself, you can define tests with th

 Ruff's user-facing settings live in a few different places.

-First, the command-line options are defined via the `Cli` struct in `crates/ruff/src/cli.rs`.
+First, the command-line options are defined via the `Args` struct in `crates/ruff_cli/src/args.rs`.

-Second, the `pyproject.toml` options are defined in `crates/ruff/src/settings/options.rs` (via the
-`Options` struct), `crates/ruff/src/settings/configuration.rs` (via the `Configuration` struct), and
-`crates/ruff/src/settings/mod.rs` (via the `Settings` struct). These represent, respectively: the
-schema used to parse the `pyproject.toml` file; an internal, intermediate representation; and the
-final, internal representation used to power Ruff.
+Second, the `pyproject.toml` options are defined in `crates/ruff_workspace/src/options.rs` (via the
+`Options` struct), `crates/ruff_workspace/src/configuration.rs` (via the `Configuration` struct),
+and `crates/ruff_workspace/src/settings.rs` (via the `Settings` struct), which then includes
+the `LinterSettings` struct as a field.
+
+These represent, respectively: the schema used to parse the `pyproject.toml` file; an internal,
+intermediate representation; and the final, internal representation used to power Ruff.

 To add a new configuration option, you'll likely want to modify these latter few files (along with
-`cli.rs`, if appropriate). If you want to pattern-match against an existing example, grep for
+`arg.rs`, if appropriate). If you want to pattern-match against an existing example, grep for
 `dummy_variable_rgx`, which defines a regular expression to match against acceptable unused
 variables (e.g., `_`).

 Note that plugin-specific configuration options are defined in their own modules (e.g.,
-`crates/ruff/src/flake8_unused_arguments/settings.rs`).
-
-You may also want to add the new configuration option to the `flake8-to-ruff` tool, which is
-responsible for converting `flake8` configuration files to Ruff's TOML format. This logic
-lives in `crates/ruff/src/flake8_to_ruff/converter.rs`.
+`Settings` in `crates/ruff_linter/src/flake8_unused_arguments/settings.rs` coupled with
+`Flake8UnusedArgumentsOptions` in `crates/ruff_workspace/src/options.rs`).

 Finally, regenerate the documentation and generated code with `cargo dev generate-all`.
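The three-layer settings pipeline described in the hunk above (schema, intermediate representation, final settings) can be sketched roughly as follows. The field names are invented for illustration; the real structs carry many more options:

```rust
/// Layer 1: the schema deserialized from `pyproject.toml` (the real
/// `Options` derives `serde::Deserialize`); every field is optional
/// because users may omit any setting.
#[derive(Default)]
struct Options {
    line_length: Option<usize>,
}

/// Layer 2: the intermediate representation, after merging the file's
/// options with CLI overrides and inherited configuration.
struct Configuration {
    line_length: Option<usize>,
}

/// Layer 3: the final, fully-resolved settings that power the run.
struct Settings {
    line_length: usize,
}

fn resolve(options: Options) -> Settings {
    let configuration = Configuration {
        line_length: options.line_length,
    };
    Settings {
        // Defaults are applied only at the final layer.
        line_length: configuration.line_length.unwrap_or(88),
    }
}
```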
@@ -361,46 +361,46 @@ First, clone [CPython](https://github.com/python/cpython). It's a large and dive
 which makes it a good target for benchmarking.

 ```shell
-git clone --branch 3.10 https://github.com/python/cpython.git crates/ruff/resources/test/cpython
+git clone --branch 3.10 https://github.com/python/cpython.git crates/ruff_linter/resources/test/cpython
 ```

 To benchmark the release build:

 ```shell
 cargo build --release && hyperfine --warmup 10 \
-  "./target/release/ruff ./crates/ruff/resources/test/cpython/ --no-cache -e" \
-  "./target/release/ruff ./crates/ruff/resources/test/cpython/ -e"
+  "./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache -e" \
+  "./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ -e"

-Benchmark 1: ./target/release/ruff ./crates/ruff/resources/test/cpython/ --no-cache
+Benchmark 1: ./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache
   Time (mean ± σ):   293.8 ms ± 3.2 ms   [User: 2384.6 ms, System: 90.3 ms]
   Range (min … max): 289.9 ms … 301.6 ms   10 runs

-Benchmark 2: ./target/release/ruff ./crates/ruff/resources/test/cpython/
+Benchmark 2: ./target/release/ruff ./crates/ruff_linter/resources/test/cpython/
   Time (mean ± σ):   48.0 ms ± 3.1 ms   [User: 65.2 ms, System: 124.7 ms]
   Range (min … max): 45.0 ms … 66.7 ms   62 runs

 Summary
-  './target/release/ruff ./crates/ruff/resources/test/cpython/' ran
-    6.12 ± 0.41 times faster than './target/release/ruff ./crates/ruff/resources/test/cpython/ --no-cache'
+  './target/release/ruff ./crates/ruff_linter/resources/test/cpython/' ran
+    6.12 ± 0.41 times faster than './target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache'
 ```

 To benchmark against the ecosystem's existing tools:

 ```shell
 hyperfine --ignore-failure --warmup 5 \
-  "./target/release/ruff ./crates/ruff/resources/test/cpython/ --no-cache" \
-  "pyflakes crates/ruff/resources/test/cpython" \
+  "./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache" \
+  "pyflakes crates/ruff_linter/resources/test/cpython" \
   "autoflake --recursive --expand-star-imports --remove-all-unused-imports --remove-unused-variables --remove-duplicate-keys resources/test/cpython" \
-  "pycodestyle crates/ruff/resources/test/cpython" \
-  "flake8 crates/ruff/resources/test/cpython"
+  "pycodestyle crates/ruff_linter/resources/test/cpython" \
+  "flake8 crates/ruff_linter/resources/test/cpython"

-Benchmark 1: ./target/release/ruff ./crates/ruff/resources/test/cpython/ --no-cache
+Benchmark 1: ./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache
   Time (mean ± σ):   294.3 ms ± 3.3 ms   [User: 2467.5 ms, System: 89.6 ms]
   Range (min … max): 291.1 ms … 302.8 ms   10 runs

   Warning: Ignoring non-zero exit code.

-Benchmark 2: pyflakes crates/ruff/resources/test/cpython
+Benchmark 2: pyflakes crates/ruff_linter/resources/test/cpython
   Time (mean ± σ):   15.786 s ± 0.143 s   [User: 15.560 s, System: 0.214 s]
   Range (min … max): 15.640 s … 16.157 s   10 runs

@@ -410,31 +410,31 @@ Benchmark 3: autoflake --recursive --expand-star-imports --remove-all-unused-imp
   Time (mean ± σ):   6.175 s ± 0.169 s   [User: 54.102 s, System: 1.057 s]
   Range (min … max): 5.950 s … 6.391 s   10 runs

-Benchmark 4: pycodestyle crates/ruff/resources/test/cpython
+Benchmark 4: pycodestyle crates/ruff_linter/resources/test/cpython
   Time (mean ± σ):   46.921 s ± 0.508 s   [User: 46.699 s, System: 0.202 s]
   Range (min … max): 46.171 s … 47.863 s   10 runs

   Warning: Ignoring non-zero exit code.

-Benchmark 5: flake8 crates/ruff/resources/test/cpython
+Benchmark 5: flake8 crates/ruff_linter/resources/test/cpython
   Time (mean ± σ):   12.260 s ± 0.321 s   [User: 102.934 s, System: 1.230 s]
   Range (min … max): 11.848 s … 12.933 s   10 runs

   Warning: Ignoring non-zero exit code.

 Summary
-  './target/release/ruff ./crates/ruff/resources/test/cpython/ --no-cache' ran
+  './target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache' ran
    20.98 ± 0.62 times faster than 'autoflake --recursive --expand-star-imports --remove-all-unused-imports --remove-unused-variables --remove-duplicate-keys resources/test/cpython'
-   41.66 ± 1.18 times faster than 'flake8 crates/ruff/resources/test/cpython'
-   53.64 ± 0.77 times faster than 'pyflakes crates/ruff/resources/test/cpython'
-  159.43 ± 2.48 times faster than 'pycodestyle crates/ruff/resources/test/cpython'
+   41.66 ± 1.18 times faster than 'flake8 crates/ruff_linter/resources/test/cpython'
+   53.64 ± 0.77 times faster than 'pyflakes crates/ruff_linter/resources/test/cpython'
+  159.43 ± 2.48 times faster than 'pycodestyle crates/ruff_linter/resources/test/cpython'
 ```

 To benchmark a subset of rules, e.g. `LineTooLong` and `DocLineTooLong`:

 ```shell
 cargo build --release && hyperfine --warmup 10 \
-  "./target/release/ruff ./crates/ruff/resources/test/cpython/ --no-cache -e --select W505,E501"
+  "./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache -e --select W505,E501"
 ```

 You can run `poetry install` from `./scripts/benchmarks` to create a working environment for the

@@ -467,10 +467,10 @@ rm Lib/test/bad_coding.py \
   Lib/test/test_typing.py
 ```

-Then, from `crates/ruff/resources/test/cpython`, run: `time pylint -j 0 -E $(git ls-files '*.py')`. This
+Then, from `crates/ruff_linter/resources/test/cpython`, run: `time pylint -j 0 -E $(git ls-files '*.py')`. This
 will execute Pylint with maximum parallelism and only report errors.

-To benchmark Pyupgrade, run the following from `crates/ruff/resources/test/cpython`:
+To benchmark Pyupgrade, run the following from `crates/ruff_linter/resources/test/cpython`:

 ```shell
 hyperfine --ignore-failure --warmup 5 --prepare "git reset --hard HEAD" \

@@ -718,8 +718,8 @@ Module {
 - `cargo dev generate-cli-help`, `cargo dev generate-docs` and `cargo dev generate-json-schema`:
   Update just `docs/configuration.md`, `docs/rules` and `ruff.schema.json` respectively.
 - `cargo dev generate-options`: Generate a markdown-compatible table of all `pyproject.toml`
-  options. Used for <https://beta.ruff.rs/docs/settings/>
-- `cargo dev generate-rules-table`: Generate a markdown-compatible table of all rules. Used for <https://beta.ruff.rs/docs/rules/>
+  options. Used for <https://docs.astral.sh/ruff/settings/>.
+- `cargo dev generate-rules-table`: Generate a markdown-compatible table of all rules. Used for <https://docs.astral.sh/ruff/rules/>.
 - `cargo dev round-trip <python file or jupyter notebook>`: Read a Python file or Jupyter Notebook,
   parse it, serialize the parsed representation and write it back. Used to check how good our
   representation is so that fixes don't rewrite irrelevant parts of a file.

@@ -777,7 +777,7 @@ To understand Ruff's import categorization system, we first need to define two c
 - "Package root": The top-most directory defining the Python package that includes a given Python
   file. To find the package root for a given Python file, traverse up its parent directories until
   you reach a parent directory that doesn't contain an `__init__.py` file (and isn't marked as
-  a [namespace package](https://beta.ruff.rs/docs/settings/#namespace-packages)); take the directory
+  a [namespace package](https://docs.astral.sh/ruff/settings/#namespace-packages)); take the directory
   just before that, i.e., the first directory in the package.

   For example, given:

@@ -866,7 +866,7 @@ There are three ways in which an import can be categorized as "first-party":
   package (e.g., `from foo import bar` or `import foo.bar`), they'll be classified as first-party
   automatically. This check is as simple as comparing the first segment of the current file's
   module path to the first segment of the import.
-1. **Source roots**: Ruff supports a `[src](https://beta.ruff.rs/docs/settings/#src)` setting, which
+1. **Source roots**: Ruff supports a `[src](https://docs.astral.sh/ruff/settings/#src)` setting, which
   sets the directories to scan when identifying first-party imports. The algorithm is
   straightforward: given an import, like `import foo`, iterate over the directories enumerated in
   the `src` setting and, for each directory, check for the existence of a subdirectory `foo` or a
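The package-root walk quoted in the import-categorization hunks above can be sketched like this. It is illustrative only; the real implementation also honors the `namespace-packages` setting and caches its results:

```rust
use std::path::{Path, PathBuf};

/// Walk upward from a Python file; the package root is the top-most
/// ancestor directory that still contains an `__init__.py`.
fn package_root(python_file: &Path) -> Option<PathBuf> {
    let mut current = python_file.parent()?;
    let mut root = None;
    while current.join("__init__.py").is_file() {
        root = Some(current.to_path_buf());
        match current.parent() {
            Some(parent) => current = parent,
            None => break,
        }
    }
    root
}
```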
Cargo.lock (generated, 1103 changed lines)

File diff suppressed because it is too large.
Cargo.toml (43 changed lines)

@@ -5,8 +5,8 @@ resolver = "2"
 [workspace.package]
 edition = "2021"
 rust-version = "1.71"
-homepage = "https://beta.ruff.rs/docs"
-documentation = "https://beta.ruff.rs/docs"
+homepage = "https://docs.astral.sh/ruff"
+documentation = "https://docs.astral.sh/ruff"
 repository = "https://github.com/astral-sh/ruff"
 authors = ["Charlie Marsh <charlie.r.marsh@gmail.com>"]
 license = "MIT"

@@ -14,46 +14,49 @@ license = "MIT"
 [workspace.dependencies]
 anyhow = { version = "1.0.69" }
 bitflags = { version = "2.3.1" }
-chrono = { version = "0.4.23", default-features = false, features = ["clock"] }
-clap = { version = "4.1.8", features = ["derive"] }
+chrono = { version = "0.4.31", default-features = false, features = ["clock"] }
+clap = { version = "4.4.4", features = ["derive"] }
 colored = { version = "2.0.0" }
 filetime = { version = "0.2.20" }
 glob = { version = "0.3.1" }
 globset = { version = "0.4.10" }
 ignore = { version = "0.4.20" }
 insta = { version = "1.31.0", feature = ["filters", "glob"] }
-is-macro = { version = "0.2.2" }
-itertools = { version = "0.10.5" }
+is-macro = { version = "0.3.0" }
+itertools = { version = "0.11.0" }
 log = { version = "0.4.17" }
-memchr = "2.5.0"
+memchr = "2.6.3"
 num-bigint = { version = "0.4.3" }
 num-traits = { version = "0.2.15" }
 once_cell = { version = "1.17.1" }
-path-absolutize = { version = "3.0.14" }
-proc-macro2 = { version = "1.0.51" }
+path-absolutize = { version = "3.1.1" }
+proc-macro2 = { version = "1.0.67" }
 quote = { version = "1.0.23" }
-regex = { version = "1.7.1" }
+regex = { version = "1.9.5" }
 rustc-hash = { version = "1.1.0" }
-schemars = { version = "0.8.12" }
+schemars = { version = "0.8.15" }
 serde = { version = "1.0.152", features = ["derive"] }
-serde_json = { version = "1.0.93" }
+serde_json = { version = "1.0.107" }
 shellexpand = { version = "3.0.0" }
 similar = { version = "2.2.1", features = ["inline"] }
 smallvec = { version = "1.10.0" }
-strum = { version = "0.24.1", features = ["strum_macros"] }
-strum_macros = { version = "0.24.3" }
-syn = { version = "2.0.15" }
-test-case = { version = "3.0.0" }
-thiserror = { version = "1.0.43" }
-toml = { version = "0.7.2" }
+static_assertions = "1.1.0"
+strum = { version = "0.25.0", features = ["strum_macros"] }
+strum_macros = { version = "0.25.2" }
+syn = { version = "2.0.37" }
+test-case = { version = "3.2.1" }
+thiserror = { version = "1.0.48" }
+toml = { version = "0.7.8" }
 tracing = "0.1.37"
 tracing-indicatif = "0.3.4"
 tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
-unicode-width = "0.1.10"
+unicode-ident = "1.0.12"
+unicode-width = "0.1.11"
 uuid = { version = "1.4.1", features = ["v4", "fast-rng", "macro-diagnostics", "js"] }
 wsl = { version = "0.1.0" }

 # v1.0.1
-libcst = { git = "https://github.com/Instagram/LibCST.git", rev = "3cacca1a1029f05707e50703b49fe3dd860aa839", default-features = false }
+libcst = { version = "0.1.0", default-features = false }

 [profile.release]
 lto = "fat"
LICENSE (47 changed lines)

@@ -1194,7 +1194,27 @@ are:

 - flake8-self, licensed as follows:
   """
-    Freely Distributable
+    MIT License
+
+    Copyright (c) 2023 Korijn van Golen
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to deal
+    in the Software without restriction, including without limitation the rights
+    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+    copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in all
+    copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+    SOFTWARE.
   """

 - flake8-django, licensed under the GPL license.

@@ -1224,6 +1244,31 @@ are:
     SOFTWARE.
   """

+- flake8-logging, licensed as follows:
+  """
+    MIT License
+
+    Copyright (c) 2023 Adam Johnson
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to deal
+    in the Software without restriction, including without limitation the rights
+    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+    copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in all
+    copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+    SOFTWARE.
+  """
+
 - Pyright, licensed as follows:
   """
     MIT License
28 README.md
@@ -8,7 +8,7 @@
[![image](https://img.shields.io/pypi/pyversions/ruff.svg)](https://pypi.python.org/pypi/ruff)
[![Actions status](https://github.com/astral-sh/ruff/workflows/CI/badge.svg)](https://github.com/astral-sh/ruff/actions)

[**Discord**](https://discord.gg/c9MhzV8aU5) | [**Docs**](https://beta.ruff.rs/docs/) | [**Playground**](https://play.ruff.rs/)
[**Discord**](https://discord.gg/c9MhzV8aU5) | [**Docs**](https://docs.astral.sh/ruff/) | [**Playground**](https://play.ruff.rs/)

An extremely fast Python linter, written in Rust.

@@ -30,13 +30,13 @@ An extremely fast Python linter, written in Rust.
- 🤝 Python 3.11 compatibility
- 📦 Built-in caching, to avoid re-analyzing unchanged files
- 🔧 Autofix support, for automatic error correction (e.g., automatically remove unused imports)
- 📏 Over [600 built-in rules](https://beta.ruff.rs/docs/rules/)
- ⚖️ [Near-parity](https://beta.ruff.rs/docs/faq/#how-does-ruff-compare-to-flake8) with the
- 📏 Over [600 built-in rules](https://docs.astral.sh/ruff/rules/)
- ⚖️ [Near-parity](https://docs.astral.sh/ruff/faq/#how-does-ruff-compare-to-flake8) with the
  built-in Flake8 rule set
- 🔌 Native re-implementations of dozens of Flake8 plugins, like flake8-bugbear
- ⌨️ First-party [editor integrations](https://beta.ruff.rs/docs/editor-integrations/) for
- ⌨️ First-party [editor integrations](https://docs.astral.sh/ruff/editor-integrations/) for
  [VS Code](https://github.com/astral-sh/ruff-vscode) and [more](https://github.com/astral-sh/ruff-lsp)
- 🌎 Monorepo-friendly, with [hierarchical and cascading configuration](https://beta.ruff.rs/docs/configuration/#pyprojecttoml-discovery)
- 🌎 Monorepo-friendly, with [hierarchical and cascading configuration](https://docs.astral.sh/ruff/configuration/#pyprojecttoml-discovery)

Ruff aims to be orders of magnitude faster than alternative tools while integrating more
functionality behind a single, common interface.

@@ -98,7 +98,7 @@ developer of [Zulip](https://github.com/zulip/zulip):

## Table of Contents

For more, see the [documentation](https://beta.ruff.rs/docs/).
For more, see the [documentation](https://docs.astral.sh/ruff/).

1. [Getting Started](#getting-started)
1. [Configuration](#configuration)
@@ -111,7 +111,7 @@ For more, see the [documentation](https://beta.ruff.rs/docs/).

## Getting Started

For more, see the [documentation](https://beta.ruff.rs/docs/).
For more, see the [documentation](https://docs.astral.sh/ruff/).

### Installation

@@ -122,7 +122,7 @@ pip install ruff
```

You can also install Ruff via [Homebrew](https://formulae.brew.sh/formula/ruff), [Conda](https://anaconda.org/conda-forge/ruff),
and with [a variety of other package managers](https://beta.ruff.rs/docs/installation/).
and with [a variety of other package managers](https://docs.astral.sh/ruff/installation/).

### Usage

@@ -140,7 +140,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com) hook:
```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
  # Ruff version.
  rev: v0.0.285
  rev: v0.0.290
  hooks:
    - id: ruff
```
@@ -165,7 +165,7 @@ jobs:
### Configuration

Ruff can be configured through a `pyproject.toml`, `ruff.toml`, or `.ruff.toml` file (see:
[_Configuration_](https://beta.ruff.rs/docs/configuration/), or [_Settings_](https://beta.ruff.rs/docs/settings/)
[_Configuration_](https://docs.astral.sh/ruff/configuration/), or [_Settings_](https://docs.astral.sh/ruff/settings/)
for a complete list of all configuration options).

If left unspecified, the default configuration is equivalent to:
@@ -238,7 +238,7 @@ isort, pyupgrade, and others. Regardless of the rule's origin, Ruff re-implements
Rust as a first-party feature.

By default, Ruff enables Flake8's `E` and `F` rules. Ruff supports all rules from the `F` category,
and a [subset](https://beta.ruff.rs/docs/rules/#error-e) of the `E` category, omitting those
and a [subset](https://docs.astral.sh/ruff/rules/#error-e) of the `E` category, omitting those
stylistic rules made obsolete by the use of an autoformatter, like
[Black](https://github.com/psf/black).

@@ -274,6 +274,7 @@ quality tools, including:
- [flake8-gettext](https://pypi.org/project/flake8-gettext/)
- [flake8-implicit-str-concat](https://pypi.org/project/flake8-implicit-str-concat/)
- [flake8-import-conventions](https://github.com/joaopalmeiro/flake8-import-conventions)
- [flake8-logging](https://pypi.org/project/flake8-logging/)
- [flake8-logging-format](https://pypi.org/project/flake8-logging-format/)
- [flake8-no-pep420](https://pypi.org/project/flake8-no-pep420)
- [flake8-pie](https://pypi.org/project/flake8-pie/)
@@ -303,12 +304,12 @@ quality tools, including:
- [tryceratops](https://pypi.org/project/tryceratops/)
- [yesqa](https://pypi.org/project/yesqa/)

For a complete enumeration of the supported rules, see [_Rules_](https://beta.ruff.rs/docs/rules/).
For a complete enumeration of the supported rules, see [_Rules_](https://docs.astral.sh/ruff/rules/).

## Contributing

Contributions are welcome and highly appreciated. To get started, check out the
[**contributing guidelines**](https://beta.ruff.rs/docs/contributing/).
[**contributing guidelines**](https://docs.astral.sh/ruff/contributing/).

You can also join us on [**Discord**](https://discord.gg/c9MhzV8aU5).

@@ -398,6 +399,7 @@ Ruff is used by a number of major open-source projects and companies, including:
- [Pydantic](https://github.com/pydantic/pydantic)
- [Pylint](https://github.com/PyCQA/pylint)
- [Reflex](https://github.com/reflex-dev/reflex)
- [Rippling](https://rippling.com)
- [Robyn](https://github.com/sansyrox/robyn)
- Scale AI ([Launch SDK](https://github.com/scaleapi/launch-python-client))
- Snowflake ([SnowCLI](https://github.com/Snowflake-Labs/snowcli))

@@ -1,6 +1,6 @@
[package]
name = "flake8-to-ruff"
version = "0.0.285"
version = "0.0.290"
description = """
Convert Flake8 configuration files to Ruff configuration files.
"""
@@ -13,13 +13,17 @@ repository = { workspace = true }
license = { workspace = true }

[dependencies]
ruff = { path = "../ruff", default-features = false }
ruff_linter = { path = "../ruff_linter", default-features = false }
ruff_workspace = { path = "../ruff_workspace" }

anyhow = { workspace = true }
clap = { workspace = true }
colored = { workspace = true }
configparser = { version = "3.0.2" }
itertools = { workspace = true }
log = { workspace = true }
once_cell = { workspace = true }
pep440_rs = { version = "0.3.1", features = ["serde"] }
regex = { workspace = true }
rustc-hash = { workspace = true }
serde = { workspace = true }
@@ -27,3 +31,6 @@ serde_json = { workspace = true }
strum = { workspace = true }
strum_macros = { workspace = true }
toml = { workspace = true }

[dev-dependencies]
pretty_assertions = "1.3.0"

@@ -86,7 +86,7 @@ flake8-to-ruff path/to/.flake8 --plugin flake8-builtins --plugin flake8-quotes
   configuration options that don't exist in Flake8.)
1. Ruff will omit any rule codes that are unimplemented or unsupported by Ruff, including rule
   codes from unsupported plugins. (See the
   [documentation](https://beta.ruff.rs/docs/faq/#how-does-ruff-compare-to-flake8) for the complete
   [documentation](https://docs.astral.sh/ruff/faq/#how-does-ruff-compare-to-flake8) for the complete
   list of supported plugins.)

## License

13 crates/flake8_to_ruff/src/black.rs Normal file
@@ -0,0 +1,13 @@
//! Extract Black configuration settings from a pyproject.toml.

use ruff_linter::line_width::LineLength;
use ruff_linter::settings::types::PythonVersion;
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
pub(crate) struct Black {
    #[serde(alias = "line-length", alias = "line_length")]
    pub(crate) line_length: Option<LineLength>,
    #[serde(alias = "target-version", alias = "target_version")]
    pub(crate) target_version: Option<Vec<PythonVersion>>,
}
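For context, a minimal, self-contained sketch of how a struct shaped like `Black` above deserializes from the `[tool.black]` table of a `pyproject.toml`. The field types here are simplified stand-ins (plain `u16` and `String` instead of `LineLength` and `PythonVersion`), so this is illustrative rather than the crate's actual wiring:

```rust
use serde::Deserialize;

// Simplified stand-in for the `Black` struct above: the serde aliases let
// both the `line-length` and `line_length` spellings deserialize.
#[derive(Debug, Deserialize, Default)]
struct Black {
    #[serde(alias = "line-length", alias = "line_length")]
    line_length: Option<u16>,
    #[serde(alias = "target-version", alias = "target_version")]
    target_version: Option<Vec<String>>,
}

#[derive(Debug, Deserialize)]
struct Tool {
    black: Option<Black>,
}

#[derive(Debug, Deserialize)]
struct Pyproject {
    tool: Option<Tool>,
}

fn main() -> Result<(), toml::de::Error> {
    // A hypothetical `[tool.black]` table.
    let contents = r#"
[tool.black]
line-length = 100
target-version = ["py38", "py39"]
"#;
    let pyproject: Pyproject = toml::from_str(contents)?;
    let black = pyproject.tool.and_then(|tool| tool.black).unwrap_or_default();
    assert_eq!(black.line_length, Some(100));
    Ok(())
}
```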
@@ -1,25 +1,25 @@
use std::collections::{HashMap, HashSet};
use std::str::FromStr;

use anyhow::Result;
use itertools::Itertools;

use crate::line_width::LineLength;
use crate::registry::Linter;
use crate::rule_selector::RuleSelector;
use crate::rules::flake8_pytest_style::types::{
use ruff_linter::line_width::LineLength;
use ruff_linter::registry::Linter;
use ruff_linter::rule_selector::RuleSelector;
use ruff_linter::rules::flake8_pytest_style::types::{
    ParametrizeNameType, ParametrizeValuesRowType, ParametrizeValuesType,
};
use crate::rules::flake8_quotes::settings::Quote;
use crate::rules::flake8_tidy_imports::settings::Strictness;
use crate::rules::pydocstyle::settings::Convention;
use crate::rules::{
    flake8_annotations, flake8_bugbear, flake8_builtins, flake8_errmsg, flake8_pytest_style,
    flake8_quotes, flake8_tidy_imports, mccabe, pep8_naming, pydocstyle,
use ruff_linter::rules::flake8_quotes::settings::Quote;
use ruff_linter::rules::flake8_tidy_imports::settings::Strictness;
use ruff_linter::rules::pydocstyle::settings::Convention;
use ruff_linter::settings::types::PythonVersion;
use ruff_linter::warn_user;
use ruff_workspace::options::{
    Flake8AnnotationsOptions, Flake8BugbearOptions, Flake8BuiltinsOptions, Flake8ErrMsgOptions,
    Flake8PytestStyleOptions, Flake8QuotesOptions, Flake8TidyImportsOptions, McCabeOptions,
    Options, Pep8NamingOptions, PydocstyleOptions,
};
use crate::settings::options::Options;
use crate::settings::pyproject::Pyproject;
use crate::settings::types::PythonVersion;
use crate::warn_user;
use ruff_workspace::pyproject::Pyproject;

use super::external_config::ExternalConfig;
use super::plugin::Plugin;
@@ -30,11 +30,11 @@ const DEFAULT_SELECTORS: &[RuleSelector] = &[
    RuleSelector::Linter(Linter::Pycodestyle),
];

pub fn convert(
pub(crate) fn convert(
    config: &HashMap<String, HashMap<String, Option<String>>>,
    external_config: &ExternalConfig,
    plugins: Option<Vec<Plugin>>,
) -> Result<Pyproject> {
) -> Pyproject {
    // Extract the Flake8 section.
    let flake8 = config
        .get("flake8")
@@ -103,16 +103,16 @@ pub fn convert(

    // Parse each supported option.
    let mut options = Options::default();
    let mut flake8_annotations = flake8_annotations::settings::Options::default();
    let mut flake8_bugbear = flake8_bugbear::settings::Options::default();
    let mut flake8_builtins = flake8_builtins::settings::Options::default();
    let mut flake8_errmsg = flake8_errmsg::settings::Options::default();
    let mut flake8_pytest_style = flake8_pytest_style::settings::Options::default();
    let mut flake8_quotes = flake8_quotes::settings::Options::default();
    let mut flake8_tidy_imports = flake8_tidy_imports::options::Options::default();
    let mut mccabe = mccabe::settings::Options::default();
    let mut pep8_naming = pep8_naming::settings::Options::default();
    let mut pydocstyle = pydocstyle::settings::Options::default();
    let mut flake8_annotations = Flake8AnnotationsOptions::default();
    let mut flake8_bugbear = Flake8BugbearOptions::default();
    let mut flake8_builtins = Flake8BuiltinsOptions::default();
    let mut flake8_errmsg = Flake8ErrMsgOptions::default();
    let mut flake8_pytest_style = Flake8PytestStyleOptions::default();
    let mut flake8_quotes = Flake8QuotesOptions::default();
    let mut flake8_tidy_imports = Flake8TidyImportsOptions::default();
    let mut mccabe = McCabeOptions::default();
    let mut pep8_naming = Pep8NamingOptions::default();
    let mut pydocstyle = PydocstyleOptions::default();
    for (key, value) in flake8 {
        if let Some(value) = value {
            match key.as_str() {
@@ -120,10 +120,8 @@ pub fn convert(
                "builtins" => {
                    options.builtins = Some(parser::parse_strings(value.as_ref()));
                }
                "max-line-length" | "max_line_length" => match value.parse::<usize>() {
                    Ok(line_length) => {
                        options.line_length = Some(LineLength::from(line_length));
                    }
                "max-line-length" | "max_line_length" => match LineLength::from_str(value) {
                    Ok(line_length) => options.line_length = Some(line_length),
                    Err(e) => {
                        warn_user!("Unable to parse '{key}' property: {e}");
                    }
@@ -372,41 +370,41 @@ pub fn convert(
            .sorted_by_key(RuleSelector::prefix_and_code)
            .collect(),
    );
    if flake8_annotations != flake8_annotations::settings::Options::default() {
    if flake8_annotations != Flake8AnnotationsOptions::default() {
        options.flake8_annotations = Some(flake8_annotations);
    }
    if flake8_bugbear != flake8_bugbear::settings::Options::default() {
    if flake8_bugbear != Flake8BugbearOptions::default() {
        options.flake8_bugbear = Some(flake8_bugbear);
    }
    if flake8_builtins != flake8_builtins::settings::Options::default() {
    if flake8_builtins != Flake8BuiltinsOptions::default() {
        options.flake8_builtins = Some(flake8_builtins);
    }
    if flake8_errmsg != flake8_errmsg::settings::Options::default() {
    if flake8_errmsg != Flake8ErrMsgOptions::default() {
        options.flake8_errmsg = Some(flake8_errmsg);
    }
    if flake8_pytest_style != flake8_pytest_style::settings::Options::default() {
    if flake8_pytest_style != Flake8PytestStyleOptions::default() {
        options.flake8_pytest_style = Some(flake8_pytest_style);
    }
    if flake8_quotes != flake8_quotes::settings::Options::default() {
    if flake8_quotes != Flake8QuotesOptions::default() {
        options.flake8_quotes = Some(flake8_quotes);
    }
    if flake8_tidy_imports != flake8_tidy_imports::options::Options::default() {
    if flake8_tidy_imports != Flake8TidyImportsOptions::default() {
        options.flake8_tidy_imports = Some(flake8_tidy_imports);
    }
    if mccabe != mccabe::settings::Options::default() {
    if mccabe != McCabeOptions::default() {
        options.mccabe = Some(mccabe);
    }
    if pep8_naming != pep8_naming::settings::Options::default() {
    if pep8_naming != Pep8NamingOptions::default() {
        options.pep8_naming = Some(pep8_naming);
    }
    if pydocstyle != pydocstyle::settings::Options::default() {
    if pydocstyle != PydocstyleOptions::default() {
        options.pydocstyle = Some(pydocstyle);
    }

    // Extract any settings from the existing `pyproject.toml`.
    if let Some(black) = &external_config.black {
        if let Some(line_length) = &black.line_length {
            options.line_length = Some(LineLength::from(*line_length));
            options.line_length = Some(*line_length);
        }

        if let Some(target_version) = &black.target_version {
@@ -439,7 +437,7 @@ pub fn convert(
    }

    // Create the pyproject.toml.
    Ok(Pyproject::new(options))
    Pyproject::new(options)
}

/// Resolve the set of enabled `RuleSelector` values for the given
@@ -458,19 +456,20 @@ mod tests {
    use anyhow::Result;
    use itertools::Itertools;
    use pep440_rs::VersionSpecifiers;
    use pretty_assertions::assert_eq;

    use crate::flake8_to_ruff::converter::DEFAULT_SELECTORS;
    use crate::flake8_to_ruff::pep621::Project;
    use crate::flake8_to_ruff::ExternalConfig;
    use crate::line_width::LineLength;
    use crate::registry::Linter;
    use crate::rule_selector::RuleSelector;
    use crate::rules::pydocstyle::settings::Convention;
    use crate::rules::{flake8_quotes, pydocstyle};
    use crate::settings::options::Options;
    use crate::settings::pyproject::Pyproject;
    use crate::settings::types::PythonVersion;
    use pretty_assertions::assert_eq;
    use ruff_linter::line_width::LineLength;
    use ruff_linter::registry::Linter;
    use ruff_linter::rule_selector::RuleSelector;
    use ruff_linter::rules::flake8_quotes;
    use ruff_linter::rules::pydocstyle::settings::Convention;
    use ruff_linter::settings::types::PythonVersion;
    use ruff_workspace::options::{Flake8QuotesOptions, Options, PydocstyleOptions};
    use ruff_workspace::pyproject::Pyproject;

    use crate::converter::DEFAULT_SELECTORS;
    use crate::pep621::Project;
    use crate::ExternalConfig;

    use super::super::plugin::Plugin;
    use super::convert;
@@ -491,20 +490,18 @@ mod tests {
    }

    #[test]
    fn it_converts_empty() -> Result<()> {
    fn it_converts_empty() {
        let actual = convert(
            &HashMap::from([("flake8".to_string(), HashMap::default())]),
            &ExternalConfig::default(),
            None,
        )?;
        );
        let expected = Pyproject::new(default_options([]));
        assert_eq!(actual, expected);

        Ok(())
    }

    #[test]
    fn it_converts_dashes() -> Result<()> {
    fn it_converts_dashes() {
        let actual = convert(
            &HashMap::from([(
                "flake8".to_string(),
@@ -512,18 +509,16 @@ mod tests {
            )]),
            &ExternalConfig::default(),
            Some(vec![]),
        )?;
        );
        let expected = Pyproject::new(Options {
            line_length: Some(LineLength::from(100)),
            line_length: Some(LineLength::try_from(100).unwrap()),
            ..default_options([])
        });
        assert_eq!(actual, expected);

        Ok(())
    }

    #[test]
    fn it_converts_underscores() -> Result<()> {
    fn it_converts_underscores() {
        let actual = convert(
            &HashMap::from([(
                "flake8".to_string(),
@@ -531,18 +526,16 @@ mod tests {
            )]),
            &ExternalConfig::default(),
            Some(vec![]),
        )?;
        );
        let expected = Pyproject::new(Options {
            line_length: Some(LineLength::from(100)),
            line_length: Some(LineLength::try_from(100).unwrap()),
            ..default_options([])
        });
        assert_eq!(actual, expected);

        Ok(())
    }

    #[test]
    fn it_ignores_parse_errors() -> Result<()> {
    fn it_ignores_parse_errors() {
        let actual = convert(
            &HashMap::from([(
                "flake8".to_string(),
@@ -550,15 +543,13 @@ mod tests {
            )]),
            &ExternalConfig::default(),
            Some(vec![]),
        )?;
        );
        let expected = Pyproject::new(default_options([]));
        assert_eq!(actual, expected);

        Ok(())
    }

    #[test]
    fn it_converts_plugin_options() -> Result<()> {
    fn it_converts_plugin_options() {
        let actual = convert(
            &HashMap::from([(
                "flake8".to_string(),
@@ -566,9 +557,9 @@ mod tests {
            )]),
            &ExternalConfig::default(),
            Some(vec![]),
        )?;
        );
        let expected = Pyproject::new(Options {
            flake8_quotes: Some(flake8_quotes::settings::Options {
            flake8_quotes: Some(Flake8QuotesOptions {
                inline_quotes: Some(flake8_quotes::settings::Quote::Single),
                multiline_quotes: None,
                docstring_quotes: None,
@@ -577,12 +568,10 @@ mod tests {
            ..default_options([])
        });
        assert_eq!(actual, expected);

        Ok(())
    }

    #[test]
    fn it_converts_docstring_conventions() -> Result<()> {
    fn it_converts_docstring_conventions() {
        let actual = convert(
            &HashMap::from([(
                "flake8".to_string(),
@@ -593,9 +582,9 @@ mod tests {
            )]),
            &ExternalConfig::default(),
            Some(vec![Plugin::Flake8Docstrings]),
        )?;
        );
        let expected = Pyproject::new(Options {
            pydocstyle: Some(pydocstyle::settings::Options {
            pydocstyle: Some(PydocstyleOptions {
                convention: Some(Convention::Numpy),
                ignore_decorators: None,
                property_decorators: None,
@@ -603,12 +592,10 @@ mod tests {
            ..default_options([Linter::Pydocstyle.into()])
        });
        assert_eq!(actual, expected);

        Ok(())
    }

    #[test]
    fn it_infers_plugins_if_omitted() -> Result<()> {
    fn it_infers_plugins_if_omitted() {
        let actual = convert(
            &HashMap::from([(
                "flake8".to_string(),
@@ -616,9 +603,9 @@ mod tests {
            )]),
            &ExternalConfig::default(),
            None,
        )?;
        );
        let expected = Pyproject::new(Options {
            flake8_quotes: Some(flake8_quotes::settings::Options {
            flake8_quotes: Some(Flake8QuotesOptions {
                inline_quotes: Some(flake8_quotes::settings::Quote::Single),
                multiline_quotes: None,
                docstring_quotes: None,
@@ -627,8 +614,6 @@ mod tests {
            ..default_options([Linter::Flake8Quotes.into()])
        });
        assert_eq!(actual, expected);

        Ok(())
    }

    #[test]
@@ -642,7 +627,7 @@ mod tests {
                ..ExternalConfig::default()
            },
            Some(vec![]),
        )?;
        );
        let expected = Pyproject::new(Options {
            target_version: Some(PythonVersion::Py38),
            ..default_options([])
10 crates/flake8_to_ruff/src/external_config.rs Normal file
@@ -0,0 +1,10 @@
use super::black::Black;
use super::isort::Isort;
use super::pep621::Project;

#[derive(Default)]
pub(crate) struct ExternalConfig<'a> {
    pub(crate) black: Option<&'a Black>,
    pub(crate) isort: Option<&'a Isort>,
    pub(crate) project: Option<&'a Project>,
}
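The lifetime parameter means `ExternalConfig` only borrows the parsed sections rather than owning them, which keeps the aggregate cheap to build and avoids cloning sections that are only read once during conversion. A rough sketch of the intended usage, with the section types reduced to unit structs (this is not the crate's actual call site):

```rust
// Unit-struct stand-ins for the parsed `[tool.black]`, `[tool.isort]`, and
// `[project]` sections.
#[derive(Default)]
struct Black;
#[derive(Default)]
struct Isort;
#[derive(Default)]
struct Project;

#[derive(Default)]
struct ExternalConfig<'a> {
    black: Option<&'a Black>,
    isort: Option<&'a Isort>,
    project: Option<&'a Project>,
}

fn main() {
    // The owned sections must outlive the borrowing aggregate.
    let black = Black;
    let project = Project;
    let external = ExternalConfig {
        black: Some(&black),
        isort: None,
        project: Some(&project),
    };
    assert!(external.black.is_some());
    assert!(external.isort.is_none());
    assert!(external.project.is_some());
}
```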
@@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize};

/// The [isort configuration](https://pycqa.github.io/isort/docs/configuration/config_files.html).
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct Isort {
pub(crate) struct Isort {
    #[serde(alias = "src-paths", alias = "src_paths")]
    pub src_paths: Option<Vec<String>>,
    pub(crate) src_paths: Option<Vec<String>>,
}
@@ -1,13 +1,25 @@
//! Utility to generate Ruff's `pyproject.toml` section from a Flake8 INI file.

mod black;
mod converter;
mod external_config;
mod isort;
mod parser;
mod pep621;
mod plugin;
mod pyproject;

use std::path::PathBuf;

use anyhow::Result;
use clap::Parser;
use configparser::ini::Ini;

use ruff::flake8_to_ruff::{self, ExternalConfig};
use ruff::logging::{set_up_logging, LogLevel};
use crate::converter::convert;
use crate::external_config::ExternalConfig;
use crate::plugin::Plugin;
use crate::pyproject::parse;
use ruff_linter::logging::{set_up_logging, LogLevel};

#[derive(Parser)]
#[command(
@@ -25,7 +37,7 @@ struct Args {
    pyproject: Option<PathBuf>,
    /// List of plugins to enable.
    #[arg(long, value_delimiter = ',')]
    plugin: Option<Vec<flake8_to_ruff::Plugin>>,
    plugin: Option<Vec<Plugin>>,
}

fn main() -> Result<()> {
@@ -39,7 +51,7 @@ fn main() -> Result<()> {
    let config = ini.load(args.file).map_err(|msg| anyhow::anyhow!(msg))?;

    // Read the pyproject.toml file.
    let pyproject = args.pyproject.map(flake8_to_ruff::parse).transpose()?;
    let pyproject = args.pyproject.map(parse).transpose()?;
    let external_config = pyproject
        .as_ref()
        .and_then(|pyproject| pyproject.tool.as_ref())
@@ -57,7 +69,7 @@ fn main() -> Result<()> {
    };

    // Create Ruff's pyproject.toml section.
    let pyproject = flake8_to_ruff::convert(&config, &external_config, args.plugin)?;
    let pyproject = convert(&config, &external_config, args.plugin);

    #[allow(clippy::print_stdout)]
    {

@@ -5,9 +5,8 @@ use once_cell::sync::Lazy;
use regex::Regex;
use rustc_hash::FxHashMap;

use crate::rule_selector::RuleSelector;
use crate::settings::types::PatternPrefixPair;
use crate::warn_user;
use ruff_linter::settings::types::PatternPrefixPair;
use ruff_linter::{warn_user, RuleSelector};

static COMMA_SEPARATED_LIST_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"[,\s]").unwrap());

@@ -195,10 +194,10 @@ pub(crate) fn collect_per_file_ignores(
mod tests {
    use anyhow::Result;

    use crate::codes;
    use crate::registry::Linter;
    use crate::rule_selector::RuleSelector;
    use crate::settings::types::PatternPrefixPair;
    use ruff_linter::codes;
    use ruff_linter::registry::Linter;
    use ruff_linter::settings::types::PatternPrefixPair;
    use ruff_linter::RuleSelector;

    use super::{parse_files_to_codes_mapping, parse_prefix_codes, parse_strings};

@@ -4,7 +4,7 @@ use pep440_rs::VersionSpecifiers;
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct Project {
pub(crate) struct Project {
    #[serde(alias = "requires-python", alias = "requires_python")]
    pub requires_python: Option<VersionSpecifiers>,
    pub(crate) requires_python: Option<VersionSpecifiers>,
}
@@ -3,9 +3,9 @@ use std::fmt;
use std::str::FromStr;

use anyhow::anyhow;

use crate::registry::Linter;
use crate::rule_selector::RuleSelector;
use ruff_linter::registry::Linter;
use ruff_linter::settings::types::PreviewMode;
use ruff_linter::RuleSelector;

#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
pub enum Plugin {
@@ -332,7 +332,7 @@ pub(crate) fn infer_plugins_from_codes(selectors: &HashSet<RuleSelector>) -> Vec
    .filter(|plugin| {
        for selector in selectors {
            if selector
                .into_iter()
                .rules(PreviewMode::Disabled)
                .any(|rule| Linter::from(plugin).rules().any(|r| r == rule))
            {
                return true;
26 crates/flake8_to_ruff/src/pyproject.rs Normal file
@@ -0,0 +1,26 @@
use std::path::Path;

use anyhow::Result;
use serde::{Deserialize, Serialize};

use super::black::Black;
use super::isort::Isort;
use super::pep621::Project;

#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
pub(crate) struct Tools {
    pub(crate) black: Option<Black>,
    pub(crate) isort: Option<Isort>,
}

#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
pub(crate) struct Pyproject {
    pub(crate) tool: Option<Tools>,
    pub(crate) project: Option<Project>,
}

pub(crate) fn parse<P: AsRef<Path>>(path: P) -> Result<Pyproject> {
    let contents = std::fs::read_to_string(path)?;
    let pyproject = toml::from_str::<Pyproject>(&contents)?;
    Ok(pyproject)
}
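A hedged usage sketch for the `parse` helper above, in the spirit of how the `main.rs` changes earlier in this diff call it; the path and the caller function are illustrative, not part of the change:

```rust
use anyhow::Result;

// Hypothetical caller: read a `pyproject.toml` and inspect the `[tool.*]`
// sections that flake8-to-ruff cares about.
fn load_external_sections() -> Result<()> {
    let pyproject = parse("pyproject.toml")?; // `parse` as defined above
    if let Some(tools) = &pyproject.tool {
        if tools.black.is_some() {
            println!("found a [tool.black] section");
        }
        if tools.isort.is_some() {
            println!("found a [tool.isort] section");
        }
    }
    Ok(())
}
```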
@@ -1,94 +0,0 @@
[package]
name = "ruff"
version = "0.0.285"
publish = false
authors = { workspace = true }
edition = { workspace = true }
rust-version = { workspace = true }
homepage = { workspace = true }
documentation = { workspace = true }
repository = { workspace = true }
license = { workspace = true }
readme = "README.md"

[lib]
name = "ruff"

[dependencies]
ruff_cache = { path = "../ruff_cache" }
ruff_diagnostics = { path = "../ruff_diagnostics", features = ["serde"] }
ruff_index = { path = "../ruff_index" }
ruff_macros = { path = "../ruff_macros" }
ruff_python_ast = { path = "../ruff_python_ast", features = ["serde"] }
ruff_python_codegen = { path = "../ruff_python_codegen" }
ruff_python_index = { path = "../ruff_python_index" }
ruff_python_literal = { path = "../ruff_python_literal" }
ruff_python_semantic = { path = "../ruff_python_semantic" }
ruff_python_stdlib = { path = "../ruff_python_stdlib" }
ruff_python_trivia = { path = "../ruff_python_trivia" }
ruff_python_parser = { path = "../ruff_python_parser" }
ruff_source_file = { path = "../ruff_source_file", features = ["serde"] }
ruff_text_size = { path = "../ruff_text_size" }

annotate-snippets = { version = "0.9.1", features = ["color"] }
anyhow = { workspace = true }
bitflags = { workspace = true }
chrono = { workspace = true }
clap = { workspace = true, features = ["derive", "string"], optional = true }
colored = { workspace = true }
dirs = { version = "5.0.0" }
fern = { version = "0.6.1" }
glob = { workspace = true }
globset = { workspace = true }
ignore = { workspace = true }
imperative = { version = "1.0.4" }
is-macro = { workspace = true }
itertools = { workspace = true }
libcst = { workspace = true }
log = { workspace = true }
memchr = { workspace = true }
natord = { version = "1.0.9" }
num-bigint = { workspace = true }
num-traits = { workspace = true }
once_cell = { workspace = true }
path-absolutize = { workspace = true, features = [
    "once_cell_cache",
    "use_unix_paths_on_wasm",
] }
pathdiff = { version = "0.2.1" }
pep440_rs = { version = "0.3.1", features = ["serde"] }
pyproject-toml = { version = "0.6.0" }
quick-junit = { version = "0.3.2" }
regex = { workspace = true }
result-like = { version = "0.4.6" }
rustc-hash = { workspace = true }
schemars = { workspace = true, optional = true }
semver = { version = "1.0.16" }
serde = { workspace = true }
serde_json = { workspace = true }
serde_with = { version = "3.0.0" }
similar = { workspace = true }
shellexpand = { workspace = true }
smallvec = { workspace = true }
strum = { workspace = true }
strum_macros = { workspace = true }
thiserror = { version = "1.0.43" }
toml = { workspace = true }
typed-arena = { version = "2.0.2" }
unicode-width = { workspace = true }
unicode_names2 = { version = "0.6.0", git = "https://github.com/youknowone/unicode_names2.git", rev = "4ce16aa85cbcdd9cc830410f1a72ef9a235f2fde" }
wsl = { version = "0.1.0" }

[dev-dependencies]
insta = { workspace = true }
pretty_assertions = "1.3.0"
test-case = { workspace = true }
# Disable colored output in tests
colored = { workspace = true, features = ["no-color"] }
tempfile = "3.6.0"

[features]
default = []
schemars = ["dep:schemars"]
# Enables the UnreachableCode rule
unreachable-code = []
@@ -1,21 +0,0 @@
#import os
# from foo import junk
#a = 3
a = 4
#foo(1, 2, 3)

def foo(x, y, z):
    content = 1 # print('hello')
    print(x, y, z)

    # This is a real comment.
    # # This is a (nested) comment.
    #return True
    return False

#import os # noqa: ERA001


class A():
    pass
    # b = c
@@ -1,45 +0,0 @@
"""
Should emit:
B009 - Line 19, 20, 21, 22, 23, 24
B010 - Line 40, 41, 42, 43, 44, 45
"""

# Valid getattr usage
getattr(foo, bar)
getattr(foo, "bar", None)
getattr(foo, "bar{foo}".format(foo="a"), None)
getattr(foo, "bar{foo}".format(foo="a"))
getattr(foo, bar, None)
getattr(foo, "123abc")
getattr(foo, r"123\abc")
getattr(foo, "except")
getattr(foo, "__123abc")

# Invalid usage
getattr(foo, "bar")
getattr(foo, "_123abc")
getattr(foo, "__123abc__")
getattr(foo, "abc123")
getattr(foo, r"abc123")
_ = lambda x: getattr(x, "bar")
if getattr(x, "bar"):
    pass

# Valid setattr usage
setattr(foo, bar, None)
setattr(foo, "bar{foo}".format(foo="a"), None)
setattr(foo, "123abc", None)
setattr(foo, "__123abc", None)
setattr(foo, r"123\abc", None)
setattr(foo, "except", None)
_ = lambda x: setattr(x, "bar", 1)
if setattr(x, "bar", 1):
    pass

# Invalid usage
setattr(foo, "bar", None)
setattr(foo, "_123abc", None)
setattr(foo, "__123abc__", None)
setattr(foo, "abc123", None)
setattr(foo, r"abc123", None)
setattr(foo.bar, r"baz", None)
@@ -1,8 +0,0 @@
try:
    pass
except (ValueError,):
    pass
except AttributeError:
    pass
except (ImportError, TypeError):
    pass
@@ -1,18 +0,0 @@
dict((x, x) for x in range(3))
dict(
    (x, x) for x in range(3)
)
dict(((x, x) for x in range(3)), z=3)
y = f'{dict((x, x) for x in range(3))}'
print(f'Hello {dict((x, x) for x in range(3))} World')
print(f"Hello {dict((x, x) for x in 'abc')} World")
print(f'Hello {dict((x, x) for x in "abc")} World')
print(f'Hello {dict((x,x) for x in "abc")} World')

f'{dict((x, x) for x in range(3)) | dict((x, x) for x in range(3))}'
f'{ dict((x, x) for x in range(3)) | dict((x, x) for x in range(3)) }'

def f(x):
    return x

print(f'Hello {dict((x,f(x)) for x in "abc")} World')
@@ -1,13 +0,0 @@
dict([(i, i) for i in range(3)])
dict([(i, i) for i in range(3)], z=4)

def f(x):
    return x

f'{dict([(s,s) for s in "ab"])}'
f"{dict([(s,s) for s in 'ab'])}"
f"{dict([(s, s) for s in 'ab'])}"
f"{dict([(s,f(s)) for s in 'ab'])}"

f'{dict([(s,s) for s in "ab"]) | dict([(s,s) for s in "ab"])}'
f'{ dict([(s,s) for s in "ab"]) | dict([(s,s) for s in "ab"]) }'
@@ -1,15 +0,0 @@
x = [2, 3, 1]
list(x)
list(sorted(x))
reversed(sorted(x))
reversed(sorted(x, key=lambda e: e))
reversed(sorted(x, reverse=True))
reversed(sorted(x, key=lambda e: e, reverse=True))
reversed(sorted(x, reverse=True, key=lambda e: e))
reversed(sorted(x, reverse=False))

def reversed(*args, **kwargs):
    return None


reversed(sorted(x, reverse=True))
@@ -1,19 +0,0 @@
x = [1, 2, 3]
y = [("a", 1), ("b", 2), ("c", 3)]
z = [(1,), (2,), (3,)]
d = {"a": 1, "b": 2, "c": 3}

[i for i in x]
{i for i in x}
{k: v for k, v in y}
{k: v for k, v in d.items()}

[i for i, in z]
[i for i, j in y]
[i for i in x if i > 1]
[i for i in x for j in x]

{v: k for k, v in y}
{k.foo: k for k in y}
{k["foo"]: k for k in y}
{k: v if v else None for k, v in y}
@@ -1,18 +0,0 @@
def bar():
    ... # OK


def foo():
    """foo""" # OK


def buzz():
    print("buzz") # OK, not in stub file


def foo2():
    123 # OK, not in a stub file


def bizz():
    x = 123 # OK, not in a stub file
@@ -1,12 +0,0 @@
def bar(): ... # OK
def foo():
    """foo""" # OK, strings are handled by another rule

def buzz():
    print("buzz") # ERROR PYI010

def foo2():
    123 # ERROR PYI010

def bizz():
    x = 123 # ERROR PYI010
@@ -1,19 +0,0 @@
def bar(): # OK
    ...


def oof(): # OK, docstrings are handled by another rule
    """oof"""
    print("foo")


def foo(): # Ok not in Stub file
    """foo"""
    print("foo")
    print("foo")


def buzz(): # Ok not in Stub file
    print("fizz")
    print("buzz")
    print("test")
@@ -1,20 +0,0 @@
def bar():
    ... # OK


def oof(): # OK, docstrings are handled by another rule
    """oof"""
    print("foo")



def foo(): # ERROR PYI048
    """foo"""
    print("foo")
    print("foo")


def buzz(): # ERROR PYI048
    print("fizz")
    print("buzz")
    print("test")
@@ -1,54 +0,0 @@
import pytest


def test_ok():
    assert something
    assert something or something_else
    assert something or something_else and something_third
    assert not (something and something_else)
    assert something, "something message"
    assert something or something_else and something_third, "another message"


def test_error():
    assert something and something_else
    assert something and something_else and something_third
    assert something and not something_else
    assert something and (something_else or something_third)
    assert not something and something_else
    assert not (something or something_else)
    assert not (something or something_else or something_third)
    assert something and something_else == """error
message
"""
    assert (
        something
        and something_else
        == """error
message
"""
    )

    # recursive case
    assert not (a or not (b or c))
    assert not (a or not (b and c))

    # detected, but no autofix for messages
    assert something and something_else, "error message"
    assert not (something or something_else and something_third), "with message"
    # detected, but no autofix for mixed conditions (e.g. `a or b and c`)
    assert not (something or something_else and something_third)


assert something # OK
assert something and something_else # Error
assert something and something_else and something_third # Error


def test_multiline():
    assert something and something_else; x = 1

    x = 1; assert something and something_else

    x = 1; \
        assert something and something_else
@@ -1,44 +0,0 @@
key in obj.keys() # SIM118

key not in obj.keys() # SIM118

foo["bar"] in obj.keys() # SIM118

foo["bar"] not in obj.keys() # SIM118

foo['bar'] in obj.keys() # SIM118

foo['bar'] not in obj.keys() # SIM118

foo() in obj.keys() # SIM118

foo() not in obj.keys() # SIM118

for key in obj.keys(): # SIM118
    pass

for key in list(obj.keys()):
    if some_property(key):
        del obj[key]

[k for k in obj.keys()] # SIM118

{k for k in obj.keys()} # SIM118

{k: k for k in obj.keys()} # SIM118

(k for k in obj.keys()) # SIM118

key in (obj or {}).keys() # SIM118

(key) in (obj or {}).keys() # SIM118

from typing import KeysView


class Foo:
    def keys(self) -> KeysView[object]:
        ...

    def __contains__(self, key: object) -> bool:
        return key in self.keys() # OK
@@ -1,15 +0,0 @@
a = True if b else False # SIM210

a = True if b != c else False # SIM210

a = True if b + c else False # SIM210

a = False if b else True # OK


def f():
    # OK
    def bool():
        return False

    a = True if b else False
@@ -1,15 +0,0 @@
import numpy as np

np.round_(np.random.rand(5, 5), 2)
np.product(np.random.rand(5, 5))
np.cumproduct(np.random.rand(5, 5))
np.sometrue(np.random.rand(5, 5))
np.alltrue(np.random.rand(5, 5))

from numpy import round_, product, cumproduct, sometrue, alltrue

round_(np.random.rand(5, 5), 2)
product(np.random.rand(5, 5))
cumproduct(np.random.rand(5, 5))
sometrue(np.random.rand(5, 5))
alltrue(np.random.rand(5, 5))
@@ -1,40 +0,0 @@
import collections
from collections import namedtuple
from typing import TypeVar
from typing import NewType
from typing import NamedTuple, TypedDict

GLOBAL: str = "foo"


def assign():
    global GLOBAL
    GLOBAL = "bar"
    lower = 0
    Camel = 0
    CONSTANT = 0
    _ = 0

MyObj1 = collections.namedtuple("MyObj1", ["a", "b"])
MyObj2 = namedtuple("MyObj12", ["a", "b"])

T = TypeVar("T")
UserId = NewType("UserId", int)

Employee = NamedTuple('Employee', [('name', str), ('id', int)])

Point2D = TypedDict('Point2D', {'in': int, 'x-y': int})


def aug_assign(rank, world_size):
    global CURRENT_PORT

    CURRENT_PORT += 1
    if CURRENT_PORT > MAX_PORT:
        CURRENT_PORT = START_PORT


def loop_assign():
    global CURRENT_PORT
    for CURRENT_PORT in range(5):
        pass
@@ -1,26 +0,0 @@
def f():
    items = [1, 2, 3, 4]
    result = []
    for i in items:
        result.append(i) # PERF402


def f():
    items = [1, 2, 3, 4]
    result = []
    for i in items:
        result.insert(0, i) # PERF402


def f():
    items = [1, 2, 3, 4]
    result = []
    for i in items:
        result.append(i * i) # OK


def f():
    items = [1, 2, 3, 4]
    result = {}
    for i in items:
        result[i].append(i * i) # OK
@@ -1,46 +0,0 @@
class Apples:
    def _init_(self): # [bad-dunder-name]
        pass

    def __hello__(self): # [bad-dunder-name]
        print("hello")

    def __init_(self): # [bad-dunder-name]
        # author likely unintentionally misspelled the correct init dunder.
        pass

    def _init_(self): # [bad-dunder-name]
        # author likely unintentionally misspelled the correct init dunder.
        pass

    def ___neg__(self): # [bad-dunder-name]
        # author likely accidentally added an additional `_`
        pass

    def __inv__(self): # [bad-dunder-name]
        # author likely meant to call the invert dunder method
        pass

    def hello(self):
        print("hello")

    def __init__(self):
        pass

    def init(self):
        # valid name even though someone could accidentally mean __init__
        pass

    def _protected_method(self):
        print("Protected")

    def __private_method(self):
        print("Private")

    @property
    def __doc__(self):
        return "Docstring"


def __foo_bar__(): # this is not checked by the [bad-dunder-name] rule
    ...
@@ -1,32 +0,0 @@
# pylint: disable=missing-docstring,consider-using-f-string, pointless-statement

## Old style formatting

"%s %z" % ("hello", "world") # [bad-format-character]

"%s" "%z" % ("hello", "world") # [bad-format-character]

"""%s %z""" % ("hello", "world") # [bad-format-character]

"""%s""" """%z""" % ("hello", "world") # [bad-format-character]

## New style formatting

"{:s} {:y}".format("hello", "world") # [bad-format-character]

"{:*^30s}".format("centered") # OK
"{:{s}}".format("hello", s="s") # OK (nested replacement value not checked)

"{:{s:y}}".format("hello", s="s") # [bad-format-character] (nested replacement format spec checked)

## f-strings

H, W = "hello", "world"
f"{H} {W}"
f"{H:s} {W:z}" # [bad-format-character]

f"{1:z}" # [bad-format-character]

## False negatives

print(("%" "z") % 1)
@@ -1,15 +0,0 @@
type("")
type(b"")
type(0)
type(0.0)
type(0j)

# OK
type(arg)(" ")

# OK
y = x.dtype.type(0.0)

# OK
type = lambda *args, **kwargs: None
type("")
@@ -1,61 +0,0 @@
import typing
from typing import Optional
from typing import Union


def f(x: Optional[str]) -> None:
    ...


def f(x: typing.Optional[str]) -> None:
    ...


def f(x: Union[str, int, Union[float, bytes]]) -> None:
    ...


def f(x: typing.Union[str, int]) -> None:
    ...


def f(x: typing.Union[(str, int)]) -> None:
    ...


def f(x: typing.Union[(str, int), float]) -> None:
    ...


def f(x: typing.Union[(int,)]) -> None:
    ...


def f(x: typing.Union[()]) -> None:
    ...


def f(x: "Union[str, int, Union[float, bytes]]") -> None:
    ...


def f(x: "typing.Union[str, int]") -> None:
    ...


def f(x: Union["str", int]) -> None:
    ...


def f(x: Union[("str", "int"), float]) -> None:
    ...


def f() -> None:
    x: Optional[str]
    x = Optional[str]

    x = Union[str, int]
    x = Union["str", "int"]
    x: Union[str, int]
    x: Union["str", "int"]
@@ -1,12 +0,0 @@
import subprocess
import subprocess as somename
from subprocess import run
from subprocess import run as anothername

subprocess.run(["foo"], universal_newlines=True, check=True)
somename.run(["foo"], universal_newlines=True)

run(["foo"], universal_newlines=True, check=False)
anothername(["foo"], universal_newlines=True)

subprocess.run(["foo"], check=True)
@@ -1,42 +0,0 @@
from subprocess import run
import subprocess

output = run(["foo"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)

output = subprocess.run(["foo"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)

output = subprocess.run(stdout=subprocess.PIPE, args=["foo"], stderr=subprocess.PIPE)

output = subprocess.run(
    ["foo"], stdout=subprocess.PIPE, check=True, stderr=subprocess.PIPE
)

output = subprocess.run(
    ["foo"], stderr=subprocess.PIPE, check=True, stdout=subprocess.PIPE
)

output = subprocess.run(
    ["foo"],
    stdout=subprocess.PIPE,
    check=True,
    stderr=subprocess.PIPE,
    text=True,
    encoding="utf-8",
    close_fds=True,
)

if output:
    output = subprocess.run(
        ["foo"],
        stdout=subprocess.PIPE,
        check=True,
        stderr=subprocess.PIPE,
        text=True,
        encoding="utf-8",
    )


# Examples that should NOT trigger the rule
from foo import PIPE
subprocess.run(["foo"], stdout=PIPE, stderr=PIPE)
run(["foo"], stdout=None, stderr=PIPE)
@@ -1,14 +0,0 @@
x = [1, 2, 3]
y = [4, 5, 6]

# RUF017
sum([x, y], start=[])
sum([x, y], [])
sum([[1, 2, 3], [4, 5, 6]], start=[])
sum([[1, 2, 3], [4, 5, 6]], [])
sum([[1, 2, 3], [4, 5, 6]],
    [])

# OK
sum([x, y])
sum([[1, 2, 3], [4, 5, 6]])
@@ -1,96 +0,0 @@
# project

An example multi-package Python project used to test setting resolution and other complex
behaviors.

## Expected behavior

Running from the repo root should pick up and enforce the appropriate settings for each package:

```console
∴ cargo run -p ruff_cli -- check crates/ruff/resources/test/project/
crates/ruff/resources/test/project/examples/.dotfiles/script.py:1:1: I001 [*] Import block is un-sorted or un-formatted
crates/ruff/resources/test/project/examples/.dotfiles/script.py:1:8: F401 [*] `numpy` imported but unused
crates/ruff/resources/test/project/examples/.dotfiles/script.py:2:17: F401 [*] `app.app_file` imported but unused
crates/ruff/resources/test/project/examples/docs/docs/file.py:1:1: I001 [*] Import block is un-sorted or un-formatted
crates/ruff/resources/test/project/examples/docs/docs/file.py:8:5: F841 [*] Local variable `x` is assigned to but never used
crates/ruff/resources/test/project/project/file.py:1:8: F401 [*] `os` imported but unused
crates/ruff/resources/test/project/project/import_file.py:1:1: I001 [*] Import block is un-sorted or un-formatted
Found 7 errors.
[*] 7 potentially fixable with the --fix option.
```

Running from the project directory itself should exhibit the same behavior:

```console
∴ (cd crates/ruff/resources/test/project/ && cargo run -p ruff_cli -- check .)
examples/.dotfiles/script.py:1:1: I001 [*] Import block is un-sorted or un-formatted
examples/.dotfiles/script.py:1:8: F401 [*] `numpy` imported but unused
examples/.dotfiles/script.py:2:17: F401 [*] `app.app_file` imported but unused
examples/docs/docs/file.py:1:1: I001 [*] Import block is un-sorted or un-formatted
examples/docs/docs/file.py:8:5: F841 [*] Local variable `x` is assigned to but never used
project/file.py:1:8: F401 [*] `os` imported but unused
project/import_file.py:1:1: I001 [*] Import block is un-sorted or un-formatted
Found 7 errors.
[*] 7 potentially fixable with the --fix option.
```

Running from the sub-package directory should exhibit the same behavior, but omit the top-level
files:

```console
∴ (cd crates/ruff/resources/test/project/examples/docs && cargo run -p ruff_cli -- check .)
docs/file.py:1:1: I001 [*] Import block is un-sorted or un-formatted
docs/file.py:8:5: F841 [*] Local variable `x` is assigned to but never used
Found 2 errors.
[*] 2 potentially fixable with the --fix option.
```

`--config` should force Ruff to use the specified `pyproject.toml` for all files, and resolve
file paths from the current working directory:

```console
∴ (cargo run -p ruff_cli -- check --config=crates/ruff/resources/test/project/pyproject.toml crates/ruff/resources/test/project/)
crates/ruff/resources/test/project/examples/.dotfiles/script.py:1:8: F401 [*] `numpy` imported but unused
crates/ruff/resources/test/project/examples/.dotfiles/script.py:2:17: F401 [*] `app.app_file` imported but unused
crates/ruff/resources/test/project/examples/docs/docs/concepts/file.py:1:8: F401 [*] `os` imported but unused
crates/ruff/resources/test/project/examples/docs/docs/file.py:1:1: I001 [*] Import block is un-sorted or un-formatted
crates/ruff/resources/test/project/examples/docs/docs/file.py:1:8: F401 [*] `os` imported but unused
crates/ruff/resources/test/project/examples/docs/docs/file.py:3:8: F401 [*] `numpy` imported but unused
crates/ruff/resources/test/project/examples/docs/docs/file.py:4:27: F401 [*] `docs.concepts.file` imported but unused
crates/ruff/resources/test/project/examples/excluded/script.py:1:8: F401 [*] `os` imported but unused
crates/ruff/resources/test/project/project/file.py:1:8: F401 [*] `os` imported but unused
Found 9 errors.
[*] 9 potentially fixable with the --fix option.
```

Running from a parent directory should "ignore" the `exclude` (hence, `concepts/file.py` gets
included in the output):

```console
∴ (cd crates/ruff/resources/test/project/examples && cargo run -p ruff_cli -- check --config=docs/ruff.toml .)
docs/docs/concepts/file.py:5:5: F841 [*] Local variable `x` is assigned to but never used
docs/docs/file.py:1:1: I001 [*] Import block is un-sorted or un-formatted
docs/docs/file.py:8:5: F841 [*] Local variable `x` is assigned to but never used
excluded/script.py:5:5: F841 [*] Local variable `x` is assigned to but never used
Found 4 errors.
[*] 4 potentially fixable with the --fix option.
```

Passing an excluded directory directly should report errors in the contained files:

```console
∴ cargo run -p ruff_cli -- check crates/ruff/resources/test/project/examples/excluded/
crates/ruff/resources/test/project/examples/excluded/script.py:1:8: F401 [*] `os` imported but unused
Found 1 error.
[*] 1 potentially fixable with the --fix option.
```

Unless we `--force-exclude`:

```console
∴ cargo run -p ruff_cli -- check crates/ruff/resources/test/project/examples/excluded/ --force-exclude
warning: No Python files found under the given path(s)
∴ cargo run -p ruff_cli -- check crates/ruff/resources/test/project/examples/excluded/script.py --force-exclude
warning: No Python files found under the given path(s)
```
@@ -1,398 +0,0 @@
use itertools::Itertools;
use std::collections::BTreeSet;

use ruff_text_size::{TextLen, TextRange, TextSize};
use rustc_hash::{FxHashMap, FxHashSet};

use ruff_diagnostics::{Diagnostic, Edit, Fix, IsolationLevel};
use ruff_source_file::Locator;

use crate::autofix::source_map::SourceMap;
use crate::linter::FixTable;
use crate::registry::{AsRule, Rule};

pub(crate) mod codemods;
pub(crate) mod edits;
pub(crate) mod snippet;
pub(crate) mod source_map;

pub(crate) struct FixResult {
    /// The resulting source code, after applying all fixes.
    pub(crate) code: String,
    /// The number of fixes applied for each [`Rule`].
    pub(crate) fixes: FixTable,
    /// Source map for the fixed source code.
    pub(crate) source_map: SourceMap,
}

/// Auto-fix errors in a file, and write the fixed source code to disk.
pub(crate) fn fix_file(diagnostics: &[Diagnostic], locator: &Locator) -> Option<FixResult> {
    let mut with_fixes = diagnostics
        .iter()
        .filter(|diag| diag.fix.is_some())
        .peekable();

    if with_fixes.peek().is_none() {
        None
    } else {
        Some(apply_fixes(with_fixes, locator))
    }
}

/// Apply a series of fixes.
fn apply_fixes<'a>(
    diagnostics: impl Iterator<Item = &'a Diagnostic>,
    locator: &'a Locator<'a>,
) -> FixResult {
    let mut output = String::with_capacity(locator.len());
    let mut last_pos: Option<TextSize> = None;
    let mut applied: BTreeSet<&Edit> = BTreeSet::default();
    let mut isolated: FxHashSet<u32> = FxHashSet::default();
    let mut fixed = FxHashMap::default();
    let mut source_map = SourceMap::default();

    for (rule, fix) in diagnostics
        .filter_map(|diagnostic| {
            diagnostic
                .fix
                .as_ref()
                .map(|fix| (diagnostic.kind.rule(), fix))
        })
        .sorted_by(|(rule1, fix1), (rule2, fix2)| cmp_fix(*rule1, *rule2, fix1, fix2))
    {
        let mut edits = fix
            .edits()
            .iter()
            .filter(|edit| !applied.contains(edit))
            .peekable();

        // If the fix contains at least one new edit, enforce isolation and positional requirements.
        if let Some(first) = edits.peek() {
            // If this fix requires isolation, and we've already applied another fix in the
            // same isolation group, skip it.
            if let IsolationLevel::Group(id) = fix.isolation() {
                if !isolated.insert(id) {
                    continue;
                }
            }

            // If this fix overlaps with a fix we've already applied, skip it.
            if last_pos.is_some_and(|last_pos| last_pos >= first.start()) {
                continue;
            }
        }

        let mut applied_edits = Vec::with_capacity(fix.edits().len());
        for edit in edits {
            // Add all contents from `last_pos` to `fix.location`.
            let slice = locator.slice(TextRange::new(last_pos.unwrap_or_default(), edit.start()));
            output.push_str(slice);

            // Add the start source marker for the patch.
            source_map.push_start_marker(edit, output.text_len());

            // Add the patch itself.
            output.push_str(edit.content().unwrap_or_default());

            // Add the end source marker for the added patch.
            source_map.push_end_marker(edit, output.text_len());

            // Track that the edit was applied.
            last_pos = Some(edit.end());
            applied_edits.push(edit);
        }

        applied.extend(applied_edits.drain(..));
        *fixed.entry(rule).or_default() += 1;
    }

    // Add the remaining content.
    let slice = locator.after(last_pos.unwrap_or_default());
    output.push_str(slice);

    FixResult {
        code: output,
        fixes: fixed,
        source_map,
    }
}

/// Compare two fixes.
fn cmp_fix(rule1: Rule, rule2: Rule, fix1: &Fix, fix2: &Fix) -> std::cmp::Ordering {
    fix1.min_start()
        .cmp(&fix2.min_start())
        .then_with(|| match (&rule1, &rule2) {
            // Apply `EndsInPeriod` fixes before `NewLineAfterLastParagraph` fixes.
            (Rule::EndsInPeriod, Rule::NewLineAfterLastParagraph) => std::cmp::Ordering::Less,
            (Rule::NewLineAfterLastParagraph, Rule::EndsInPeriod) => std::cmp::Ordering::Greater,
            // Apply `IfElseBlockInsteadOfDictGet` fixes before `IfElseBlockInsteadOfIfExp` fixes.
            (Rule::IfElseBlockInsteadOfDictGet, Rule::IfElseBlockInsteadOfIfExp) => {
                std::cmp::Ordering::Less
            }
            (Rule::IfElseBlockInsteadOfIfExp, Rule::IfElseBlockInsteadOfDictGet) => {
                std::cmp::Ordering::Greater
            }
            _ => std::cmp::Ordering::Equal,
        })
}

#[cfg(test)]
mod tests {
    use ruff_text_size::TextSize;

    use ruff_diagnostics::Diagnostic;
    use ruff_diagnostics::Edit;
    use ruff_diagnostics::Fix;
    use ruff_source_file::Locator;

    use crate::autofix::source_map::SourceMarker;
    use crate::autofix::{apply_fixes, FixResult};
    use crate::rules::pycodestyle::rules::MissingNewlineAtEndOfFile;

    #[allow(deprecated)]
    fn create_diagnostics(edit: impl IntoIterator<Item = Edit>) -> Vec<Diagnostic> {
        edit.into_iter()
            .map(|edit| Diagnostic {
                // The choice of rule here is arbitrary.
                kind: MissingNewlineAtEndOfFile.into(),
                range: edit.range(),
                fix: Some(Fix::unspecified(edit)),
                parent: None,
            })
            .collect()
    }

    #[test]
    fn empty_file() {
        let locator = Locator::new(r#""#);
        let diagnostics = create_diagnostics([]);
        let FixResult {
            code,
            fixes,
            source_map,
        } = apply_fixes(diagnostics.iter(), &locator);
        assert_eq!(code, "");
        assert_eq!(fixes.values().sum::<usize>(), 0);
        assert!(source_map.markers().is_empty());
    }

    #[test]
    fn apply_one_insertion() {
        let locator = Locator::new(
            r#"
import os

print("hello world")
"#
            .trim(),
        );
        let diagnostics = create_diagnostics([Edit::insertion(
            "import sys\n".to_string(),
            TextSize::new(10),
        )]);
        let FixResult {
            code,
            fixes,
            source_map,
        } = apply_fixes(diagnostics.iter(), &locator);
        assert_eq!(
            code,
            r#"
import os
import sys

print("hello world")
"#
            .trim()
        );
        assert_eq!(fixes.values().sum::<usize>(), 1);
        assert_eq!(
            source_map.markers(),
            &[
                SourceMarker {
                    source: 10.into(),
                    dest: 10.into(),
                },
                SourceMarker {
                    source: 10.into(),
                    dest: 21.into(),
                },
            ]
        );
    }

    #[test]
    fn apply_one_replacement() {
        let locator = Locator::new(
            r#"
class A(object):
    ...
"#
            .trim(),
        );
        let diagnostics = create_diagnostics([Edit::replacement(
            "Bar".to_string(),
            TextSize::new(8),
            TextSize::new(14),
        )]);
        let FixResult {
            code,
            fixes,
            source_map,
        } = apply_fixes(diagnostics.iter(), &locator);
        assert_eq!(
            code,
            r#"
class A(Bar):
    ...
"#
            .trim(),
        );
        assert_eq!(fixes.values().sum::<usize>(), 1);
        assert_eq!(
            source_map.markers(),
            &[
                SourceMarker {
                    source: 8.into(),
                    dest: 8.into(),
                },
                SourceMarker {
                    source: 14.into(),
                    dest: 11.into(),
                },
            ]
        );
    }

    #[test]
    fn apply_one_removal() {
        let locator = Locator::new(
            r#"
class A(object):
    ...
"#
            .trim(),
        );
        let diagnostics = create_diagnostics([Edit::deletion(TextSize::new(7), TextSize::new(15))]);
        let FixResult {
            code,
            fixes,
            source_map,
        } = apply_fixes(diagnostics.iter(), &locator);
        assert_eq!(
            code,
            r#"
class A:
    ...
"#
            .trim()
        );
        assert_eq!(fixes.values().sum::<usize>(), 1);
        assert_eq!(
            source_map.markers(),
            &[
                SourceMarker {
                    source: 7.into(),
                    dest: 7.into()
                },
                SourceMarker {
                    source: 15.into(),
                    dest: 7.into()
                }
            ]
        );
    }

    #[test]
    fn apply_two_removals() {
        let locator = Locator::new(
            r#"
class A(object, object, object):
    ...
"#
            .trim(),
        );
        let diagnostics = create_diagnostics([
            Edit::deletion(TextSize::from(8), TextSize::from(16)),
            Edit::deletion(TextSize::from(22), TextSize::from(30)),
        ]);
        let FixResult {
            code,
            fixes,
            source_map,
        } = apply_fixes(diagnostics.iter(), &locator);

        assert_eq!(
            code,
            r#"
class A(object):
    ...
"#
            .trim()
        );
        assert_eq!(fixes.values().sum::<usize>(), 2);
        assert_eq!(
            source_map.markers(),
            &[
                SourceMarker {
                    source: 8.into(),
                    dest: 8.into()
                },
                SourceMarker {
                    source: 16.into(),
                    dest: 8.into()
                },
                SourceMarker {
                    source: 22.into(),
                    dest: 14.into(),
                },
                SourceMarker {
                    source: 30.into(),
                    dest: 14.into(),
                }
            ]
        );
    }

    #[test]
    fn ignore_overlapping_fixes() {
        let locator = Locator::new(
            r#"
class A(object):
    ...
"#
            .trim(),
        );
        let diagnostics = create_diagnostics([
            Edit::deletion(TextSize::from(7), TextSize::from(15)),
            Edit::replacement("ignored".to_string(), TextSize::from(9), TextSize::from(11)),
        ]);
        let FixResult {
            code,
            fixes,
            source_map,
        } = apply_fixes(diagnostics.iter(), &locator);
        assert_eq!(
            code,
            r#"
class A:
    ...
"#
            .trim(),
        );
        assert_eq!(fixes.values().sum::<usize>(), 1);
        assert_eq!(
            source_map.markers(),
            &[
                SourceMarker {
                    source: 7.into(),
                    dest: 7.into(),
                },
                SourceMarker {
                    source: 15.into(),
                    dest: 7.into(),
                }
            ]
        );
    }
}
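The isolation bookkeeping in `apply_fixes` above is easy to miss: a fix tagged `IsolationLevel::Group(id)` is applied only if no earlier fix already claimed the same group. A minimal, self-contained sketch of that rule (plain strings stand in for rules and group IDs; this is illustrative, not the crate's API):

```rust
use std::collections::HashSet;

fn main() {
    let fixes = [("group-1", "fix A"), ("group-1", "fix B"), ("group-2", "fix C")];

    let mut isolated: HashSet<&str> = HashSet::new();
    for (group, fix) in fixes {
        // `insert` returns `false` if the group was already seen, mirroring
        // `if !isolated.insert(id) { continue; }` in `apply_fixes`.
        if !isolated.insert(group) {
            println!("skipping {fix} (group {group} already applied)");
            continue;
        }
        println!("applying {fix}");
    }
}
```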
@@ -1,59 +0,0 @@
use ruff_text_size::TextSize;

use ruff_diagnostics::Edit;

/// Lightweight sourcemap marker representing the source and destination
/// position for an [`Edit`].
#[derive(Debug, PartialEq, Eq)]
pub(crate) struct SourceMarker {
    /// Position of the marker in the original source.
    pub(crate) source: TextSize,
    /// Position of the marker in the transformed code.
    pub(crate) dest: TextSize,
}

/// A collection of [`SourceMarker`].
///
/// Sourcemaps are used to map positions in the original source to positions in
/// the transformed code. Here, only the boundaries of edits are tracked instead
/// of every single character.
#[derive(Default, PartialEq, Eq)]
pub(crate) struct SourceMap(Vec<SourceMarker>);

impl SourceMap {
    /// Returns a slice of all the markers in the sourcemap in the order they
    /// were added.
    pub(crate) fn markers(&self) -> &[SourceMarker] {
        &self.0
    }

    /// Push the start marker for an [`Edit`].
    ///
    /// The `output_length` is the length of the transformed string before the
    /// edit is applied.
    pub(crate) fn push_start_marker(&mut self, edit: &Edit, output_length: TextSize) {
        self.0.push(SourceMarker {
            source: edit.start(),
            dest: output_length,
        });
    }

    /// Push the end marker for an [`Edit`].
    ///
    /// The `output_length` is the length of the transformed string after the
    /// edit has been applied.
    pub(crate) fn push_end_marker(&mut self, edit: &Edit, output_length: TextSize) {
        if edit.is_insertion() {
            self.0.push(SourceMarker {
                source: edit.start(),
                dest: output_length,
            });
        } else {
            // Deletion or replacement
            self.0.push(SourceMarker {
                source: edit.end(),
                dest: output_length,
            });
        }
    }
}
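Because the map records only edit boundaries, mapping a position forward amounts to finding the nearest preceding marker and reusing its delta. A minimal, self-contained sketch of one plausible consumer, using plain `usize` offsets in place of `TextSize` (the `translate` helper is hypothetical, not part of the crate):

```rust
struct Marker {
    source: usize,
    dest: usize,
}

fn translate(offset: usize, markers: &[Marker]) -> usize {
    // Find the last marker at or before `offset`; positions between edit
    // boundaries shift by the same delta as the preceding marker.
    match markers.iter().rev().find(|m| m.source <= offset) {
        Some(m) => m.dest + (offset - m.source),
        None => offset,
    }
}

fn main() {
    // Markers for a single insertion of `"import sys\n"` (11 bytes) at offset
    // 10, matching the `apply_one_insertion` test above.
    let markers = [Marker { source: 10, dest: 10 }, Marker { source: 10, dest: 21 }];
    assert_eq!(translate(5, &markers), 5); // before the edit: unchanged
    assert_eq!(translate(15, &markers), 26); // after the edit: shifted by 11
}
```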
File diff suppressed because it is too large
@@ -1,134 +0,0 @@
//! Lint rules based on import analysis.
use std::borrow::Cow;
use std::path::Path;

use ruff_python_ast::{self as ast, PySourceType, Ranged, Stmt, Suite};

use ruff_diagnostics::Diagnostic;
use ruff_python_ast::helpers::to_module_path;
use ruff_python_ast::imports::{ImportMap, ModuleImport};
use ruff_python_ast::statement_visitor::StatementVisitor;
use ruff_python_codegen::Stylist;
use ruff_python_index::Indexer;

use ruff_source_file::Locator;

use crate::directives::IsortDirectives;
use crate::registry::Rule;
use crate::rules::isort;
use crate::rules::isort::block::{Block, BlockBuilder};
use crate::settings::Settings;
use crate::source_kind::SourceKind;

fn extract_import_map(path: &Path, package: Option<&Path>, blocks: &[&Block]) -> Option<ImportMap> {
    let Some(package) = package else {
        return None;
    };
    let Some(module_path) = to_module_path(package, path) else {
        return None;
    };

    let num_imports = blocks.iter().map(|block| block.imports.len()).sum();
    let mut module_imports = Vec::with_capacity(num_imports);
    for stmt in blocks.iter().flat_map(|block| &block.imports) {
        match stmt {
            Stmt::Import(ast::StmtImport { names, range: _ }) => {
                module_imports.extend(
                    names
                        .iter()
                        .map(|name| ModuleImport::new(name.name.to_string(), stmt.range())),
                );
            }
            Stmt::ImportFrom(ast::StmtImportFrom {
                module,
                names,
                level,
                range: _,
            }) => {
                let level = level.map_or(0, |level| level.to_usize());
                let module = if let Some(module) = module {
                    let module: &String = module.as_ref();
                    if level == 0 {
                        Cow::Borrowed(module)
                    } else {
                        if module_path.len() <= level {
                            continue;
                        }
                        let prefix = module_path[..module_path.len() - level].join(".");
                        Cow::Owned(format!("{prefix}.{module}"))
                    }
                } else {
                    if module_path.len() <= level {
                        continue;
                    }
                    Cow::Owned(module_path[..module_path.len() - level].join("."))
                };
                module_imports.extend(names.iter().map(|name| {
                    ModuleImport::new(format!("{}.{}", module, name.name), name.range())
                }));
            }
            _ => panic!("Expected Stmt::Import | Stmt::ImportFrom"),
        }
    }

    let mut import_map = ImportMap::default();
    import_map.insert(module_path.join("."), module_imports);
    Some(import_map)
}

#[allow(clippy::too_many_arguments)]
pub(crate) fn check_imports(
    python_ast: &Suite,
    locator: &Locator,
    indexer: &Indexer,
    directives: &IsortDirectives,
    settings: &Settings,
    stylist: &Stylist,
    path: &Path,
    package: Option<&Path>,
    source_kind: Option<&SourceKind>,
    source_type: PySourceType,
) -> (Vec<Diagnostic>, Option<ImportMap>) {
    // Extract all import blocks from the AST.
    let tracker = {
        let mut tracker =
            BlockBuilder::new(locator, directives, source_type.is_stub(), source_kind);
        tracker.visit_body(python_ast);
        tracker
    };
    let blocks: Vec<&Block> = tracker.iter().collect();

    // Enforce import rules.
    let mut diagnostics = vec![];
    if settings.rules.enabled(Rule::UnsortedImports) {
        for block in &blocks {
            if !block.imports.is_empty() {
                if let Some(diagnostic) = isort::rules::organize_imports(
                    block,
                    locator,
                    stylist,
                    indexer,
                    settings,
                    package,
                    source_type,
                ) {
                    diagnostics.push(diagnostic);
                }
            }
        }
    }
    if settings.rules.enabled(Rule::MissingRequiredImport) {
        diagnostics.extend(isort::rules::add_required_imports(
            python_ast,
            locator,
            stylist,
            settings,
            source_type,
        ));
    }

    // Extract import map.
    let imports = extract_import_map(path, package, &blocks);

    (diagnostics, imports)
}
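The `level`-handling in `extract_import_map` is the subtle part: a `from`-import with `level > 0` resolves by dropping `level` trailing components from the current module path and prefixing the named module. A standalone sketch of that resolution rule (the function name and shape are illustrative, not the crate's API):

```rust
fn resolve_relative(module_path: &[&str], level: usize, module: Option<&str>) -> Option<String> {
    if level > 0 && module_path.len() <= level {
        // Not enough parent packages to climb; the real code `continue`s here.
        return None;
    }
    let prefix = module_path[..module_path.len() - level].join(".");
    Some(match module {
        Some(module) if level == 0 => module.to_string(),
        Some(module) => format!("{prefix}.{module}"),
        None => prefix,
    })
}

fn main() {
    // From within `app.examples.docs`, `from ..concepts import file` resolves
    // against `app`.
    let path = ["app", "examples", "docs"];
    assert_eq!(resolve_relative(&path, 2, Some("concepts")), Some("app.concepts".into()));
    assert_eq!(resolve_relative(&path, 0, Some("numpy")), Some("numpy".into()));
    assert_eq!(resolve_relative(&path, 1, None), Some("app.examples".into()));
}
```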
@@ -1,243 +0,0 @@
//! `NoQA` enforcement and validation.

use std::path::Path;

use itertools::Itertools;
use ruff_python_ast::Ranged;
use ruff_text_size::{TextLen, TextRange, TextSize};

use ruff_diagnostics::{Diagnostic, Edit, Fix};
use ruff_source_file::Locator;

use crate::noqa;
use crate::noqa::{Directive, FileExemption, NoqaDirectives, NoqaMapping};
use crate::registry::{AsRule, Rule};
use crate::rule_redirects::get_redirect_target;
use crate::rules::ruff::rules::{UnusedCodes, UnusedNOQA};
use crate::settings::Settings;

pub(crate) fn check_noqa(
    diagnostics: &mut Vec<Diagnostic>,
    path: &Path,
    locator: &Locator,
    comment_ranges: &[TextRange],
    noqa_line_for: &NoqaMapping,
    analyze_directives: bool,
    settings: &Settings,
) -> Vec<usize> {
    // Identify any codes that are globally exempted (within the current file).
    let exemption = FileExemption::try_extract(locator.contents(), comment_ranges, path, locator);

    // Extract all `noqa` directives.
    let mut noqa_directives = NoqaDirectives::from_commented_ranges(comment_ranges, path, locator);

    // Indices of diagnostics that were ignored by a `noqa` directive.
    let mut ignored_diagnostics = vec![];

    // Remove any ignored diagnostics.
    'outer: for (index, diagnostic) in diagnostics.iter().enumerate() {
        if matches!(diagnostic.kind.rule(), Rule::BlanketNOQA) {
            continue;
        }

        match &exemption {
            Some(FileExemption::All) => {
                // If the file is exempted, ignore all diagnostics.
                ignored_diagnostics.push(index);
                continue;
            }
            Some(FileExemption::Codes(codes)) => {
                // If the diagnostic is ignored by a global exemption, ignore it.
                if codes.contains(&diagnostic.kind.rule().noqa_code()) {
                    ignored_diagnostics.push(index);
                    continue;
                }
            }
            None => {}
        }

        let noqa_offsets = diagnostic
            .parent
            .into_iter()
            .chain(std::iter::once(diagnostic.start()))
            .map(|position| noqa_line_for.resolve(position))
            .unique();

        for noqa_offset in noqa_offsets {
            if let Some(directive_line) = noqa_directives.find_line_with_directive_mut(noqa_offset)
            {
                let suppressed = match &directive_line.directive {
                    Directive::All(_) => {
                        directive_line
                            .matches
                            .push(diagnostic.kind.rule().noqa_code());
                        ignored_diagnostics.push(index);
                        true
                    }
                    Directive::Codes(directive) => {
                        if noqa::includes(diagnostic.kind.rule(), directive.codes()) {
                            directive_line
                                .matches
                                .push(diagnostic.kind.rule().noqa_code());
                            ignored_diagnostics.push(index);
                            true
                        } else {
                            false
                        }
                    }
                };

                if suppressed {
                    continue 'outer;
                }
            }
        }
    }

    // Enforce that the noqa directive was actually used (RUF100), unless RUF100 was itself
    // suppressed.
    if settings.rules.enabled(Rule::UnusedNOQA)
        && analyze_directives
        && !exemption.is_some_and(|exemption| match exemption {
            FileExemption::All => true,
            FileExemption::Codes(codes) => codes.contains(&Rule::UnusedNOQA.noqa_code()),
        })
    {
        for line in noqa_directives.lines() {
            match &line.directive {
                Directive::All(directive) => {
                    if line.matches.is_empty() {
                        let mut diagnostic =
                            Diagnostic::new(UnusedNOQA { codes: None }, directive.range());
                        if settings.rules.should_fix(diagnostic.kind.rule()) {
                            diagnostic
                                .set_fix(Fix::automatic(delete_noqa(directive.range(), locator)));
                        }
                        diagnostics.push(diagnostic);
                    }
                }
                Directive::Codes(directive) => {
                    let mut disabled_codes = vec![];
                    let mut unknown_codes = vec![];
                    let mut unmatched_codes = vec![];
                    let mut valid_codes = vec![];
                    let mut self_ignore = false;
                    for code in directive.codes() {
                        let code = get_redirect_target(code).unwrap_or(code);
                        if Rule::UnusedNOQA.noqa_code() == code {
                            self_ignore = true;
                            break;
                        }

                        if line.matches.iter().any(|match_| *match_ == code)
                            || settings.external.contains(code)
                        {
                            valid_codes.push(code);
                        } else {
                            if let Ok(rule) = Rule::from_code(code) {
                                if settings.rules.enabled(rule) {
                                    unmatched_codes.push(code);
                                } else {
                                    disabled_codes.push(code);
                                }
                            } else {
                                unknown_codes.push(code);
                            }
                        }
                    }

                    if self_ignore {
                        continue;
                    }

                    if !(disabled_codes.is_empty()
                        && unknown_codes.is_empty()
                        && unmatched_codes.is_empty())
                    {
                        let mut diagnostic = Diagnostic::new(
                            UnusedNOQA {
                                codes: Some(UnusedCodes {
                                    disabled: disabled_codes
                                        .iter()
                                        .map(|code| (*code).to_string())
                                        .collect(),
                                    unknown: unknown_codes
                                        .iter()
                                        .map(|code| (*code).to_string())
                                        .collect(),
                                    unmatched: unmatched_codes
                                        .iter()
                                        .map(|code| (*code).to_string())
                                        .collect(),
                                }),
                            },
                            directive.range(),
                        );
                        if settings.rules.should_fix(diagnostic.kind.rule()) {
                            if valid_codes.is_empty() {
                                diagnostic.set_fix(Fix::automatic(delete_noqa(
                                    directive.range(),
                                    locator,
                                )));
                            } else {
                                diagnostic.set_fix(Fix::automatic(Edit::range_replacement(
                                    format!("# noqa: {}", valid_codes.join(", ")),
                                    directive.range(),
                                )));
                            }
                        }
                        diagnostics.push(diagnostic);
                    }
                }
            }
        }
    }

    ignored_diagnostics.sort_unstable();
    ignored_diagnostics
}

/// Generate an [`Edit`] to delete a `noqa` directive.
fn delete_noqa(range: TextRange, locator: &Locator) -> Edit {
    let line_range = locator.line_range(range.start());

    // Compute the leading space.
    let prefix = locator.slice(TextRange::new(line_range.start(), range.start()));
    let leading_space = prefix
        .rfind(|c: char| !c.is_whitespace())
        .map_or(prefix.len(), |i| prefix.len() - i - 1);
    let leading_space_len = TextSize::try_from(leading_space).unwrap();

    // Compute the trailing space.
    let suffix = locator.slice(TextRange::new(range.end(), line_range.end()));
    let trailing_space = suffix
        .find(|c: char| !c.is_whitespace())
        .map_or(suffix.len(), |i| i);
    let trailing_space_len = TextSize::try_from(trailing_space).unwrap();

    // Ex) `# noqa`
    if line_range
        == TextRange::new(
            range.start() - leading_space_len,
            range.end() + trailing_space_len,
        )
    {
        let full_line_end = locator.full_line_end(line_range.end());
        Edit::deletion(line_range.start(), full_line_end)
    }
    // Ex) `x = 1  # noqa`
    else if range.end() + trailing_space_len == line_range.end() {
        Edit::deletion(range.start() - leading_space_len, line_range.end())
    }
    // Ex) `x = 1  # noqa  # type: ignore`
    else if locator.contents()[usize::from(range.end() + trailing_space_len)..].starts_with('#') {
        Edit::deletion(range.start(), range.end() + trailing_space_len)
    }
    // Ex) `x = 1  # noqa here`
    else {
        Edit::deletion(
            range.start() + "# ".text_len(),
            range.end() + trailing_space_len,
        )
    }
}
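`delete_noqa` distinguishes four line layouts, and the deletion span differs in each. A self-contained sketch of the same arithmetic over plain byte offsets (the `deletion_span` helper is hypothetical, not the crate's API):

```rust
fn deletion_span(line: &str, start: usize, end: usize) -> (usize, usize) {
    let prefix = &line[..start];
    let suffix = &line[end..];
    // Whitespace immediately before and after the `# noqa` directive.
    let leading = prefix.len() - prefix.trim_end().len();
    let trailing = suffix.len() - suffix.trim_start().len();

    if prefix.trim_end().is_empty() && suffix.trim_start().is_empty() {
        // Ex) `# noqa` alone on the line: delete the whole line.
        (0, line.len())
    } else if end + trailing == line.len() {
        // Ex) `x = 1  # noqa`: delete the directive and the space before it.
        (start - leading, line.len())
    } else if line[end + trailing..].starts_with('#') {
        // Ex) `x = 1  # noqa  # type: ignore`: delete up to the next comment.
        (start, end + trailing)
    } else {
        // Ex) `x = 1  # noqa here`: keep the `# ` and delete the rest.
        (start + "# ".len(), end + trailing)
    }
}

fn main() {
    let line = "x = 1  # noqa";
    // The directive spans bytes 7..13 in this line.
    assert_eq!(deletion_span(line, 7, 13), (5, 13));
}
```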
@@ -1,5 +0,0 @@
---
source: crates/ruff/src/comments/shebang.rs
expression: "ShebangDirective::try_extract(source)"
---
None
@@ -1,5 +0,0 @@
---
source: crates/ruff/src/comments/shebang.rs
expression: "ShebangDirective::try_extract(source)"
---
None
@@ -1,9 +0,0 @@
---
source: crates/ruff/src/comments/shebang.rs
expression: "ShebangDirective::try_extract(source)"
---
Some(
    ShebangDirective(
        "/usr/bin/env python",
    ),
)
@@ -1,9 +0,0 @@
---
source: crates/ruff/src/comments/shebang.rs
expression: "ShebangDirective::try_extract(source)"
---
Some(
    ShebangDirective(
        "/usr/bin/env python # trailing comment",
    ),
)
@@ -1,5 +0,0 @@
---
source: crates/ruff/src/comments/shebang.rs
expression: "ShebangDirective::try_extract(source)"
---
None
@@ -1,38 +0,0 @@
use libcst_native::{Expression, NameOrAttribute};

fn compose_call_path_inner<'a>(expr: &'a Expression, parts: &mut Vec<&'a str>) {
    match expr {
        Expression::Call(expr) => {
            compose_call_path_inner(&expr.func, parts);
        }
        Expression::Attribute(expr) => {
            compose_call_path_inner(&expr.value, parts);
            parts.push(expr.attr.value);
        }
        Expression::Name(expr) => {
            parts.push(expr.value);
        }
        _ => {}
    }
}

pub(crate) fn compose_call_path(expr: &Expression) -> Option<String> {
    let mut segments = vec![];
    compose_call_path_inner(expr, &mut segments);
    if segments.is_empty() {
        None
    } else {
        Some(segments.join("."))
    }
}

pub(crate) fn compose_module_path(module: &NameOrAttribute) -> String {
    match module {
        NameOrAttribute::N(name) => name.value.to_string(),
        NameOrAttribute::A(attr) => {
            let name = attr.attr.value;
            let prefix = compose_call_path(&attr.value);
            prefix.map_or_else(|| name.to_string(), |prefix| format!("{prefix}.{name}"))
        }
    }
}
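`compose_call_path` flattens a nested attribute access into a dotted path by recursing into the value first and pushing the attribute name on the way back up. A self-contained analogue with a minimal stand-in for the libcst `Expression` tree (the `Expr` enum here is illustrative only):

```rust
enum Expr<'a> {
    Name(&'a str),
    Attribute { value: Box<Expr<'a>>, attr: &'a str },
}

fn compose(expr: &Expr, parts: &mut Vec<String>) {
    match expr {
        Expr::Name(name) => parts.push((*name).to_string()),
        Expr::Attribute { value, attr } => {
            // Visit the base expression first so segments come out left-to-right.
            compose(value, parts);
            parts.push((*attr).to_string());
        }
    }
}

fn main() {
    // Equivalent of the Python expression `os.path.join`.
    let expr = Expr::Attribute {
        value: Box::new(Expr::Attribute {
            value: Box::new(Expr::Name("os")),
            attr: "path",
        }),
        attr: "join",
    };
    let mut parts = Vec::new();
    compose(&expr, &mut parts);
    assert_eq!(parts.join("."), "os.path.join");
}
```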
@@ -1,73 +0,0 @@
use std::fmt::{Debug, Formatter};
use std::ops::Deref;

use ruff_python_ast::{Expr, Ranged};
use ruff_python_semantic::Definition;
use ruff_text_size::TextRange;

pub(crate) mod extraction;
pub(crate) mod google;
pub(crate) mod numpy;
pub(crate) mod sections;
pub(crate) mod styles;

#[derive(Debug)]
pub(crate) struct Docstring<'a> {
    pub(crate) definition: &'a Definition<'a>,
    pub(crate) expr: &'a Expr,
    /// The content of the docstring, including the leading and trailing quotes.
    pub(crate) contents: &'a str,
    /// The range of the docstring body (without the quotes). The range is relative to [`Self::contents`].
    pub(crate) body_range: TextRange,
    pub(crate) indentation: &'a str,
}

impl<'a> Docstring<'a> {
    pub(crate) fn body(&self) -> DocstringBody {
        DocstringBody { docstring: self }
    }

    pub(crate) fn leading_quote(&self) -> &'a str {
        &self.contents[TextRange::up_to(self.body_range.start())]
    }
}

impl Ranged for Docstring<'_> {
    fn range(&self) -> TextRange {
        self.expr.range()
    }
}

#[derive(Copy, Clone)]
pub(crate) struct DocstringBody<'a> {
    docstring: &'a Docstring<'a>,
}

impl<'a> DocstringBody<'a> {
    pub(crate) fn as_str(self) -> &'a str {
        &self.docstring.contents[self.docstring.body_range]
    }
}

impl Ranged for DocstringBody<'_> {
    fn range(&self) -> TextRange {
        self.docstring.body_range + self.docstring.start()
    }
}

impl Deref for DocstringBody<'_> {
    type Target = str;

    fn deref(&self) -> &Self::Target {
        self.as_str()
    }
}

impl Debug for DocstringBody<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("DocstringBody")
            .field("text", &self.as_str())
            .field("range", &self.range())
            .finish()
    }
}
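The key invariant above is that `body_range` is relative to `contents`, so the leading quote is simply everything up to the body's start. A minimal illustration with plain string slices (offsets chosen by hand for a triple-quoted one-liner):

```rust
fn main() {
    let contents = r#""""Summary line.""""#; // the full docstring, quotes included
    let body_range = 3..contents.len() - 3; // analogous to `Docstring::body_range`

    let leading_quote = &contents[..body_range.start]; // cf. `Docstring::leading_quote`
    let body = &contents[body_range.clone()]; // cf. `DocstringBody::as_str`

    assert_eq!(leading_quote, "\"\"\"");
    assert_eq!(body, "Summary line.");
}
```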
@@ -1,438 +0,0 @@
use std::fmt::{Debug, Formatter};
use std::iter::FusedIterator;

use ruff_python_ast::docstrings::{leading_space, leading_words};
use ruff_python_ast::Ranged;
use ruff_text_size::{TextLen, TextRange, TextSize};
use strum_macros::EnumIter;

use ruff_source_file::{Line, UniversalNewlineIterator, UniversalNewlines};

use crate::docstrings::styles::SectionStyle;
use crate::docstrings::{Docstring, DocstringBody};

#[derive(EnumIter, PartialEq, Eq, Debug, Clone, Copy)]
pub(crate) enum SectionKind {
    Args,
    Arguments,
    Attention,
    Attributes,
    Caution,
    Danger,
    Error,
    Example,
    Examples,
    ExtendedSummary,
    Hint,
    Important,
    KeywordArgs,
    KeywordArguments,
    Methods,
    Note,
    Notes,
    OtherArgs,
    OtherArguments,
    OtherParams,
    OtherParameters,
    Parameters,
    Raises,
    References,
    Return,
    Returns,
    SeeAlso,
    ShortSummary,
    Tip,
    Todo,
    Warning,
    Warnings,
    Warns,
    Yield,
    Yields,
}

impl SectionKind {
    pub(crate) fn from_str(s: &str) -> Option<Self> {
        match s.to_ascii_lowercase().as_str() {
            "args" => Some(Self::Args),
            "arguments" => Some(Self::Arguments),
            "attention" => Some(Self::Attention),
            "attributes" => Some(Self::Attributes),
            "caution" => Some(Self::Caution),
            "danger" => Some(Self::Danger),
            "error" => Some(Self::Error),
            "example" => Some(Self::Example),
            "examples" => Some(Self::Examples),
            "extended summary" => Some(Self::ExtendedSummary),
            "hint" => Some(Self::Hint),
            "important" => Some(Self::Important),
            "keyword args" => Some(Self::KeywordArgs),
            "keyword arguments" => Some(Self::KeywordArguments),
            "methods" => Some(Self::Methods),
            "note" => Some(Self::Note),
            "notes" => Some(Self::Notes),
            "other args" => Some(Self::OtherArgs),
            "other arguments" => Some(Self::OtherArguments),
            "other params" => Some(Self::OtherParams),
            "other parameters" => Some(Self::OtherParameters),
            "parameters" => Some(Self::Parameters),
            "raises" => Some(Self::Raises),
            "references" => Some(Self::References),
            "return" => Some(Self::Return),
            "returns" => Some(Self::Returns),
            "see also" => Some(Self::SeeAlso),
            "short summary" => Some(Self::ShortSummary),
            "tip" => Some(Self::Tip),
            "todo" => Some(Self::Todo),
            "warning" => Some(Self::Warning),
            "warnings" => Some(Self::Warnings),
            "warns" => Some(Self::Warns),
            "yield" => Some(Self::Yield),
            "yields" => Some(Self::Yields),
            _ => None,
        }
    }

    pub(crate) fn as_str(self) -> &'static str {
        match self {
            Self::Args => "Args",
            Self::Arguments => "Arguments",
            Self::Attention => "Attention",
            Self::Attributes => "Attributes",
            Self::Caution => "Caution",
            Self::Danger => "Danger",
            Self::Error => "Error",
            Self::Example => "Example",
            Self::Examples => "Examples",
            Self::ExtendedSummary => "Extended Summary",
            Self::Hint => "Hint",
            Self::Important => "Important",
            Self::KeywordArgs => "Keyword Args",
            Self::KeywordArguments => "Keyword Arguments",
            Self::Methods => "Methods",
            Self::Note => "Note",
            Self::Notes => "Notes",
            Self::OtherArgs => "Other Args",
            Self::OtherArguments => "Other Arguments",
            Self::OtherParams => "Other Params",
            Self::OtherParameters => "Other Parameters",
            Self::Parameters => "Parameters",
            Self::Raises => "Raises",
            Self::References => "References",
            Self::Return => "Return",
            Self::Returns => "Returns",
            Self::SeeAlso => "See Also",
            Self::ShortSummary => "Short Summary",
            Self::Tip => "Tip",
            Self::Todo => "Todo",
            Self::Warning => "Warning",
            Self::Warnings => "Warnings",
            Self::Warns => "Warns",
            Self::Yield => "Yield",
            Self::Yields => "Yields",
        }
    }
}

pub(crate) struct SectionContexts<'a> {
    contexts: Vec<SectionContextData>,
    docstring: &'a Docstring<'a>,
}

impl<'a> SectionContexts<'a> {
    /// Extract all `SectionContext` values from a docstring.
    pub(crate) fn from_docstring(docstring: &'a Docstring<'a>, style: SectionStyle) -> Self {
        let contents = docstring.body();

        let mut contexts = Vec::new();
        let mut last: Option<SectionContextData> = None;

        let mut lines = contents.universal_newlines().peekable();

        // Skip the first line, which is the summary.
        let mut previous_line = lines.next();

        while let Some(line) = lines.next() {
            if let Some(section_kind) = suspected_as_section(&line, style) {
                let indent = leading_space(&line);
                let section_name = leading_words(&line);

                let section_name_range = TextRange::at(indent.text_len(), section_name.text_len());

                if is_docstring_section(
                    &line,
                    section_name_range,
                    previous_line.as_ref(),
                    lines.peek(),
                ) {
                    if let Some(mut last) = last.take() {
                        last.range = TextRange::new(last.range.start(), line.start());
                        contexts.push(last);
                    }

                    last = Some(SectionContextData {
                        kind: section_kind,
                        name_range: section_name_range + line.start(),
                        range: TextRange::empty(line.start()),
                        summary_full_end: line.full_end(),
                    });
                }
            }

            previous_line = Some(line);
        }

        if let Some(mut last) = last.take() {
            last.range = TextRange::new(last.range.start(), contents.text_len());
            contexts.push(last);
        }

        Self {
            contexts,
            docstring,
        }
    }

    pub(crate) fn len(&self) -> usize {
        self.contexts.len()
    }

    pub(crate) fn iter(&self) -> SectionContextsIter {
        SectionContextsIter {
            docstring_body: self.docstring.body(),
            inner: self.contexts.iter(),
        }
    }
}

impl<'a> IntoIterator for &'a SectionContexts<'a> {
    type IntoIter = SectionContextsIter<'a>;
    type Item = SectionContext<'a>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}

impl Debug for SectionContexts<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_list().entries(self.iter()).finish()
    }
}

pub(crate) struct SectionContextsIter<'a> {
    docstring_body: DocstringBody<'a>,
    inner: std::slice::Iter<'a, SectionContextData>,
}

impl<'a> Iterator for SectionContextsIter<'a> {
    type Item = SectionContext<'a>;

    fn next(&mut self) -> Option<Self::Item> {
        let next = self.inner.next()?;

        Some(SectionContext {
            data: next,
            docstring_body: self.docstring_body,
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}

impl<'a> DoubleEndedIterator for SectionContextsIter<'a> {
    fn next_back(&mut self) -> Option<Self::Item> {
        let back = self.inner.next_back()?;
        Some(SectionContext {
            data: back,
            docstring_body: self.docstring_body,
        })
    }
}

impl FusedIterator for SectionContextsIter<'_> {}
impl ExactSizeIterator for SectionContextsIter<'_> {}

#[derive(Debug)]
struct SectionContextData {
    kind: SectionKind,

    /// Range of the section name, relative to the [`Docstring::body`]
    name_range: TextRange,

    /// Range from the start to the end of the section, relative to the [`Docstring::body`]
    range: TextRange,

    /// End of the summary, relative to the [`Docstring::body`]
    summary_full_end: TextSize,
}

pub(crate) struct SectionContext<'a> {
    data: &'a SectionContextData,
    docstring_body: DocstringBody<'a>,
}

impl<'a> SectionContext<'a> {
    /// The `kind` of the section, e.g. [`SectionKind::Args`] or [`SectionKind::Returns`].
    pub(crate) const fn kind(&self) -> SectionKind {
        self.data.kind
    }

    /// The name of the section as it appears in the docstring, e.g. "Args" or "Returns".
    pub(crate) fn section_name(&self) -> &'a str {
        &self.docstring_body.as_str()[self.data.name_range]
    }

    /// Returns the rest of the summary line after the section name.
    pub(crate) fn summary_after_section_name(&self) -> &'a str {
        &self.summary_line()[usize::from(self.data.name_range.end() - self.data.range.start())..]
    }

    fn offset(&self) -> TextSize {
        self.docstring_body.start()
    }

    /// The absolute range of the section name
    pub(crate) fn section_name_range(&self) -> TextRange {
        self.data.name_range + self.offset()
    }

    /// The absolute range of the summary line, excluding any trailing newline character.
    pub(crate) fn summary_range(&self) -> TextRange {
        TextRange::at(self.range().start(), self.summary_line().text_len())
    }

    /// Range of the summary line relative to [`Docstring::body`], including the trailing newline character.
    fn summary_full_range_relative(&self) -> TextRange {
        TextRange::new(self.range_relative().start(), self.data.summary_full_end)
    }

    /// Returns the range of this section relative to [`Docstring::body`]
    const fn range_relative(&self) -> TextRange {
        self.data.range
    }

    /// The absolute range of the full-section.
    pub(crate) fn range(&self) -> TextRange {
        self.range_relative() + self.offset()
    }

    /// Summary line without the trailing newline characters
    pub(crate) fn summary_line(&self) -> &'a str {
        let full_summary = &self.docstring_body.as_str()[self.summary_full_range_relative()];

        let mut bytes = full_summary.bytes().rev();

        let newline_width = match bytes.next() {
            Some(b'\n') => {
                if bytes.next() == Some(b'\r') {
                    2
                } else {
                    1
                }
            }
            Some(b'\r') => 1,
            _ => 0,
        };

        &full_summary[..full_summary.len() - newline_width]
    }

    /// Returns the text of the last line of the previous section, or `None` if this is the first section.
    pub(crate) fn previous_line(&self) -> Option<&'a str> {
        let previous =
            &self.docstring_body.as_str()[TextRange::up_to(self.range_relative().start())];
        previous.universal_newlines().last().map(|l| l.as_str())
    }

    /// Returns the lines belonging to this section after the summary line.
    pub(crate) fn following_lines(&self) -> UniversalNewlineIterator<'a> {
        let lines = self.following_lines_str();
        UniversalNewlineIterator::with_offset(lines, self.offset() + self.data.summary_full_end)
    }

    fn following_lines_str(&self) -> &'a str {
        &self.docstring_body.as_str()[self.following_range_relative()]
    }

    /// Returns the range to the following lines relative to [`Docstring::body`].
    const fn following_range_relative(&self) -> TextRange {
        TextRange::new(self.data.summary_full_end, self.range_relative().end())
    }

    /// Returns the absolute range of the following lines.
    pub(crate) fn following_range(&self) -> TextRange {
        self.following_range_relative() + self.offset()
    }
}

impl Ranged for SectionContext<'_> {
    fn range(&self) -> TextRange {
        self.range()
    }
}

impl Debug for SectionContext<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("SectionContext")
            .field("kind", &self.kind())
            .field("section_name", &self.section_name())
            .field("summary_line", &self.summary_line())
            .field("following_lines", &&self.following_lines_str())
            .finish()
    }
}

fn suspected_as_section(line: &str, style: SectionStyle) -> Option<SectionKind> {
    if let Some(kind) = SectionKind::from_str(leading_words(line)) {
        if style.sections().contains(&kind) {
            return Some(kind);
        }
    }
    None
}

/// Check if the suspected context is really a section header.
fn is_docstring_section(
    line: &Line,
    section_name_range: TextRange,
    previous_line: Option<&Line>,
    next_line: Option<&Line>,
) -> bool {
    // Determine whether the current line looks like a section header, e.g., "Args:".
    let section_name_suffix = line[usize::from(section_name_range.end())..].trim();
    let this_looks_like_a_section_name =
        section_name_suffix == ":" || section_name_suffix.is_empty();
    if !this_looks_like_a_section_name {
        return false;
    }

    // Determine whether the next line is an underline, e.g., "-----".
    let next_line_is_underline = next_line.is_some_and(|next_line| {
        let next_line = next_line.trim();
        if next_line.is_empty() {
            false
        } else {
            let next_line_is_underline = next_line.chars().all(|char| matches!(char, '-' | '='));
            next_line_is_underline
        }
    });
    if next_line_is_underline {
        return true;
    }

    // Determine whether the previous line looks like the end of a paragraph.
    let previous_line_looks_like_end_of_paragraph = previous_line.map_or(true, |previous_line| {
        let previous_line = previous_line.trim();
        let previous_line_ends_with_punctuation = [',', ';', '.', '-', '\\', '/', ']', '}', ')']
            .into_iter()
            .any(|char| previous_line.ends_with(char));
        previous_line_ends_with_punctuation || previous_line.is_empty()
    });
    if !previous_line_looks_like_end_of_paragraph {
        return false;
    }

    true
}
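`is_docstring_section` combines three heuristics: the candidate line must be bare or colon-terminated, an underline on the next line confirms a header outright, and otherwise the previous line must look like the end of a paragraph. A simplified string-based sketch of the same decision (the suffix extraction here is much cruder than the real `leading_words`-based logic):

```rust
fn is_section_header(line: &str, previous: Option<&str>, next: Option<&str>) -> bool {
    // The text after the section name must be empty or a lone colon.
    let suffix = line
        .trim_start()
        .trim_start_matches(|c: char| c.is_alphanumeric())
        .trim();
    if !(suffix.is_empty() || suffix == ":") {
        return false;
    }
    // An underline of `-` or `=` on the next line confirms a header outright.
    let underlined = next.map_or(false, |next| {
        let next = next.trim();
        !next.is_empty() && next.chars().all(|c| matches!(c, '-' | '='))
    });
    if underlined {
        return true;
    }
    // Otherwise the previous line must look like the end of a paragraph.
    previous.map_or(true, |prev| {
        let prev = prev.trim();
        prev.is_empty()
            || [',', ';', '.', '-', '\\', '/', ']', '}', ')']
                .iter()
                .any(|&c| prev.ends_with(c))
    })
}

fn main() {
    assert!(is_section_header("Returns", Some(""), Some("-------")));
    assert!(is_section_header("Args:", Some("Does a thing."), None));
    assert!(!is_section_header("Returns the value", Some(""), None));
}
```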
@@ -1,13 +0,0 @@
//! Extract Black configuration settings from a pyproject.toml.

use serde::{Deserialize, Serialize};

use crate::settings::types::PythonVersion;

#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct Black {
    #[serde(alias = "line-length", alias = "line_length")]
    pub line_length: Option<usize>,
    #[serde(alias = "target-version", alias = "target_version")]
    pub target_version: Option<Vec<PythonVersion>>,
}
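The `#[serde(alias = ...)]` attributes let both the kebab-case keys Black writes and their snake_case variants deserialize into the same field. A small sketch, assuming `serde` (with the derive feature) and `toml` as dependencies; plain strings stand in for the crate-internal `PythonVersion`:

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Black {
    #[serde(alias = "line-length", alias = "line_length")]
    line_length: Option<usize>,
    #[serde(alias = "target-version", alias = "target_version")]
    target_version: Option<Vec<String>>, // `PythonVersion` in the real code
}

fn main() {
    // Both spellings of the key deserialize into the same field.
    let kebab: Black = toml::from_str("line-length = 88").unwrap();
    let snake: Black = toml::from_str("line_length = 88").unwrap();
    assert_eq!(kebab.line_length, snake.line_length);

    let versions: Black = toml::from_str(r#"target-version = ["py37", "py38"]"#).unwrap();
    assert_eq!(versions.target_version.unwrap().len(), 2);
}
```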
@@ -1,10 +0,0 @@
use super::black::Black;
use super::isort::Isort;
use super::pep621::Project;

#[derive(Default)]
pub struct ExternalConfig<'a> {
    pub black: Option<&'a Black>,
    pub isort: Option<&'a Isort>,
    pub project: Option<&'a Project>,
}
@@ -1,13 +0,0 @@
pub use converter::convert;
pub use external_config::ExternalConfig;
pub use plugin::Plugin;
pub use pyproject::parse;

mod black;
mod converter;
mod external_config;
mod isort;
mod parser;
pub mod pep621;
mod plugin;
mod pyproject;
@@ -1,26 +0,0 @@
use std::path::Path;

use anyhow::Result;
use serde::{Deserialize, Serialize};

use super::black::Black;
use super::isort::Isort;
use super::pep621::Project;

#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Tools {
    pub black: Option<Black>,
    pub isort: Option<Isort>,
}

#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Pyproject {
    pub tool: Option<Tools>,
    pub project: Option<Project>,
}

pub fn parse<P: AsRef<Path>>(path: P) -> Result<Pyproject> {
    let contents = std::fs::read_to_string(path)?;
    let pyproject = toml::from_str::<Pyproject>(&contents)?;
    Ok(pyproject)
}
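A hedged usage sketch for `parse`: write a minimal pyproject.toml to a temporary path, then read it back through the `Pyproject` structs above (the file name is hypothetical, chosen for illustration):

```rust
fn main() -> anyhow::Result<()> {
    let path = std::env::temp_dir().join("example_pyproject.toml");
    std::fs::write(
        &path,
        r#"
[tool.black]
line-length = 100
"#,
    )?;

    // `parse` reads the file and deserializes the `[tool.*]` tables.
    let pyproject = parse(&path)?;
    let black = pyproject.tool.and_then(|tools| tools.black);
    assert_eq!(black.and_then(|black| black.line_length), Some(100));
    Ok(())
}
```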
@@ -1,498 +0,0 @@
//! Code modification struct to add and modify import statements.
//!
//! Enables rules to make module members available (that may not yet be imported) during fix
//! execution.

use std::error::Error;

use anyhow::Result;
use libcst_native::{ImportAlias, Name, NameOrAttribute};
use ruff_python_ast::{self as ast, PySourceType, Ranged, Stmt, Suite};
use ruff_text_size::TextSize;

use ruff_diagnostics::Edit;
use ruff_python_ast::imports::{AnyImport, Import, ImportFrom};
use ruff_python_codegen::Stylist;
use ruff_python_semantic::SemanticModel;
use ruff_python_trivia::textwrap::indent;
use ruff_source_file::Locator;

use crate::autofix;
use crate::autofix::codemods::CodegenStylist;
use crate::cst::matchers::{match_aliases, match_import_from, match_statement};
use crate::importer::insertion::Insertion;

mod insertion;

pub(crate) struct Importer<'a> {
    /// The Python AST to which we are adding imports.
    python_ast: &'a Suite,
    /// The [`Locator`] for the Python AST.
    locator: &'a Locator<'a>,
    /// The [`Stylist`] for the Python AST.
    stylist: &'a Stylist<'a>,
    /// The list of visited, top-level runtime imports in the Python AST.
    runtime_imports: Vec<&'a Stmt>,
    /// The list of visited, top-level `if TYPE_CHECKING:` blocks in the Python AST.
    type_checking_blocks: Vec<&'a Stmt>,
}

impl<'a> Importer<'a> {
    pub(crate) fn new(
        python_ast: &'a Suite,
        locator: &'a Locator<'a>,
        stylist: &'a Stylist<'a>,
    ) -> Self {
        Self {
            python_ast,
            locator,
            stylist,
            runtime_imports: Vec::default(),
            type_checking_blocks: Vec::default(),
        }
    }

    /// Visit a top-level import statement.
    pub(crate) fn visit_import(&mut self, import: &'a Stmt) {
        self.runtime_imports.push(import);
    }

    /// Visit a top-level type-checking block.
    pub(crate) fn visit_type_checking_block(&mut self, type_checking_block: &'a Stmt) {
        self.type_checking_blocks.push(type_checking_block);
    }

    /// Add an import statement to import the given module.
    ///
    /// If there are no existing imports, the new import will be added at the top
    /// of the file. Otherwise, it will be added after the most recent top-level
    /// import statement.
    pub(crate) fn add_import(&self, import: &AnyImport, at: TextSize) -> Edit {
        let required_import = import.to_string();
        if let Some(stmt) = self.preceding_import(at) {
            // Insert after the last top-level import.
            Insertion::end_of_statement(stmt, self.locator, self.stylist)
                .into_edit(&required_import)
        } else {
            // Insert at the start of the file.
            Insertion::start_of_file(self.python_ast, self.locator, self.stylist)
                .into_edit(&required_import)
        }
    }

    /// Move an existing import to the top-level, thereby making it available at runtime.
    ///
    /// If there are no existing imports, the new import will be added at the top
    /// of the file. Otherwise, it will be added after the most recent top-level
    /// import statement.
    pub(crate) fn runtime_import_edit(
        &self,
        import: &ImportedMembers,
        at: TextSize,
    ) -> Result<RuntimeImportEdit> {
        // Generate the modified import statement.
        let content = autofix::codemods::retain_imports(
            &import.names,
            import.statement,
            self.locator,
            self.stylist,
        )?;

        // Add the import to the top-level.
        let insertion = if let Some(stmt) = self.preceding_import(at) {
            // Insert after the last top-level import.
            Insertion::end_of_statement(stmt, self.locator, self.stylist)
        } else {
            // Insert at the start of the file.
            Insertion::start_of_file(self.python_ast, self.locator, self.stylist)
        };
        let add_import_edit = insertion.into_edit(&content);

        Ok(RuntimeImportEdit { add_import_edit })
    }

    /// Move an existing import into a `TYPE_CHECKING` block.
    ///
    /// If there are no existing `TYPE_CHECKING` blocks, a new one will be added at the top
    /// of the file. Otherwise, it will be added after the most recent top-level
    /// `TYPE_CHECKING` block.
    pub(crate) fn typing_import_edit(
        &self,
        import: &ImportedMembers,
        at: TextSize,
        semantic: &SemanticModel,
        source_type: PySourceType,
    ) -> Result<TypingImportEdit> {
        // Generate the modified import statement.
        let content = autofix::codemods::retain_imports(
            &import.names,
            import.statement,
            self.locator,
            self.stylist,
        )?;

        // Import the `TYPE_CHECKING` symbol from the typing module.
        let (type_checking_edit, type_checking) = self.get_or_import_symbol(
            &ImportRequest::import_from("typing", "TYPE_CHECKING"),
            at,
            semantic,
        )?;

        // Add the import to a `TYPE_CHECKING` block.
        let add_import_edit = if let Some(block) = self.preceding_type_checking_block(at) {
            // Add the import to the `TYPE_CHECKING` block.
            self.add_to_type_checking_block(&content, block.start(), source_type)
        } else {
            // Add the import to a new `TYPE_CHECKING` block.
            self.add_type_checking_block(
                &format!(
                    "{}if {type_checking}:{}{}",
                    self.stylist.line_ending().as_str(),
                    self.stylist.line_ending().as_str(),
                    indent(&content, self.stylist.indentation())
                ),
                at,
            )?
        };

        Ok(TypingImportEdit {
            type_checking_edit,
            add_import_edit,
        })
    }

    /// Generate an [`Edit`] to reference the given symbol. Returns the [`Edit`] necessary to make
    /// the symbol available in the current scope along with the bound name of the symbol.
    ///
    /// Attempts to reuse existing imports when possible.
    pub(crate) fn get_or_import_symbol(
        &self,
        symbol: &ImportRequest,
        at: TextSize,
        semantic: &SemanticModel,
    ) -> Result<(Edit, String), ResolutionError> {
        match self.get_symbol(symbol, at, semantic) {
            Some(result) => result,
            None => self.import_symbol(symbol, at, semantic),
        }
    }

    /// Return an [`Edit`] to reference an existing symbol, if it's present in the given [`SemanticModel`].
    fn get_symbol(
        &self,
        symbol: &ImportRequest,
        at: TextSize,
        semantic: &SemanticModel,
    ) -> Option<Result<(Edit, String), ResolutionError>> {
        // If the symbol is already available in the current scope, use it.
        let imported_name = semantic.resolve_qualified_import_name(symbol.module, symbol.member)?;

        // If the symbol source (i.e., the import statement) comes after the current location,
        // abort. For example, we could be generating an edit within a function, and the import
        // could be defined in the module scope, but after the function definition. In this case,
        // it's unclear whether we can use the symbol (the function could be called between the
        // import and the current location, and thus the symbol would not be available). It's also
        // unclear whether we should add an import statement at the start of the file, since it could
        // be shadowed between the import and the current location.
        if imported_name.start() > at {
            return Some(Err(ResolutionError::ImportAfterUsage));
        }

        // If the symbol source (i.e., the import statement) is in a typing-only context, but we're
        // in a runtime context, abort.
        if imported_name.context().is_typing() && semantic.execution_context().is_runtime() {
            return Some(Err(ResolutionError::IncompatibleContext));
        }

        // We also add a no-op edit to force conflicts with any other fixes that might try to
        // remove the import. Consider:
        //
        // ```py
        // import sys
        //
        // quit()
        // ```
        //
        // Assume you omit this no-op edit. If you run Ruff with `unused-imports` and
        // `sys-exit-alias` over this snippet, it will generate two fixes: (1) remove the unused
        // `sys` import; and (2) replace `quit()` with `sys.exit()`, under the assumption that `sys`
        // is already imported and available.
        //
        // By adding this no-op edit, we force the `unused-imports` fix to conflict with the
        // `sys-exit-alias` fix, and thus will avoid applying both fixes in the same pass.
        let import_edit = Edit::range_replacement(
            self.locator.slice(imported_name.range()).to_string(),
            imported_name.range(),
        );
        Some(Ok((import_edit, imported_name.into_name())))
    }

    /// Generate an [`Edit`] to reference the given symbol. Returns the [`Edit`] necessary to make
    /// the symbol available in the current scope along with the bound name of the symbol.
    ///
    /// For example, assuming `module` is `"functools"` and `member` is `"lru_cache"`, this function
    /// could return an [`Edit`] to add `import functools` to the start of the file, alongside with
    /// the name on which the `lru_cache` symbol would be made available (`"functools.lru_cache"`).
    fn import_symbol(
        &self,
        symbol: &ImportRequest,
        at: TextSize,
        semantic: &SemanticModel,
    ) -> Result<(Edit, String), ResolutionError> {
        if let Some(stmt) = self.find_import_from(symbol.module, at) {
            // Case 1: `from functools import lru_cache` is in scope, and we're trying to reference
            // `functools.cache`; thus, we add `cache` to the import, and return `"cache"` as the
            // bound name.
            if semantic.is_available(symbol.member) {
                let Ok(import_edit) = self.add_member(stmt, symbol.member) else {
                    return Err(ResolutionError::InvalidEdit);
                };
                Ok((import_edit, symbol.member.to_string()))
            } else {
                Err(ResolutionError::ConflictingName(symbol.member.to_string()))
            }
        } else {
            match symbol.style {
                ImportStyle::Import => {
                    // Case 2a: No `functools` import is in scope; thus, we add `import functools`,
                    // and return `"functools.cache"` as the bound name.
                    if semantic.is_available(symbol.module) {
                        let import_edit =
                            self.add_import(&AnyImport::Import(Import::module(symbol.module)), at);
                        Ok((
                            import_edit,
                            format!(
                                "{module}.{member}",
                                module = symbol.module,
                                member = symbol.member
                            ),
                        ))
                    } else {
                        Err(ResolutionError::ConflictingName(symbol.module.to_string()))
                    }
                }
                ImportStyle::ImportFrom => {
                    // Case 2b: No `functools` import is in scope; thus, we add
                    // `from functools import cache`, and return `"cache"` as the bound name.
                    if semantic.is_available(symbol.member) {
                        let import_edit = self.add_import(
                            &AnyImport::ImportFrom(ImportFrom::member(
                                symbol.module,
                                symbol.member,
                            )),
                            at,
                        );
                        Ok((import_edit, symbol.member.to_string()))
                    } else {
                        Err(ResolutionError::ConflictingName(symbol.member.to_string()))
                    }
                }
            }
        }
    }

    /// Return the top-level [`Stmt`] that imports the given module using `Stmt::ImportFrom`
    /// preceding the given position, if any.
    fn find_import_from(&self, module: &str, at: TextSize) -> Option<&Stmt> {
        let mut import_from = None;
        for stmt in &self.runtime_imports {
            if stmt.start() >= at {
                break;
            }
            if let Stmt::ImportFrom(ast::StmtImportFrom {
                module: name,
                names,
                level,
                range: _,
            }) = stmt
            {
                if level.map_or(true, |level| level.to_u32() == 0)
                    && name.as_ref().is_some_and(|name| name == module)
                    && names.iter().all(|alias| alias.name.as_str() != "*")
                {
                    import_from = Some(*stmt);
                }
            }
        }
        import_from
    }

    /// Add the given member to an existing `Stmt::ImportFrom` statement.
    fn add_member(&self, stmt: &Stmt, member: &str) -> Result<Edit> {
        let mut statement = match_statement(self.locator.slice(stmt.range()))?;
        let import_from = match_import_from(&mut statement)?;
        let aliases = match_aliases(import_from)?;
        aliases.push(ImportAlias {
            name: NameOrAttribute::N(Box::new(Name {
                value: member,
                lpar: vec![],
                rpar: vec![],
            })),
            asname: None,
            comma: aliases.last().and_then(|alias| alias.comma.clone()),
        });
        Ok(Edit::range_replacement(
            statement.codegen_stylist(self.stylist),
            stmt.range(),
        ))
    }

    /// Add a `TYPE_CHECKING` block to the given module.
    fn add_type_checking_block(&self, content: &str, at: TextSize) -> Result<Edit> {
        let insertion = if let Some(stmt) = self.preceding_import(at) {
            // Insert after the last top-level import.
            Insertion::end_of_statement(stmt, self.locator, self.stylist)
        } else {
            // Insert at the start of the file.
            Insertion::start_of_file(self.python_ast, self.locator, self.stylist)
        };
        if insertion.is_inline() {
            Err(anyhow::anyhow!(
                "Cannot insert `TYPE_CHECKING` block inline"
            ))
        } else {
            Ok(insertion.into_edit(content))
        }
    }

    /// Add an import statement to an existing `TYPE_CHECKING` block.
    fn add_to_type_checking_block(
        &self,
        content: &str,
        at: TextSize,
        source_type: PySourceType,
    ) -> Edit {
        Insertion::start_of_block(at, self.locator, self.stylist, source_type).into_edit(content)
    }

    /// Return the import statement that precedes the given position, if any.
    fn preceding_import(&self, at: TextSize) -> Option<&'a Stmt> {
        self.runtime_imports
            .partition_point(|stmt| stmt.start() < at)
            .checked_sub(1)
            .map(|idx| self.runtime_imports[idx])
    }

    /// Return the `TYPE_CHECKING` block that precedes the given position, if any.
    fn preceding_type_checking_block(&self, at: TextSize) -> Option<&'a Stmt> {
        let block = self.type_checking_blocks.first()?;
        if block.start() <= at {
            Some(block)
        } else {
            None
        }
    }
}

/// An edit to the top-level of a module, making it available at runtime.
#[derive(Debug)]
pub(crate) struct RuntimeImportEdit {
    /// The edit to add the import to the top-level of the module.
    add_import_edit: Edit,
}

impl RuntimeImportEdit {
    pub(crate) fn into_edits(self) -> Vec<Edit> {
        vec![self.add_import_edit]
|
||||
}
|
||||
}
|
||||
|
||||
/// An edit to an import to a typing-only context.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct TypingImportEdit {
|
||||
/// The edit to add the `TYPE_CHECKING` symbol to the module.
|
||||
type_checking_edit: Edit,
|
||||
/// The edit to add the import to a `TYPE_CHECKING` block.
|
||||
add_import_edit: Edit,
|
||||
}
|
||||
|
||||
impl TypingImportEdit {
|
||||
pub(crate) fn into_edits(self) -> Vec<Edit> {
|
||||
vec![self.type_checking_edit, self.add_import_edit]
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum ImportStyle {
|
||||
/// Import the symbol using the `import` statement (e.g. `import foo; foo.bar`).
|
||||
Import,
|
||||
/// Import the symbol using the `from` statement (e.g. `from foo import bar; bar`).
|
||||
ImportFrom,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct ImportRequest<'a> {
|
||||
/// The module from which the symbol can be imported (e.g., `foo`, in `from foo import bar`).
|
||||
module: &'a str,
|
||||
/// The member to import (e.g., `bar`, in `from foo import bar`).
|
||||
member: &'a str,
|
||||
/// The preferred style to use when importing the symbol (e.g., `import foo` or
|
||||
/// `from foo import bar`), if it's not already in scope.
|
||||
style: ImportStyle,
|
||||
}
|
||||
|
||||
impl<'a> ImportRequest<'a> {
|
||||
/// Create a new `ImportRequest` from a module and member. If not present in the scope,
|
||||
/// the symbol should be imported using the "import" statement.
|
||||
pub(crate) fn import(module: &'a str, member: &'a str) -> Self {
|
||||
Self {
|
||||
module,
|
||||
member,
|
||||
style: ImportStyle::Import,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new `ImportRequest` from a module and member. If not present in the scope,
|
||||
/// the symbol should be imported using the "import from" statement.
|
||||
pub(crate) fn import_from(module: &'a str, member: &'a str) -> Self {
|
||||
Self {
|
||||
module,
|
||||
member,
|
||||
style: ImportStyle::ImportFrom,
|
||||
}
|
||||
}
|
||||
}
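
// Example usage (a sketch; `checker` and `expr` are illustrative stand-ins for a
// rule's context, not names defined in this module): a fix that needs
// `functools.cache` in scope can request it, then splice the returned binding
// into its replacement edit:
//
//     let (import_edit, binding) = checker.importer().get_or_import_symbol(
//         &ImportRequest::import_from("functools", "cache"),
//         expr.start(),
//         checker.semantic(),
//     )?;
//     let replacement = Edit::range_replacement(binding, expr.range());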

/// An existing list of module or member imports, located within an import statement.
pub(crate) struct ImportedMembers<'a> {
    /// The import statement.
    pub(crate) statement: &'a Stmt,
    /// The "names" of the imported members.
    pub(crate) names: Vec<&'a str>,
}

/// The result of an [`Importer::get_or_import_symbol`] call.
#[derive(Debug)]
pub(crate) enum ResolutionError {
    /// The symbol is imported, but the import came after the current location.
    ImportAfterUsage,
    /// The symbol is imported, but in an incompatible context (e.g., in a typing-only context,
    /// while we're in a runtime context).
    IncompatibleContext,
    /// The symbol can't be imported, because another symbol is bound to the same name.
    ConflictingName(String),
    /// The symbol can't be imported due to an error in editing an existing import statement.
    InvalidEdit,
}

impl std::fmt::Display for ResolutionError {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ResolutionError::ImportAfterUsage => {
                fmt.write_str("Unable to use existing symbol due to late binding")
            }
            ResolutionError::IncompatibleContext => {
                fmt.write_str("Unable to use existing symbol due to incompatible context")
            }
            ResolutionError::ConflictingName(binding) => std::write!(
                fmt,
                "Unable to insert `{binding}` into scope due to name conflict"
            ),
            ResolutionError::InvalidEdit => {
                fmt.write_str("Unable to modify existing import statement")
            }
        }
    }
}

impl Error for ResolutionError {}

@@ -1,662 +0,0 @@
use std::cmp::Ordering;
use std::fmt::Display;
use std::fs::File;
use std::io::{BufReader, BufWriter, Cursor, Read, Seek, SeekFrom, Write};
use std::iter;
use std::path::Path;

use itertools::Itertools;
use once_cell::sync::OnceCell;
use serde::Serialize;
use serde_json::error::Category;

use ruff_diagnostics::Diagnostic;
use ruff_python_parser::lexer::lex;
use ruff_python_parser::Mode;
use ruff_source_file::{NewlineWithTrailingNewline, UniversalNewlineIterator};
use ruff_text_size::{TextRange, TextSize};

use crate::autofix::source_map::{SourceMap, SourceMarker};
use crate::jupyter::index::JupyterIndex;
use crate::jupyter::schema::{Cell, RawNotebook, SortAlphabetically, SourceValue};
use crate::rules::pycodestyle::rules::SyntaxError;
use crate::IOError;

pub const JUPYTER_NOTEBOOK_EXT: &str = "ipynb";

/// Run round-trip source code generation on a given Jupyter notebook file path.
pub fn round_trip(path: &Path) -> anyhow::Result<String> {
    let mut notebook = Notebook::from_path(path).map_err(|err| {
        anyhow::anyhow!(
            "Failed to read notebook file `{}`: {:?}",
            path.display(),
            err
        )
    })?;
    let code = notebook.source_code().to_string();
    notebook.update_cell_content(&code);
    let mut writer = Vec::new();
    notebook.write_inner(&mut writer)?;
    Ok(String::from_utf8(writer)?)
}

impl Display for SourceValue {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            SourceValue::String(string) => f.write_str(string),
            SourceValue::StringArray(string_array) => {
                for string in string_array {
                    f.write_str(string)?;
                }
                Ok(())
            }
        }
    }
}

impl Cell {
    /// Return the [`SourceValue`] of the cell.
    fn source(&self) -> &SourceValue {
        match self {
            Cell::Code(cell) => &cell.source,
            Cell::Markdown(cell) => &cell.source,
            Cell::Raw(cell) => &cell.source,
        }
    }

    /// Update the [`SourceValue`] of the cell.
    fn set_source(&mut self, source: SourceValue) {
        match self {
            Cell::Code(cell) => cell.source = source,
            Cell::Markdown(cell) => cell.source = source,
            Cell::Raw(cell) => cell.source = source,
        }
    }

    /// Return `true` if it's a valid code cell.
    ///
    /// A valid code cell is a cell where the cell type is [`Cell::Code`] and the
    /// source doesn't contain a cell magic.
    fn is_valid_code_cell(&self) -> bool {
        let source = match self {
            Cell::Code(cell) => &cell.source,
            _ => return false,
        };
        // Ignore cells containing cell magic. This is different from line magic,
        // which is allowed and ignored by the parser.
        !match source {
            SourceValue::String(string) => string
                .lines()
                .any(|line| line.trim_start().starts_with("%%")),
            SourceValue::StringArray(string_array) => string_array
                .iter()
                .any(|line| line.trim_start().starts_with("%%")),
        }
    }
}

#[derive(Clone, Debug, PartialEq)]
pub struct Notebook {
    /// Python source code of the notebook.
    ///
    /// This is the concatenation of all valid code cells in the notebook,
    /// separated by a newline and with a trailing newline. The trailing
    /// newline ensures that each cell ends with a newline, which is removed
    /// when updating the cell content.
    source_code: String,
    /// The index of the notebook. This is used to map between the concatenated
    /// source code and the original notebook.
    index: OnceCell<JupyterIndex>,
    /// The raw notebook, i.e., the deserialized version of the JSON string.
    raw: RawNotebook,
    /// The offsets of each cell in the concatenated source code. This includes
    /// the first and last character offsets as well.
    cell_offsets: Vec<TextSize>,
    /// The cell index of all valid code cells in the notebook.
    valid_code_cells: Vec<u32>,
    /// Flag to indicate if the JSON string of the notebook has a trailing newline.
    trailing_newline: bool,
}

impl Notebook {
    /// Read the Jupyter Notebook from the given [`Path`].
    pub fn from_path(path: &Path) -> Result<Self, Box<Diagnostic>> {
        Self::from_reader(BufReader::new(File::open(path).map_err(|err| {
            Diagnostic::new(
                IOError {
                    message: format!("{err}"),
                },
                TextRange::default(),
            )
        })?))
    }

    /// Read the Jupyter Notebook from its JSON string.
    pub fn from_source_code(source_code: &str) -> Result<Self, Box<Diagnostic>> {
        Self::from_reader(Cursor::new(source_code))
    }

    /// Read a Jupyter Notebook from a [`Read`] implementor.
    ///
    /// See also the Black implementation:
    /// <https://github.com/psf/black/blob/69ca0a4c7a365c5f5eea519a90980bab72cab764/src/black/__init__.py#L1017-L1046>
    fn from_reader<R>(mut reader: R) -> Result<Self, Box<Diagnostic>>
    where
        R: Read + Seek,
    {
        let trailing_newline = reader.seek(SeekFrom::End(-1)).is_ok_and(|_| {
            let mut buf = [0; 1];
            reader.read_exact(&mut buf).is_ok_and(|_| buf[0] == b'\n')
        });
        reader.rewind().map_err(|err| {
            Diagnostic::new(
                IOError {
                    message: format!("{err}"),
                },
                TextRange::default(),
            )
        })?;
        let raw_notebook: RawNotebook = match serde_json::from_reader(reader.by_ref()) {
            Ok(notebook) => notebook,
            Err(err) => {
                // Translate the error into a diagnostic.
                return Err(Box::new({
                    match err.classify() {
                        Category::Io => Diagnostic::new(
                            IOError {
                                message: format!("{err}"),
                            },
                            TextRange::default(),
                        ),
                        Category::Syntax | Category::Eof => {
                            // Maybe someone saved the Python sources (those with the `# %%`
                            // separator) as a Jupyter notebook instead. Let's help them.
                            let mut contents = String::new();
                            reader
                                .rewind()
                                .and_then(|_| reader.read_to_string(&mut contents))
                                .map_err(|err| {
                                    Diagnostic::new(
                                        IOError {
                                            message: format!("{err}"),
                                        },
                                        TextRange::default(),
                                    )
                                })?;

                            // Check if tokenizing was successful and the file is non-empty.
                            if lex(&contents, Mode::Module).any(|result| result.is_err()) {
                                Diagnostic::new(
                                    SyntaxError {
                                        message: format!(
                                            "A Jupyter Notebook (.{JUPYTER_NOTEBOOK_EXT}) must internally be JSON, \
                                             but this file isn't valid JSON: {err}"
                                        ),
                                    },
                                    TextRange::default(),
                                )
                            } else {
                                Diagnostic::new(
                                    SyntaxError {
                                        message: format!(
                                            "Expected a Jupyter Notebook (.{JUPYTER_NOTEBOOK_EXT}), \
                                             which must be internally stored as JSON, \
                                             but found a Python source file: {err}"
                                        ),
                                    },
                                    TextRange::default(),
                                )
                            }
                        }
                        Category::Data => {
                            // We could try to read the schema version here, but if this fails,
                            // it's a bug anyway.
                            Diagnostic::new(
                                SyntaxError {
                                    message: format!(
                                        "This file does not match the schema expected of Jupyter Notebooks: {err}"
                                    ),
                                },
                                TextRange::default(),
                            )
                        }
                    }
                }));
            }
        };

        // v4 is what everybody uses.
        if raw_notebook.nbformat != 4 {
            // Bail, because we should have already failed at the JSON-schema stage.
            return Err(Box::new(Diagnostic::new(
                SyntaxError {
                    message: format!(
                        "Expected Jupyter Notebook format 4, found {}",
                        raw_notebook.nbformat
                    ),
                },
                TextRange::default(),
            )));
        }

        let valid_code_cells = raw_notebook
            .cells
            .iter()
            .enumerate()
            .filter(|(_, cell)| cell.is_valid_code_cell())
            .map(|(idx, _)| u32::try_from(idx).unwrap())
            .collect::<Vec<_>>();

        let mut contents = Vec::with_capacity(valid_code_cells.len());
        let mut current_offset = TextSize::from(0);
        let mut cell_offsets = Vec::with_capacity(valid_code_cells.len());
        cell_offsets.push(TextSize::from(0));

        for &idx in &valid_code_cells {
            let cell_contents = match &raw_notebook.cells[idx as usize].source() {
                SourceValue::String(string) => string.clone(),
                SourceValue::StringArray(string_array) => string_array.join(""),
            };
            current_offset += TextSize::of(&cell_contents) + TextSize::new(1);
            contents.push(cell_contents);
            cell_offsets.push(current_offset);
        }

        Ok(Self {
            raw: raw_notebook,
            index: OnceCell::new(),
            // The additional newline at the end is to maintain consistency for
            // all cells. These newlines will be removed before updating the
            // source code with the transformed content. Refer to `update_cell_content`.
            source_code: contents.join("\n") + "\n",
            cell_offsets,
            valid_code_cells,
            trailing_newline,
        })
    }

    /// Update the cell offsets as per the given [`SourceMap`].
    fn update_cell_offsets(&mut self, source_map: &SourceMap) {
        // When there are multiple cells without any edits, the offsets of those
        // cells will be updated using the same marker. So, we can keep track of
        // the last marker used to update the offsets and check if it's still
        // the closest marker to the current offset.
        let mut last_marker: Option<&SourceMarker> = None;

        // The first offset is always going to be at 0, so skip it.
        for offset in self.cell_offsets.iter_mut().skip(1).rev() {
            let closest_marker = match last_marker {
                Some(marker) if marker.source <= *offset => marker,
                _ => {
                    let Some(marker) = source_map
                        .markers()
                        .iter()
                        .rev()
                        .find(|m| m.source <= *offset)
                    else {
                        // There are no markers above the current offset, so we can
                        // stop here.
                        break;
                    };
                    last_marker = Some(marker);
                    marker
                }
            };

            match closest_marker.source.cmp(&closest_marker.dest) {
                Ordering::Less => *offset += closest_marker.dest - closest_marker.source,
                Ordering::Greater => *offset -= closest_marker.source - closest_marker.dest,
                Ordering::Equal => (),
            }
        }
    }
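
    // Worked example for the marker arithmetic above (a sketch, not taken from a
    // real run): if a fix removed three bytes at offset 10, the source map holds a
    // marker with `source: 13` and `dest: 10`. A cell offset of 90 then satisfies
    // `marker.source <= 90` and is shifted back by `source - dest = 3` to 87, while
    // offsets before 13 find no applicable marker and are left unchanged.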

    /// Update the cell contents with the transformed content.
    ///
    /// ## Panics
    ///
    /// Panics if the transformed content is out of bounds for any cell. This
    /// can happen only if the cell offsets were not updated before calling
    /// this method, or if the offsets were updated incorrectly.
    fn update_cell_content(&mut self, transformed: &str) {
        for (&idx, (start, end)) in self
            .valid_code_cells
            .iter()
            .zip(self.cell_offsets.iter().tuple_windows::<(_, _)>())
        {
            let cell_content = transformed
                .get(start.to_usize()..end.to_usize())
                .unwrap_or_else(|| {
                    panic!(
                        "Transformed content out of bounds ({start:?}..{end:?}) for cell at {idx:?}"
                    );
                });
            self.raw.cells[idx as usize].set_source(SourceValue::StringArray(
                UniversalNewlineIterator::from(
                    // We only need to strip the trailing newline, which we added
                    // while concatenating the cell contents.
                    cell_content.strip_suffix('\n').unwrap_or(cell_content),
                )
                .map(|line| line.as_full_str().to_string())
                .collect::<Vec<_>>(),
            ));
        }
    }

    /// Build and return the [`JupyterIndex`].
    ///
    /// ## Notes
    ///
    /// Empty cells don't have any newlines, but there's a single visible line
    /// in the UI. That single line needs to be accounted for.
    ///
    /// In case of [`SourceValue::StringArray`], newlines are part of the strings.
    /// So, to get the actual count of lines, we need to check for any trailing
    /// newline on the last line.
    ///
    /// For example, consider the following cell:
    /// ```python
    /// [
    ///    "import os\n",
    ///    "import sys\n",
    /// ]
    /// ```
    ///
    /// Here, the array suggests that there are two lines, but the actual number
    /// of lines visible in the UI is three. The same goes for [`SourceValue::String`],
    /// where we need to check for the trailing newline.
    ///
    /// Building the index is expensive, as it needs to go through the content of
    /// every valid code cell.
    fn build_index(&self) -> JupyterIndex {
        let mut row_to_cell = vec![0];
        let mut row_to_row_in_cell = vec![0];

        for &idx in &self.valid_code_cells {
            let line_count = match &self.raw.cells[idx as usize].source() {
                SourceValue::String(string) => {
                    if string.is_empty() {
                        1
                    } else {
                        u32::try_from(NewlineWithTrailingNewline::from(string).count()).unwrap()
                    }
                }
                SourceValue::StringArray(string_array) => {
                    if string_array.is_empty() {
                        1
                    } else {
                        let trailing_newline =
                            usize::from(string_array.last().is_some_and(|s| s.ends_with('\n')));
                        u32::try_from(string_array.len() + trailing_newline).unwrap()
                    }
                }
            };
            row_to_cell.extend(iter::repeat(idx + 1).take(line_count as usize));
            row_to_row_in_cell.extend(1..=line_count);
        }

        JupyterIndex {
            row_to_cell,
            row_to_row_in_cell,
        }
    }
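
    // For instance (a sketch): a notebook whose only valid code cell sits at
    // 0-based index 1, with source `["import os\n", "import sys"]` (two visible
    // lines, no trailing newline), yields `row_to_cell: [0, 2, 2]` and
    // `row_to_row_in_cell: [0, 1, 2]`, so row 2 of the concatenated source maps
    // to line 2 of cell 2.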

    /// Return the notebook content.
    ///
    /// This is the concatenation of all Python code cells.
    pub fn source_code(&self) -> &str {
        &self.source_code
    }

    /// Return the Jupyter notebook index.
    ///
    /// The index is built only once, when required. This is only used to
    /// report diagnostics, so by that time all of the autofixes must have
    /// been applied, if `--fix` was passed.
    pub(crate) fn index(&self) -> &JupyterIndex {
        self.index.get_or_init(|| self.build_index())
    }

    /// Return the cell offsets for the concatenated source code corresponding
    /// to the Jupyter notebook.
    pub(crate) fn cell_offsets(&self) -> &[TextSize] {
        &self.cell_offsets
    }

    /// Update the notebook with the given source map and transformed content.
    pub(crate) fn update(&mut self, source_map: &SourceMap, transformed: String) {
        // Cell offsets must be updated before updating the cell content, as
        // the latter depends on the offsets to extract the cell content.
        self.index.take();
        self.update_cell_offsets(source_map);
        self.update_cell_content(&transformed);
        self.source_code = transformed;
    }

    /// Return a slice of [`Cell`] in the Jupyter notebook.
    pub fn cells(&self) -> &[Cell] {
        &self.raw.cells
    }

    /// Return `true` if the notebook is a Python notebook, `false` otherwise.
    pub fn is_python_notebook(&self) -> bool {
        self.raw
            .metadata
            .language_info
            .as_ref()
            .map_or(true, |language| language.name == "python")
    }

    fn write_inner(&self, writer: &mut impl Write) -> anyhow::Result<()> {
        // https://github.com/psf/black/blob/69ca0a4c7a365c5f5eea519a90980bab72cab764/src/black/__init__.py#LL1041
        let formatter = serde_json::ser::PrettyFormatter::with_indent(b" ");
        let mut serializer = serde_json::Serializer::with_formatter(writer, formatter);
        SortAlphabetically(&self.raw).serialize(&mut serializer)?;
        if self.trailing_newline {
            writeln!(serializer.into_inner())?;
        }
        Ok(())
    }

    /// Write back with an indent of 1, just like Black.
    pub fn write(&self, path: &Path) -> anyhow::Result<()> {
        let mut writer = BufWriter::new(File::create(path)?);
        self.write_inner(&mut writer)?;
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use std::path::Path;

    use anyhow::Result;
    use test_case::test_case;

    use crate::jupyter::index::JupyterIndex;
    use crate::jupyter::schema::Cell;
    use crate::jupyter::Notebook;
    use crate::registry::Rule;
    use crate::test::{
        read_jupyter_notebook, test_notebook_path, test_resource_path, TestedNotebook,
    };
    use crate::{assert_messages, settings};

    /// Read a Jupyter cell from the `resources/test/fixtures/jupyter/cell` directory.
    fn read_jupyter_cell(path: impl AsRef<Path>) -> Result<Cell> {
        let path = test_resource_path("fixtures/jupyter/cell").join(path);
        let source_code = std::fs::read_to_string(path)?;
        Ok(serde_json::from_str(&source_code)?)
    }

    #[test]
    fn test_valid() {
        assert!(read_jupyter_notebook(Path::new("valid.ipynb")).is_ok());
    }

    #[test]
    fn test_r() {
        // We can load this; it will be filtered out later.
        assert!(read_jupyter_notebook(Path::new("R.ipynb")).is_ok());
    }

    #[test]
    fn test_invalid() {
        let path = Path::new("resources/test/fixtures/jupyter/invalid_extension.ipynb");
        assert_eq!(
            Notebook::from_path(path).unwrap_err().kind.body,
            "SyntaxError: Expected a Jupyter Notebook (.ipynb), \
             which must be internally stored as JSON, \
             but found a Python source file: \
             expected value at line 1 column 1"
        );
        let path = Path::new("resources/test/fixtures/jupyter/not_json.ipynb");
        assert_eq!(
            Notebook::from_path(path).unwrap_err().kind.body,
            "SyntaxError: A Jupyter Notebook (.ipynb) must internally be JSON, \
             but this file isn't valid JSON: \
             expected value at line 1 column 1"
        );
        let path = Path::new("resources/test/fixtures/jupyter/wrong_schema.ipynb");
        assert_eq!(
            Notebook::from_path(path).unwrap_err().kind.body,
            "SyntaxError: This file does not match the schema expected of Jupyter Notebooks: \
             missing field `cells` at line 1 column 2"
        );
    }

    #[test_case(Path::new("markdown.json"), false; "markdown")]
    #[test_case(Path::new("only_magic.json"), true; "only_magic")]
    #[test_case(Path::new("code_and_magic.json"), true; "code_and_magic")]
    #[test_case(Path::new("only_code.json"), true; "only_code")]
    #[test_case(Path::new("cell_magic.json"), false; "cell_magic")]
    fn test_is_valid_code_cell(path: &Path, expected: bool) -> Result<()> {
        assert_eq!(read_jupyter_cell(path)?.is_valid_code_cell(), expected);
        Ok(())
    }

    #[test]
    fn test_concat_notebook() -> Result<()> {
        let notebook = read_jupyter_notebook(Path::new("valid.ipynb"))?;
        assert_eq!(
            notebook.source_code,
            r#"def unused_variable():
    x = 1
    y = 2
    print(f"cell one: {y}")

unused_variable()
def mutable_argument(z=set()):
    print(f"cell two: {z}")

mutable_argument()




print("after empty cells")
"#
        );
        assert_eq!(
            notebook.index(),
            &JupyterIndex {
                row_to_cell: vec![0, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 5, 7, 7, 8],
                row_to_row_in_cell: vec![0, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 1, 1, 2, 1],
            }
        );
        assert_eq!(
            notebook.cell_offsets(),
            &[
                0.into(),
                90.into(),
                168.into(),
                169.into(),
                171.into(),
                198.into()
            ]
        );
        Ok(())
    }

    #[test]
    fn test_import_sorting() -> Result<()> {
        let path = "isort.ipynb".to_string();
        let TestedNotebook {
            messages,
            source_notebook,
            ..
        } = test_notebook_path(
            &path,
            Path::new("isort_expected.ipynb"),
            &settings::Settings::for_rule(Rule::UnsortedImports),
        )?;
        assert_messages!(messages, path, source_notebook);
        Ok(())
    }

    #[test]
    fn test_ipy_escape_command() -> Result<()> {
        let path = "ipy_escape_command.ipynb".to_string();
        let TestedNotebook {
            messages,
            source_notebook,
            ..
        } = test_notebook_path(
            &path,
            Path::new("ipy_escape_command_expected.ipynb"),
            &settings::Settings::for_rule(Rule::UnusedImport),
        )?;
        assert_messages!(messages, path, source_notebook);
        Ok(())
    }

    #[test]
    fn test_unused_variable() -> Result<()> {
        let path = "unused_variable.ipynb".to_string();
        let TestedNotebook {
            messages,
            source_notebook,
            ..
        } = test_notebook_path(
            &path,
            Path::new("unused_variable_expected.ipynb"),
            &settings::Settings::for_rule(Rule::UnusedVariable),
        )?;
        assert_messages!(messages, path, source_notebook);
        Ok(())
    }

    #[test]
    fn test_json_consistency() -> Result<()> {
        let path = "before_fix.ipynb".to_string();
        let TestedNotebook {
            linted_notebook: fixed_notebook,
            ..
        } = test_notebook_path(
            path,
            Path::new("after_fix.ipynb"),
            &settings::Settings::for_rule(Rule::UnusedImport),
        )?;
        let mut writer = Vec::new();
        fixed_notebook.write_inner(&mut writer)?;
        let actual = String::from_utf8(writer)?;
        let expected =
            std::fs::read_to_string(test_resource_path("fixtures/jupyter/after_fix.ipynb"))?;
        assert_eq!(actual, expected);
        Ok(())
    }

    #[test_case(Path::new("before_fix.ipynb"), true; "trailing_newline")]
    #[test_case(Path::new("no_trailing_newline.ipynb"), false; "no_trailing_newline")]
    fn test_trailing_newline(path: &Path, trailing_newline: bool) -> Result<()> {
        let notebook = read_jupyter_notebook(path)?;
        assert_eq!(notebook.trailing_newline, trailing_newline);

        let mut writer = Vec::new();
        notebook.write_inner(&mut writer)?;
        let string = String::from_utf8(writer)?;
        assert_eq!(string.ends_with('\n'), trailing_newline);

        Ok(())
    }
}

@@ -1,89 +0,0 @@
---
source: crates/ruff/src/jupyter/notebook.rs
---
isort.ipynb:cell 1:1:1: I001 [*] Import block is un-sorted or un-formatted
  |
1 | / from pathlib import Path
2 | | import random
3 | | import math
4 | | from typing import Any
  | |_^ I001
5 |   import collections
6 |   # Newline should be added here
  |
  = help: Organize imports

ℹ Fix
1   |+import math
2   |+import random
1 3 | from pathlib import Path
2   |-import random
3   |-import math
4 4 | from typing import Any
5 5 | import collections
6 6 | # Newline should be added here

isort.ipynb:cell 2:1:1: I001 [*] Import block is un-sorted or un-formatted
  |
1 | / from typing import Any
2 | | import collections
3 | | # Newline should be added here
  | |_^ I001
4 |   def foo():
5 |       pass
  |
  = help: Organize imports

ℹ Fix
1 1 | from pathlib import Path
2 2 | import random
3 3 | import math
4   |+import collections
4 5 | from typing import Any
5   |-import collections
6   |+
7   |+
6 8 | # Newline should be added here
7 9 | def foo():
8 10 |     pass

isort.ipynb:cell 3:1:1: I001 [*] Import block is un-sorted or un-formatted
  |
1 | / from pathlib import Path
2 | | import sys
3 | |
4 | | %matplotlib \
  | |_^ I001
5 |   --inline
  |
  = help: Organize imports

ℹ Fix
6 6 | # Newline should be added here
7 7 | def foo():
8 8 |     pass
9   |+import sys
9 10 | from pathlib import Path
10   |-import sys
11 11 |
12 12 | %matplotlib \
13 13 | --inline

isort.ipynb:cell 3:7:1: I001 [*] Import block is un-sorted or un-formatted
  |
5 | --inline
6 |
7 | / import math
8 | | import abc
  |
  = help: Organize imports

ℹ Fix
12 12 | %matplotlib \
13 13 | --inline
14 14 |
15   |+import abc
15 16 | import math
16   |-import abc

@@ -1,23 +0,0 @@
---
source: crates/ruff/src/jupyter/notebook.rs
---
ipy_escape_command.ipynb:cell 1:5:8: F401 [*] `os` imported but unused
  |
3 | %matplotlib inline
4 |
5 | import os
  |        ^^ F401
6 |
7 | _ = math.pi
  |
  = help: Remove unused import: `os`

ℹ Fix
2 2 |
3 3 | %matplotlib inline
4 4 |
5   |-import os
6 5 |
7 6 | _ = math.pi

@@ -1,72 +0,0 @@
---
source: crates/ruff/src/jupyter/notebook.rs
---
unused_variable.ipynb:cell 1:2:5: F841 [*] Local variable `foo1` is assigned to but never used
  |
1 | def f():
2 |     foo1 = %matplotlib --list
  |     ^^^^ F841
3 |     foo2: list[str] = %matplotlib --list
  |
  = help: Remove assignment to unused variable `foo1`

ℹ Suggested fix
1 1 | def f():
2   |-    foo1 = %matplotlib --list
  2 |+    %matplotlib --list
3 3 |     foo2: list[str] = %matplotlib --list
4 4 | def f():
5 5 |     bar1 = !pwd

unused_variable.ipynb:cell 1:3:5: F841 [*] Local variable `foo2` is assigned to but never used
  |
1 | def f():
2 |     foo1 = %matplotlib --list
3 |     foo2: list[str] = %matplotlib --list
  |     ^^^^ F841
  |
  = help: Remove assignment to unused variable `foo2`

ℹ Suggested fix
1 1 | def f():
2 2 |     foo1 = %matplotlib --list
3   |-    foo2: list[str] = %matplotlib --list
  3 |+    %matplotlib --list
4 4 | def f():
5 5 |     bar1 = !pwd
6 6 |     bar2: str = !pwd

unused_variable.ipynb:cell 2:2:5: F841 [*] Local variable `bar1` is assigned to but never used
  |
1 | def f():
2 |     bar1 = !pwd
  |     ^^^^ F841
3 |     bar2: str = !pwd
  |
  = help: Remove assignment to unused variable `bar1`

ℹ Suggested fix
2 2 |     foo1 = %matplotlib --list
3 3 |     foo2: list[str] = %matplotlib --list
4 4 | def f():
5   |-    bar1 = !pwd
  5 |+    !pwd
6 6 |     bar2: str = !pwd

unused_variable.ipynb:cell 2:3:5: F841 [*] Local variable `bar2` is assigned to but never used
  |
1 | def f():
2 |     bar1 = !pwd
3 |     bar2: str = !pwd
  |     ^^^^ F841
  |
  = help: Remove assignment to unused variable `bar2`

ℹ Suggested fix
3 3 |     foo2: list[str] = %matplotlib --list
4 4 | def f():
5 5 |     bar1 = !pwd
6   |-    bar2: str = !pwd
  6 |+    !pwd

@@ -1,44 +0,0 @@
//! This is the library for the [Ruff] Python linter.
//!
//! **The API is currently completely unstable**
//! and subject to change drastically.
//!
//! [Ruff]: https://github.com/astral-sh/ruff

pub use rule_selector::RuleSelector;
pub use rules::pycodestyle::rules::IOError;

pub const VERSION: &str = env!("CARGO_PKG_VERSION");

mod autofix;
mod checkers;
mod codes;
mod comments;
mod cst;
pub mod directives;
mod doc_lines;
mod docstrings;
pub mod flake8_to_ruff;
pub mod fs;
mod importer;
pub mod jupyter;
mod lex;
pub mod line_width;
pub mod linter;
pub mod logging;
pub mod message;
mod noqa;
pub mod packaging;
pub mod pyproject_toml;
pub mod registry;
mod renamer;
pub mod resolver;
mod rule_redirects;
mod rule_selector;
pub mod rules;
pub mod settings;
pub mod source_kind;
pub mod upstream_categories;

#[cfg(any(test, fuzzing))]
pub mod test;

@@ -1,166 +0,0 @@
use serde::{Deserialize, Serialize};
use std::num::NonZeroU8;
use unicode_width::UnicodeWidthChar;

use ruff_macros::CacheKey;

/// The length of a line of text that is considered too long.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, CacheKey)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct LineLength(usize);

impl Default for LineLength {
    /// The default line length.
    fn default() -> Self {
        Self(88)
    }
}

impl LineLength {
    pub const fn get(&self) -> usize {
        self.0
    }
}

impl From<usize> for LineLength {
    fn from(value: usize) -> Self {
        Self(value)
    }
}

/// A measure of the width of a line of text.
///
/// This is used to determine if a line is too long.
/// It should be compared to a [`LineLength`].
#[derive(Clone, Copy, Debug)]
pub struct LineWidth {
    /// The width of the line.
    width: usize,
    /// The column of the line.
    /// This is used to calculate the width of tabs.
    column: usize,
    /// The tab size to use when calculating the width of tabs.
    tab_size: TabSize,
}

impl Default for LineWidth {
    fn default() -> Self {
        Self::new(TabSize::default())
    }
}

impl PartialEq for LineWidth {
    fn eq(&self, other: &Self) -> bool {
        self.width == other.width
    }
}

impl Eq for LineWidth {}

impl PartialOrd for LineWidth {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for LineWidth {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.width.cmp(&other.width)
    }
}

impl LineWidth {
    pub fn get(&self) -> usize {
        self.width
    }

    /// Creates a new `LineWidth` with the given tab size.
    pub fn new(tab_size: TabSize) -> Self {
        LineWidth {
            width: 0,
            column: 0,
            tab_size,
        }
    }

    fn update(mut self, chars: impl Iterator<Item = char>) -> Self {
        let tab_size: usize = self.tab_size.as_usize();
        for c in chars {
            match c {
                '\t' => {
                    let tab_offset = tab_size - (self.column % tab_size);
                    self.width += tab_offset;
                    self.column += tab_offset;
                }
                '\n' | '\r' => {
                    self.width = 0;
                    self.column = 0;
                }
                _ => {
                    self.width += c.width().unwrap_or(0);
                    self.column += 1;
                }
            }
        }
        self
    }
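
    // Worked example of the tab rounding above (a sketch; `TabSize` defaults to 4):
    // a '\t' advances to the next multiple of the tab size, so at column 2 it adds
    // 4 - (2 % 4) = 2 columns, landing on the tab stop at column 4.
    //
    //     let width = LineWidth::new(TabSize::default())
    //         .add_str("ab\tc"); // 'a', 'b' -> width 2; '\t' -> +2 -> 4; 'c' -> 5
    //     assert_eq!(width.get(), 5);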

    /// Adds the given text to the line width.
    #[must_use]
    pub fn add_str(self, text: &str) -> Self {
        self.update(text.chars())
    }

    /// Adds the given character to the line width.
    #[must_use]
    pub fn add_char(self, c: char) -> Self {
        self.update(std::iter::once(c))
    }

    /// Adds the given width to the line width.
    /// Also adds the given width to the column.
    /// It is generally better to use [`LineWidth::add_str`] or [`LineWidth::add_char`].
    /// The width and column should be the same for the corresponding text.
    /// Currently, this is only used to add spaces.
    #[must_use]
    pub fn add_width(mut self, width: usize) -> Self {
        self.width += width;
        self.column += width;
        self
    }
}

impl PartialEq<LineLength> for LineWidth {
    fn eq(&self, other: &LineLength) -> bool {
        self.width == other.0
    }
}

impl PartialOrd<LineLength> for LineWidth {
    fn partial_cmp(&self, other: &LineLength) -> Option<std::cmp::Ordering> {
        self.width.partial_cmp(&other.0)
    }
}

/// The size of a tab.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize, CacheKey)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct TabSize(NonZeroU8);

impl TabSize {
    pub(crate) fn as_usize(self) -> usize {
        self.0.get() as usize
    }
}

impl Default for TabSize {
    fn default() -> Self {
        Self(NonZeroU8::new(4).unwrap())
    }
}

impl From<NonZeroU8> for TabSize {
    fn from(tab_size: NonZeroU8) -> Self {
        Self(tab_size)
    }
}

@@ -1,607 +0,0 @@
use std::borrow::Cow;
use std::ops::Deref;
use std::path::Path;

use anyhow::{anyhow, Result};
use colored::Colorize;
use itertools::Itertools;
use log::error;
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::{AsMode, ParseError};
use rustc_hash::FxHashMap;

use ruff_diagnostics::Diagnostic;
use ruff_python_ast::imports::ImportMap;
use ruff_python_ast::PySourceType;
use ruff_python_codegen::Stylist;
use ruff_python_index::Indexer;

use ruff_source_file::{Locator, SourceFileBuilder};

use crate::autofix::{fix_file, FixResult};
use crate::checkers::ast::check_ast;
use crate::checkers::filesystem::check_file_path;
use crate::checkers::imports::check_imports;
use crate::checkers::noqa::check_noqa;
use crate::checkers::physical_lines::check_physical_lines;
use crate::checkers::tokens::check_tokens;
use crate::directives::Directives;
use crate::doc_lines::{doc_lines_from_ast, doc_lines_from_tokens};
use crate::logging::DisplayParseError;
use crate::message::Message;
use crate::noqa::add_noqa;
use crate::registry::{AsRule, Rule};
use crate::rules::pycodestyle;
use crate::settings::{flags, Settings};
use crate::source_kind::SourceKind;
use crate::{directives, fs};

const CARGO_PKG_NAME: &str = env!("CARGO_PKG_NAME");
const CARGO_PKG_REPOSITORY: &str = env!("CARGO_PKG_REPOSITORY");

/// A [`Result`]-like type that returns both data and an error. Used to return
/// diagnostics even in the face of parse errors, since many diagnostics can be
/// generated without a full AST.
pub struct LinterResult<T> {
    pub data: T,
    pub error: Option<ParseError>,
}

impl<T> LinterResult<T> {
    const fn new(data: T, error: Option<ParseError>) -> Self {
        Self { data, error }
    }

    fn map<U, F: FnOnce(T) -> U>(self, f: F) -> LinterResult<U> {
        LinterResult::new(f(self.data), self.error)
    }
}

pub type FixTable = FxHashMap<Rule, usize>;

pub struct FixerResult<'a> {
    /// The result returned by the linter, after applying any fixes.
    pub result: LinterResult<(Vec<Message>, Option<ImportMap>)>,
    /// The resulting source code, after applying any fixes.
    pub transformed: Cow<'a, SourceKind>,
    /// The number of fixes applied for each [`Rule`].
    pub fixed: FixTable,
}

/// Generate `Diagnostic`s from the source code contents at the given `Path`.
#[allow(clippy::too_many_arguments)]
pub fn check_path(
    path: &Path,
    package: Option<&Path>,
    tokens: Vec<LexResult>,
    locator: &Locator,
    stylist: &Stylist,
    indexer: &Indexer,
    directives: &Directives,
    settings: &Settings,
    noqa: flags::Noqa,
    source_kind: Option<&SourceKind>,
    source_type: PySourceType,
) -> LinterResult<(Vec<Diagnostic>, Option<ImportMap>)> {
    // Aggregate all diagnostics.
    let mut diagnostics = vec![];
    let mut imports = None;
    let mut error = None;

    // Collect doc lines. This requires a rare mix of tokens (for comments) and AST
    // (for docstrings), which demands special-casing at this level.
    let use_doc_lines = settings.rules.enabled(Rule::DocLineTooLong);
    let mut doc_lines = vec![];
    if use_doc_lines {
        doc_lines.extend(doc_lines_from_tokens(&tokens));
    }

    // Run the token-based rules.
    if settings
        .rules
        .iter_enabled()
        .any(|rule_code| rule_code.lint_source().is_tokens())
    {
        diagnostics.extend(check_tokens(
            &tokens,
            path,
            locator,
            indexer,
            settings,
            source_type.is_stub(),
        ));
    }

    // Run the filesystem-based rules.
    if settings
        .rules
        .iter_enabled()
        .any(|rule_code| rule_code.lint_source().is_filesystem())
    {
        diagnostics.extend(check_file_path(path, package, settings));
    }

    // Run the logical line-based rules.
    if settings
        .rules
        .iter_enabled()
        .any(|rule_code| rule_code.lint_source().is_logical_lines())
    {
        diagnostics.extend(crate::checkers::logical_lines::check_logical_lines(
            &tokens, locator, stylist, settings,
        ));
    }

    // Run the AST-based rules.
    let use_ast = settings
        .rules
        .iter_enabled()
        .any(|rule_code| rule_code.lint_source().is_ast());
    let use_imports = !directives.isort.skip_file
        && settings
            .rules
            .iter_enabled()
            .any(|rule_code| rule_code.lint_source().is_imports());
    if use_ast || use_imports || use_doc_lines {
        match ruff_python_parser::parse_program_tokens(
            tokens,
            &path.to_string_lossy(),
            source_type.is_jupyter(),
        ) {
            Ok(python_ast) => {
                if use_ast {
                    diagnostics.extend(check_ast(
                        &python_ast,
                        locator,
                        stylist,
                        indexer,
                        &directives.noqa_line_for,
                        settings,
                        noqa,
                        path,
                        package,
                        source_type,
                    ));
                }
                if use_imports {
                    let (import_diagnostics, module_imports) = check_imports(
                        &python_ast,
                        locator,
                        indexer,
                        &directives.isort,
                        settings,
                        stylist,
                        path,
                        package,
                        source_kind,
                        source_type,
                    );
                    imports = module_imports;
                    diagnostics.extend(import_diagnostics);
                }
                if use_doc_lines {
                    doc_lines.extend(doc_lines_from_ast(&python_ast, locator));
                }
            }
            Err(parse_error) => {
                // Always add a diagnostic for the syntax error, regardless of whether
                // `Rule::SyntaxError` is enabled. We avoid propagating the syntax error
                // if it's disabled via any of the usual mechanisms (e.g., `noqa`,
                // `per-file-ignores`), and the easiest way to detect that suppression is
                // to see if the diagnostic persists to the end of the function.
                pycodestyle::rules::syntax_error(&mut diagnostics, &parse_error, locator);
                error = Some(parse_error);
            }
        }
    }

    // Deduplicate and reorder any doc lines.
    if use_doc_lines {
        doc_lines.sort_unstable();
        doc_lines.dedup();
    }

    // Run the lines-based rules.
    if settings
        .rules
        .iter_enabled()
        .any(|rule_code| rule_code.lint_source().is_physical_lines())
    {
        diagnostics.extend(check_physical_lines(
            locator, stylist, indexer, &doc_lines, settings,
        ));
    }

    // Ignore diagnostics based on per-file-ignores.
    if !diagnostics.is_empty() && !settings.per_file_ignores.is_empty() {
        let ignores = fs::ignores_from_path(path, &settings.per_file_ignores);
        if !ignores.is_empty() {
            diagnostics.retain(|diagnostic| !ignores.contains(diagnostic.kind.rule()));
        }
    };

    // Enforce `noqa` directives.
    if (noqa.into() && !diagnostics.is_empty())
        || settings
            .rules
            .iter_enabled()
            .any(|rule_code| rule_code.lint_source().is_noqa())
    {
        let ignored = check_noqa(
            &mut diagnostics,
            path,
            locator,
            indexer.comment_ranges(),
            &directives.noqa_line_for,
            error.is_none(),
            settings,
        );
        if noqa.into() {
            for index in ignored.iter().rev() {
                diagnostics.swap_remove(*index);
            }
        }
    }

    // If there was a syntax error, check if it should be discarded.
    if error.is_some() {
        // If the syntax error was removed by _any_ of the above disablement methods (e.g., a
        // `noqa` directive, or a `per-file-ignore`), discard it.
        if !diagnostics
            .iter()
            .any(|diagnostic| diagnostic.kind.rule() == Rule::SyntaxError)
        {
            error = None;
        }

        // If the syntax error _diagnostic_ is disabled, discard the _diagnostic_.
        if !settings.rules.enabled(Rule::SyntaxError) {
            diagnostics.retain(|diagnostic| diagnostic.kind.rule() != Rule::SyntaxError);
        }
    }

    LinterResult::new((diagnostics, imports), error)
}

const MAX_ITERATIONS: usize = 100;

/// Add any missing `# noqa` pragmas to the source code at the given `Path`.
pub fn add_noqa_to_path(path: &Path, package: Option<&Path>, settings: &Settings) -> Result<usize> {
    let source_type = PySourceType::from(path);

    // Read the file from disk.
    let contents = std::fs::read_to_string(path)?;

    // Tokenize once.
    let tokens: Vec<LexResult> = ruff_python_parser::tokenize(&contents, source_type.as_mode());

    // Map row and column locations to byte slices (lazily).
    let locator = Locator::new(&contents);

    // Detect the current code style (lazily).
    let stylist = Stylist::from_tokens(&tokens, &locator);

    // Extra indices from the code.
    let indexer = Indexer::from_tokens(&tokens, &locator);

    // Extract the `# noqa` and `# isort: skip` directives from the source.
    let directives = directives::extract_directives(
        &tokens,
        directives::Flags::from_settings(settings),
        &locator,
        &indexer,
    );

    // Generate diagnostics, ignoring any existing `noqa` directives.
    let LinterResult {
        data: diagnostics,
        error,
    } = check_path(
        path,
        package,
        tokens,
        &locator,
        &stylist,
        &indexer,
        &directives,
        settings,
        flags::Noqa::Disabled,
        None,
        source_type,
    );

    // Log any parse errors.
    if let Some(err) = error {
        // TODO(dhruvmanila): This should use `SourceKind`; update when
        // `--add-noqa` is supported for Jupyter notebooks.
        error!(
            "{}",
            DisplayParseError::new(err, locator.to_source_code(), None)
        );
    }

    // Add any missing `# noqa` pragmas.
    add_noqa(
        path,
        &diagnostics.0,
        &locator,
        indexer.comment_ranges(),
        &directives.noqa_line_for,
        stylist.line_ending(),
    )
}

/// Generate a [`Message`] for each [`Diagnostic`] triggered by the given source
/// code.
pub fn lint_only(
    path: &Path,
    package: Option<&Path>,
    settings: &Settings,
    noqa: flags::Noqa,
    source_kind: &SourceKind,
    source_type: PySourceType,
) -> LinterResult<(Vec<Message>, Option<ImportMap>)> {
    // Tokenize once.
    let tokens: Vec<LexResult> =
        ruff_python_parser::tokenize(source_kind.source_code(), source_type.as_mode());

    // Map row and column locations to byte slices (lazily).
    let locator = Locator::new(source_kind.source_code());

    // Detect the current code style (lazily).
    let stylist = Stylist::from_tokens(&tokens, &locator);

    // Extra indices from the code.
    let indexer = Indexer::from_tokens(&tokens, &locator);

    // Extract the `# noqa` and `# isort: skip` directives from the source.
    let directives = directives::extract_directives(
        &tokens,
        directives::Flags::from_settings(settings),
        &locator,
        &indexer,
    );

    // Generate diagnostics.
    let result = check_path(
        path,
        package,
        tokens,
        &locator,
        &stylist,
        &indexer,
        &directives,
        settings,
        noqa,
        Some(source_kind),
        source_type,
    );

    result.map(|(diagnostics, imports)| {
        (
            diagnostics_to_messages(diagnostics, path, &locator, &directives),
            imports,
        )
    })
}

/// Convert from diagnostics to messages.
fn diagnostics_to_messages(
    diagnostics: Vec<Diagnostic>,
    path: &Path,
    locator: &Locator,
    directives: &Directives,
) -> Vec<Message> {
    let file = once_cell::unsync::Lazy::new(|| {
        let mut builder =
            SourceFileBuilder::new(path.to_string_lossy().as_ref(), locator.contents());

        if let Some(line_index) = locator.line_index() {
            builder.set_line_index(line_index.clone());
        }

        builder.finish()
    });

    diagnostics
        .into_iter()
        .map(|diagnostic| {
            let noqa_offset = directives.noqa_line_for.resolve(diagnostic.start());
            Message::from_diagnostic(diagnostic, file.deref().clone(), noqa_offset)
        })
        .collect()
}

/// Generate `Diagnostic`s from source code content, iteratively autofixing
/// until stable.
pub fn lint_fix<'a>(
    path: &Path,
    package: Option<&Path>,
    noqa: flags::Noqa,
    settings: &Settings,
    source_kind: &'a SourceKind,
    source_type: PySourceType,
) -> Result<FixerResult<'a>> {
    let mut transformed = Cow::Borrowed(source_kind);

    // Track the number of fixed errors across iterations.
    let mut fixed = FxHashMap::default();

    // As an escape hatch, bail after 100 iterations.
    let mut iterations = 0;

    // Track whether the _initial_ source code was parseable.
    let mut parseable = false;

    // Continuously autofix until the source code stabilizes.
    loop {
        // Tokenize once.
        let tokens: Vec<LexResult> =
            ruff_python_parser::tokenize(transformed.source_code(), source_type.as_mode());

        // Map row and column locations to byte slices (lazily).
        let locator = Locator::new(transformed.source_code());

        // Detect the current code style (lazily).
        let stylist = Stylist::from_tokens(&tokens, &locator);

        // Extra indices from the code.
        let indexer = Indexer::from_tokens(&tokens, &locator);

        // Extract the `# noqa` and `# isort: skip` directives from the source.
        let directives = directives::extract_directives(
            &tokens,
            directives::Flags::from_settings(settings),
            &locator,
            &indexer,
        );

        // Generate diagnostics.
        let result = check_path(
            path,
            package,
            tokens,
            &locator,
            &stylist,
            &indexer,
            &directives,
            settings,
            noqa,
            Some(source_kind),
            source_type,
        );

        if iterations == 0 {
            parseable = result.error.is_none();
        } else {
            // If the source code was parseable on the first pass, but is no
            // longer parseable on a subsequent pass, then we've introduced a
            // syntax error. Return the original code.
            if parseable && result.error.is_some() {
                report_autofix_syntax_error(
                    path,
                    transformed.source_code(),
                    &result.error.unwrap(),
                    fixed.keys().copied(),
                );
                return Err(anyhow!("Autofix introduced a syntax error"));
            }
        }

        // Apply autofix.
        if let Some(FixResult {
            code: fixed_contents,
            fixes: applied,
            source_map,
        }) = fix_file(&result.data.0, &locator)
        {
            if iterations < MAX_ITERATIONS {
                // Count the number of fixed errors.
                for (rule, count) in applied {
                    *fixed.entry(rule).or_default() += count;
                }

                transformed = Cow::Owned(transformed.updated(fixed_contents, &source_map));

                // Increment the iteration count.
                iterations += 1;

                // Re-run the linter pass (by avoiding the break).
                continue;
            }

            report_failed_to_converge_error(path, transformed.source_code(), &result.data.0);
        }

        return Ok(FixerResult {
            result: result.map(|(diagnostics, imports)| {
                (
                    diagnostics_to_messages(diagnostics, path, &locator, &directives),
                    imports,
                )
            }),
            transformed,
            fixed,
        });
    }
}
|
||||
|
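Editor's note: `lint_fix` is a fixpoint iteration: lint, apply fixes, and re-lint until a pass produces no fixes or the iteration cap is hit. A minimal, self-contained sketch of the same pattern; the `lint` and `apply_fixes` closures here are hypothetical stand-ins, not ruff APIs:

/// A sketch of the iterate-until-stable pattern used by `lint_fix`.
fn fix_until_stable(
    mut source: String,
    lint: impl Fn(&str) -> Vec<(usize, String)>, // (offset, replacement)
    apply_fixes: impl Fn(&str, &[(usize, String)]) -> String,
    max_iterations: usize,
) -> String {
    for _ in 0..max_iterations {
        let fixes = lint(&source);
        if fixes.is_empty() {
            // Fixpoint reached: re-linting produced no new fixes.
            return source;
        }
        source = apply_fixes(&source, &fixes);
    }
    // Escape hatch, mirroring MAX_ITERATIONS above: stop even if not stable.
    source
}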
fn collect_rule_codes(rules: impl IntoIterator<Item = Rule>) -> String {
    rules
        .into_iter()
        .map(|rule| rule.noqa_code().to_string())
        .sorted_unstable()
        .dedup()
        .join(", ")
}

#[allow(clippy::print_stderr)]
fn report_failed_to_converge_error(path: &Path, transformed: &str, diagnostics: &[Diagnostic]) {
    let codes = collect_rule_codes(diagnostics.iter().map(|diagnostic| diagnostic.kind.rule()));
    if cfg!(debug_assertions) {
        eprintln!(
            "{}: Failed to converge after {} iterations in `{}` with rule codes {}:---\n{}\n---",
            "debug error".red().bold(),
            MAX_ITERATIONS,
            fs::relativize_path(path),
            codes,
            transformed,
        );
    } else {
        eprintln!(
            r#"
{}: Failed to converge after {} iterations.

This indicates a bug in `{}`. If you could open an issue at:

{}/issues/new?title=%5BInfinite%20loop%5D

...quoting the contents of `{}`, the rule codes {}, along with the `pyproject.toml` settings and executed command, we'd be very appreciative!
"#,
            "error".red().bold(),
            MAX_ITERATIONS,
            CARGO_PKG_NAME,
            CARGO_PKG_REPOSITORY,
            fs::relativize_path(path),
            codes
        );
    }
}

#[allow(clippy::print_stderr)]
fn report_autofix_syntax_error(
    path: &Path,
    transformed: &str,
    error: &ParseError,
    rules: impl IntoIterator<Item = Rule>,
) {
    let codes = collect_rule_codes(rules);
    if cfg!(debug_assertions) {
        eprintln!(
            "{}: Autofix introduced a syntax error in `{}` with rule codes {}: {}\n---\n{}\n---",
            "error".red().bold(),
            fs::relativize_path(path),
            codes,
            error,
            transformed,
        );
    } else {
        eprintln!(
            r#"
{}: Autofix introduced a syntax error. Reverting all changes.

This indicates a bug in `{}`. If you could open an issue at:

{}/issues/new?title=%5BAutofix%20error%5D

...quoting the contents of `{}`, the rule codes {}, along with the `pyproject.toml` settings and executed command, we'd be very appreciative!
"#,
            "error".red().bold(),
            CARGO_PKG_NAME,
            CARGO_PKG_REPOSITORY,
            fs::relativize_path(path),
            codes,
        );
    }
}
@@ -1,296 +0,0 @@
use std::fmt::{Display, Formatter, Write};
use std::path::Path;
use std::sync::Mutex;

use anyhow::Result;
use colored::Colorize;
use fern;
use log::Level;
use once_cell::sync::Lazy;
use ruff_python_parser::{ParseError, ParseErrorType};

use ruff_source_file::{OneIndexed, SourceCode, SourceLocation};

use crate::fs;
use crate::jupyter::Notebook;
use crate::source_kind::SourceKind;

pub(crate) static WARNINGS: Lazy<Mutex<Vec<&'static str>>> = Lazy::new(Mutex::default);

/// Warn a user once, with uniqueness determined by the given ID.
#[macro_export]
macro_rules! warn_user_once_by_id {
    ($id:expr, $($arg:tt)*) => {
        use colored::Colorize;
        use log::warn;

        if let Ok(mut states) = $crate::logging::WARNINGS.lock() {
            if !states.contains(&$id) {
                let message = format!("{}", format_args!($($arg)*));
                warn!("{}", message.bold());
                states.push($id);
            }
        }
    };
}

/// Warn a user once, with uniqueness determined by the calling location itself.
#[macro_export]
macro_rules! warn_user_once {
    ($($arg:tt)*) => {
        use colored::Colorize;
        use log::warn;

        static WARNED: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(false);
        if !WARNED.swap(true, std::sync::atomic::Ordering::SeqCst) {
            let message = format!("{}", format_args!($($arg)*));
            warn!("{}", message.bold());
        }
    };
}

#[macro_export]
macro_rules! warn_user {
    ($($arg:tt)*) => {{
        use colored::Colorize;
        use log::warn;

        let message = format!("{}", format_args!($($arg)*));
        warn!("{}", message.bold());
    }};
}
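Editor's note: the three macros differ only in how they deduplicate. `warn_user!` always warns; `warn_user_once!` fires at most once per call site (via a per-expansion `AtomicBool`); `warn_user_once_by_id!` fires at most once per ID process-wide (via the shared `WARNINGS` vector). A hedged usage sketch, assuming logging is initialized and the exported macros are in scope; each call sits in its own function because every expansion re-emits its `use` statements:

fn on_every_call(key: &'static str) {
    // Fires on every call.
    warn_user!("`{key}` is deprecated");
}

fn once_per_call_site(key: &'static str) {
    // Fires at most once for this call site, however often this runs.
    warn_user_once!("`{key}` is deprecated");
}

fn once_per_id(key: &'static str) {
    // Fires at most once per distinct ID, across all call sites.
    warn_user_once_by_id!(key, "`{key}` is deprecated");
}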
#[macro_export]
macro_rules! notify_user {
    ($($arg:tt)*) => {
        println!(
            "[{}] {}",
            chrono::Local::now()
                .format("%H:%M:%S %p")
                .to_string()
                .dimmed(),
            format_args!($($arg)*)
        )
    }
}

#[derive(Debug, Default, PartialOrd, Ord, PartialEq, Eq, Copy, Clone)]
pub enum LogLevel {
    /// No output ([`log::LevelFilter::Off`]).
    Silent,
    /// Only show lint violations, with no decorative output
    /// ([`log::LevelFilter::Off`]).
    Quiet,
    /// All user-facing output ([`log::LevelFilter::Info`]).
    #[default]
    Default,
    /// All user-facing output ([`log::LevelFilter::Debug`]).
    Verbose,
}

impl LogLevel {
    #[allow(clippy::trivially_copy_pass_by_ref)]
    const fn level_filter(&self) -> log::LevelFilter {
        match self {
            LogLevel::Default => log::LevelFilter::Info,
            LogLevel::Verbose => log::LevelFilter::Debug,
            LogLevel::Quiet => log::LevelFilter::Off,
            LogLevel::Silent => log::LevelFilter::Off,
        }
    }
}

pub fn set_up_logging(level: &LogLevel) -> Result<()> {
    fern::Dispatch::new()
        .format(|out, message, record| match record.level() {
            Level::Error => {
                out.finish(format_args!(
                    "{}{} {}",
                    "error".red().bold(),
                    ":".bold(),
                    message
                ));
            }
            Level::Warn => {
                out.finish(format_args!(
                    "{}{} {}",
                    "warning".yellow().bold(),
                    ":".bold(),
                    message
                ));
            }
            Level::Info | Level::Debug | Level::Trace => {
                out.finish(format_args!(
                    "{}[{}][{}] {}",
                    chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"),
                    record.target(),
                    record.level(),
                    message
                ));
            }
        })
        .level(level.level_filter())
        .level_for("globset", log::LevelFilter::Warn)
        .chain(std::io::stderr())
        .apply()?;
    Ok(())
}
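Editor's note: `set_up_logging` installs a global `fern` dispatcher, so it should be called exactly once at startup; `fern`'s `apply` sets the global logger and errors on a second call. A minimal sketch of the intended call pattern:

fn main() -> anyhow::Result<()> {
    // Initialize once, then the ordinary `log` macros route through fern.
    set_up_logging(&LogLevel::Default)?;
    log::info!("visible at Default and Verbose");
    log::debug!("visible only at Verbose");
    Ok(())
}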
pub struct DisplayParseError<'a> {
    error: ParseError,
    source_code: SourceCode<'a, 'a>,
    source_kind: Option<&'a SourceKind>,
}

impl<'a> DisplayParseError<'a> {
    pub fn new(
        error: ParseError,
        source_code: SourceCode<'a, 'a>,
        source_kind: Option<&'a SourceKind>,
    ) -> Self {
        Self {
            error,
            source_code,
            source_kind,
        }
    }
}

impl Display for DisplayParseError<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{header} {path}{colon}",
            header = "Failed to parse".bold(),
            path = fs::relativize_path(Path::new(&self.error.source_path)).bold(),
            colon = ":".cyan(),
        )?;

        let source_location = self.source_code.source_location(self.error.offset);

        // If we're working on a Jupyter notebook, translate the positions
        // with respect to the cell and row in the cell. This is the same
        // format as the `TextEmitter`.
        let error_location = if let Some(jupyter_index) = self
            .source_kind
            .and_then(SourceKind::notebook)
            .map(Notebook::index)
        {
            write!(
                f,
                "cell {cell}{colon}",
                cell = jupyter_index
                    .cell(source_location.row.get())
                    .unwrap_or_default(),
                colon = ":".cyan(),
            )?;

            SourceLocation {
                row: OneIndexed::new(
                    jupyter_index
                        .cell_row(source_location.row.get())
                        .unwrap_or(1) as usize,
                )
                .unwrap(),
                column: source_location.column,
            }
        } else {
            source_location
        };

        write!(
            f,
            "{row}{colon}{column}{colon} {inner}",
            row = error_location.row,
            column = error_location.column,
            colon = ":".cyan(),
            inner = &DisplayParseErrorType(&self.error.error)
        )
    }
}

pub(crate) struct DisplayParseErrorType<'a>(&'a ParseErrorType);

impl<'a> DisplayParseErrorType<'a> {
    pub(crate) fn new(error: &'a ParseErrorType) -> Self {
        Self(error)
    }
}

impl Display for DisplayParseErrorType<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self.0 {
            ParseErrorType::Eof => write!(f, "Expected token but reached end of file."),
            ParseErrorType::ExtraToken(ref tok) => write!(
                f,
                "Got extraneous token: {tok}",
                tok = TruncateAtNewline(&tok)
            ),
            ParseErrorType::InvalidToken => write!(f, "Got invalid token"),
            ParseErrorType::UnrecognizedToken(ref tok, ref expected) => {
                if let Some(expected) = expected.as_ref() {
                    write!(
                        f,
                        "Expected '{expected}', but got {tok}",
                        tok = TruncateAtNewline(&tok)
                    )
                } else {
                    write!(f, "Unexpected token {tok}", tok = TruncateAtNewline(&tok))
                }
            }
            ParseErrorType::Lexical(ref error) => write!(f, "{error}"),
        }
    }
}

/// Truncates the display text before the first newline character to avoid line breaks.
struct TruncateAtNewline<'a>(&'a dyn Display);

impl Display for TruncateAtNewline<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        struct TruncateAdapter<'a> {
            inner: &'a mut dyn Write,
            after_new_line: bool,
        }

        impl Write for TruncateAdapter<'_> {
            fn write_str(&mut self, s: &str) -> std::fmt::Result {
                if self.after_new_line {
                    Ok(())
                } else {
                    if let Some(end) = s.find(['\n', '\r']) {
                        self.inner.write_str(&s[..end])?;
                        self.inner.write_str("\u{23ce}...")?;
                        self.after_new_line = true;
                        Ok(())
                    } else {
                        self.inner.write_str(s)
                    }
                }
            }
        }

        write!(
            TruncateAdapter {
                inner: f,
                after_new_line: false,
            },
            "{}",
            self.0
        )
    }
}
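Editor's note: `TruncateAtNewline` is a small `Display` adapter: it intercepts `write_str` calls and, once a newline is seen, drops the rest of the output and appends a "⏎..." marker. A sketch of the adapter in action (module-internal, since the type is private):

fn demo() {
    let token = "line one\nline two";
    // Renders as "line one⏎..." — everything after the first newline is dropped.
    assert_eq!(
        format!("{}", TruncateAtNewline(&token)),
        "line one\u{23ce}..."
    );
}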
#[cfg(test)]
mod tests {
    use crate::logging::LogLevel;

    #[test]
    fn ordering() {
        assert!(LogLevel::Default > LogLevel::Silent);
        assert!(LogLevel::Default >= LogLevel::Default);
        assert!(LogLevel::Quiet > LogLevel::Silent);
        assert!(LogLevel::Verbose > LogLevel::Default);
        assert!(LogLevel::Verbose > LogLevel::Silent);
    }
}
@@ -1,238 +0,0 @@
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::io::Write;
use std::ops::Deref;

use rustc_hash::FxHashMap;

pub use azure::AzureEmitter;
pub use github::GithubEmitter;
pub use gitlab::GitlabEmitter;
pub use grouped::GroupedEmitter;
pub use json::JsonEmitter;
pub use json_lines::JsonLinesEmitter;
pub use junit::JunitEmitter;
pub use pylint::PylintEmitter;
use ruff_diagnostics::{Diagnostic, DiagnosticKind, Fix};
use ruff_source_file::{SourceFile, SourceLocation};
use ruff_text_size::{TextRange, TextSize};
pub use text::TextEmitter;

use crate::jupyter::Notebook;

mod azure;
mod diff;
mod github;
mod gitlab;
mod grouped;
mod json;
mod json_lines;
mod junit;
mod pylint;
mod text;

#[derive(Debug, PartialEq, Eq)]
pub struct Message {
    pub kind: DiagnosticKind,
    pub range: TextRange,
    pub fix: Option<Fix>,
    pub file: SourceFile,
    pub noqa_offset: TextSize,
}

impl Message {
    pub fn from_diagnostic(
        diagnostic: Diagnostic,
        file: SourceFile,
        noqa_offset: TextSize,
    ) -> Self {
        Self {
            range: diagnostic.range(),
            kind: diagnostic.kind,
            fix: diagnostic.fix,
            file,
            noqa_offset,
        }
    }

    pub fn filename(&self) -> &str {
        self.file.name()
    }

    pub fn compute_start_location(&self) -> SourceLocation {
        self.file.to_source_code().source_location(self.start())
    }

    pub fn compute_end_location(&self) -> SourceLocation {
        self.file.to_source_code().source_location(self.end())
    }

    pub const fn start(&self) -> TextSize {
        self.range.start()
    }

    pub const fn end(&self) -> TextSize {
        self.range.end()
    }
}

impl Ord for Message {
    fn cmp(&self, other: &Self) -> Ordering {
        (&self.file, self.start()).cmp(&(&other.file, other.start()))
    }
}

impl PartialOrd for Message {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

struct MessageWithLocation<'a> {
    message: &'a Message,
    start_location: SourceLocation,
}

impl Deref for MessageWithLocation<'_> {
    type Target = Message;

    fn deref(&self) -> &Self::Target {
        self.message
    }
}

fn group_messages_by_filename(messages: &[Message]) -> BTreeMap<&str, Vec<MessageWithLocation>> {
    let mut grouped_messages = BTreeMap::default();
    for message in messages {
        grouped_messages
            .entry(message.filename())
            .or_insert_with(Vec::new)
            .push(MessageWithLocation {
                message,
                start_location: message.compute_start_location(),
            });
    }
    grouped_messages
}
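Editor's note: the grouping helper builds a filename-sorted multimap in one pass via `BTreeMap::entry`. The same pattern in isolation, generic over any key (a restatement for illustration, not ruff code):

use std::collections::BTreeMap;

/// Group items by a derived key, keeping keys in sorted order.
fn group_by_key<T, K: Ord>(items: Vec<T>, key: impl Fn(&T) -> K) -> BTreeMap<K, Vec<T>> {
    let mut groups: BTreeMap<K, Vec<T>> = BTreeMap::new();
    for item in items {
        // `entry` creates the bucket on first sight of a key, then appends.
        groups.entry(key(&item)).or_insert_with(Vec::new).push(item);
    }
    groups
}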
/// Display format for [`Message`]s.
///
/// The emitter serializes a slice of [`Message`]s and writes them to a [`Write`].
pub trait Emitter {
    /// Serializes the `messages` and writes the output to `writer`.
    fn emit(
        &mut self,
        writer: &mut dyn Write,
        messages: &[Message],
        context: &EmitterContext,
    ) -> anyhow::Result<()>;
}

/// Context passed to [`Emitter`].
pub struct EmitterContext<'a> {
    notebooks: &'a FxHashMap<String, Notebook>,
}

impl<'a> EmitterContext<'a> {
    pub fn new(notebooks: &'a FxHashMap<String, Notebook>) -> Self {
        Self { notebooks }
    }

    /// Tests if the file with `name` is a Jupyter notebook.
    pub fn is_notebook(&self, name: &str) -> bool {
        self.notebooks.contains_key(name)
    }

    pub fn notebook(&self, name: &str) -> Option<&Notebook> {
        self.notebooks.get(name)
    }
}
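Editor's note: every output format (text, JSON, JUnit, ...) is just an implementation of the `Emitter` trait above. A minimal sketch of a custom emitter, one line per message; this is an illustration against the trait, not one of the shipped emitters, and it assumes the `Message` accessors defined earlier in this module:

struct PlainTextEmitter;

impl Emitter for PlainTextEmitter {
    fn emit(
        &mut self,
        writer: &mut dyn Write,
        messages: &[Message],
        _context: &EmitterContext,
    ) -> anyhow::Result<()> {
        for message in messages {
            // Resolve the byte offset to a 1-indexed row/column pair.
            let location = message.compute_start_location();
            writeln!(
                writer,
                "{}:{}:{}: {}",
                message.filename(),
                location.row,
                location.column,
                message.kind.body
            )?;
        }
        Ok(())
    }
}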
#[cfg(test)]
mod tests {
    use rustc_hash::FxHashMap;

    use ruff_diagnostics::{Diagnostic, DiagnosticKind, Edit, Fix};
    use ruff_source_file::SourceFileBuilder;
    use ruff_text_size::{TextRange, TextSize};

    use crate::message::{Emitter, EmitterContext, Message};

    pub(super) fn create_messages() -> Vec<Message> {
        let fib = r#"import os


def fibonacci(n):
    """Compute the nth number in the Fibonacci sequence."""
    x = 1
    if n == 0:
        return 0
    elif n == 1:
        return 1
    else:
        return fibonacci(n - 1) + fibonacci(n - 2)
"#;

        let unused_import = Diagnostic::new(
            DiagnosticKind {
                name: "UnusedImport".to_string(),
                body: "`os` imported but unused".to_string(),
                suggestion: Some("Remove unused import: `os`".to_string()),
            },
            TextRange::new(TextSize::from(7), TextSize::from(9)),
        )
        .with_fix(Fix::suggested(Edit::range_deletion(TextRange::new(
            TextSize::from(0),
            TextSize::from(10),
        ))));

        let fib_source = SourceFileBuilder::new("fib.py", fib).finish();

        let unused_variable = Diagnostic::new(
            DiagnosticKind {
                name: "UnusedVariable".to_string(),
                body: "Local variable `x` is assigned to but never used".to_string(),
                suggestion: Some("Remove assignment to unused variable `x`".to_string()),
            },
            TextRange::new(TextSize::from(94), TextSize::from(95)),
        )
        .with_fix(Fix::suggested(Edit::deletion(
            TextSize::from(94),
            TextSize::from(99),
        )));

        let file_2 = r#"if a == 1: pass"#;

        let undefined_name = Diagnostic::new(
            DiagnosticKind {
                name: "UndefinedName".to_string(),
                body: "Undefined name `a`".to_string(),
                suggestion: None,
            },
            TextRange::new(TextSize::from(3), TextSize::from(4)),
        );

        let file_2_source = SourceFileBuilder::new("undef.py", file_2).finish();

        let unused_import_start = unused_import.start();
        let unused_variable_start = unused_variable.start();
        let undefined_name_start = undefined_name.start();
        vec![
            Message::from_diagnostic(unused_import, fib_source.clone(), unused_import_start),
            Message::from_diagnostic(unused_variable, fib_source, unused_variable_start),
            Message::from_diagnostic(undefined_name, file_2_source, undefined_name_start),
        ]
    }

    pub(super) fn capture_emitter_output(
        emitter: &mut dyn Emitter,
        messages: &[Message],
    ) -> String {
        let source_kinds = FxHashMap::default();
        let context = EmitterContext::new(&source_kinds);
        let mut output: Vec<u8> = Vec::new();
        emitter.emit(&mut output, messages, &context).unwrap();

        String::from_utf8(output).expect("Output to be valid UTF-8")
    }
}
@@ -1,8 +0,0 @@
---
source: crates/ruff/src/message/azure.rs
expression: content
---
##vso[task.logissue type=error;sourcepath=fib.py;linenumber=1;columnnumber=8;code=F401;]`os` imported but unused
##vso[task.logissue type=error;sourcepath=fib.py;linenumber=6;columnnumber=5;code=F841;]Local variable `x` is assigned to but never used
##vso[task.logissue type=error;sourcepath=undef.py;linenumber=1;columnnumber=4;code=F821;]Undefined name `a`
@@ -1,8 +0,0 @@
---
source: crates/ruff/src/message/github.rs
expression: content
---
::error title=Ruff (F401),file=fib.py,line=1,col=8,endLine=1,endColumn=10::fib.py:1:8: F401 `os` imported but unused
::error title=Ruff (F841),file=fib.py,line=6,col=5,endLine=6,endColumn=6::fib.py:6:5: F841 Local variable `x` is assigned to but never used
::error title=Ruff (F821),file=undef.py,line=1,col=4,endLine=1,endColumn=5::undef.py:1:4: F821 Undefined name `a`
@@ -1,42 +0,0 @@
---
source: crates/ruff/src/message/gitlab.rs
expression: redact_fingerprint(&content)
---
[
  {
    "description": "(F401) `os` imported but unused",
    "fingerprint": "<redacted>",
    "location": {
      "lines": {
        "begin": 1,
        "end": 1
      },
      "path": "fib.py"
    },
    "severity": "major"
  },
  {
    "description": "(F841) Local variable `x` is assigned to but never used",
    "fingerprint": "<redacted>",
    "location": {
      "lines": {
        "begin": 6,
        "end": 6
      },
      "path": "fib.py"
    },
    "severity": "major"
  },
  {
    "description": "(F821) Undefined name `a`",
    "fingerprint": "<redacted>",
    "location": {
      "lines": {
        "begin": 1,
        "end": 1
      },
      "path": "undef.py"
    },
    "severity": "major"
  }
]
@@ -1,12 +0,0 @@
---
source: crates/ruff/src/message/grouped.rs
expression: content
---
fib.py:
  1:8 F401 `os` imported but unused
  6:5 F841 Local variable `x` is assigned to but never used

undef.py:
  1:4 F821 Undefined name `a`
@@ -1,31 +0,0 @@
---
source: crates/ruff/src/message/grouped.rs
expression: content
---
fib.py:
  1:8 F401 [*] `os` imported but unused
    |
  1 | import os
    |        ^^ F401
    |
    = help: Remove unused import: `os`

  6:5 F841 [*] Local variable `x` is assigned to but never used
    |
  4 | def fibonacci(n):
  5 |     """Compute the nth number in the Fibonacci sequence."""
  6 |     x = 1
    |     ^ F841
  7 |     if n == 0:
  8 |         return 0
    |
    = help: Remove assignment to unused variable `x`

undef.py:
  1:4 F821 Undefined name `a`
    |
  1 | if a == 1: pass
    |    ^ F821
    |
@@ -1,31 +0,0 @@
---
source: crates/ruff/src/message/grouped.rs
expression: content
---
fib.py:
  1:8 F401 `os` imported but unused
    |
  1 | import os
    |        ^^ F401
    |
    = help: Remove unused import: `os`

  6:5 F841 Local variable `x` is assigned to but never used
    |
  4 | def fibonacci(n):
  5 |     """Compute the nth number in the Fibonacci sequence."""
  6 |     x = 1
    |     ^ F841
  7 |     if n == 0:
  8 |         return 0
    |
    = help: Remove assignment to unused variable `x`

undef.py:
  1:4 F821 Undefined name `a`
    |
  1 | if a == 1: pass
    |    ^ F821
    |
@@ -1,86 +0,0 @@
---
source: crates/ruff/src/message/json.rs
expression: content
---
[
  {
    "code": "F401",
    "end_location": {
      "column": 10,
      "row": 1
    },
    "filename": "fib.py",
    "fix": {
      "applicability": "Suggested",
      "edits": [
        {
          "content": "",
          "end_location": {
            "column": 1,
            "row": 2
          },
          "location": {
            "column": 1,
            "row": 1
          }
        }
      ],
      "message": "Remove unused import: `os`"
    },
    "location": {
      "column": 8,
      "row": 1
    },
    "message": "`os` imported but unused",
    "noqa_row": 1,
    "url": "https://beta.ruff.rs/docs/rules/unused-import"
  },
  {
    "code": "F841",
    "end_location": {
      "column": 6,
      "row": 6
    },
    "filename": "fib.py",
    "fix": {
      "applicability": "Suggested",
      "edits": [
        {
          "content": "",
          "end_location": {
            "column": 10,
            "row": 6
          },
          "location": {
            "column": 5,
            "row": 6
          }
        }
      ],
      "message": "Remove assignment to unused variable `x`"
    },
    "location": {
      "column": 5,
      "row": 6
    },
    "message": "Local variable `x` is assigned to but never used",
    "noqa_row": 6,
    "url": "https://beta.ruff.rs/docs/rules/unused-variable"
  },
  {
    "code": "F821",
    "end_location": {
      "column": 5,
      "row": 1
    },
    "filename": "undef.py",
    "fix": null,
    "location": {
      "column": 4,
      "row": 1
    },
    "message": "Undefined name `a`",
    "noqa_row": 1,
    "url": "https://beta.ruff.rs/docs/rules/undefined-name"
  }
]
@@ -1,8 +0,0 @@
---
source: crates/ruff/src/message/json_lines.rs
expression: content
---
{"code":"F401","end_location":{"column":10,"row":1},"filename":"fib.py","fix":{"applicability":"Suggested","edits":[{"content":"","end_location":{"column":1,"row":2},"location":{"column":1,"row":1}}],"message":"Remove unused import: `os`"},"location":{"column":8,"row":1},"message":"`os` imported but unused","noqa_row":1,"url":"https://beta.ruff.rs/docs/rules/unused-import"}
{"code":"F841","end_location":{"column":6,"row":6},"filename":"fib.py","fix":{"applicability":"Suggested","edits":[{"content":"","end_location":{"column":10,"row":6},"location":{"column":5,"row":6}}],"message":"Remove assignment to unused variable `x`"},"location":{"column":5,"row":6},"message":"Local variable `x` is assigned to but never used","noqa_row":6,"url":"https://beta.ruff.rs/docs/rules/unused-variable"}
{"code":"F821","end_location":{"column":5,"row":1},"filename":"undef.py","fix":null,"location":{"column":4,"row":1},"message":"Undefined name `a`","noqa_row":1,"url":"https://beta.ruff.rs/docs/rules/undefined-name"}
@@ -1,21 +0,0 @@
---
source: crates/ruff/src/message/junit.rs
expression: content
---
<?xml version="1.0" encoding="UTF-8"?>
<testsuites name="ruff" tests="3" failures="3" errors="0">
  <testsuite name="fib.py" tests="2" disabled="0" errors="0" failures="2" package="org.ruff">
    <testcase name="org.ruff.F401" classname="fib" line="1" column="8">
      <failure message="`os` imported but unused">line 1, col 8, `os` imported but unused</failure>
    </testcase>
    <testcase name="org.ruff.F841" classname="fib" line="6" column="5">
      <failure message="Local variable `x` is assigned to but never used">line 6, col 5, Local variable `x` is assigned to but never used</failure>
    </testcase>
  </testsuite>
  <testsuite name="undef.py" tests="1" disabled="0" errors="0" failures="1" package="org.ruff">
    <testcase name="org.ruff.F821" classname="undef" line="1" column="4">
      <failure message="Undefined name `a`">line 1, col 4, Undefined name `a`</failure>
    </testcase>
  </testsuite>
</testsuites>
@@ -1,8 +0,0 @@
---
source: crates/ruff/src/message/pylint.rs
expression: content
---
fib.py:1: [F401] `os` imported but unused
fib.py:6: [F841] Local variable `x` is assigned to but never used
undef.py:1: [F821] Undefined name `a`
@@ -1,29 +0,0 @@
---
source: crates/ruff/src/message/text.rs
expression: content
---
fib.py:1:8: F401 `os` imported but unused
  |
1 | import os
  |        ^^ F401
  |
  = help: Remove unused import: `os`

fib.py:6:5: F841 Local variable `x` is assigned to but never used
  |
4 | def fibonacci(n):
5 |     """Compute the nth number in the Fibonacci sequence."""
6 |     x = 1
  |     ^ F841
7 |     if n == 0:
8 |         return 0
  |
  = help: Remove assignment to unused variable `x`

undef.py:1:4: F821 Undefined name `a`
  |
1 | if a == 1: pass
  |    ^ F821
  |
@@ -1,29 +0,0 @@
---
source: crates/ruff/src/message/text.rs
expression: content
---
fib.py:1:8: F401 [*] `os` imported but unused
  |
1 | import os
  |        ^^ F401
  |
  = help: Remove unused import: `os`

fib.py:6:5: F841 [*] Local variable `x` is assigned to but never used
  |
4 | def fibonacci(n):
5 |     """Compute the nth number in the Fibonacci sequence."""
6 |     x = 1
  |     ^ F841
7 |     if n == 0:
8 |         return 0
  |
  = help: Remove assignment to unused variable `x`

undef.py:1:4: F821 Undefined name `a`
  |
1 | if a == 1: pass
  |    ^ F821
  |
File diff suppressed because it is too large
@@ -1,153 +0,0 @@
//! Detect Python package roots and file associations.

use std::path::{Path, PathBuf};

use rustc_hash::FxHashMap;

use crate::resolver::{PyprojectConfig, Resolver};

// If we have a Python package layout like:
// - root/
//   - foo/
//     - __init__.py
//     - bar.py
//     - baz/
//       - __init__.py
//       - qux.py
//
// Then today, if you run with defaults (`src = ["."]`) from `root`, we'll
// detect that `foo.bar`, `foo.baz`, and `foo.baz.qux` are first-party modules
// (since, if you're in `root`, you can see `foo`).
//
// However, we'd also like it to be the case that, even if you run this command
// from `foo`, we still consider `foo.baz.qux` to be first-party when linting
// `foo/bar.py`. More specifically, for each Python file, we should find the
// root of the current package.
//
// Thus, for each file, we iterate up its ancestors, returning the last
// directory containing an `__init__.py`.

/// Return `true` if the directory at the given `Path` appears to be a Python
/// package.
pub fn is_package(path: &Path, namespace_packages: &[PathBuf]) -> bool {
    path.join("__init__.py").is_file()
        || namespace_packages
            .iter()
            .any(|namespace_package| namespace_package == path)
}

/// Return the package root for the given Python file.
pub fn detect_package_root<'a>(
    path: &'a Path,
    namespace_packages: &'a [PathBuf],
) -> Option<&'a Path> {
    let mut current = None;
    for parent in path.ancestors() {
        if !is_package(parent, namespace_packages) {
            return current;
        }
        current = Some(parent);
    }
    current
}
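Editor's note: given the layout in the comment above, `detect_package_root` walks upward and stops at the last ancestor that still looks like a package. A sketch under the assumption that the example directories exist on disk (the paths here are hypothetical):

use std::path::Path;

fn example() {
    // With root/foo/__init__.py and root/foo/baz/__init__.py on disk,
    // the package root of `root/foo/baz` is `root/foo`, since `root`
    // itself has no `__init__.py`.
    let root = detect_package_root(Path::new("root/foo/baz"), &[]);
    assert_eq!(root, Some(Path::new("root/foo")));
}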
/// A wrapper around `is_package` to cache filesystem lookups.
fn is_package_with_cache<'a>(
    path: &'a Path,
    namespace_packages: &'a [PathBuf],
    package_cache: &mut FxHashMap<&'a Path, bool>,
) -> bool {
    *package_cache
        .entry(path)
        .or_insert_with(|| is_package(path, namespace_packages))
}
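Editor's note: the cache wrapper is a one-expression memoization via the hash-map entry API: the filesystem check runs only on a cache miss. The same pattern generically (a sketch, not ruff code):

use std::collections::HashMap;

/// Memoize an expensive predicate over keys, computing each key at most once.
fn cached<K: std::hash::Hash + Eq + Copy>(
    key: K,
    cache: &mut HashMap<K, bool>,
    compute: impl FnOnce(K) -> bool,
) -> bool {
    // `or_insert_with` only invokes `compute` when `key` is absent.
    *cache.entry(key).or_insert_with(|| compute(key))
}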
/// A wrapper around `detect_package_root` to cache filesystem lookups.
fn detect_package_root_with_cache<'a>(
    path: &'a Path,
    namespace_packages: &'a [PathBuf],
    package_cache: &mut FxHashMap<&'a Path, bool>,
) -> Option<&'a Path> {
    let mut current = None;
    for parent in path.ancestors() {
        if !is_package_with_cache(parent, namespace_packages, package_cache) {
            return current;
        }
        current = Some(parent);
    }
    current
}

/// Return a mapping from Python package to its package root.
pub fn detect_package_roots<'a>(
    files: &[&'a Path],
    resolver: &'a Resolver,
    pyproject_config: &'a PyprojectConfig,
) -> FxHashMap<&'a Path, Option<&'a Path>> {
    // Pre-populate the module cache, since the list of files could (but isn't
    // required to) contain some `__init__.py` files.
    let mut package_cache: FxHashMap<&Path, bool> = FxHashMap::default();
    for file in files {
        if file.ends_with("__init__.py") {
            if let Some(parent) = file.parent() {
                package_cache.insert(parent, true);
            }
        }
    }

    // Search for the package root for each file.
    let mut package_roots: FxHashMap<&Path, Option<&Path>> = FxHashMap::default();
    for file in files {
        let namespace_packages = &resolver.resolve(file, pyproject_config).namespace_packages;
        if let Some(package) = file.parent() {
            if package_roots.contains_key(package) {
                continue;
            }
            package_roots.insert(
                package,
                detect_package_root_with_cache(package, namespace_packages, &mut package_cache),
            );
        }
    }

    package_roots
}

#[cfg(test)]
mod tests {
    use std::path::PathBuf;

    use crate::packaging::detect_package_root;
    use crate::test::test_resource_path;

    #[test]
    fn package_detection() {
        assert_eq!(
            detect_package_root(&test_resource_path("package/src/package"), &[]),
            Some(test_resource_path("package/src/package").as_path())
        );

        assert_eq!(
            detect_package_root(&test_resource_path("project/python_modules/core/core"), &[]),
            Some(test_resource_path("project/python_modules/core/core").as_path())
        );

        assert_eq!(
            detect_package_root(
                &test_resource_path("project/examples/docs/docs/concepts"),
                &[],
            ),
            Some(test_resource_path("project/examples/docs/docs").as_path())
        );

        assert_eq!(
            detect_package_root(
                PathBuf::from(env!("CARGO_MANIFEST_DIR"))
                    .join("setup.py")
                    .as_path(),
                &[],
            ),
            None,
        );
    }
}
@@ -1,379 +0,0 @@
use std::str::FromStr;

use serde::de::{self, Visitor};
use serde::{Deserialize, Serialize};
use strum::IntoEnumIterator;
use strum_macros::EnumIter;

use crate::codes::RuleCodePrefix;
use crate::codes::RuleIter;
use crate::registry::{Linter, Rule, RuleNamespace};
use crate::rule_redirects::get_redirect;

#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum RuleSelector {
    /// Select all stable rules.
    All,
    /// Select all nursery rules.
    Nursery,
    /// Legacy category to select both the `mccabe` and `flake8-comprehensions` linters
    /// via a single selector.
    C,
    /// Legacy category to select both the `flake8-debugger` and `flake8-print` linters
    /// via a single selector.
    T,
    /// Select all rules for a given linter.
    Linter(Linter),
    /// Select all rules for a given linter with a given prefix.
    Prefix {
        prefix: RuleCodePrefix,
        redirected_from: Option<&'static str>,
    },
}

impl From<Linter> for RuleSelector {
    fn from(linter: Linter) -> Self {
        Self::Linter(linter)
    }
}

impl FromStr for RuleSelector {
    type Err = ParseError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "ALL" => Ok(Self::All),
            "NURSERY" => Ok(Self::Nursery),
            "C" => Ok(Self::C),
            "T" => Ok(Self::T),
            _ => {
                let (s, redirected_from) = match get_redirect(s) {
                    Some((from, target)) => (target, Some(from)),
                    None => (s, None),
                };

                let (linter, code) =
                    Linter::parse_code(s).ok_or_else(|| ParseError::Unknown(s.to_string()))?;

                if code.is_empty() {
                    return Ok(Self::Linter(linter));
                }

                Ok(Self::Prefix {
                    prefix: RuleCodePrefix::parse(&linter, code)
                        .map_err(|_| ParseError::Unknown(s.to_string()))?,
                    redirected_from,
                })
            }
        }
    }
}
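Editor's note: parsing first resolves redirects, then splits a selector like `E501` into a linter prefix and a code; an empty code selects the whole linter. A hedged usage sketch (the `"E"` prefix is assumed to map to a registered linter; exact prefixes depend on the registry):

use std::str::FromStr;

fn example() {
    // The special selectors parse to their dedicated variants.
    assert!(matches!(RuleSelector::from_str("ALL"), Ok(RuleSelector::All)));
    // A bare linter prefix selects every rule in that linter...
    assert!(matches!(RuleSelector::from_str("E"), Ok(RuleSelector::Linter(_))));
    // ...and an unknown code is a parse error.
    assert!(RuleSelector::from_str("ZZZ999").is_err());
}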
#[derive(Debug, thiserror::Error)]
pub enum ParseError {
    #[error("Unknown rule selector: `{0}`")]
    // TODO(martin): tell the user how to discover rule codes via the CLI once such a command is
    // implemented (but that should of course be done only in ruff_cli and not here)
    Unknown(String),
}

impl RuleSelector {
    pub fn prefix_and_code(&self) -> (&'static str, &'static str) {
        match self {
            RuleSelector::All => ("", "ALL"),
            RuleSelector::Nursery => ("", "NURSERY"),
            RuleSelector::C => ("", "C"),
            RuleSelector::T => ("", "T"),
            RuleSelector::Prefix { prefix, .. } => {
                (prefix.linter().common_prefix(), prefix.short_code())
            }
            RuleSelector::Linter(l) => (l.common_prefix(), ""),
        }
    }
}

impl Serialize for RuleSelector {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let (prefix, code) = self.prefix_and_code();
        serializer.serialize_str(&format!("{prefix}{code}"))
    }
}

impl<'de> Deserialize<'de> for RuleSelector {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        // We are not simply doing:
        //   let s: &str = Deserialize::deserialize(deserializer)?;
        //   FromStr::from_str(s).map_err(de::Error::custom)
        // here because the toml crate apparently doesn't support that
        // (as of toml v0.6.0 running `cargo test` failed with the above two lines)
        deserializer.deserialize_str(SelectorVisitor)
    }
}
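Editor's note: because of the toml limitation noted in the comment, deserialization routes through a visitor rather than borrowing a `&str` first. In practice this makes selectors usable directly inside config structs; a sketch assuming the `serde` and `toml` crates and a hypothetical `Config` struct:

use serde::Deserialize;

#[derive(Deserialize)]
struct Config {
    select: Vec<RuleSelector>,
}

fn example() -> Result<(), toml::de::Error> {
    // Each string in the array is parsed via the visitor above.
    let config: Config = toml::from_str(r#"select = ["ALL"]"#)?;
    assert_eq!(config.select.len(), 1);
    Ok(())
}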
struct SelectorVisitor;

impl Visitor<'_> for SelectorVisitor {
    type Value = RuleSelector;

    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str(
            "expected a string code identifying a linter or specific rule, or a partial rule code or ALL to refer to all rules",
        )
    }

    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        FromStr::from_str(v).map_err(de::Error::custom)
    }
}

impl From<RuleCodePrefix> for RuleSelector {
    fn from(prefix: RuleCodePrefix) -> Self {
        Self::Prefix {
            prefix,
            redirected_from: None,
        }
    }
}

impl IntoIterator for &RuleSelector {
    type IntoIter = RuleSelectorIter;
    type Item = Rule;

    fn into_iter(self) -> Self::IntoIter {
        match self {
            RuleSelector::All => {
                RuleSelectorIter::All(Rule::iter().filter(|rule| !rule.is_nursery()))
            }
            RuleSelector::Nursery => {
                RuleSelectorIter::Nursery(Rule::iter().filter(Rule::is_nursery))
            }
            RuleSelector::C => RuleSelectorIter::Chain(
                Linter::Flake8Comprehensions
                    .rules()
                    .chain(Linter::McCabe.rules()),
            ),
            RuleSelector::T => RuleSelectorIter::Chain(
                Linter::Flake8Debugger
                    .rules()
                    .chain(Linter::Flake8Print.rules()),
            ),
            RuleSelector::Linter(linter) => RuleSelectorIter::Vec(linter.rules()),
            RuleSelector::Prefix { prefix, .. } => RuleSelectorIter::Vec(prefix.clone().rules()),
        }
    }
}

pub enum RuleSelectorIter {
    All(std::iter::Filter<RuleIter, fn(&Rule) -> bool>),
    Nursery(std::iter::Filter<RuleIter, fn(&Rule) -> bool>),
    Chain(std::iter::Chain<std::vec::IntoIter<Rule>, std::vec::IntoIter<Rule>>),
    Vec(std::vec::IntoIter<Rule>),
}

impl Iterator for RuleSelectorIter {
    type Item = Rule;

    fn next(&mut self) -> Option<Self::Item> {
        match self {
            RuleSelectorIter::All(iter) => iter.next(),
            RuleSelectorIter::Nursery(iter) => iter.next(),
            RuleSelectorIter::Chain(iter) => iter.next(),
            RuleSelectorIter::Vec(iter) => iter.next(),
        }
    }
}

/// A const alternative to the `impl From<RuleCodePrefix> for RuleSelector`
/// to let us keep the fields of [`RuleSelector`] private.
// Note that Rust doesn't yet support `impl const From<RuleCodePrefix> for
// RuleSelector` (see https://github.com/rust-lang/rust/issues/67792).
// TODO(martin): Remove once RuleSelector is an enum with Linter & Rule variants
pub(crate) const fn prefix_to_selector(prefix: RuleCodePrefix) -> RuleSelector {
    RuleSelector::Prefix {
        prefix,
        redirected_from: None,
    }
}

#[cfg(feature = "schemars")]
mod schema {
    use itertools::Itertools;
    use schemars::JsonSchema;
    use schemars::_serde_json::Value;
    use schemars::schema::{InstanceType, Schema, SchemaObject};
    use strum::IntoEnumIterator;

    use crate::registry::RuleNamespace;
    use crate::rule_selector::{Linter, RuleCodePrefix};
    use crate::RuleSelector;

    impl JsonSchema for RuleSelector {
        fn schema_name() -> String {
            "RuleSelector".to_string()
        }

        fn json_schema(_gen: &mut schemars::gen::SchemaGenerator) -> Schema {
            Schema::Object(SchemaObject {
                instance_type: Some(InstanceType::String.into()),
                enum_values: Some(
                    [
                        // Include the non-standard "ALL" selector.
                        "ALL".to_string(),
                        // Include the legacy "C" and "T" selectors.
                        "C".to_string(),
                        "T".to_string(),
                        // Include some common redirect targets for those legacy selectors.
                        "C9".to_string(),
                        "T1".to_string(),
                        "T2".to_string(),
                    ]
                    .into_iter()
                    .chain(
                        RuleCodePrefix::iter()
                            .map(|p| {
                                let prefix = p.linter().common_prefix();
                                let code = p.short_code();
                                format!("{prefix}{code}")
                            })
                            .chain(Linter::iter().filter_map(|l| {
                                let prefix = l.common_prefix();
                                (!prefix.is_empty()).then(|| prefix.to_string())
                            })),
                    )
                    // Filter out the rule gated behind `#[cfg(feature = "unreachable-code")]`,
                    // which is off-by-default.
                    .filter(|prefix| prefix != "RUF014")
                    .sorted()
                    .map(Value::String)
                    .collect(),
                ),
                ..SchemaObject::default()
            })
        }
    }
}

impl RuleSelector {
    pub(crate) fn specificity(&self) -> Specificity {
        match self {
            RuleSelector::All => Specificity::All,
            RuleSelector::Nursery => Specificity::All,
            RuleSelector::T => Specificity::LinterGroup,
            RuleSelector::C => Specificity::LinterGroup,
            RuleSelector::Linter(..) => Specificity::Linter,
            RuleSelector::Prefix { prefix, .. } => {
                let prefix: &'static str = prefix.short_code();
                match prefix.len() {
                    1 => Specificity::Code1Char,
                    2 => Specificity::Code2Chars,
                    3 => Specificity::Code3Chars,
                    4 => Specificity::Code4Chars,
                    5 => Specificity::Code5Chars,
                    _ => panic!("RuleSelector::specificity doesn't yet support codes with so many characters"),
                }
            }
        }
    }
}

#[derive(EnumIter, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
pub(crate) enum Specificity {
    All,
    LinterGroup,
    Linter,
    Code1Char,
    Code2Chars,
    Code3Chars,
    Code4Chars,
    Code5Chars,
}

#[cfg(feature = "clap")]
mod clap_completion {
    use clap::builder::{PossibleValue, TypedValueParser, ValueParserFactory};
    use strum::IntoEnumIterator;

    use crate::{
        codes::RuleCodePrefix,
        registry::{Linter, RuleNamespace},
        RuleSelector,
    };

    #[derive(Clone)]
    pub struct RuleSelectorParser;

    impl ValueParserFactory for RuleSelector {
        type Parser = RuleSelectorParser;

        fn value_parser() -> Self::Parser {
            RuleSelectorParser
        }
    }

    impl TypedValueParser for RuleSelectorParser {
        type Value = RuleSelector;

        fn parse_ref(
            &self,
            _cmd: &clap::Command,
            _arg: Option<&clap::Arg>,
            value: &std::ffi::OsStr,
        ) -> Result<Self::Value, clap::Error> {
            let value = value
                .to_str()
                .ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?;

            value
                .parse()
                .map_err(|e| clap::Error::raw(clap::error::ErrorKind::InvalidValue, e))
        }

        fn possible_values(&self) -> Option<Box<dyn Iterator<Item = PossibleValue> + '_>> {
            Some(Box::new(
                std::iter::once(PossibleValue::new("ALL").help("all rules")).chain(
                    Linter::iter()
                        .filter_map(|l| {
                            let prefix = l.common_prefix();
                            (!prefix.is_empty()).then(|| PossibleValue::new(prefix).help(l.name()))
                        })
                        .chain(
                            RuleCodePrefix::iter()
                                // Filter out the rule gated behind `#[cfg(feature = "unreachable-code")]`,
                                // which is off-by-default.
                                .filter(|p| {
                                    format!("{}{}", p.linter().common_prefix(), p.short_code())
                                        != "RUF014"
                                })
                                .map(|p| {
                                    let prefix = p.linter().common_prefix();
                                    let code = p.short_code();

                                    let mut rules_iter = p.rules();
                                    let rule1 = rules_iter.next();
                                    let rule2 = rules_iter.next();

                                    let value = PossibleValue::new(format!("{prefix}{code}"));

                                    if rule2.is_none() {
                                        let rule1 = rule1.unwrap();
                                        let name: &'static str = rule1.into();
                                        value.help(name)
                                    } else {
                                        value
                                    }
                                }),
                        ),
                ),
            ))
        }
    }
}
@@ -1,25 +0,0 @@
//! Airflow-specific rules.
pub(crate) mod rules;

#[cfg(test)]
mod tests {
    use std::path::Path;

    use anyhow::Result;
    use test_case::test_case;

    use crate::registry::Rule;
    use crate::test::test_path;
    use crate::{assert_messages, settings};

    #[test_case(Rule::AirflowVariableNameTaskIdMismatch, Path::new("AIR001.py"))]
    fn rules(rule_code: Rule, path: &Path) -> Result<()> {
        let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
        let diagnostics = test_path(
            Path::new("airflow").join(path).as_path(),
            &settings::Settings::for_rule(rule_code),
        )?;
        assert_messages!(snapshot, diagnostics);
        Ok(())
    }
}
Some files were not shown because too many files have changed in this diff