Compare commits
318 Commits
v0.3.4...red-knot-f
| Author | SHA1 | Date |
|---|---|---|
| | b93d3e6f21 | |
| | 523235d6ea | |
| | 414990c022 | |
| | 4779dd1173 | |
| | c5adbf17da | |
| | c6dcf3502b | |
| | 1e585b8667 | |
| | 21d824abfd | |
| | 7e28c80354 | |
| | bc03d376e8 | |
| | eb6f562419 | |
| | 5561d445d7 | |
| | c391c8b6cb | |
| | ce030a467f | |
| | 04a922866a | |
| | 0ed7af35ec | |
| | 87929ad5f1 | |
| | 8a887daeb4 | |
| | 7317d734be | |
| | c1a2a60182 | |
| | 8e056b3a93 | |
| | 616dd1873f | |
| | acfb1a83c9 | |
| | 7c0e32f255 | |
| | 4b84c55e3a | |
| | 4c8d33ec45 | |
| | 113e259e6d | |
| | 3474e37836 | |
| | dfe90a3b2b | |
| | 00d7c01cfc | |
| | 983a06cec3 | |
| | 47692027bf | |
| | ec3243a6e5 | |
| | 2d6978f236 | |
| | 2490d2d4af | |
| | 59b73fabc1 | |
| | 61c97a037c | |
| | 7cd065e4a2 | |
| | 845ba7cf5f | |
| | 5994414739 | |
| | 632965d0fa | |
| | 77a72ecd38 | |
| | 16a1f3cbcc | |
| | cd3e319538 | |
| | 45725d3275 | |
| | bbca8eb388 | |
| | c8c227dd5d | |
| | b15e9e6e05 | |
| | 22d4f11348 | |
| | 269014a539 | |
| | dc09f529bc | |
| | 3364ef957d | |
| | 77c93fd63c | |
| | 1c9f5e3001 | |
| | 263a0d25ed | |
| | 4738e19974 | |
| | f428bd5052 | |
| | 4690890e9f | |
| | 19baabba58 | |
| | cee38f39df | |
| | e3fde28146 | |
| | 1c8849f9a8 | |
| | 37af6e6147 | |
| | 92814fd99b | |
| | c9c2e7b978 | |
| | 51dec8d95b | |
| | 7c8c1c71a3 | |
| | 455d22cdc8 | |
| | 35ca887e02 | |
| | f5c7a62aa6 | |
| | 38d2562f41 | |
| | 7eba967e16 | |
| | 5b81b8368d | |
| | c30735d4a7 | |
| | 62478c3070 | |
| | 111bbc61f6 | |
| | 925c7f8dd3 | |
| | 5b4c8a7c5f | |
| | 647548b5e7 | |
| | a9919707d4 | |
| | 5dcb1d9e8c | |
| | 0b92f450ca | |
| | 54d42957b0 | |
| | d3ed0ad68c | |
| | 01678a990c | |
| | 45692ce89f | |
| | 4246111f67 | |
| | 9924bd774a | |
| | fc7f07bca7 | |
| | bd6ca3d586 | |
| | 23f8e1c3c8 | |
| | a68938897d | |
| | d544199272 | |
| | c80b9a4a90 | |
| | 99f7f94538 | |
| | 7b3c92a979 | |
| | fdbcb62adc | |
| | 0ff25a540c | |
| | 34873ec009 | |
| | d3cd61f804 | |
| | 9b80cc09ee | |
| | 9bb23b0a38 | |
| | 06c248a126 | |
| | 27902b7130 | |
| | 97acf1d59b | |
| | adf63d9013 | |
| | 5d3c9f2637 | |
| | 33529c049e | |
| | e751b4ea82 | |
| | 25a9131109 | |
| | b7066e64e7 | |
| | 6c4d779140 | |
| | 8020d486f6 | |
| | 13ffb5bc19 | |
| | e09180b1df | |
| | 2cc487eb22 | |
| | 5da7299b32 | |
| | 4d8890eef5 | |
| | 9f01ac3f87 | |
| | b23414e3cc | |
| | 1480d72643 | |
| | 06b3e376ac | |
| | e8b1125b30 | |
| | 16cc9bd78d | |
| | 0a6327418d | |
| | 518b29a9ef | |
| | caae8d2c68 | |
| | 2971655b28 | |
| | f48a794125 | |
| | 2882604451 | |
| | eab3c4e334 | |
| | cffc55576f | |
| | 4284e079b5 | |
| | 65edbfe62f | |
| | 45db695c47 | |
| | 1801798e85 | |
| | d4e140d47f | |
| | f779babc5f | |
| | effd5188c9 | |
| | 6dccbd2b58 | |
| | 0c8ba32819 | |
| | 4ac523c19d | |
| | f9214f95bb | |
| | 49d9ad4c7e | |
| | c2210359e7 | |
| | 670d66f54c | |
| | cbd500141f | |
| | b62aeb39d2 | |
| | cdbd754870 | |
| | 91efca1837 | |
| | 09ae2341e9 | |
| | f07af6fb63 | |
| | b11d17f65c | |
| | b4c7c55ddd | |
| | 0f01713257 | |
| | ed9a92d915 | |
| | 6da4ea6116 | |
| | f9a828f493 | |
| | 812b0976a9 | |
| | b356c4376c | |
| | 85ca5b7eed | |
| | c2421068bc | |
| | e9870fe468 | |
| | a013050c11 | |
| | 2e37cf6b3b | |
| | a9e4393008 | |
| | 312f43475f | |
| | 563daa8a86 | |
| | 7ae15c6e0a | |
| | 03899dcba3 | |
| | 25f5a8b201 | |
| | e7d1d43f39 | |
| | 9b9098c3dc | |
| | 0cc154c2a9 | |
| | 4e8a84617c | |
| | ffea1bb0a3 | |
| | ac14d187c6 | |
| | 1eee6f16e4 | |
| | de46a36bbc | |
| | dbf8d0c82c | |
| | 02e88fdbb1 | |
| | 42d52ebbec | |
| | 3fd22973da | |
| | e13e57e024 | |
| | c3e28f9d55 | |
| | a188ba5c26 | |
| | 86419c8ab9 | |
| | a9ebfe6ec0 | |
| | 0a50874c01 | |
| | 6050bab5db | |
| | 2a51dcfdf7 | |
| | 86588695e3 | |
| | 47e0cb8985 | |
| | 388658efdb | |
| | 3194f90db1 | |
| | ee4bff3475 | |
| | 7fb012d0df | |
| | 44459f92ef | |
| | 1dc93107dc | |
| | 7fb5f47efe | |
| | 83db62bcda | |
| | b45fd61ec5 | |
| | 323264dec2 | |
| | c11e6d709c | |
| | 1b31d4e9f1 | |
| | a184dc68f5 | |
| | a4ee9c1978 | |
| | c2790f912b | |
| | 2e7a1a4cb1 | |
| | d050d6da2e | |
| | fd8da66fcb | |
| | d02b1069b5 | |
| | 6b4fa17097 | |
| | 5e2482824c | |
| | e0a8fb607a | |
| | 257964a8bc | |
| | d467aa78c2 | |
| | 6e1c061e5f | |
| | 9872f51293 | |
| | e54b591ec7 | |
| | 814b26f82e | |
| | 2a4084a2bb | |
| | dff8f93457 | |
| | 859e3fc7fa | |
| | 0de23760ff | |
| | b90e6df5cc | |
| | 0d20ec968f | |
| | 99dd3a8ab0 | |
| | eee2d5b915 | |
| | 159bad73d5 | |
| | 7b48443624 | |
| | d36f60999d | |
| | 67f0f615b2 | |
| | 200ebeebdc | |
| | 23e8279093 | |
| | 221b3236a8 | |
| | a0e1544848 | |
| | 2740fab7ad | |
| | 7042b9b16d | |
| | 4047d456b6 | |
| | 20d69ea504 | |
| | d021cac0c9 | |
| | 46369d48fe | |
| | 85d59198aa | |
| | 20a2e25cb0 | |
| | a32e70d449 | |
| | 76d0edbbaa | |
| | 8d547ef83a | |
| | d7a6978e05 | |
| | 786ff403b1 | |
| | 2ea0c3dce6 | |
| | b9dfa7845f | |
| | 090f6580d3 | |
| | eb884c8f76 | |
| | f6b6f0df67 | |
| | 9f2127bf04 | |
| | 8cd096d9b5 | |
| | 716688d44e | |
| | 9ad9cea952 | |
| | 75e01420fa | |
| | fc54f53662 | |
| | 7c2e9f71ea | |
| | 3c48913473 | |
| | 9f56902719 | |
| | 1bcdfe268d | |
| | a0263ab472 | |
| | 6b580c1544 | |
| | 3f7d666e8b | |
| | cce25ec116 | |
| | fc7fa59e5f | |
| | abbefae6f1 | |
| | f9d0c6d9ae | |
| | 4d59142255 | |
| | 72aa1ce00f | |
| | b6b737c937 | |
| | b074e7dc9b | |
| | e81f1f7971 | |
| | 83c3580346 | |
| | f0662eea48 | |
| | b49b861b2d | |
| | 877a9145ae | |
| | ba24bd88cf | |
| | 825fd7c990 | |
| | a28776e3aa | |
| | 80b46889ed | |
| | fdd25f0d99 | |
| | e0ab5629cc | |
| | 960e47423c | |
| | 4950ca4142 | |
| | 3e1f3b8132 | |
| | 21f63c57d5 | |
| | bd07c13348 | |
| | 9e21e5918c | |
| | f7aab5ac69 | |
| | 59ac3f48c8 | |
| | 9512bd66b5 | |
| | 5729dc3589 | |
| | 2890485785 | |
| | a5f6e5dc88 | |
| | d93db63a22 | |
| | ecc11dcc12 | |
| | e9115b8d8a | |
| | d625f55c05 | |
| | 9856c1446b | |
| | 39fb6d9bfc | |
| | 22f237fec6 | |
| | 021f0bdccb | |
| | 7cc40d5621 | |
| | c447454111 | |
| | 895d9df02f | |
| | 0c194f55e8 | |
| | 0a99bd84ce | |
| | 9feb9b0aa8 | |
| | 61b7982422 | |
| | 594b232e0f | |
| | a06ffeb54e | |
| | b74dd420fc | |
| | 4f06d59ff6 | |
2 .gitattributes vendored
@@ -8,5 +8,7 @@ crates/ruff_linter/resources/test/fixtures/pycodestyle/W391_3.py text eol=crlf
crates/ruff_python_formatter/resources/test/fixtures/ruff/docstring_code_examples_crlf.py text eol=crlf
crates/ruff_python_formatter/tests/snapshots/format@docstring_code_examples_crlf.py.snap text eol=crlf

crates/ruff_python_parser/resources/inline linguist-generated=true

ruff.schema.json linguist-generated=true text=auto eol=lf
*.md.snap linguist-language=Markdown
8 .github/CODEOWNERS vendored
@@ -5,11 +5,13 @@
# - The '*' pattern is global owners.
# - Order is important. The last matching pattern has the most precedence.

# Jupyter
/crates/ruff_linter/src/jupyter/ @dhruvmanila
/crates/ruff_notebook/ @dhruvmanila
/crates/ruff_formatter/ @MichaReiser
/crates/ruff_python_formatter/ @MichaReiser
/crates/ruff_python_parser/ @MichaReiser
/crates/ruff_python_parser/ @MichaReiser @dhruvmanila

# flake8-pyi
/crates/ruff_linter/src/rules/flake8_pyi/ @AlexWaygood

# Script for fuzzing the parser
/scripts/fuzz-parser/ @AlexWaygood
21 .github/dependabot.yml vendored
@@ -1,21 +0,0 @@
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
    labels: ["internal"]
    groups:
      actions:
        patterns:
          - "*"
    ignore:
      # The latest versions of these are not compatible with our release workflow
      - dependency-name: "actions/upload-artifact"
      - dependency-name: "actions/download-artifact"

  - package-ecosystem: "cargo"
    directory: "/"
    schedule:
      interval: "weekly"
    labels: ["internal"]
68 .github/renovate.json5 vendored Normal file
@@ -0,0 +1,68 @@
{
  $schema: "https://docs.renovatebot.com/renovate-schema.json",
  dependencyDashboard: true,
  suppressNotifications: ["prEditedNotification"],
  extends: ["config:recommended"],
  labels: ["internal"],
  schedule: ["before 4am on Monday"],
  semanticCommits: "disabled",
  separateMajorMinor: false,
  prHourlyLimit: 10,
  enabledManagers: ["github-actions", "pre-commit", "cargo", "pep621", "npm"],
  cargo: {
    // See https://docs.renovatebot.com/configuration-options/#rangestrategy
    rangeStrategy: "update-lockfile",
  },
  pep621: {
    fileMatch: ["^(python|scripts)/.*pyproject\\.toml$"],
  },
  npm: {
    fileMatch: ["^playground/.*package\\.json$"],
  },
  "pre-commit": {
    enabled: true,
  },
  packageRules: [
    {
      // Group upload/download artifact updates, the versions are dependent
      groupName: "Artifact GitHub Actions dependencies",
      matchManagers: ["github-actions"],
      matchPackagePatterns: ["actions/.*-artifact"],
      description: "Weekly update of artifact-related GitHub Actions dependencies",
    },
    {
      groupName: "pre-commit dependencies",
      matchManagers: ["pre-commit"],
      description: "Weekly update of pre-commit dependencies",
    },
    {
      groupName: "NPM Development dependencies",
      matchManagers: ["npm"],
      matchDepTypes: ["devDependencies"],
      description: "Weekly update of NPM development dependencies",
    },
    {
      groupName: "Monaco",
      matchManagers: ["npm"],
      matchPackagePatterns: ["monaco"],
      description: "Weekly update of the Monaco editor",
    },
    {
      groupName: "strum",
      matchManagers: ["cargo"],
      matchPackagePatterns: ["strum"],
      description: "Weekly update of strum dependencies",
    },
    {
      groupName: "ESLint",
      matchManagers: ["npm"],
      matchPackageNames: ["eslint"],
      allowedVersions: "<9",
      description: "Constraint ESLint to version 8 until TypeScript-eslint supports ESLint 9", // https://github.com/typescript-eslint/typescript-eslint/issues/8211
    },
  ],
  vulnerabilityAlerts: {
    commitMessageSuffix: "",
    labels: ["internal", "security"],
  },
}
69 .github/workflows/ci.yaml vendored
@@ -23,6 +23,8 @@ jobs:
    name: "Determine changes"
    runs-on: ubuntu-latest
    outputs:
      # Flag that is raised when any code that affects parser is changed
      parser: ${{ steps.changed.outputs.parser_any_changed }}
      # Flag that is raised when any code that affects linter is changed
      linter: ${{ steps.changed.outputs.linter_any_changed }}
      # Flag that is raised when any code that affects formatter is changed
@@ -35,10 +37,21 @@ jobs:
        with:
          fetch-depth: 0

      - uses: tj-actions/changed-files@v43
      - uses: tj-actions/changed-files@v44
        id: changed
        with:
          files_yaml: |
            parser:
              - Cargo.toml
              - Cargo.lock
              - crates/ruff_python_trivia/**
              - crates/ruff_source_file/**
              - crates/ruff_text_size/**
              - crates/ruff_python_ast/**
              - crates/ruff_python_parser/**
              - scripts/fuzz-parser/**
              - .github/workflows/ci.yaml

            linter:
              - Cargo.toml
              - Cargo.lock
@@ -181,6 +194,22 @@ jobs:
          cd crates/ruff_wasm
          wasm-pack test --node

  cargo-build-release:
    name: "cargo build (release)"
    runs-on: macos-latest
    needs: determine_changes
    if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
    timeout-minutes: 20
    steps:
      - uses: actions/checkout@v4
      - name: "Install Rust toolchain"
        run: rustup show
      - name: "Install mold"
        uses: rui314/setup-mold@v1
      - uses: Swatinem/rust-cache@v2
      - name: "Build"
        run: cargo build --release --locked

  cargo-fuzz:
    name: "cargo fuzz"
    runs-on: ubuntu-latest
@@ -200,6 +229,38 @@ jobs:
          tool: cargo-fuzz@0.11.2
      - run: cargo fuzz build -s none

  fuzz-parser:
    name: "Fuzz the parser"
    runs-on: ubuntu-latest
    needs:
      - cargo-test-linux
      - determine_changes
    if: ${{ needs.determine_changes.outputs.parser == 'true' }}
    timeout-minutes: 20
    env:
      FORCE_COLOR: 1
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}
      - name: Install uv
        run: curl -LsSf https://astral.sh/uv/install.sh | sh
      - name: Install Python requirements
        run: uv pip install -r scripts/fuzz-parser/requirements.txt --system
      - uses: actions/download-artifact@v4
        name: Download Ruff binary to test
        id: download-cached-binary
        with:
          name: ruff
          path: ruff-to-test
      - name: Fuzz
        run: |
          # Make executable, since artifact download doesn't preserve this
          chmod +x ${{ steps.download-cached-binary.outputs.download-path }}/ruff

          python scripts/fuzz-parser/fuzz.py 0-500 --test-executable ${{ steps.download-cached-binary.outputs.download-path }}/ruff

  scripts:
    name: "test scripts"
    runs-on: ubuntu-latest
@@ -228,9 +289,7 @@ jobs:
      - determine_changes
    # Only runs on pull requests, since that is the only we way we can find the base version for comparison.
    # Ecosystem check needs linter and/or formatter changes.
    if: github.event_name == 'pull_request' && ${{
      needs.determine_changes.outputs.code == 'true'
    }}
    if: ${{ github.event_name == 'pull_request' && needs.determine_changes.outputs.code == 'true' }}
    timeout-minutes: 20
    steps:
      - uses: actions/checkout@v4
@@ -509,7 +568,7 @@ jobs:
  benchmarks:
    runs-on: ubuntu-latest
    needs: determine_changes
    if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
    if: ${{ github.repository == 'astral-sh/ruff' && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
    timeout-minutes: 20
    steps:
      - name: "Checkout Branch"
72 .github/workflows/daily_fuzz.yaml vendored Normal file
@@ -0,0 +1,72 @@
name: Daily parser fuzz

on:
  workflow_dispatch:
  schedule:
    - cron: "0 0 * * *"
  pull_request:
    paths:
      - ".github/workflows/daily_fuzz.yaml"

permissions:
  contents: read

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

env:
  CARGO_INCREMENTAL: 0
  CARGO_NET_RETRY: 10
  CARGO_TERM_COLOR: always
  RUSTUP_MAX_RETRIES: 10
  PACKAGE_NAME: ruff
  FORCE_COLOR: 1

jobs:
  fuzz:
    name: Fuzz
    runs-on: ubuntu-latest
    timeout-minutes: 20
    # Don't run the cron job on forks:
    if: ${{ github.repository == 'astral-sh/ruff' || github.event_name != 'schedule' }}
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.12"
      - name: Install uv
        run: curl -LsSf https://astral.sh/uv/install.sh | sh
      - name: Install Python requirements
        run: uv pip install -r scripts/fuzz-parser/requirements.txt --system
      - name: "Install Rust toolchain"
        run: rustup show
      - name: "Install mold"
        uses: rui314/setup-mold@v1
      - uses: Swatinem/rust-cache@v2
      - name: Build ruff
        # A debug build means the script runs slower once it gets started,
        # but this is outweighed by the fact that a release build takes *much* longer to compile in CI
        run: cargo build --locked
      - name: Fuzz
        run: python scripts/fuzz-parser/fuzz.py $(shuf -i 0-9999999999999999999 -n 1000) --test-executable target/debug/ruff

  create-issue-on-failure:
    name: Create an issue if the daily fuzz surfaced any bugs
    runs-on: ubuntu-latest
    needs: fuzz
    if: ${{ github.repository == 'astral-sh/ruff' && always() && github.event_name == 'schedule' && needs.fuzz.result == 'failure' }}
    permissions:
      issues: write
    steps:
      - uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            await github.rest.issues.create({
              owner: "astral-sh",
              repo: "ruff",
              title: `Daily parser fuzz failed on ${new Date().toDateString()}`,
              body: "Runs listed here: https://github.com/astral-sh/ruff/actions/workflows/daily_fuzz.yml",
              labels: ["bug", "parser", "fuzzer"],
            })
19 .github/workflows/release.yaml vendored
@@ -58,7 +58,7 @@ jobs:
          path: dist

  macos-x86_64:
    runs-on: macos-latest
    runs-on: macos-12
    steps:
      - uses: actions/checkout@v4
        with:
@@ -97,8 +97,8 @@ jobs:
            *.tar.gz
            *.sha256

  macos-universal:
    runs-on: macos-latest
  macos-aarch64:
    runs-on: macos-14
    steps:
      - uses: actions/checkout@v4
        with:
@@ -106,16 +106,17 @@ jobs:
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          architecture: x64
          architecture: arm64
      - name: "Prep README.md"
        run: python scripts/transform_readme.py --target pypi
      - name: "Build wheels - universal2"
      - name: "Build wheels - aarch64"
        uses: PyO3/maturin-action@v1
        with:
          args: --release --locked --target universal2-apple-darwin --out dist
      - name: "Test wheel - universal2"
          target: aarch64
          args: --release --locked --out dist
      - name: "Test wheel - aarch64"
        run: |
          pip install dist/${{ env.PACKAGE_NAME }}-*universal2.whl --force-reinstall
          pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
          ruff --help
          python -m ruff --help
      - name: "Upload wheels"
@@ -451,7 +452,7 @@ jobs:
    name: Upload to PyPI
    runs-on: ubuntu-latest
    needs:
      - macos-universal
      - macos-aarch64
      - macos-x86_64
      - windows
      - linux
@@ -17,4 +17,4 @@ MD013: false
# MD024/no-duplicate-heading
MD024:
  # Allow when nested under different parents e.g. CHANGELOG.md
  allow_different_nesting: true
  siblings_only: true
@@ -13,7 +13,7 @@ exclude: |

repos:
  - repo: https://github.com/abravalheri/validate-pyproject
    rev: v0.15
    rev: v0.16
    hooks:
      - id: validate-pyproject

@@ -31,7 +31,7 @@ repos:
          )$

  - repo: https://github.com/igorshubovych/markdownlint-cli
    rev: v0.37.0
    rev: v0.39.0
    hooks:
      - id: markdownlint-fix
        exclude: |
@@ -41,7 +41,7 @@ repos:
          )$

  - repo: https://github.com/crate-ci/typos
    rev: v1.16.22
    rev: v1.20.10
    hooks:
      - id: typos

@@ -55,7 +55,7 @@ repos:
        pass_filenames: false # This makes it a lot faster

  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.1.4
    rev: v0.4.2
    hooks:
      - id: ruff-format
      - id: ruff
@@ -70,7 +70,7 @@ repos:

  # Prettier
  - repo: https://github.com/pre-commit/mirrors-prettier
    rev: v3.0.3
    rev: v3.1.0
    hooks:
      - id: prettier
        types: [yaml]
242 CHANGELOG.md
@@ -1,5 +1,236 @@
# Changelog

## 0.4.2

### Rule changes

- \[`flake8-pyi`\] Allow for overloaded `__exit__` and `__aexit__` definitions (`PYI036`) ([#11057](https://github.com/astral-sh/ruff/pull/11057))
- \[`pyupgrade`\] Catch usages of `"%s" % var` and provide an unsafe fix (`UP031`) ([#11019](https://github.com/astral-sh/ruff/pull/11019))
- \[`refurb`\] Implement new rule that suggests min/max over `sorted()` (`FURB192`) ([#10868](https://github.com/astral-sh/ruff/pull/10868))

### Server

- Fix an issue with missing diagnostics for Neovim and Helix ([#11092](https://github.com/astral-sh/ruff/pull/11092))
- Implement hover documentation for `noqa` codes ([#11096](https://github.com/astral-sh/ruff/pull/11096))
- Introduce common Ruff configuration options with new server settings ([#11062](https://github.com/astral-sh/ruff/pull/11062))

### Bug fixes

- Use `macos-12` for building release wheels to enable macOS 11 compatibility ([#11146](https://github.com/astral-sh/ruff/pull/11146))
- \[`flake8-blind-expect`\] Allow raise from in `BLE001` ([#11131](https://github.com/astral-sh/ruff/pull/11131))
- \[`flake8-pyi`\] Allow simple assignments to `None` in enum class scopes (`PYI026`) ([#11128](https://github.com/astral-sh/ruff/pull/11128))
- \[`flake8-simplify`\] Avoid raising `SIM911` for non-`zip` attribute calls ([#11126](https://github.com/astral-sh/ruff/pull/11126))
- \[`refurb`\] Avoid `operator.itemgetter` suggestion for single-item tuple ([#11095](https://github.com/astral-sh/ruff/pull/11095))
- \[`ruff`\] Respect per-file-ignores for `RUF100` with no other diagnostics ([#11058](https://github.com/astral-sh/ruff/pull/11058))
- \[`ruff`\] Fix async comprehension false positive (`RUF029`) ([#11070](https://github.com/astral-sh/ruff/pull/11070))

### Documentation

- \[`flake8-bugbear`\] Document explicitly disabling strict zip (`B905`) ([#11040](https://github.com/astral-sh/ruff/pull/11040))
- \[`flake8-type-checking`\] Mention `lint.typing-modules` in `TCH001`, `TCH002`, and `TCH003` ([#11144](https://github.com/astral-sh/ruff/pull/11144))
- \[`isort`\] Improve documentation around custom `isort` sections ([#11050](https://github.com/astral-sh/ruff/pull/11050))
- \[`pylint`\] Fix documentation oversight for `invalid-X-returns` ([#11094](https://github.com/astral-sh/ruff/pull/11094))

### Performance

- Use `matchit` to resolve per-file settings ([#11111](https://github.com/astral-sh/ruff/pull/11111))

## 0.4.1

### Preview features

- \[`pylint`\] Implement `invalid-hash-returned` (`PLE0309`) ([#10961](https://github.com/astral-sh/ruff/pull/10961))
- \[`pylint`\] Implement `invalid-index-returned` (`PLE0305`) ([#10962](https://github.com/astral-sh/ruff/pull/10962))

### Bug fixes

- \[`pylint`\] Allow `NoReturn`-like functions for `__str__`, `__len__`, etc. (`PLE0307`) ([#11017](https://github.com/astral-sh/ruff/pull/11017))
- Parser: Use empty range when there's "gap" in token source ([#11032](https://github.com/astral-sh/ruff/pull/11032))
- \[`ruff`\] Ignore stub functions in `unused-async` (`RUF029`) ([#11026](https://github.com/astral-sh/ruff/pull/11026))
- Parser: Expect indented case block instead of match stmt ([#11033](https://github.com/astral-sh/ruff/pull/11033))

## 0.4.0

### A new, hand-written parser

Ruff's new parser is **>2x faster**, which translates to a **20-40% speedup** for all linting and formatting invocations.
There's a lot to say about this exciting change, so check out the [blog post](https://astral.sh/blog/ruff-v0.4.0) for more details!

See [#10036](https://github.com/astral-sh/ruff/pull/10036) for implementation details.

### A new language server in Rust

With this release, we also want to highlight our new language server. `ruff server` is a Rust-powered language
server that comes built-in with Ruff. It can be used with any editor that supports the [Language Server Protocol](https://microsoft.github.io/language-server-protocol/) (LSP).
It uses a multi-threaded, lock-free architecture inspired by `rust-analyzer` and it will open the door for a lot
of exciting features. It’s also faster than our previous [Python-based language server](https://github.com/astral-sh/ruff-lsp)
-- but you probably guessed that already.

`ruff server` is only in alpha, but it has a lot of features that you can try out today:

- Lints Python files automatically and shows quick-fixes when available
- Formats Python files, with support for range formatting
- Comes with commands for quickly performing actions: `ruff.applyAutofix`, `ruff.applyFormat`, and `ruff.applyOrganizeImports`
- Supports `source.fixAll` and `source.organizeImports` source actions
- Automatically reloads your project configuration when you change it

To setup `ruff server` with your editor, refer to the [README.md](https://github.com/astral-sh/ruff/blob/main/crates/ruff_server/README.md).

### Preview features

- \[`pycodestyle`\] Do not trigger `E3` rules on `def`s following a function/method with a dummy body ([#10704](https://github.com/astral-sh/ruff/pull/10704))
- \[`pylint`\] Implement `invalid-bytes-returned` (`E0308`) ([#10959](https://github.com/astral-sh/ruff/pull/10959))
- \[`pylint`\] Implement `invalid-length-returned` (`E0303`) ([#10963](https://github.com/astral-sh/ruff/pull/10963))
- \[`pylint`\] Implement `self-cls-assignment` (`W0642`) ([#9267](https://github.com/astral-sh/ruff/pull/9267))
- \[`pylint`\] Omit stubs from `invalid-bool` and `invalid-str-return-type` ([#11008](https://github.com/astral-sh/ruff/pull/11008))
- \[`ruff`\] New rule `unused-async` (`RUF029`) to detect unneeded `async` keywords on functions ([#9966](https://github.com/astral-sh/ruff/pull/9966))

### Rule changes

- \[`flake8-bandit`\] Allow `urllib.request.urlopen` calls with static `Request` argument (`S310`) ([#10964](https://github.com/astral-sh/ruff/pull/10964))
- \[`flake8-bugbear`\] Treat `raise NotImplemented`-only bodies as stub functions (`B006`) ([#10990](https://github.com/astral-sh/ruff/pull/10990))
- \[`flake8-slots`\] Respect same-file `Enum` subclasses (`SLOT000`) ([#11006](https://github.com/astral-sh/ruff/pull/11006))
- \[`pylint`\] Support inverted comparisons (`PLR1730`) ([#10920](https://github.com/astral-sh/ruff/pull/10920))

### Linter

- Improve handling of builtin symbols in linter rules ([#10919](https://github.com/astral-sh/ruff/pull/10919))
- Improve display of rules in `--show-settings` ([#11003](https://github.com/astral-sh/ruff/pull/11003))
- Improve inference capabilities of the `BuiltinTypeChecker` ([#10976](https://github.com/astral-sh/ruff/pull/10976))
- Resolve classes and functions relative to script name ([#10965](https://github.com/astral-sh/ruff/pull/10965))
- Improve performance of `RuleTable::any_enabled` ([#10971](https://github.com/astral-sh/ruff/pull/10971))

### Server

*This section is devoted to updates for our new language server, written in Rust.*

- Enable ruff-specific source actions ([#10916](https://github.com/astral-sh/ruff/pull/10916))
- Refreshes diagnostics for open files when file configuration is changed ([#10988](https://github.com/astral-sh/ruff/pull/10988))
- Important errors are now shown as popups ([#10951](https://github.com/astral-sh/ruff/pull/10951))
- Introduce settings for directly configuring the linter and formatter ([#10984](https://github.com/astral-sh/ruff/pull/10984))
- Resolve configuration for each document individually ([#10950](https://github.com/astral-sh/ruff/pull/10950))
- Write a setup guide for Neovim ([#10987](https://github.com/astral-sh/ruff/pull/10987))

### Configuration

- Add `RUFF_OUTPUT_FILE` environment variable support ([#10992](https://github.com/astral-sh/ruff/pull/10992))

### Bug fixes

- Avoid `non-augmented-assignment` for reversed, non-commutative operators (`PLR6104`) ([#10909](https://github.com/astral-sh/ruff/pull/10909))
- Limit commutative non-augmented-assignments to primitive data types (`PLR6104`) ([#10912](https://github.com/astral-sh/ruff/pull/10912))
- Respect `per-file-ignores` for `RUF100` on blanket `# noqa` ([#10908](https://github.com/astral-sh/ruff/pull/10908))
- Consider `if` expression for parenthesized with items parsing ([#11010](https://github.com/astral-sh/ruff/pull/11010))
- Consider binary expr for parenthesized with items parsing ([#11012](https://github.com/astral-sh/ruff/pull/11012))
- Reset `FOR_TARGET` context for all kinds of parentheses ([#11009](https://github.com/astral-sh/ruff/pull/11009))

## 0.3.7

### Preview features

- \[`flake8-bugbear`\] Implement `loop-iterator-mutation` (`B909`) ([#9578](https://github.com/astral-sh/ruff/pull/9578))
- \[`pylint`\] Implement rule to prefer augmented assignment (`PLR6104`) ([#9932](https://github.com/astral-sh/ruff/pull/9932))

### Bug fixes

- Avoid TOCTOU errors in cache initialization ([#10884](https://github.com/astral-sh/ruff/pull/10884))
- \[`pylint`\] Recode `nan-comparison` rule to `W0177` ([#10894](https://github.com/astral-sh/ruff/pull/10894))
- \[`pylint`\] Reverse min-max logic in `if-stmt-min-max` ([#10890](https://github.com/astral-sh/ruff/pull/10890))

## 0.3.6

### Preview features

- \[`pylint`\] Implement `bad-staticmethod-argument` (`PLW0211`) ([#10781](https://github.com/astral-sh/ruff/pull/10781))
- \[`pylint`\] Implement `if-stmt-min-max` (`PLR1730`, `PLR1731`) ([#10002](https://github.com/astral-sh/ruff/pull/10002))
- \[`pyupgrade`\] Replace `str,Enum` multiple inheritance with `StrEnum` `UP042` ([#10713](https://github.com/astral-sh/ruff/pull/10713))
- \[`refurb`\] Implement `if-expr-instead-of-or-operator` (`FURB110`) ([#10687](https://github.com/astral-sh/ruff/pull/10687))
- \[`refurb`\] Implement `int-on-sliced-str` (`FURB166`) ([#10650](https://github.com/astral-sh/ruff/pull/10650))
- \[`refurb`\] Implement `write-whole-file` (`FURB103`) ([#10802](https://github.com/astral-sh/ruff/pull/10802))
- \[`refurb`\] Support `itemgetter` in `reimplemented-operator` (`FURB118`) ([#10526](https://github.com/astral-sh/ruff/pull/10526))
- \[`flake8_comprehensions`\] Add `sum`/`min`/`max` to unnecessary comprehension check (`C419`) ([#10759](https://github.com/astral-sh/ruff/pull/10759))

### Rule changes

- \[`pydocstyle`\] Require capitalizing docstrings where the first sentence is a single word (`D403`) ([#10776](https://github.com/astral-sh/ruff/pull/10776))
- \[`pycodestyle`\] Ignore annotated lambdas in class scopes (`E731`) ([#10720](https://github.com/astral-sh/ruff/pull/10720))
- \[`flake8-pyi`\] Various improvements to PYI034 ([#10807](https://github.com/astral-sh/ruff/pull/10807))
- \[`flake8-slots`\] Flag subclasses of call-based `typing.NamedTuple`s as well as subclasses of `collections.namedtuple()` (`SLOT002`) ([#10808](https://github.com/astral-sh/ruff/pull/10808))
- \[`pyflakes`\] Allow forward references in class bases in stub files (`F821`) ([#10779](https://github.com/astral-sh/ruff/pull/10779))
- \[`pygrep-hooks`\] Improve `blanket-noqa` error message (`PGH004`) ([#10851](https://github.com/astral-sh/ruff/pull/10851))

### CLI

- Support `FORCE_COLOR` env var ([#10839](https://github.com/astral-sh/ruff/pull/10839))

### Configuration

- Support negated patterns in `[extend-]per-file-ignores` ([#10852](https://github.com/astral-sh/ruff/pull/10852))

### Bug fixes

- \[`flake8-import-conventions`\] Accept non-aliased (but correct) import in `unconventional-import-alias` (`ICN001`) ([#10729](https://github.com/astral-sh/ruff/pull/10729))
- \[`flake8-quotes`\] Add semantic model flag when inside f-string replacement field ([#10766](https://github.com/astral-sh/ruff/pull/10766))
- \[`pep8-naming`\] Recursively resolve `TypeDicts` for N815 violations ([#10719](https://github.com/astral-sh/ruff/pull/10719))
- \[`flake8-quotes`\] Respect `Q00*` ignores in `flake8-quotes` rules ([#10728](https://github.com/astral-sh/ruff/pull/10728))
- \[`flake8-simplify`\] Show negated condition in `needless-bool` diagnostics (`SIM103`) ([#10854](https://github.com/astral-sh/ruff/pull/10854))
- \[`ruff`\] Use within-scope shadowed bindings in `asyncio-dangling-task` (`RUF006`) ([#10793](https://github.com/astral-sh/ruff/pull/10793))
- \[`flake8-pytest-style`\] Fix single-tuple conversion in `pytest-parametrize-values-wrong-type` (`PT007`) ([#10862](https://github.com/astral-sh/ruff/pull/10862))
- \[`flake8-return`\] Ignore assignments to annotated variables in `unnecessary-assign` (`RET504`) ([#10741](https://github.com/astral-sh/ruff/pull/10741))
- \[`refurb`\] Do not allow any keyword arguments for `read-whole-file` in `rb` mode (`FURB101`) ([#10803](https://github.com/astral-sh/ruff/pull/10803))
- \[`pylint`\] Don't recommend decorating staticmethods with `@singledispatch` (`PLE1519`, `PLE1520`) ([#10637](https://github.com/astral-sh/ruff/pull/10637))
- \[`pydocstyle`\] Use section name range for all section-related docstring diagnostics ([#10740](https://github.com/astral-sh/ruff/pull/10740))
- Respect `# noqa` directives on `__all__` openers ([#10798](https://github.com/astral-sh/ruff/pull/10798))

## 0.3.5

### Preview features

- \[`pylint`\] Implement `modified-iterating-set` (`E4703`) ([#10473](https://github.com/astral-sh/ruff/pull/10473))
- \[`refurb`\] Implement `for-loop-set-mutations` (`FURB142`) ([#10583](https://github.com/astral-sh/ruff/pull/10583))
- \[`refurb`\] Implement `unnecessary-from-float` (`FURB164`) ([#10647](https://github.com/astral-sh/ruff/pull/10647))
- \[`refurb`\] Implement `verbose-decimal-constructor` (`FURB157`) ([#10533](https://github.com/astral-sh/ruff/pull/10533))

### Rule changes

- \[`flake8-comprehensions`\] Handled special case for `C401` which also matches `C416` ([#10596](https://github.com/astral-sh/ruff/pull/10596))
- \[`flake8-pyi`\] Mark `unaliased-collections-abc-set-import` fix as "safe" for more cases in stub files (`PYI025`) ([#10547](https://github.com/astral-sh/ruff/pull/10547))
- \[`numpy`\] Add `row_stack` to NumPy 2.0 migration rule ([#10646](https://github.com/astral-sh/ruff/pull/10646))
- \[`pycodestyle`\] Allow cell magics before an import (`E402`) ([#10545](https://github.com/astral-sh/ruff/pull/10545))
- \[`pycodestyle`\] Avoid blank line rules for the first logical line in cell ([#10291](https://github.com/astral-sh/ruff/pull/10291))

### Configuration

- Respected nested namespace packages ([#10541](https://github.com/astral-sh/ruff/pull/10541))
- \[`flake8-boolean-trap`\] Add setting for user defined allowed boolean trap ([#10531](https://github.com/astral-sh/ruff/pull/10531))

### Bug fixes

- Correctly handle references in `__all__` definitions when renaming symbols in autofixes ([#10527](https://github.com/astral-sh/ruff/pull/10527))
- Track ranges of names inside `__all__` definitions ([#10525](https://github.com/astral-sh/ruff/pull/10525))
- \[`flake8-bugbear`\] Avoid false positive for usage after `continue` (`B031`) ([#10539](https://github.com/astral-sh/ruff/pull/10539))
- \[`flake8-copyright`\] Accept commas in default copyright pattern ([#9498](https://github.com/astral-sh/ruff/pull/9498))
- \[`flake8-datetimez`\] Allow f-strings with `%z` for `DTZ007` ([#10651](https://github.com/astral-sh/ruff/pull/10651))
- \[`flake8-pytest-style`\] Fix `PT014` autofix for last item in list ([#10532](https://github.com/astral-sh/ruff/pull/10532))
- \[`flake8-quotes`\] Ignore `Q000`, `Q001` when string is inside forward ref ([#10585](https://github.com/astral-sh/ruff/pull/10585))
- \[`isort`\] Always place non-relative imports after relative imports ([#10669](https://github.com/astral-sh/ruff/pull/10669))
- \[`isort`\] Respect Unicode characters in import sorting ([#10529](https://github.com/astral-sh/ruff/pull/10529))
- \[`pyflakes`\] Fix F821 false negatives when `from __future__ import annotations` is active (attempt 2) ([#10524](https://github.com/astral-sh/ruff/pull/10524))
- \[`pyflakes`\] Make `unnecessary-lambda` an always-unsafe fix ([#10668](https://github.com/astral-sh/ruff/pull/10668))
- \[`pylint`\] Fixed false-positive on the rule `PLW1641` (`eq-without-hash`) ([#10566](https://github.com/astral-sh/ruff/pull/10566))
- \[`ruff`\] Fix panic in unused `# noqa` removal with multi-byte space (`RUF100`) ([#10682](https://github.com/astral-sh/ruff/pull/10682))

### Documentation

- Add PR title format to `CONTRIBUTING.md` ([#10665](https://github.com/astral-sh/ruff/pull/10665))
- Fix list markup to include blank lines required ([#10591](https://github.com/astral-sh/ruff/pull/10591))
- Put `flake8-logging` next to the other flake8 plugins in registry ([#10587](https://github.com/astral-sh/ruff/pull/10587))
- \[`flake8-bandit`\] Update warning message for rule `S305` to address insecure block cipher mode use ([#10602](https://github.com/astral-sh/ruff/pull/10602))
- \[`flake8-bugbear`\] Document use of anonymous assignment in `useless-expression` ([#10551](https://github.com/astral-sh/ruff/pull/10551))
- \[`flake8-datetimez`\] Clarify error messages and docs for `DTZ` rules ([#10621](https://github.com/astral-sh/ruff/pull/10621))
- \[`pycodestyle`\] Use same before vs. after numbers for `space-around-operator` ([#10640](https://github.com/astral-sh/ruff/pull/10640))
- \[`ruff`\] Change `quadratic-list-summation` docs to use `iadd` consistently ([#10666](https://github.com/astral-sh/ruff/pull/10666))

## 0.3.4

### Preview features

@@ -97,7 +328,7 @@
- Fix unstable `with` items formatting ([#10274](https://github.com/astral-sh/ruff/pull/10274))
- Avoid repeating function calls in f-string conversions ([#10265](https://github.com/astral-sh/ruff/pull/10265))
- Fix E203 false positive for slices in format strings ([#10280](https://github.com/astral-sh/ruff/pull/10280))
- Fix incorrect `Parameter` range for `*args` and `**kwargs` ([#10283](https://github.com/astral-sh/ruff/pull/10283))
- Fix incorrect `Parameter` range for `*args` and `**kwargs` ([#10283](https://github.com/astral-sh/ruff/pull/10283))
- Treat `typing.Annotated` subscripts as type definitions ([#10285](https://github.com/astral-sh/ruff/pull/10285))

## 0.3.1

@@ -205,8 +436,7 @@ This release introduces the Ruff 2024.2 style, stabilizing the following changes
Highlights include:

- Initial support formatting f-strings (in `--preview`).
- Support for overriding arbitrary configuration options via the CLI through an expanded `--config`
  argument (e.g., `--config "lint.isort.combine-as-imports=false"`).
- Support for overriding arbitrary configuration options via the CLI through an expanded `--config` argument (e.g., `--config "lint.isort.combine-as-imports=false"`).
- Significant performance improvements in Ruff's lexer, parser, and lint rules.

### Preview features

@@ -854,7 +1084,7 @@ docstrings via the `docstring-code-format` setting.
- \[`pylint`\] Default `max-positional-args` to `max-args` ([#8998](https://github.com/astral-sh/ruff/pull/8998))
- \[`pylint`\] Add `allow-dunder-method-names` setting for `bad-dunder-method-name` (`PLW3201`) ([#8812](https://github.com/astral-sh/ruff/pull/8812))
- \[`isort`\] Add support for `from-first` setting ([#8663](https://github.com/astral-sh/ruff/pull/8663))
- \[`isort`\] Add support for `length-sort` settings ([#8841](https://github.com/astral-sh/ruff/pull/8841))
- \[`isort`\] Add support for `length-sort` settings ([#8841](https://github.com/astral-sh/ruff/pull/8841))

### Bug fixes

@@ -983,7 +1213,7 @@ docstrings via the `docstring-code-format` setting.
- \[`flake8-trio`\] Implement `TRIO115` ([#8486](https://github.com/astral-sh/ruff/pull/8486))
- \[`refurb`\] Implement `type-none-comparison` (`FURB169`) ([#8487](https://github.com/astral-sh/ruff/pull/8487))
- Flag all comparisons against builtin types in `E721` ([#8491](https://github.com/astral-sh/ruff/pull/8491))
- Make `SIM118` fix as safe when the expression is a known dictionary ([#8525](https://github.com/astral-sh/ruff/pull/8525))
- Make `SIM118` fix as safe when the expression is a known dictionary ([#8525](https://github.com/astral-sh/ruff/pull/8525))

### Formatter

@@ -1151,7 +1381,7 @@ Try it today with `ruff format`! [Check out the blog post](https://astral.sh/blo
- Add `backports.strenum` to `deprecated-imports` ([#8113](https://github.com/astral-sh/ruff/pull/8113))
- Update `SIM112` to ignore `https_proxy`, `http_proxy`, and `no_proxy` ([#8140](https://github.com/astral-sh/ruff/pull/8140))
- Update fix for `literal-membership` (`PLR6201`) to be unsafe ([#8097](https://github.com/astral-sh/ruff/pull/8097))
- Update fix for `mutable-argument-defaults` (`B006`) to be unsafe ([#8108](https://github.com/astral-sh/ruff/pull/8108))
- Update fix for `mutable-argument-defaults` (`B006`) to be unsafe ([#8108](https://github.com/astral-sh/ruff/pull/8108))

### Formatter
@@ -33,27 +33,18 @@ Welcome! We're happy to have you here. Thank you in advance for your contributio

## The Basics

Ruff welcomes contributions in the form of Pull Requests.
Ruff welcomes contributions in the form of pull requests.

For small changes (e.g., bug fixes), feel free to submit a PR.

For larger changes (e.g., new lint rules, new functionality, new configuration options), consider
creating an [**issue**](https://github.com/astral-sh/ruff/issues) outlining your proposed change.
You can also join us on [**Discord**](https://discord.com/invite/astral-sh) to discuss your idea with the
You can also join us on [Discord](https://discord.com/invite/astral-sh) to discuss your idea with the
community. We've labeled [beginner-friendly tasks](https://github.com/astral-sh/ruff/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
in the issue tracker, along with [bugs](https://github.com/astral-sh/ruff/issues?q=is%3Aissue+is%3Aopen+label%3Abug)
and [improvements](https://github.com/astral-sh/ruff/issues?q=is%3Aissue+is%3Aopen+label%3Aaccepted)
that are ready for contributions.

If you're looking for a place to start, we recommend implementing a new lint rule (see:
[_Adding a new lint rule_](#example-adding-a-new-lint-rule), which will allow you to learn from and
pattern-match against the examples in the existing codebase. Many lint rules are inspired by
existing Python plugins, which can be used as a reference implementation.

As a concrete example: consider taking on one of the rules from the [`flake8-pyi`](https://github.com/astral-sh/ruff/issues/848)
plugin, and looking to the originating [Python source](https://github.com/PyCQA/flake8-pyi) for
guidance.

If you have suggestions on how we might improve the contributing documentation, [let us know](https://github.com/astral-sh/ruff/discussions/5693)!

### Prerequisites
@@ -107,7 +98,7 @@ RUFF_UPDATE_SCHEMA=1 cargo test # Rust testing and updating ruff.schema.json
pre-commit run --all-files --show-diff-on-failure # Rust and Python formatting, Markdown and Python linting, etc.
```

These checks will run on GitHub Actions when you open your Pull Request, but running them locally
These checks will run on GitHub Actions when you open your pull request, but running them locally
will save you time and expedite the merge process.

Note that many code changes also require updating the snapshot tests, which is done interactively
@@ -117,7 +108,14 @@ after running `cargo test` like so:
cargo insta review
```

Your Pull Request will be reviewed by a maintainer, which may involve a few rounds of iteration
If your pull request relates to a specific lint rule, include the category and rule code in the
title, as in the following examples:

- \[`flake8-bugbear`\] Avoid false positive for usage after `continue` (`B031`)
- \[`flake8-simplify`\] Detect implicit `else` cases in `needless-bool` (`SIM103`)
- \[`pycodestyle`\] Implement `redundant-backslash` (`E502`)

Your pull request will be reviewed by a maintainer, which may involve a few rounds of iteration
prior to merging.

### Project Structure

@@ -125,8 +123,8 @@ prior to merging.
Ruff is structured as a monorepo with a [flat crate structure](https://matklad.github.io/2021/08/22/large-rust-workspaces.html),
such that all crates are contained in a flat `crates` directory.

The vast majority of the code, including all lint rules, lives in the `ruff` crate (located at
`crates/ruff_linter`). As a contributor, that's the crate that'll be most relevant to you.
The vast majority of the code, including all lint rules, lives in the `ruff_linter` crate (located
at `crates/ruff_linter`). As a contributor, that's the crate that'll be most relevant to you.

At the time of writing, the repository includes the following crates:

@@ -199,11 +197,14 @@ and calling out to lint rule analyzer functions as it goes.
If you need to inspect the AST, you can run `cargo dev print-ast` with a Python file. Grep
for the `Diagnostic::new` invocations to understand how other, similar rules are implemented.
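For orientation, a minimal, self-contained sketch of the shape such an analyzer function takes — the `Diagnostic` and `Checker` below are simplified stand-ins, not the actual `ruff_linter` types or signatures:

```rust
/// Simplified stand-in for a lint diagnostic: which rule fired, a message,
/// and the source range it applies to.
#[derive(Debug)]
struct Diagnostic {
    rule: &'static str,
    message: String,
    range: (usize, usize), // byte offsets into the source file
}

/// Simplified stand-in for the checker that traverses the AST and
/// accumulates diagnostics as analyzer functions fire.
struct Checker {
    diagnostics: Vec<Diagnostic>,
}

impl Checker {
    /// An analyzer function: invoked for each relevant node during traversal.
    /// This hypothetical "rule" flags string literals containing a tab.
    fn check_string_literal(&mut self, text: &str, start: usize) {
        if text.contains('\t') {
            self.diagnostics.push(Diagnostic {
                rule: "EXAMPLE001", // hypothetical rule code
                message: "string literal contains a tab".to_string(),
                range: (start, start + text.len()),
            });
        }
    }
}

fn main() {
    let mut checker = Checker { diagnostics: Vec::new() };
    checker.check_string_literal("a\tb", 10);
    println!("{:?}", checker.diagnostics);
}
```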
Once you're satisfied with your code, add tests for your rule. See [rule testing](#rule-testing-fixtures-and-snapshots)
for more details.
Once you're satisfied with your code, add tests for your rule
(see: [rule testing](#rule-testing-fixtures-and-snapshots)), and regenerate the documentation and
associated assets (like our JSON Schema) with `cargo dev generate-all`.

Finally, regenerate the documentation and other generated assets (like our JSON Schema) with:
`cargo dev generate-all`.
Finally, submit a pull request, and include the category, rule name, and rule code in the title, as
in:

> \[`pycodestyle`\] Implement `redundant-backslash` (`E502`)

#### Rule naming convention
@@ -813,8 +814,8 @@ To understand Ruff's import categorization system, we first need to define two c
  "project root".)
- "Package root": The top-most directory defining the Python package that includes a given Python
  file. To find the package root for a given Python file, traverse up its parent directories until
  you reach a parent directory that doesn't contain an `__init__.py` file (and isn't marked as
  a [namespace package](https://docs.astral.sh/ruff/settings/#namespace-packages)); take the directory
  you reach a parent directory that doesn't contain an `__init__.py` file (and isn't in a subtree
  marked as a [namespace package](https://docs.astral.sh/ruff/settings/#namespace-packages)); take the directory
  just before that, i.e., the first directory in the package.
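A rough sketch of that traversal (hypothetical paths; the namespace-package check described above is omitted):

```rust
use std::path::{Path, PathBuf};

/// Walk up from a Python file while the parent directory contains an
/// `__init__.py`, and return the last directory that did, i.e. the
/// package root. (Sketch only: the real logic also honors directories
/// configured as namespace packages.)
fn package_root(python_file: &Path) -> Option<PathBuf> {
    let mut current = python_file.parent()?;
    let mut root = None;
    while current.join("__init__.py").is_file() {
        root = Some(current.to_path_buf());
        match current.parent() {
            Some(parent) => current = parent,
            None => break,
        }
    }
    root
}

fn main() {
    // For `my_package/utils/helpers.py` with an `__init__.py` in both
    // directories, this walks up and stops at `my_package`.
    let root = package_root(Path::new("my_package/utils/helpers.py"));
    println!("{root:?}");
}
```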
For example, given:
878 Cargo.lock generated
File diff suppressed because it is too large
37 Cargo.toml
@@ -12,53 +12,56 @@ authors = ["Charlie Marsh <charlie.r.marsh@gmail.com>"]
license = "MIT"

[workspace.dependencies]
aho-corasick = { version = "1.1.2" }
aho-corasick = { version = "1.1.3" }
annotate-snippets = { version = "0.9.2", features = ["color"] }
anyhow = { version = "1.0.80" }
argfile = { version = "0.1.6" }
argfile = { version = "0.2.0" }
bincode = { version = "1.3.3" }
bitflags = { version = "2.4.1" }
bitflags = { version = "2.5.0" }
bstr = { version = "1.9.1" }
cachedir = { version = "0.3.1" }
chrono = { version = "0.4.35", default-features = false, features = ["clock"] }
clap = { version = "4.5.3", features = ["derive"] }
clap_complete_command = { version = "0.5.1" }
clearscreen = { version = "2.0.0" }
codspeed-criterion-compat = { version = "2.4.0", default-features = false }
clearscreen = { version = "3.0.0" }
codspeed-criterion-compat = { version = "2.6.0", default-features = false }
colored = { version = "2.1.0" }
console_error_panic_hook = { version = "0.1.7" }
console_log = { version = "1.0.0" }
countme = { version = "3.0.1" }
criterion = { version = "0.5.1", default-features = false }
crossbeam = { version = "0.8.4" }
dashmap = { version = "5.5.3" }
dirs = { version = "5.0.0" }
drop_bomb = { version = "0.1.5" }
env_logger = { version = "0.10.1" }
env_logger = { version = "0.11.0" }
fern = { version = "0.6.1" }
filetime = { version = "0.2.23" }
fs-err = { version = "2.11.0" }
glob = { version = "0.3.1" }
globset = { version = "0.4.14" }
hashbrown = "0.14.3"
hexf-parse = { version = "0.2.1" }
ignore = { version = "0.4.22" }
imara-diff = { version = "0.1.5" }
imperative = { version = "1.0.4" }
indexmap = { version = "2.2.6" }
indicatif = { version = "0.17.8" }
indoc = { version = "2.0.4" }
insta = { version = "1.35.1", feature = ["filters", "glob"] }
insta-cmd = { version = "0.4.0" }
insta-cmd = { version = "0.6.0" }
is-macro = { version = "0.3.5" }
is-wsl = { version = "0.4.0" }
itertools = { version = "0.12.1" }
js-sys = { version = "0.3.69" }
jod-thread = { version = "0.1.2" }
lalrpop-util = { version = "0.20.0", default-features = false }
lexical-parse-float = { version = "0.8.0", features = ["format"] }
libc = { version = "0.2.153" }
libcst = { version = "1.1.0", default-features = false }
log = { version = "0.4.17" }
lsp-server = { version = "0.7.6" }
lsp-types = { version = "0.95.0", features = ["proposed"] }
matchit = { version = "0.8.1" }
memchr = { version = "2.7.1" }
mimalloc = { version = "0.1.39" }
natord = { version = "1.0.9" }
@@ -66,15 +69,17 @@ notify = { version = "6.1.1" }
num_cpus = { version = "1.16.0" }
once_cell = { version = "1.19.0" }
path-absolutize = { version = "3.1.1" }
path-slash = { version = "0.2.1" }
pathdiff = { version = "0.2.1" }
pep440_rs = { version = "0.4.0", features = ["serde"] }
parking_lot = "0.12.1"
pep440_rs = { version = "0.6.0", features = ["serde"] }
pretty_assertions = "1.3.0"
proc-macro2 = { version = "1.0.79" }
pyproject-toml = { version = "0.9.0" }
quick-junit = { version = "0.3.5" }
quick-junit = { version = "0.4.0" }
quote = { version = "1.0.23" }
rand = { version = "0.8.5" }
rayon = { version = "1.8.1" }
rayon = { version = "1.10.0" }
regex = { version = "1.10.2" }
result-like = { version = "0.5.0" }
rustc-hash = { version = "1.1.0" }
@@ -88,11 +93,11 @@ serde_with = { version = "3.6.0", default-features = false, features = ["macros"
shellexpand = { version = "3.0.0" }
shlex = { version = "1.3.0" }
similar = { version = "2.4.0", features = ["inline"] }
smallvec = { version = "1.13.1" }
smallvec = { version = "1.13.2" }
static_assertions = "1.1.0"
strum = { version = "0.25.0", features = ["strum_macros"] }
strum_macros = { version = "0.25.3" }
syn = { version = "2.0.51" }
strum = { version = "0.26.0", features = ["strum_macros"] }
strum_macros = { version = "0.26.0" }
syn = { version = "2.0.55" }
tempfile = { version = "3.9.0" }
test-case = { version = "3.3.1" }
thiserror = { version = "1.0.58" }
@@ -101,7 +106,7 @@ toml = { version = "0.8.11" }
tracing = { version = "0.1.40" }
tracing-indicatif = { version = "0.3.6" }
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
tracing-tree = { version = "0.2.4" }
tracing-tree = { version = "0.3.0" }
typed-arena = { version = "2.0.2" }
unic-ucd-category = { version = "0.9" }
unicode-ident = { version = "1.0.12" }
@@ -4,7 +4,7 @@

[](https://github.com/astral-sh/ruff)
[](https://pypi.python.org/pypi/ruff)
[](https://pypi.python.org/pypi/ruff)
[](https://github.com/astral-sh/ruff/blob/main/LICENSE)
[](https://pypi.python.org/pypi/ruff)
[](https://github.com/astral-sh/ruff/actions)
[](https://discord.com/invite/astral-sh)
@@ -50,6 +50,7 @@ times faster than any individual tool.
Ruff is extremely actively developed and used in major open-source projects like:

- [Apache Airflow](https://github.com/apache/airflow)
- [Apache Superset](https://github.com/apache/superset)
- [FastAPI](https://github.com/tiangolo/fastapi)
- [Hugging Face](https://github.com/huggingface/transformers)
- [Pandas](https://github.com/pandas-dev/pandas)
@@ -151,7 +152,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com/) hook via [`ruff
```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
  # Ruff version.
  rev: v0.3.4
  rev: v0.4.2
  hooks:
    # Run the linter.
    - id: ruff
@@ -498,7 +499,7 @@ If you're using Ruff, consider adding the Ruff badge to your project's `README.m

## License

MIT
This repository is licensed under the [MIT License](https://github.com/astral-sh/ruff/blob/main/LICENSE)

<div align="center">
  <a target="_blank" href="https://astral.sh" style="background:none">
@@ -3,9 +3,17 @@
extend-exclude = ["**/resources/**/*", "**/snapshots/**/*"]

[default.extend-words]
"arange" = "arange" # e.g. `numpy.arange`
hel = "hel"
whos = "whos"
spawnve = "spawnve"
ned = "ned"
pn = "pn" # `import panel as pd` is a thing
poit = "poit"
BA = "BA" # acronym for "Bad Allowed", used in testing.

[default]
extend-ignore-re = [
    # Line ignore with trailing "spellchecker:disable-line"
    "(?Rm)^.*#\\s*spellchecker:disable-line$"
]
47 crates/red_knot/Cargo.toml Normal file
@@ -0,0 +1,47 @@
[package]
name = "red_knot"
version = "0.1.0"
edition.workspace = true
rust-version.workspace = true
homepage.workspace = true
documentation.workspace = true
repository.workspace = true
authors.workspace = true
license.workspace = true

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
ruff_formatter = { path = "../ruff_formatter" }
ruff_index = { path = "../ruff_index" }
ruff_notebook = { path = "../ruff_notebook" }
ruff_python_ast = { path = "../ruff_python_ast" }
ruff_python_formatter = { path = "../ruff_python_formatter" }
ruff_python_parser = { path = "../ruff_python_parser" }
ruff_python_trivia = { path = "../ruff_python_trivia" }
ruff_text_size = { path = "../ruff_text_size" }

anyhow = { workspace = true }
bitflags = { workspace = true }
ctrlc = "3.4.4"
crossbeam = { workspace = true }
dashmap = { workspace = true }
hashbrown = { workspace = true }
indexmap = { workspace = true }
log = { workspace = true }
notify = { workspace = true }
parking_lot = { workspace = true }
rayon = { workspace = true }
rustc-hash = { workspace = true }
smallvec = { workspace = true }
smol_str = "0.2.1"
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
tracing-tree = { workspace = true }

[dev-dependencies]
textwrap = "0.16.1"
tempfile = { workspace = true }

[lints]
workspace = true
415 crates/red_knot/src/ast_ids.rs Normal file
@@ -0,0 +1,415 @@
use std::any::type_name;
use std::fmt::{Debug, Formatter};
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;

use rustc_hash::FxHashMap;

use ruff_index::{Idx, IndexVec};
use ruff_python_ast::visitor::preorder;
use ruff_python_ast::visitor::preorder::{PreorderVisitor, TraversalSignal};
use ruff_python_ast::{
    AnyNodeRef, AstNode, ExceptHandler, ExceptHandlerExceptHandler, Expr, MatchCase, ModModule,
    NodeKind, Parameter, Stmt, StmtAnnAssign, StmtAssign, StmtAugAssign, StmtClassDef,
    StmtFunctionDef, StmtGlobal, StmtImport, StmtImportFrom, StmtNonlocal, StmtTypeAlias,
    TypeParam, TypeParamParamSpec, TypeParamTypeVar, TypeParamTypeVarTuple, WithItem,
};
use ruff_text_size::{Ranged, TextRange};

/// A type-agnostic ID that uniquely identifies an AST node in a file.
#[ruff_index::newtype_index]
pub struct AstId;

/// A typed ID that uniquely identifies an AST node in a file.
///
/// This is different from [`AstId`] in that it is a combination of the ID and the type of the node the ID identifies.
/// Typing the ID prevents mixing IDs of different node types and allows restricting the API to only accept
/// nodes for which an ID has been created (not all AST nodes get an ID).
pub struct TypedAstId<N: HasAstId> {
    erased: AstId,
    _marker: PhantomData<fn() -> N>,
}

impl<N: HasAstId> TypedAstId<N> {
    /// Upcasts this ID from a more specific node type to a more general node type.
    pub fn upcast<M: HasAstId>(self) -> TypedAstId<M>
    where
        N: Into<M>,
    {
        TypedAstId {
            erased: self.erased,
            _marker: PhantomData,
        }
    }
}

impl<N: HasAstId> Copy for TypedAstId<N> {}
impl<N: HasAstId> Clone for TypedAstId<N> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<N: HasAstId> PartialEq for TypedAstId<N> {
    fn eq(&self, other: &Self) -> bool {
        self.erased == other.erased
    }
}

impl<N: HasAstId> Eq for TypedAstId<N> {}
impl<N: HasAstId> Hash for TypedAstId<N> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.erased.hash(state);
    }
}

impl<N: HasAstId> Debug for TypedAstId<N> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_tuple("TypedAstId")
            .field(&self.erased)
            .field(&type_name::<N>())
            .finish()
    }
}

pub struct AstIds {
    ids: IndexVec<AstId, NodeKey>,
    reverse: FxHashMap<NodeKey, AstId>,
}

impl AstIds {
    // TODO rust-analyzer doesn't allocate an ID for every node. It only allocates IDs for
    // nodes with a corresponding HIR element, that is, nodes that are definitions.
    pub fn from_module(module: &ModModule) -> Self {
        let mut visitor = AstIdsVisitor::default();

        // TODO: visit_module?
        // Make sure we visit the root.
        visitor.create_id(module);
        visitor.visit_body(&module.body);

        while let Some(deferred) = visitor.deferred.pop() {
            match deferred {
                DeferredNode::FunctionDefinition(def) => {
                    def.visit_preorder(&mut visitor);
                }
                DeferredNode::ClassDefinition(def) => def.visit_preorder(&mut visitor),
            }
        }

        AstIds {
            ids: visitor.ids,
            reverse: visitor.reverse,
        }
    }

    /// Returns the ID of the root node.
    pub fn root(&self) -> NodeKey {
        self.ids[AstId::new(0)]
    }

    /// Returns the [`TypedAstId`] for a node.
    pub fn ast_id<N: HasAstId>(&self, node: &N) -> TypedAstId<N> {
        let key = node.syntax_node_key();
        TypedAstId {
            erased: self.reverse.get(&key).copied().unwrap(),
            _marker: PhantomData,
        }
    }

    /// Returns the [`TypedAstId`] for the node identified with the given [`TypedNodeKey`].
    pub fn ast_id_for_key<N: HasAstId>(&self, node: &TypedNodeKey<N>) -> TypedAstId<N> {
        let ast_id = self.ast_id_for_node_key(node.inner);

        TypedAstId {
            erased: ast_id,
            _marker: PhantomData,
        }
    }

    /// Returns the untyped [`AstId`] for the node identified by the given `node` key.
    pub fn ast_id_for_node_key(&self, node: NodeKey) -> AstId {
        self.reverse
            .get(&node)
            .copied()
            .expect("Can't find node in AstIds map.")
    }

    /// Returns the [`TypedNodeKey`] for the node identified by the given [`TypedAstId`].
    pub fn key<N: HasAstId>(&self, id: TypedAstId<N>) -> TypedNodeKey<N> {
        let syntax_key = self.ids[id.erased];

        TypedNodeKey::new(syntax_key).unwrap()
    }

    pub fn node_key<H: HasAstId>(&self, id: TypedAstId<H>) -> NodeKey {
        self.ids[id.erased]
    }
}

impl std::fmt::Debug for AstIds {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let mut map = f.debug_map();
        for (key, value) in self.ids.iter_enumerated() {
            map.entry(&key, &value);
        }

        map.finish()
    }
}

impl PartialEq for AstIds {
    fn eq(&self, other: &Self) -> bool {
        self.ids == other.ids
    }
}

impl Eq for AstIds {}

#[derive(Default)]
struct AstIdsVisitor<'a> {
    ids: IndexVec<AstId, NodeKey>,
    reverse: FxHashMap<NodeKey, AstId>,
    deferred: Vec<DeferredNode<'a>>,
}

impl<'a> AstIdsVisitor<'a> {
    fn create_id<A: HasAstId>(&mut self, node: &A) {
        let node_key = node.syntax_node_key();

        let id = self.ids.push(node_key);
        self.reverse.insert(node_key, id);
    }
}

impl<'a> PreorderVisitor<'a> for AstIdsVisitor<'a> {
    fn visit_stmt(&mut self, stmt: &'a Stmt) {
        match stmt {
            Stmt::FunctionDef(def) => {
                self.create_id(def);
                self.deferred.push(DeferredNode::FunctionDefinition(def));
                return;
            }
            // TODO defer visiting the assignment body, type alias parameters etc?
            Stmt::ClassDef(def) => {
                self.create_id(def);
                self.deferred.push(DeferredNode::ClassDefinition(def));
                return;
            }
            Stmt::Expr(_) => {
                // Skip
                return;
            }
            Stmt::Return(_) => {}
            Stmt::Delete(_) => {}
            Stmt::Assign(assignment) => self.create_id(assignment),
            Stmt::AugAssign(assignment) => {
                self.create_id(assignment);
            }
            Stmt::AnnAssign(assignment) => self.create_id(assignment),
            Stmt::TypeAlias(assignment) => self.create_id(assignment),
            Stmt::For(_) => {}
            Stmt::While(_) => {}
            Stmt::If(_) => {}
            Stmt::With(_) => {}
            Stmt::Match(_) => {}
            Stmt::Raise(_) => {}
            Stmt::Try(_) => {}
            Stmt::Assert(_) => {}
            Stmt::Import(import) => self.create_id(import),
            Stmt::ImportFrom(import_from) => self.create_id(import_from),
            Stmt::Global(global) => self.create_id(global),
            Stmt::Nonlocal(non_local) => self.create_id(non_local),
            Stmt::Pass(_) => {}
            Stmt::Break(_) => {}
            Stmt::Continue(_) => {}
            Stmt::IpyEscapeCommand(_) => {}
        }

        preorder::walk_stmt(self, stmt);
    }

    fn visit_expr(&mut self, _expr: &'a Expr) {}

    fn visit_parameter(&mut self, parameter: &'a Parameter) {
        self.create_id(parameter);
        preorder::walk_parameter(self, parameter);
    }

    fn visit_except_handler(&mut self, except_handler: &'a ExceptHandler) {
        match except_handler {
            ExceptHandler::ExceptHandler(except_handler) => {
                self.create_id(except_handler);
            }
        }

        preorder::walk_except_handler(self, except_handler);
    }

    fn visit_with_item(&mut self, with_item: &'a WithItem) {
        self.create_id(with_item);
        preorder::walk_with_item(self, with_item);
    }

    fn visit_match_case(&mut self, match_case: &'a MatchCase) {
        self.create_id(match_case);
        preorder::walk_match_case(self, match_case);
    }

    fn visit_type_param(&mut self, type_param: &'a TypeParam) {
        self.create_id(type_param);
    }
}

enum DeferredNode<'a> {
    FunctionDefinition(&'a StmtFunctionDef),
    ClassDefinition(&'a StmtClassDef),
}

#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct TypedNodeKey<N: AstNode> {
    /// The type-erased node key.
    inner: NodeKey,
    _marker: PhantomData<fn() -> N>,
}

impl<N: AstNode> TypedNodeKey<N> {
    pub fn from_node(node: &N) -> Self {
        let inner = NodeKey {
            kind: node.as_any_node_ref().kind(),
            range: node.range(),
        };
        Self {
            inner,
            _marker: PhantomData,
        }
    }

    pub fn new(node_key: NodeKey) -> Option<Self> {
        N::can_cast(node_key.kind).then_some(TypedNodeKey {
            inner: node_key,
            _marker: PhantomData,
        })
    }

    pub fn resolve<'a>(&self, root: AnyNodeRef<'a>) -> Option<N::Ref<'a>> {
        let node_ref = self.inner.resolve(root)?;

        Some(N::cast_ref(node_ref).unwrap())
    }

    pub fn resolve_unwrap<'a>(&self, root: AnyNodeRef<'a>) -> N::Ref<'a> {
        self.resolve(root).expect("node should resolve")
    }

    pub fn erased(&self) -> &NodeKey {
        &self.inner
    }
}

struct FindNodeKeyVisitor<'a> {
    key: NodeKey,
    result: Option<AnyNodeRef<'a>>,
}

impl<'a> PreorderVisitor<'a> for FindNodeKeyVisitor<'a> {
    fn enter_node(&mut self, node: AnyNodeRef<'a>) -> TraversalSignal {
        if self.result.is_some() {
            return TraversalSignal::Skip;
        }

        if node.range() == self.key.range && node.kind() == self.key.kind {
            self.result = Some(node);
            TraversalSignal::Skip
        } else if node.range().contains_range(self.key.range) {
            TraversalSignal::Traverse
        } else {
            TraversalSignal::Skip
        }
    }

    fn visit_body(&mut self, body: &'a [Stmt]) {
        // TODO it would be more efficient to use binary search instead of a linear scan.
        for stmt in body {
            if stmt.range().start() > self.key.range.end() {
                break;
            }

            self.visit_stmt(stmt);
        }
    }
}

// TODO an alternative to this is to have a `NodeId` on each node (in increasing order depending on the position).
// This would allow reducing the size of this to a u32.
// It would be nice if we could use an `Arc::weak_ref` here, but that only works if we use
// `Arc` internally.
// TODO: Implement the logic to resolve a node, given a db (and the correct file).
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct NodeKey {
    kind: NodeKind,
    range: TextRange,
}

impl NodeKey {
    pub fn resolve<'a>(&self, root: AnyNodeRef<'a>) -> Option<AnyNodeRef<'a>> {
        // We need to do a binary search here. Only traverse into a node if the key's range is within the node.
        let mut visitor = FindNodeKeyVisitor {
            key: *self,
            result: None,
        };

        if visitor.enter_node(root) == TraversalSignal::Traverse {
            root.visit_preorder(&mut visitor);
        }

        visitor.result
    }
}

/// Marker trait implemented by AST nodes for which we extract the `AstId`.
pub trait HasAstId: AstNode {
    fn node_key(&self) -> TypedNodeKey<Self>
    where
        Self: Sized,
    {
        TypedNodeKey {
            inner: self.syntax_node_key(),
            _marker: PhantomData,
        }
    }

    fn syntax_node_key(&self) -> NodeKey {
        NodeKey {
            kind: self.as_any_node_ref().kind(),
            range: self.range(),
        }
    }
}

impl HasAstId for StmtFunctionDef {}
impl HasAstId for StmtClassDef {}
impl HasAstId for StmtAnnAssign {}
impl HasAstId for StmtAugAssign {}
impl HasAstId for StmtAssign {}
impl HasAstId for StmtTypeAlias {}

impl HasAstId for ModModule {}

impl HasAstId for StmtImport {}

impl HasAstId for StmtImportFrom {}

impl HasAstId for Parameter {}

impl HasAstId for TypeParam {}
impl HasAstId for Stmt {}
impl HasAstId for TypeParamTypeVar {}
impl HasAstId for TypeParamTypeVarTuple {}
impl HasAstId for TypeParamParamSpec {}
impl HasAstId for StmtGlobal {}
impl HasAstId for StmtNonlocal {}

impl HasAstId for ExceptHandlerExceptHandler {}
impl HasAstId for WithItem {}
impl HasAstId for MatchCase {}
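The interplay of `NodeKey`, `TypedNodeKey`, and `TypedAstId` above is easiest to see end-to-end: index a module, take a typed key for a node, and round-trip it through its ID. A minimal sketch using only the APIs defined in this file (obtaining a parsed `ModModule` is assumed and not shown in this diff):

```rust
use ruff_python_ast::{ModModule, Stmt};

// Sketch: given an already-parsed module, index it and round-trip a
// function definition through its typed ID.
fn demo(module: &ModModule) {
    let ast_ids = AstIds::from_module(module);

    if let Some(Stmt::FunctionDef(def)) = module.body.first() {
        // A stable, type-erased key: node kind plus text range.
        let key = def.node_key();
        // The typed ID for that node; cheap to copy and store.
        let id = ast_ids.ast_id_for_key(&key);
        // ...and back from the ID to a key that can resolve the node.
        assert_eq!(ast_ids.key(id), key);
    }
}
```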
165 crates/red_knot/src/cache.rs Normal file
@@ -0,0 +1,165 @@
use std::fmt::Formatter;
use std::hash::Hash;
use std::sync::atomic::{AtomicUsize, Ordering};

use dashmap::mapref::entry::Entry;

use crate::db::QueryResult;
use crate::FxDashMap;

/// Simple key-value cache that locks on a per-key level.
pub struct KeyValueCache<K, V> {
    map: FxDashMap<K, V>,
    statistics: CacheStatistics,
}

impl<K, V> KeyValueCache<K, V>
where
    K: Eq + Hash + Clone,
    V: Clone,
{
    pub fn try_get(&self, key: &K) -> Option<V> {
        if let Some(existing) = self.map.get(key) {
            self.statistics.hit();
            Some(existing.clone())
        } else {
            self.statistics.miss();
            None
        }
    }

    pub fn get<F>(&self, key: &K, compute: F) -> QueryResult<V>
    where
        F: FnOnce(&K) -> QueryResult<V>,
    {
        Ok(match self.map.entry(key.clone()) {
            Entry::Occupied(cached) => {
                self.statistics.hit();

                cached.get().clone()
            }
            Entry::Vacant(vacant) => {
                self.statistics.miss();

                let value = compute(key)?;
                vacant.insert(value.clone());
                value
            }
        })
    }

    pub fn set(&mut self, key: K, value: V) {
        self.map.insert(key, value);
    }

    pub fn remove(&mut self, key: &K) -> Option<V> {
        self.map.remove(key).map(|(_, value)| value)
    }

    pub fn clear(&mut self) {
        self.map.clear();
        self.map.shrink_to_fit();
    }

    pub fn statistics(&self) -> Option<Statistics> {
        self.statistics.to_statistics()
    }
}

impl<K, V> Default for KeyValueCache<K, V>
where
    K: Eq + Hash,
    V: Clone,
{
    fn default() -> Self {
        Self {
            map: FxDashMap::default(),
            statistics: CacheStatistics::default(),
        }
    }
}

impl<K, V> std::fmt::Debug for KeyValueCache<K, V>
where
    K: std::fmt::Debug + Eq + Hash,
    V: std::fmt::Debug,
{
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let mut debug = f.debug_map();

        for entry in &self.map {
            debug.entry(&entry.value(), &entry.key());
        }

        debug.finish()
    }
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Statistics {
    pub hits: usize,
    pub misses: usize,
}

impl Statistics {
    #[allow(clippy::cast_precision_loss)]
    pub fn hit_rate(&self) -> Option<f64> {
        if self.hits + self.misses == 0 {
            return None;
        }

        Some((self.hits as f64) / (self.hits + self.misses) as f64)
    }
}

#[cfg(debug_assertions)]
pub type CacheStatistics = DebugStatistics;

#[cfg(not(debug_assertions))]
pub type CacheStatistics = ReleaseStatistics;

pub trait StatisticsRecorder {
    fn hit(&self);
    fn miss(&self);
    fn to_statistics(&self) -> Option<Statistics>;
}

#[derive(Debug, Default)]
pub struct DebugStatistics {
    hits: AtomicUsize,
    misses: AtomicUsize,
}

impl StatisticsRecorder for DebugStatistics {
    // TODO figure out the appropriate Ordering
    fn hit(&self) {
        self.hits.fetch_add(1, Ordering::SeqCst);
    }

    fn miss(&self) {
        self.misses.fetch_add(1, Ordering::SeqCst);
    }

    fn to_statistics(&self) -> Option<Statistics> {
        let hits = self.hits.load(Ordering::SeqCst);
        let misses = self.misses.load(Ordering::SeqCst);

        Some(Statistics { hits, misses })
    }
}

#[derive(Debug, Default)]
pub struct ReleaseStatistics;

impl StatisticsRecorder for ReleaseStatistics {
    #[inline]
    fn hit(&self) {}

    #[inline]
    fn miss(&self) {}

    #[inline]
    fn to_statistics(&self) -> Option<Statistics> {
        None
    }
}
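The `get` path above is the heart of the cache: return a clone on a hit, otherwise compute under the per-key entry lock and insert before returning. A minimal sketch of a query built on top of it (`line_count` and the `load_source` closure are hypothetical, for illustration only):

```rust
// Hypothetical query: cache a per-file line count.
fn line_count(
    cache: &KeyValueCache<FileId, usize>,
    file_id: FileId,
    load_source: impl FnOnce(FileId) -> QueryResult<String>,
) -> QueryResult<usize> {
    cache.get(&file_id, |id| {
        // Runs only on a cache miss; the result is stored before `get` returns.
        let text = load_source(*id)?;
        Ok(text.lines().count())
    })
}
```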
42 crates/red_knot/src/cancellation.rs Normal file
@@ -0,0 +1,42 @@
use std::sync::atomic::AtomicBool;
use std::sync::Arc;

#[derive(Debug, Clone, Default)]
pub struct CancellationTokenSource {
    signal: Arc<AtomicBool>,
}

impl CancellationTokenSource {
    pub fn new() -> Self {
        Self {
            signal: Arc::new(AtomicBool::new(false)),
        }
    }

    #[tracing::instrument(level = "trace", skip_all)]
    pub fn cancel(&self) {
        self.signal.store(true, std::sync::atomic::Ordering::SeqCst);
    }

    pub fn is_cancelled(&self) -> bool {
        self.signal.load(std::sync::atomic::Ordering::SeqCst)
    }

    pub fn token(&self) -> CancellationToken {
        CancellationToken {
            signal: self.signal.clone(),
        }
    }
}

#[derive(Clone, Debug)]
pub struct CancellationToken {
    signal: Arc<AtomicBool>,
}

impl CancellationToken {
    /// Returns `true` if cancellation has been requested.
    pub fn is_cancelled(&self) -> bool {
        self.signal.load(std::sync::atomic::Ordering::SeqCst)
    }
}
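The source/token split keeps the only `cancel` handle with the owner while workers poll cheap `Arc<AtomicBool>` clones. A minimal usage sketch (the worker loop is illustrative, not part of this diff):

```rust
let source = CancellationTokenSource::new();
let token = source.token();

let worker = std::thread::spawn(move || {
    loop {
        // Poll at a safe point and exit promptly once requested.
        if token.is_cancelled() {
            break;
        }
        // ... perform one unit of work ...
    }
});

source.cancel(); // observed by every clone of `token`
worker.join().unwrap();
```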
296 crates/red_knot/src/db.rs Normal file
@@ -0,0 +1,296 @@
use std::path::Path;
use std::sync::Arc;

pub use jars::{HasJar, HasJars};
pub use query::{QueryError, QueryResult};
pub use runtime::DbRuntime;
pub use storage::JarsStorage;

use crate::files::FileId;
use crate::lint::{Diagnostics, LintSemanticStorage, LintSyntaxStorage};
use crate::module::{Module, ModuleData, ModuleName, ModuleResolver, ModuleSearchPath};
use crate::parse::{Parsed, ParsedStorage};
use crate::source::{Source, SourceStorage};
use crate::symbols::{SymbolId, SymbolTable, SymbolTablesStorage};
use crate::types::{Type, TypeStore};

mod jars;
mod query;
mod runtime;
mod storage;

pub trait Database {
    /// Returns a reference to the runtime of the current worker.
    fn runtime(&self) -> &DbRuntime;

    /// Returns a mutable reference to the runtime. Only one worker can hold a mutable reference to the runtime.
    fn runtime_mut(&mut self) -> &mut DbRuntime;

    /// Returns `Ok` if the queries have not been cancelled and `Err(QueryError::Cancelled)` otherwise.
    fn cancelled(&self) -> QueryResult<()> {
        self.runtime().cancelled()
    }

    /// Returns `true` if the queries have been cancelled.
    fn is_cancelled(&self) -> bool {
        self.runtime().is_cancelled()
    }
}

/// Database that supports running queries from multiple threads.
pub trait ParallelDatabase: Database + Send {
    /// Creates a snapshot of the database state that can be used to query the database in another thread.
    ///
    /// The snapshot is a read-only view of the database, but query results are shared between threads.
    /// All queries are automatically cancelled when any mutation is applied to the database
    /// (by calling [`HasJars::jars_mut`]); snapshots themselves are read-only.
    ///
    /// ## Creating a snapshot
    ///
    /// Creating a snapshot of the database's jars is cheap, but creating a snapshot of
    /// other state stored on the database might require deep-cloning data. That's why you should
    /// avoid creating snapshots in a hot function (e.g. don't create a snapshot for each file; instead,
    /// create a snapshot when scheduling the check of an entire program).
    ///
    /// ## Salsa compatibility
    /// Salsa prohibits creating a snapshot while running a local query (it's fine if other workers run a query) [[source](https://github.com/salsa-rs/salsa/issues/80)].
    /// We should avoid creating snapshots while running a query because we might want to adopt Salsa in the future (if we can figure out persistent caching).
    /// Unfortunately, the infrastructure doesn't provide an automated way of knowing when a query is run; that's
    /// why we have to "enforce" this constraint manually.
    fn snapshot(&self) -> Snapshot<Self>;
}

/// Read-only snapshot of a database.
///
/// ## Deadlocks
/// A snapshot should always be dropped as soon as it is no longer necessary to run queries.
/// Storing the snapshot without running a query or periodically checking if cancellation was requested
/// can lead to deadlocks, because mutating the [`Database`] requires cancelling all pending queries
/// and waiting for all [`Snapshot`]s to be dropped.
#[derive(Debug)]
pub struct Snapshot<DB: ?Sized>
where
    DB: ParallelDatabase,
{
    db: DB,
}

impl<DB> Snapshot<DB>
where
    DB: ParallelDatabase,
{
    pub fn new(db: DB) -> Self {
        Snapshot { db }
    }
}

impl<DB> std::ops::Deref for Snapshot<DB>
where
    DB: ParallelDatabase,
{
    type Target = DB;

    fn deref(&self) -> &DB {
        &self.db
    }
}

// Red-knot-specific database code.

pub trait SourceDb: Database {
    // queries
    fn file_id(&self, path: &std::path::Path) -> FileId;

    fn file_path(&self, file_id: FileId) -> Arc<std::path::Path>;

    fn source(&self, file_id: FileId) -> QueryResult<Source>;

    fn parse(&self, file_id: FileId) -> QueryResult<Parsed>;
}

pub trait SemanticDb: SourceDb {
    // queries
    fn resolve_module(&self, name: ModuleName) -> QueryResult<Option<Module>>;

    fn file_to_module(&self, file_id: FileId) -> QueryResult<Option<Module>>;

    fn path_to_module(&self, path: &Path) -> QueryResult<Option<Module>>;

    fn symbol_table(&self, file_id: FileId) -> QueryResult<Arc<SymbolTable>>;

    fn infer_symbol_type(&self, file_id: FileId, symbol_id: SymbolId) -> QueryResult<Type>;

    // mutations

    fn add_module(&mut self, path: &Path) -> Option<(Module, Vec<Arc<ModuleData>>)>;

    fn set_module_search_paths(&mut self, paths: Vec<ModuleSearchPath>);
}

pub trait LintDb: SemanticDb {
    fn lint_syntax(&self, file_id: FileId) -> QueryResult<Diagnostics>;

    fn lint_semantic(&self, file_id: FileId) -> QueryResult<Diagnostics>;
}

pub trait Db: LintDb {}

#[derive(Debug, Default)]
pub struct SourceJar {
    pub sources: SourceStorage,
    pub parsed: ParsedStorage,
}

#[derive(Debug, Default)]
pub struct SemanticJar {
    pub module_resolver: ModuleResolver,
    pub symbol_tables: SymbolTablesStorage,
    pub type_store: TypeStore,
}

#[derive(Debug, Default)]
pub struct LintJar {
    pub lint_syntax: LintSyntaxStorage,
    pub lint_semantic: LintSemanticStorage,
}

#[cfg(test)]
pub(crate) mod tests {
    use std::path::Path;
    use std::sync::Arc;

    use crate::db::{
        Database, DbRuntime, HasJar, HasJars, JarsStorage, LintDb, LintJar, QueryResult, SourceDb,
        SourceJar,
    };
    use crate::files::{FileId, Files};
    use crate::lint::{lint_semantic, lint_syntax, Diagnostics};
    use crate::module::{
        add_module, file_to_module, path_to_module, resolve_module, set_module_search_paths,
        Module, ModuleData, ModuleName, ModuleSearchPath,
    };
    use crate::parse::{parse, Parsed};
    use crate::source::{source_text, Source};
    use crate::symbols::{symbol_table, SymbolId, SymbolTable};
    use crate::types::{infer_symbol_type, Type};

    use super::{SemanticDb, SemanticJar};

    // This can be a partial database used in a single crate for testing.
    // It would hold less data than the full database.
    #[derive(Debug, Default)]
    pub(crate) struct TestDb {
        files: Files,
        jars: JarsStorage<Self>,
    }

    impl HasJar<SourceJar> for TestDb {
        fn jar(&self) -> QueryResult<&SourceJar> {
            Ok(&self.jars()?.0)
        }

        fn jar_mut(&mut self) -> &mut SourceJar {
            &mut self.jars_mut().0
        }
    }

    impl HasJar<SemanticJar> for TestDb {
        fn jar(&self) -> QueryResult<&SemanticJar> {
            Ok(&self.jars()?.1)
        }

        fn jar_mut(&mut self) -> &mut SemanticJar {
            &mut self.jars_mut().1
        }
    }

    impl HasJar<LintJar> for TestDb {
        fn jar(&self) -> QueryResult<&LintJar> {
            Ok(&self.jars()?.2)
        }

        fn jar_mut(&mut self) -> &mut LintJar {
            &mut self.jars_mut().2
        }
    }

    impl SourceDb for TestDb {
        fn file_id(&self, path: &Path) -> FileId {
            self.files.intern(path)
        }

        fn file_path(&self, file_id: FileId) -> Arc<Path> {
            self.files.path(file_id)
        }

        fn source(&self, file_id: FileId) -> QueryResult<Source> {
            source_text(self, file_id)
        }

        fn parse(&self, file_id: FileId) -> QueryResult<Parsed> {
            parse(self, file_id)
        }
    }

    impl SemanticDb for TestDb {
        fn resolve_module(&self, name: ModuleName) -> QueryResult<Option<Module>> {
            resolve_module(self, name)
        }

        fn file_to_module(&self, file_id: FileId) -> QueryResult<Option<Module>> {
            file_to_module(self, file_id)
        }

        fn path_to_module(&self, path: &Path) -> QueryResult<Option<Module>> {
            path_to_module(self, path)
        }

        fn symbol_table(&self, file_id: FileId) -> QueryResult<Arc<SymbolTable>> {
            symbol_table(self, file_id)
        }

        fn infer_symbol_type(&self, file_id: FileId, symbol_id: SymbolId) -> QueryResult<Type> {
            infer_symbol_type(self, file_id, symbol_id)
        }

        fn add_module(&mut self, path: &Path) -> Option<(Module, Vec<Arc<ModuleData>>)> {
            add_module(self, path)
        }

        fn set_module_search_paths(&mut self, paths: Vec<ModuleSearchPath>) {
            set_module_search_paths(self, paths);
        }
    }

    impl LintDb for TestDb {
        fn lint_syntax(&self, file_id: FileId) -> QueryResult<Diagnostics> {
            lint_syntax(self, file_id)
        }

        fn lint_semantic(&self, file_id: FileId) -> QueryResult<Diagnostics> {
            lint_semantic(self, file_id)
        }
    }

    impl HasJars for TestDb {
        type Jars = (SourceJar, SemanticJar, LintJar);

        fn jars(&self) -> QueryResult<&Self::Jars> {
            self.jars.jars()
        }

        fn jars_mut(&mut self) -> &mut Self::Jars {
            self.jars.jars_mut()
        }
    }

    impl Database for TestDb {
        fn runtime(&self) -> &DbRuntime {
            self.jars.runtime()
        }

        fn runtime_mut(&mut self) -> &mut DbRuntime {
            self.jars.runtime_mut()
        }
    }
}
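The snapshot rules in the doc comments above boil down to a simple pattern: snapshot once per scheduled batch, run queries through the snapshot, and treat `Err(QueryError::Cancelled)` as "stop early". A sketch under those assumptions (the batch-checking function itself is illustrative, not part of this diff):

```rust
// Illustrative only: one snapshot for the whole batch, per the guidance
// above -- never one snapshot per file.
fn schedule_check<Db>(db: &Db, files: Vec<FileId>)
where
    Db: ParallelDatabase + LintDb + 'static,
{
    let snapshot = db.snapshot();

    std::thread::spawn(move || {
        for file in files {
            // A mutation on the main database cancels these queries;
            // stop promptly so the snapshot can be dropped.
            if snapshot.lint_syntax(file).is_err() {
                break;
            }
        }
        // `snapshot` is dropped here, unblocking the pending mutation.
    });
}
```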
37 crates/red_knot/src/db/jars.rs Normal file
@@ -0,0 +1,37 @@
use crate::db::query::QueryResult;

/// Gives access to a specific jar in the database.
///
/// Nope, the terminology isn't borrowed from Java but from Salsa <https://salsa-rs.github.io/salsa/>,
/// which is an analogy to storing the salsa in different jars.
///
/// The basic idea is that each crate can define its own jar and the jars can be combined into a single
/// database in the top-level crate. Each crate also defines its own `Database` trait. The combination of
/// the `Database` trait and the jar allows writing queries in isolation, without having to know how they get composed at the upper levels.
///
/// Salsa further defines a `HasIngredient` trait which slices the jar to a specific storage (e.g. a specific cache).
/// We don't need this just yet because we write our queries by hand. We may want a similar trait if we decide
/// to use a macro to generate the queries.
pub trait HasJar<T> {
    /// Gives a read-only reference to the jar.
    fn jar(&self) -> QueryResult<&T>;

    /// Gives a mutable reference to the jar.
    fn jar_mut(&mut self) -> &mut T;
}

/// Gives access to the jars in a database.
pub trait HasJars {
    /// A type storing the jars.
    ///
    /// Most commonly, this is a tuple where each jar is a tuple element.
    type Jars: Default;

    /// Gives access to the underlying jars, but tests if the queries have been cancelled.
    ///
    /// Returns `Err(QueryError::Cancelled)` if the queries have been cancelled.
    fn jars(&self) -> QueryResult<&Self::Jars>;

    /// Gives mutable access to the underlying jars.
    fn jars_mut(&mut self) -> &mut Self::Jars;
}
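The payoff of `HasJar` is that a query written in one crate stays generic over the final database composition. A minimal sketch of such a query (the function body is illustrative only):

```rust
// Compiles against any database that carries a SourceJar, no matter
// which other jars the concrete database tuple combines.
fn source_length<Db>(db: &Db, file_id: FileId) -> QueryResult<usize>
where
    Db: SourceDb + HasJar<SourceJar>,
{
    let _source_jar = db.jar()?; // also doubles as a cancellation check
    Ok(db.source(file_id)?.text().len())
}
```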
20 crates/red_knot/src/db/query.rs Normal file
@@ -0,0 +1,20 @@
use std::fmt::{Display, Formatter};

/// Reason why a db query operation failed.
#[derive(Debug, Clone, Copy)]
pub enum QueryError {
    /// The query was cancelled, either because the DB was mutated or because the host
    /// requested cancellation (e.g. on a file change or when pressing CTRL+C).
    Cancelled,
}

impl Display for QueryError {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            QueryError::Cancelled => f.write_str("query was cancelled"),
        }
    }
}

impl std::error::Error for QueryError {}

pub type QueryResult<T> = Result<T, QueryError>;
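Because `QueryResult` is an ordinary `Result`, cancellation propagates through `?` with no extra machinery: a query that composes two others needs no cancellation-specific code. For example (illustrative):

```rust
fn lint_all(db: &dyn LintDb, file_id: FileId) -> QueryResult<(Diagnostics, Diagnostics)> {
    // Either call surfaces Err(QueryError::Cancelled) the moment the
    // database is mutated or the host requests cancellation.
    Ok((db.lint_syntax(file_id)?, db.lint_semantic(file_id)?))
}
```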
41 crates/red_knot/src/db/runtime.rs Normal file
@@ -0,0 +1,41 @@
use crate::cancellation::CancellationTokenSource;
use crate::db::{QueryError, QueryResult};

/// Holds the jar-agnostic state of the database.
#[derive(Debug, Default)]
pub struct DbRuntime {
    /// The cancellation token source used to signal other workers that the queries should be aborted and
    /// exit at the next possible point.
    cancellation_token: CancellationTokenSource,
}

impl DbRuntime {
    pub(super) fn snapshot(&self) -> Self {
        Self {
            cancellation_token: self.cancellation_token.clone(),
        }
    }

    /// Cancels the pending queries of other workers. The current worker cannot have any pending
    /// queries because we're holding a mutable reference to the runtime.
    pub(super) fn cancel_other_workers(&mut self) {
        self.cancellation_token.cancel();
        // Set a new cancellation token so that we're in a non-cancelled state again when running the next
        // query.
        self.cancellation_token = CancellationTokenSource::default();
    }

    /// Returns `Ok` if the queries have not been cancelled and `Err(QueryError::Cancelled)` otherwise.
    pub(super) fn cancelled(&self) -> QueryResult<()> {
        if self.cancellation_token.is_cancelled() {
            Err(QueryError::Cancelled)
        } else {
            Ok(())
        }
    }

    /// Returns `true` if the queries have been cancelled.
    pub(super) fn is_cancelled(&self) -> bool {
        self.cancellation_token.is_cancelled()
    }
}
117 crates/red_knot/src/db/storage.rs Normal file
@@ -0,0 +1,117 @@
use std::fmt::Formatter;
use std::sync::Arc;

use crossbeam::sync::WaitGroup;

use crate::db::query::QueryResult;
use crate::db::runtime::DbRuntime;
use crate::db::{HasJars, ParallelDatabase};

/// Stores the jars of a database and the state for each worker.
///
/// Today, all state is shared across all workers, but it may be desirable to store data per worker in the future.
pub struct JarsStorage<T>
where
    T: HasJars + Sized,
{
    // It's important that `jars_wait_group` is declared after `jars` to ensure that `jars` is dropped first.
    // See https://doc.rust-lang.org/reference/destructors.html
    /// Stores the jars of the database.
    jars: Arc<T::Jars>,

    /// Used to count the references to `jars`. Allows implementing `jars_mut` without requiring to clone `jars`.
    jars_wait_group: WaitGroup,

    /// The data-agnostic state.
    runtime: DbRuntime,
}

impl<Db> JarsStorage<Db>
where
    Db: HasJars,
{
    pub(super) fn new() -> Self {
        Self {
            jars: Arc::new(Db::Jars::default()),
            jars_wait_group: WaitGroup::default(),
            runtime: DbRuntime::default(),
        }
    }

    /// Creates a snapshot of the jars.
    ///
    /// Creating the snapshot is cheap because it doesn't clone the jars; it only increments a ref counter.
    #[must_use]
    pub fn snapshot(&self) -> JarsStorage<Db>
    where
        Db: ParallelDatabase,
    {
        Self {
            jars: self.jars.clone(),
            jars_wait_group: self.jars_wait_group.clone(),
            runtime: self.runtime.snapshot(),
        }
    }

    pub(crate) fn jars(&self) -> QueryResult<&Db::Jars> {
        self.runtime.cancelled()?;
        Ok(&self.jars)
    }

    /// Returns a mutable reference to the jars without cloning their content.
    ///
    /// The method cancels any pending queries of other workers and waits for them to complete so that
    /// this instance is the only instance holding a reference to the jars.
    pub(crate) fn jars_mut(&mut self) -> &mut Db::Jars {
        // We have a mutable ref here, so no more workers can be spawned between calling this function and taking the mut ref below.
        self.cancel_other_workers();

        // Now all other references to `self.jars` should have been released. We can safely return a mutable
        // reference to the Arc's content.
        Arc::get_mut(&mut self.jars).expect("All references to jars should have been released")
    }

    pub(crate) fn runtime(&self) -> &DbRuntime {
        &self.runtime
    }

    pub(crate) fn runtime_mut(&mut self) -> &mut DbRuntime {
        // Note: This method may need to use a similar trick to `jars_mut` if `DbRuntime` is ever to store data that is shared between workers.
        &mut self.runtime
    }

    #[tracing::instrument(level = "trace", skip(self))]
    fn cancel_other_workers(&mut self) {
        self.runtime.cancel_other_workers();

        // Wait for all other workers to complete.
        let existing_wait = std::mem::take(&mut self.jars_wait_group);
        existing_wait.wait();
    }
}

impl<Db> Default for JarsStorage<Db>
where
    Db: HasJars,
{
    fn default() -> Self {
        Self::new()
    }
}

impl<T> std::fmt::Debug for JarsStorage<T>
where
    T: HasJars,
    <T as HasJars>::Jars: std::fmt::Debug,
{
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("SharedStorage")
            .field("jars", &self.jars)
            .field("jars_wait_group", &self.jars_wait_group)
            .field("runtime", &self.runtime)
            .finish()
    }
}
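Reads go through `jars()` (which doubles as the cancellation check); writes go through `jars_mut()` (which cancels other workers and waits for their snapshots to drop, making the `Arc` uniquely owned). A sketch of what the write path looks like from inside the crate (the mutation itself is illustrative):

```rust
// Illustrative mutation: by the time `jars_mut` returns, other workers'
// queries have been cancelled and their snapshots dropped, so handing
// out `&mut` to the Arc's content is sound.
fn invalidate_everything<Db: HasJars>(storage: &mut JarsStorage<Db>) {
    let jars = storage.jars_mut();
    *jars = Db::Jars::default();
}
```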
148 crates/red_knot/src/files.rs Normal file
@@ -0,0 +1,148 @@
use std::fmt::{Debug, Formatter};
use std::hash::{Hash, Hasher};
use std::path::Path;
use std::sync::Arc;

use hashbrown::hash_map::RawEntryMut;
use parking_lot::RwLock;
use rustc_hash::FxHasher;

use ruff_index::{newtype_index, IndexVec};

type Map<K, V> = hashbrown::HashMap<K, V, ()>;

#[newtype_index]
pub struct FileId;

// TODO we'll need a higher-level virtual file system abstraction that allows testing if a file exists
// or retrieving its content (ideally lazily and in a way that the memory can be retained later).
// I suspect that we'll end up with a FileSystem trait and our own Path abstraction.
#[derive(Clone, Default)]
pub struct Files {
    inner: Arc<RwLock<FilesInner>>,
}

impl Files {
    #[tracing::instrument(level = "debug", skip(self))]
    pub fn intern(&self, path: &Path) -> FileId {
        self.inner.write().intern(path)
    }

    pub fn try_get(&self, path: &Path) -> Option<FileId> {
        self.inner.read().try_get(path)
    }

    #[tracing::instrument(level = "debug", skip(self))]
    pub fn path(&self, id: FileId) -> Arc<Path> {
        self.inner.read().path(id)
    }
}

impl Debug for Files {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let files = self.inner.read();
        let mut debug = f.debug_map();
        for item in files.iter() {
            debug.entry(&item.0, &item.1);
        }

        debug.finish()
    }
}

impl PartialEq for Files {
    fn eq(&self, other: &Self) -> bool {
        self.inner.read().eq(&other.inner.read())
    }
}

impl Eq for Files {}

#[derive(Default)]
struct FilesInner {
    by_path: Map<FileId, ()>,
    // TODO should we use a map here to reclaim the space for removed files?
    // TODO I think we should use our own path abstraction here to avoid having to normalize paths
    // and deal with non-UTF-8 paths everywhere.
    by_id: IndexVec<FileId, Arc<Path>>,
}

impl FilesInner {
    /// Inserts the path and returns a new ID for it, or returns the existing ID if the path was interned before.
    // TODO should this accept Path or PathBuf?
    pub(crate) fn intern(&mut self, path: &Path) -> FileId {
        let mut hasher = FxHasher::default();
        path.hash(&mut hasher);
        let hash = hasher.finish();

        let entry = self
            .by_path
            .raw_entry_mut()
            .from_hash(hash, |existing_file| &*self.by_id[*existing_file] == path);

        match entry {
            RawEntryMut::Occupied(entry) => *entry.key(),
            RawEntryMut::Vacant(entry) => {
                let id = self.by_id.push(Arc::from(path));
                entry.insert_with_hasher(hash, id, (), |_| hash);
                id
            }
        }
    }

    pub(crate) fn try_get(&self, path: &Path) -> Option<FileId> {
        let mut hasher = FxHasher::default();
        path.hash(&mut hasher);
        let hash = hasher.finish();

        Some(
            *self
                .by_path
                .raw_entry()
                .from_hash(hash, |existing_file| &*self.by_id[*existing_file] == path)?
                .0,
        )
    }

    /// Returns the path for the file with the given ID.
    pub(crate) fn path(&self, id: FileId) -> Arc<Path> {
        self.by_id[id].clone()
    }

    pub(crate) fn iter(&self) -> impl Iterator<Item = (FileId, Arc<Path>)> + '_ {
        self.by_path.keys().map(|id| (*id, self.by_id[*id].clone()))
    }
}

impl PartialEq for FilesInner {
    fn eq(&self, other: &Self) -> bool {
        self.by_id == other.by_id
    }
}

impl Eq for FilesInner {}

#[cfg(test)]
mod tests {
    use std::path::PathBuf;

    use super::*;

    #[test]
    fn insert_path_twice_same_id() {
        let files = Files::default();
        let path = PathBuf::from("foo/bar");
        let id1 = files.intern(&path);
        let id2 = files.intern(&path);
        assert_eq!(id1, id2);
    }

    #[test]
    fn insert_different_paths_different_ids() {
        let files = Files::default();
        let path1 = PathBuf::from("foo/bar");
        let path2 = PathBuf::from("foo/bar/baz");
        let id1 = files.intern(&path1);
        let id2 = files.intern(&path2);
        assert_ne!(id1, id2);
    }
}
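The raw-entry dance in `intern` exists so the map can store only `FileId`s and compare candidates against `by_id`, but from the outside `Files` is just a bidirectional interner. A minimal usage sketch:

```rust
use std::path::Path;

let files = Files::default();

// Interning is idempotent: the same path always yields the same ID.
let id = files.intern(Path::new("src/main.py"));
assert_eq!(files.intern(Path::new("src/main.py")), id);

// And the ID resolves back to the path.
assert_eq!(&*files.path(id), Path::new("src/main.py"));
```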
135 crates/red_knot/src/format.rs Normal file
@@ -0,0 +1,135 @@
use std::ops::{Deref, DerefMut};

use ruff_formatter::PrintedRange;
use ruff_python_formatter::{FormatModuleError, PyFormatOptions};
use ruff_text_size::TextRange;

use crate::cache::KeyValueCache;
use crate::db::{HasJar, QueryError, SourceDb};
use crate::files::FileId;
use crate::lint::Diagnostics;
use crate::FxDashSet;

pub(crate) trait FormatDb: SourceDb {
    /// Formats a file and returns its formatted content or an indicator that it is unchanged.
    fn format_file(&self, file_id: FileId) -> Result<FormattedFile, FormatError>;

    /// Formats a range in a file.
    fn format_file_range(
        &self,
        file_id: FileId,
        range: TextRange,
    ) -> Result<PrintedRange, FormatError>;

    fn check_file_formatted(&self, file_id: FileId) -> Result<Diagnostics, FormatError>;
}

#[tracing::instrument(level = "trace", skip(db))]
pub(crate) fn format_file<Db>(db: &Db, file_id: FileId) -> Result<FormattedFile, FormatError>
where
    Db: FormatDb + HasJar<FormatJar>,
{
    let formatted = &db.jar()?.formatted;

    if formatted.contains(&file_id) {
        return Ok(FormattedFile::Unchanged);
    }

    let source = db.source(file_id)?;

    // TODO use the `format_module` method here to re-use the AST.
    let printed =
        ruff_python_formatter::format_module_source(source.text(), PyFormatOptions::default())?;

    Ok(if printed.as_code() == source.text() {
        formatted.insert(file_id);
        FormattedFile::Unchanged
    } else {
        FormattedFile::Formatted(printed.into_code())
    })
}

#[tracing::instrument(level = "trace", skip(db))]
pub(crate) fn format_file_range<Db: FormatDb + HasJar<FormatJar>>(
    db: &Db,
    file_id: FileId,
    range: TextRange,
) -> Result<PrintedRange, FormatError> {
    let formatted = &db.jar()?.formatted;
    let source = db.source(file_id)?;

    if formatted.contains(&file_id) {
        return Ok(PrintedRange::new(source.text()[range].into(), range));
    }

    // TODO use the `format_module` method here to re-use the AST.
    let result =
        ruff_python_formatter::format_range(source.text(), range, PyFormatOptions::default())?;
    Ok(result)
}

/// Checks if the file is correctly formatted. It creates a diagnostic for formatting issues.
#[tracing::instrument(level = "trace", skip(db))]
pub(crate) fn check_formatted<Db>(db: &Db, file_id: FileId) -> Result<Diagnostics, FormatError>
where
    Db: FormatDb + HasJar<FormatJar>,
{
    Ok(if db.format_file(file_id)?.is_unchanged() {
        Diagnostics::Empty
    } else {
        Diagnostics::from(vec!["File is not formatted".to_string()])
    })
}

#[derive(Debug)]
pub(crate) enum FormatError {
    Format(FormatModuleError),
    Query(QueryError),
}

impl From<FormatModuleError> for FormatError {
    fn from(value: FormatModuleError) -> Self {
        Self::Format(value)
    }
}

impl From<QueryError> for FormatError {
    fn from(value: QueryError) -> Self {
        Self::Query(value)
    }
}

#[derive(Clone, Eq, PartialEq, Debug)]
pub(crate) enum FormattedFile {
    Formatted(String),
    Unchanged,
}

impl FormattedFile {
    pub(crate) const fn is_unchanged(&self) -> bool {
        matches!(self, FormattedFile::Unchanged)
    }
}

#[derive(Debug, Default)]
pub struct FormatJar {
    pub formatted: FxDashSet<FileId>,
}

#[derive(Default, Debug)]
pub(crate) struct FormattedStorage(KeyValueCache<FileId, ()>);

impl Deref for FormattedStorage {
    type Target = KeyValueCache<FileId, ()>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for FormattedStorage {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
67 crates/red_knot/src/hir.rs Normal file
@@ -0,0 +1,67 @@
//! Key observations
//!
//! The HIR largely avoids allocations by:
//! * using an arena per node type
//! * using IDs and ID ranges to reference items.
//!
//! Using a separate arena per node type has the advantage that the IDs are relatively stable, because
//! they only change when a node of the same kind has been added or removed. (What's unclear is if that matters, or if
//! it still triggers a re-compute because the AST ID in the node has changed.)
//!
//! The HIR does not store all details. It mainly stores the *public* interface. There's a reference
//! back to the AST node to get more details.

use std::fmt::Formatter;
use std::hash::{Hash, Hasher};

use crate::ast_ids::{HasAstId, TypedAstId};
use crate::files::FileId;

pub struct HirAstId<N: HasAstId> {
    file_id: FileId,
    node_id: TypedAstId<N>,
}

impl<N: HasAstId> Copy for HirAstId<N> {}
impl<N: HasAstId> Clone for HirAstId<N> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<N: HasAstId> PartialEq for HirAstId<N> {
    fn eq(&self, other: &Self) -> bool {
        self.file_id == other.file_id && self.node_id == other.node_id
    }
}

impl<N: HasAstId> Eq for HirAstId<N> {}

impl<N: HasAstId> std::fmt::Debug for HirAstId<N> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("HirAstId")
            .field("file_id", &self.file_id)
            .field("node_id", &self.node_id)
            .finish()
    }
}

impl<N: HasAstId> Hash for HirAstId<N> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.file_id.hash(state);
        self.node_id.hash(state);
    }
}

impl<N: HasAstId> HirAstId<N> {
    pub fn upcast<M: HasAstId>(self) -> HirAstId<M>
    where
        N: Into<M>,
    {
        HirAstId {
            file_id: self.file_id,
            node_id: self.node_id.upcast(),
        }
    }
}
556 crates/red_knot/src/hir/definition.rs Normal file
@@ -0,0 +1,556 @@
use std::ops::{Index, Range};
|
||||
|
||||
use ruff_index::{newtype_index, IndexVec};
|
||||
use ruff_python_ast::visitor::preorder;
|
||||
use ruff_python_ast::visitor::preorder::PreorderVisitor;
|
||||
use ruff_python_ast::{
|
||||
Decorator, ExceptHandler, ExceptHandlerExceptHandler, Expr, MatchCase, ModModule, Stmt,
|
||||
StmtAnnAssign, StmtAssign, StmtClassDef, StmtFunctionDef, StmtGlobal, StmtImport,
|
||||
StmtImportFrom, StmtNonlocal, StmtTypeAlias, TypeParam, TypeParamParamSpec, TypeParamTypeVar,
|
||||
TypeParamTypeVarTuple, WithItem,
|
||||
};
|
||||
|
||||
use crate::ast_ids::{AstIds, HasAstId};
|
||||
use crate::files::FileId;
|
||||
use crate::hir::HirAstId;
|
||||
use crate::Name;
|
||||
|
||||
#[newtype_index]
|
||||
pub struct FunctionId;
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub struct Function {
|
||||
ast_id: HirAstId<StmtFunctionDef>,
|
||||
name: Name,
|
||||
parameters: Range<ParameterId>,
|
||||
type_parameters: Range<TypeParameterId>, // TODO: type_parameters, return expression, decorators
|
||||
}
|
||||
|
||||
#[newtype_index]
|
||||
pub struct ParameterId;
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub struct Parameter {
|
||||
kind: ParameterKind,
|
||||
name: Name,
|
||||
default: Option<()>, // TODO use expression HIR
|
||||
ast_id: HirAstId<ruff_python_ast::Parameter>,
|
||||
}
|
||||
|
||||
// TODO or should `Parameter` be an enum?
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
|
||||
pub enum ParameterKind {
|
||||
PositionalOnly,
|
||||
Arguments,
|
||||
Vararg,
|
||||
KeywordOnly,
|
||||
Kwarg,
|
||||
}
|
||||
|
||||
#[newtype_index]
|
||||
pub struct ClassId;
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub struct Class {
|
||||
name: Name,
|
||||
ast_id: HirAstId<StmtClassDef>,
|
||||
// TODO type parameters, inheritance, decorators, members
|
||||
}
|
||||
|
||||
#[newtype_index]
|
||||
pub struct AssignmentId;
|
||||
|
||||
// This can have more than one name...
|
||||
// but that means we can't implement `name()` on `ModuleItem`.
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub struct Assignment {
|
||||
// TODO: Handle multiple names / targets
|
||||
name: Name,
|
||||
ast_id: HirAstId<StmtAssign>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub struct AnnotatedAssignment {
|
||||
name: Name,
|
||||
ast_id: HirAstId<StmtAnnAssign>,
|
||||
}
|
||||
|
||||
#[newtype_index]
|
||||
pub struct AnnotatedAssignmentId;
|
||||
|
||||
#[newtype_index]
|
||||
pub struct TypeAliasId;
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub struct TypeAlias {
|
||||
name: Name,
|
||||
ast_id: HirAstId<StmtTypeAlias>,
|
||||
parameters: Range<TypeParameterId>,
|
||||
}
|
||||
|
||||
#[newtype_index]
|
||||
pub struct TypeParameterId;
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub enum TypeParameter {
|
||||
TypeVar(TypeParameterTypeVar),
|
||||
    ParamSpec(TypeParameterParamSpec),
    TypeVarTuple(TypeParameterTypeVarTuple),
}

impl TypeParameter {
    pub fn ast_id(&self) -> HirAstId<TypeParam> {
        match self {
            TypeParameter::TypeVar(type_var) => type_var.ast_id.upcast(),
            TypeParameter::ParamSpec(param_spec) => param_spec.ast_id.upcast(),
            TypeParameter::TypeVarTuple(type_var_tuple) => type_var_tuple.ast_id.upcast(),
        }
    }
}

#[derive(Debug, Clone, Eq, PartialEq)]
pub struct TypeParameterTypeVar {
    name: Name,
    ast_id: HirAstId<TypeParamTypeVar>,
}

#[derive(Debug, Clone, Eq, PartialEq)]
pub struct TypeParameterParamSpec {
    name: Name,
    ast_id: HirAstId<TypeParamParamSpec>,
}

#[derive(Debug, Clone, Eq, PartialEq)]
pub struct TypeParameterTypeVarTuple {
    name: Name,
    ast_id: HirAstId<TypeParamTypeVarTuple>,
}

#[newtype_index]
pub struct GlobalId;

#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Global {
    // TODO track names
    ast_id: HirAstId<StmtGlobal>,
}

#[newtype_index]
pub struct NonLocalId;

#[derive(Debug, Clone, Eq, PartialEq)]
pub struct NonLocal {
    // TODO track names
    ast_id: HirAstId<StmtNonlocal>,
}

pub enum DefinitionId {
    Function(FunctionId),
    Parameter(ParameterId),
    Class(ClassId),
    Assignment(AssignmentId),
    AnnotatedAssignment(AnnotatedAssignmentId),
    Global(GlobalId),
    NonLocal(NonLocalId),
    TypeParameter(TypeParameterId),
    TypeAlias(TypeAliasId),
}

pub enum DefinitionItem {
    Function(Function),
    Parameter(Parameter),
    Class(Class),
    Assignment(Assignment),
    AnnotatedAssignment(AnnotatedAssignment),
    Global(Global),
    NonLocal(NonLocal),
    TypeParameter(TypeParameter),
    TypeAlias(TypeAlias),
}

// The closest analog is rust-analyzer's `ItemTree`. It only represents the "items" that make up
// the public interface of a module (it excludes all other statements and expressions).
// rust-analyzer uses it as the main input to the name resolution algorithm:
//
// > It is the input to the name resolution algorithm, as well as to the queries defined in `adt.rs`,
// > `data.rs`, and most things in `attr.rs`.
//
// > One important purpose of this layer is to provide an "invalidation barrier" for incremental
// > computations: when typing inside an item body, the `ItemTree` of the modified file is typically
// > unaffected, so we don't have to recompute name resolution results or item data (see `data.rs`).
//
// I haven't fully figured this out yet, but I think this composes the "public" interface of a
// module? Maybe that's too optimistic.
#[derive(Debug, Clone, Default, Eq, PartialEq)]
pub struct Definitions {
    functions: IndexVec<FunctionId, Function>,
    parameters: IndexVec<ParameterId, Parameter>,
    classes: IndexVec<ClassId, Class>,
    assignments: IndexVec<AssignmentId, Assignment>,
    annotated_assignments: IndexVec<AnnotatedAssignmentId, AnnotatedAssignment>,
    type_aliases: IndexVec<TypeAliasId, TypeAlias>,
    type_parameters: IndexVec<TypeParameterId, TypeParameter>,
    globals: IndexVec<GlobalId, Global>,
    non_locals: IndexVec<NonLocalId, NonLocal>,
}

impl Definitions {
    pub fn from_module(module: &ModModule, ast_ids: &AstIds, file_id: FileId) -> Self {
        let mut visitor = DefinitionsVisitor {
            definitions: Definitions::default(),
            ast_ids,
            file_id,
        };

        visitor.visit_body(&module.body);

        visitor.definitions
    }
}
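
// Usage sketch (illustrative, not part of this commit): `Definitions` is built
// once per module from the parsed AST plus the file's ast-id map.
// `AstIds` is constructed elsewhere in this crate; the `iter_enumerated` call
// assumes the usual `IndexVec` API.
#[allow(dead_code)]
fn definitions_usage_sketch(module: &ModModule, ast_ids: &AstIds, file_id: FileId) {
    // Lower the module body into flat, id-addressed definition tables.
    let definitions = Definitions::from_module(module, ast_ids, file_id);

    // Ids handed out during lowering index back into the tables via `Index`.
    for (function_id, function) in definitions.functions.iter_enumerated() {
        debug_assert_eq!(&definitions[function_id], function);
    }
}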
impl Index<FunctionId> for Definitions {
    type Output = Function;

    fn index(&self, index: FunctionId) -> &Self::Output {
        &self.functions[index]
    }
}

impl Index<ParameterId> for Definitions {
    type Output = Parameter;

    fn index(&self, index: ParameterId) -> &Self::Output {
        &self.parameters[index]
    }
}

impl Index<ClassId> for Definitions {
    type Output = Class;

    fn index(&self, index: ClassId) -> &Self::Output {
        &self.classes[index]
    }
}

impl Index<AssignmentId> for Definitions {
    type Output = Assignment;

    fn index(&self, index: AssignmentId) -> &Self::Output {
        &self.assignments[index]
    }
}

impl Index<AnnotatedAssignmentId> for Definitions {
    type Output = AnnotatedAssignment;

    fn index(&self, index: AnnotatedAssignmentId) -> &Self::Output {
        &self.annotated_assignments[index]
    }
}

impl Index<TypeAliasId> for Definitions {
    type Output = TypeAlias;

    fn index(&self, index: TypeAliasId) -> &Self::Output {
        &self.type_aliases[index]
    }
}

impl Index<GlobalId> for Definitions {
    type Output = Global;

    fn index(&self, index: GlobalId) -> &Self::Output {
        &self.globals[index]
    }
}

impl Index<NonLocalId> for Definitions {
    type Output = NonLocal;

    fn index(&self, index: NonLocalId) -> &Self::Output {
        &self.non_locals[index]
    }
}

impl Index<TypeParameterId> for Definitions {
    type Output = TypeParameter;

    fn index(&self, index: TypeParameterId) -> &Self::Output {
        &self.type_parameters[index]
    }
}

struct DefinitionsVisitor<'a> {
    definitions: Definitions,
    ast_ids: &'a AstIds,
    file_id: FileId,
}

impl DefinitionsVisitor<'_> {
    fn ast_id<N: HasAstId>(&self, node: &N) -> HirAstId<N> {
        HirAstId {
            file_id: self.file_id,
            node_id: self.ast_ids.ast_id(node),
        }
    }

    fn lower_function_def(&mut self, function: &StmtFunctionDef) -> FunctionId {
        let name = Name::new(&function.name);

        let first_type_parameter_id = self.definitions.type_parameters.next_index();
        let mut last_type_parameter_id = first_type_parameter_id;

        if let Some(type_params) = &function.type_params {
            for parameter in &type_params.type_params {
                let id = self.lower_type_parameter(parameter);
                last_type_parameter_id = id;
            }
        }

        let parameters = self.lower_parameters(&function.parameters);

        self.definitions.functions.push(Function {
            name,
            ast_id: self.ast_id(function),
            parameters,
            type_parameters: first_type_parameter_id..last_type_parameter_id,
        })
    }

    fn lower_parameters(&mut self, parameters: &ruff_python_ast::Parameters) -> Range<ParameterId> {
        let first_parameter_id = self.definitions.parameters.next_index();
        let mut last_parameter_id = first_parameter_id;

        for parameter in &parameters.posonlyargs {
            last_parameter_id = self.definitions.parameters.push(Parameter {
                kind: ParameterKind::PositionalOnly,
                name: Name::new(&parameter.parameter.name),
                default: None,
                ast_id: self.ast_id(&parameter.parameter),
            });
        }

        if let Some(vararg) = &parameters.vararg {
            last_parameter_id = self.definitions.parameters.push(Parameter {
                kind: ParameterKind::Vararg,
                name: Name::new(&vararg.name),
                default: None,
                ast_id: self.ast_id(vararg),
            });
        }

        for parameter in &parameters.kwonlyargs {
            last_parameter_id = self.definitions.parameters.push(Parameter {
                kind: ParameterKind::KeywordOnly,
                name: Name::new(&parameter.parameter.name),
                default: None,
                ast_id: self.ast_id(&parameter.parameter),
            });
        }

        if let Some(kwarg) = &parameters.kwarg {
            last_parameter_id = self.definitions.parameters.push(Parameter {
                kind: ParameterKind::KeywordOnly,
                name: Name::new(&kwarg.name),
                default: None,
                ast_id: self.ast_id(kwarg),
            });
        }

        first_parameter_id..last_parameter_id
    }

    fn lower_class_def(&mut self, class: &StmtClassDef) -> ClassId {
        let name = Name::new(&class.name);

        self.definitions.classes.push(Class {
            name,
            ast_id: self.ast_id(class),
        })
    }

    fn lower_assignment(&mut self, assignment: &StmtAssign) {
        // FIXME handle multiple names
        if let Some(Expr::Name(name)) = assignment.targets.first() {
            self.definitions.assignments.push(Assignment {
                name: Name::new(&name.id),
                ast_id: self.ast_id(assignment),
            });
        }
    }

    fn lower_annotated_assignment(&mut self, annotated_assignment: &StmtAnnAssign) {
        if let Expr::Name(name) = &*annotated_assignment.target {
            self.definitions
                .annotated_assignments
                .push(AnnotatedAssignment {
                    name: Name::new(&name.id),
                    ast_id: self.ast_id(annotated_assignment),
                });
        }
    }

    fn lower_type_alias(&mut self, type_alias: &StmtTypeAlias) {
        if let Expr::Name(name) = &*type_alias.name {
            let name = Name::new(&name.id);

            let lower_parameters_id = self.definitions.type_parameters.next_index();
            let mut last_parameter_id = lower_parameters_id;

            if let Some(type_params) = &type_alias.type_params {
                for type_parameter in &type_params.type_params {
                    let id = self.lower_type_parameter(type_parameter);
                    last_parameter_id = id;
                }
            }

            self.definitions.type_aliases.push(TypeAlias {
                name,
                ast_id: self.ast_id(type_alias),
                parameters: lower_parameters_id..last_parameter_id,
            });
        }
    }

    fn lower_type_parameter(&mut self, type_parameter: &TypeParam) -> TypeParameterId {
        match type_parameter {
            TypeParam::TypeVar(type_var) => {
                self.definitions
                    .type_parameters
                    .push(TypeParameter::TypeVar(TypeParameterTypeVar {
                        name: Name::new(&type_var.name),
                        ast_id: self.ast_id(type_var),
                    }))
            }
            TypeParam::ParamSpec(param_spec) => {
                self.definitions
                    .type_parameters
                    .push(TypeParameter::ParamSpec(TypeParameterParamSpec {
                        name: Name::new(&param_spec.name),
                        ast_id: self.ast_id(param_spec),
                    }))
            }
            TypeParam::TypeVarTuple(type_var_tuple) => {
                self.definitions
                    .type_parameters
                    .push(TypeParameter::TypeVarTuple(TypeParameterTypeVarTuple {
                        name: Name::new(&type_var_tuple.name),
                        ast_id: self.ast_id(type_var_tuple),
                    }))
            }
        }
    }

    fn lower_import(&mut self, _import: &StmtImport) {
        // TODO
    }

    fn lower_import_from(&mut self, _import_from: &StmtImportFrom) {
        // TODO
    }

    fn lower_global(&mut self, global: &StmtGlobal) -> GlobalId {
        self.definitions.globals.push(Global {
            ast_id: self.ast_id(global),
        })
    }

    fn lower_non_local(&mut self, non_local: &StmtNonlocal) -> NonLocalId {
        self.definitions.non_locals.push(NonLocal {
            ast_id: self.ast_id(non_local),
        })
    }

    fn lower_except_handler(&mut self, _except_handler: &ExceptHandlerExceptHandler) {
        // TODO
    }

    fn lower_with_item(&mut self, _with_item: &WithItem) {
        // TODO
    }

    fn lower_match_case(&mut self, _match_case: &MatchCase) {
        // TODO
    }
}

impl PreorderVisitor<'_> for DefinitionsVisitor<'_> {
    fn visit_stmt(&mut self, stmt: &Stmt) {
        match stmt {
            // Definition statements
            Stmt::FunctionDef(definition) => {
                self.lower_function_def(definition);
                self.visit_body(&definition.body);
            }
            Stmt::ClassDef(definition) => {
                self.lower_class_def(definition);
                self.visit_body(&definition.body);
            }
            Stmt::Assign(assignment) => {
                self.lower_assignment(assignment);
            }
            Stmt::AnnAssign(annotated_assignment) => {
                self.lower_annotated_assignment(annotated_assignment);
            }
            Stmt::TypeAlias(type_alias) => {
                self.lower_type_alias(type_alias);
            }

            Stmt::Import(import) => self.lower_import(import),
            Stmt::ImportFrom(import_from) => self.lower_import_from(import_from),
            Stmt::Global(global) => {
                self.lower_global(global);
            }
            Stmt::Nonlocal(non_local) => {
                self.lower_non_local(non_local);
            }

            // Visit the compound statement bodies because they can contain other definitions.
            Stmt::For(_)
            | Stmt::While(_)
            | Stmt::If(_)
            | Stmt::With(_)
            | Stmt::Match(_)
            | Stmt::Try(_) => {
                preorder::walk_stmt(self, stmt);
            }

            // Skip over simple statements because they can't contain any other definitions.
            Stmt::Return(_)
            | Stmt::Delete(_)
            | Stmt::AugAssign(_)
            | Stmt::Raise(_)
            | Stmt::Assert(_)
            | Stmt::Expr(_)
            | Stmt::Pass(_)
            | Stmt::Break(_)
            | Stmt::Continue(_)
            | Stmt::IpyEscapeCommand(_) => {
                // No op
            }
        }
    }

    fn visit_expr(&mut self, _: &'_ Expr) {}

    fn visit_decorator(&mut self, _decorator: &'_ Decorator) {}

    fn visit_except_handler(&mut self, except_handler: &'_ ExceptHandler) {
        match except_handler {
            ExceptHandler::ExceptHandler(except_handler) => {
                self.lower_except_handler(except_handler);
            }
        }
    }

    fn visit_with_item(&mut self, with_item: &'_ WithItem) {
        self.lower_with_item(with_item);
    }

    fn visit_match_case(&mut self, match_case: &'_ MatchCase) {
        self.lower_match_case(match_case);
        self.visit_body(&match_case.body);
    }
}

crates/red_knot/src/lib.rs (new file, 110 lines)
@@ -0,0 +1,110 @@

use std::fmt::Formatter;
use std::hash::BuildHasherDefault;
use std::ops::Deref;
use std::path::{Path, PathBuf};

use rustc_hash::{FxHashSet, FxHasher};

use crate::files::FileId;

pub mod ast_ids;
pub mod cache;
pub mod cancellation;
pub mod db;
pub mod files;
mod format;
pub mod hir;
pub mod lint;
pub mod module;
mod parse;
pub mod program;
pub mod source;
mod symbols;
mod types;
pub mod watch;

pub(crate) type FxDashMap<K, V> = dashmap::DashMap<K, V, BuildHasherDefault<FxHasher>>;
#[allow(unused)]
pub(crate) type FxDashSet<V> = dashmap::DashSet<V, BuildHasherDefault<FxHasher>>;
pub(crate) type FxIndexSet<V> = indexmap::set::IndexSet<V, BuildHasherDefault<FxHasher>>;

#[derive(Debug, Clone)]
pub struct Workspace {
    /// TODO this should be a resolved path. We should probably use a newtype wrapper that
    /// guarantees that the path is UTF-8 and normalized.
    root: PathBuf,
    /// The files that are open in the workspace.
    ///
    /// * Editor: The files that are actively being edited in the editor (the user has a tab open with the file).
    /// * CLI: The resolved files passed as arguments to the CLI.
    open_files: FxHashSet<FileId>,
}

impl Workspace {
    pub fn new(root: PathBuf) -> Self {
        Self {
            root,
            open_files: FxHashSet::default(),
        }
    }

    pub fn root(&self) -> &Path {
        self.root.as_path()
    }

    // TODO having the open-file state in the workspace feels wrong.
    pub fn open_file(&mut self, file_id: FileId) {
        self.open_files.insert(file_id);
    }

    pub fn close_file(&mut self, file_id: FileId) {
        self.open_files.remove(&file_id);
    }

    // TODO introduce an `OpenFile` type instead of using an anonymous tuple.
    pub fn open_files(&self) -> impl Iterator<Item = FileId> + '_ {
        self.open_files.iter().copied()
    }

    pub fn is_file_open(&self, file_id: FileId) -> bool {
        self.open_files.contains(&file_id)
    }
}
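
// Usage sketch (illustrative, not part of this commit): the workspace only
// tracks *which* files are open; minting `FileId`s is the job of the `Files`
// interner (compare `SourceDb::file_id` in `program/mod.rs`).
#[allow(dead_code)]
fn workspace_usage_sketch(files: &crate::files::Files, mut workspace: Workspace) {
    let file_id = files.intern(Path::new("/workspace/main.py"));

    workspace.open_file(file_id);
    assert!(workspace.is_file_open(file_id));

    workspace.close_file(file_id);
    assert_eq!(workspace.open_files().count(), 0);
}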
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct Name(smol_str::SmolStr);

impl Name {
    #[inline]
    pub fn new(name: &str) -> Self {
        Self(smol_str::SmolStr::new(name))
    }

    pub fn as_str(&self) -> &str {
        self.0.as_str()
    }
}

impl Deref for Name {
    type Target = str;

    #[inline]
    fn deref(&self) -> &Self::Target {
        self.as_str()
    }
}

impl<T> From<T> for Name
where
    T: Into<smol_str::SmolStr>,
{
    fn from(value: T) -> Self {
        Self(value.into())
    }
}

impl std::fmt::Display for Name {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.as_str())
    }
}

crates/red_knot/src/lint.rs (new file, 264 lines)
@@ -0,0 +1,264 @@

use std::cell::RefCell;
use std::ops::{Deref, DerefMut};
use std::sync::Arc;
use std::time::Duration;

use ruff_python_ast::visitor::Visitor;
use ruff_python_ast::{ModModule, StringLiteral};

use crate::cache::KeyValueCache;
use crate::db::{HasJar, LintDb, LintJar, QueryResult, SemanticDb};
use crate::files::FileId;
use crate::parse::Parsed;
use crate::source::Source;
use crate::symbols::{Definition, SymbolId, SymbolTable};
use crate::types::Type;

#[tracing::instrument(level = "debug", skip(db))]
pub(crate) fn lint_syntax<Db>(db: &Db, file_id: FileId) -> QueryResult<Diagnostics>
where
    Db: LintDb + HasJar<LintJar>,
{
    let storage = &db.jar()?.lint_syntax;

    #[allow(clippy::print_stdout)]
    if std::env::var("RED_KNOT_SLOW_LINT").is_ok() {
        for i in 0..10 {
            db.cancelled()?;
            println!("RED_KNOT_SLOW_LINT is set, sleeping for {i}/10 seconds");
            std::thread::sleep(Duration::from_secs(1));
        }
    }

    storage.get(&file_id, |file_id| {
        let mut diagnostics = Vec::new();

        let source = db.source(*file_id)?;
        lint_lines(source.text(), &mut diagnostics);

        let parsed = db.parse(*file_id)?;

        if parsed.errors().is_empty() {
            let ast = parsed.ast();

            let mut visitor = SyntaxLintVisitor {
                diagnostics,
                source: source.text(),
            };
            visitor.visit_body(&ast.body);
            diagnostics = visitor.diagnostics;
        } else {
            diagnostics.extend(parsed.errors().iter().map(std::string::ToString::to_string));
        }

        Ok(Diagnostics::from(diagnostics))
    })
}

fn lint_lines(source: &str, diagnostics: &mut Vec<String>) {
    for (line_number, line) in source.lines().enumerate() {
        if line.len() < 88 {
            continue;
        }

        let char_count = line.chars().count();
        if char_count > 88 {
            diagnostics.push(format!(
                "Line {} is too long ({} characters)",
                line_number + 1,
                char_count
            ));
        }
    }
}
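
// A quick way to exercise `lint_lines` in isolation (illustrative sketch, not
// part of this commit): it is a pure function over the source text, so the
// 88-character limit can be checked directly.
#[cfg(test)]
mod lint_lines_tests {
    use super::lint_lines;

    #[test]
    fn reports_lines_over_88_characters() {
        let mut diagnostics = Vec::new();
        let source = format!("short = 1\n{}\n", "x".repeat(100));

        lint_lines(&source, &mut diagnostics);

        assert_eq!(
            diagnostics,
            vec!["Line 2 is too long (100 characters)".to_string()]
        );
    }
}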
#[tracing::instrument(level = "debug", skip(db))]
pub(crate) fn lint_semantic<Db>(db: &Db, file_id: FileId) -> QueryResult<Diagnostics>
where
    Db: LintDb + HasJar<LintJar>,
{
    let storage = &db.jar()?.lint_semantic;

    storage.get(&file_id, |file_id| {
        let source = db.source(*file_id)?;
        let parsed = db.parse(*file_id)?;
        let symbols = db.symbol_table(*file_id)?;

        let context = SemanticLintContext {
            file_id: *file_id,
            source,
            parsed,
            symbols,
            db,
            diagnostics: RefCell::new(Vec::new()),
        };

        lint_unresolved_imports(&context)?;

        Ok(Diagnostics::from(context.diagnostics.take()))
    })
}

fn lint_unresolved_imports(context: &SemanticLintContext) -> QueryResult<()> {
    // TODO: Consider iterating over the dependencies (imports) only instead of all definitions.
    for (symbol, definition) in context.symbols().all_definitions() {
        match definition {
            Definition::Import(import) => {
                let ty = context.infer_symbol_type(symbol)?;

                if ty.is_unknown() {
                    context.push_diagnostic(format!("Unresolved module {}", import.module));
                }
            }
            Definition::ImportFrom(import) => {
                let ty = context.infer_symbol_type(symbol)?;

                if ty.is_unknown() {
                    let module_name = import.module().map(Deref::deref).unwrap_or_default();
                    let message = if import.level() > 0 {
                        format!(
                            "Unresolved relative import '{}' from {}{}",
                            import.name(),
                            ".".repeat(import.level() as usize),
                            module_name
                        )
                    } else {
                        format!(
                            "Unresolved import '{}' from '{}'",
                            import.name(),
                            module_name
                        )
                    };

                    context.push_diagnostic(message);
                }
            }
            _ => {}
        }
    }

    Ok(())
}

pub struct SemanticLintContext<'a> {
    file_id: FileId,
    source: Source,
    parsed: Parsed,
    symbols: Arc<SymbolTable>,
    db: &'a dyn SemanticDb,
    diagnostics: RefCell<Vec<String>>,
}

impl<'a> SemanticLintContext<'a> {
    pub fn source_text(&self) -> &str {
        self.source.text()
    }

    pub fn file_id(&self) -> FileId {
        self.file_id
    }

    pub fn ast(&self) -> &ModModule {
        self.parsed.ast()
    }

    pub fn symbols(&self) -> &SymbolTable {
        &self.symbols
    }

    pub fn infer_symbol_type(&self, symbol_id: SymbolId) -> QueryResult<Type> {
        self.db.infer_symbol_type(self.file_id, symbol_id)
    }

    pub fn push_diagnostic(&self, diagnostic: String) {
        self.diagnostics.borrow_mut().push(diagnostic);
    }

    pub fn extend_diagnostics(&mut self, diagnostics: impl IntoIterator<Item = String>) {
        self.diagnostics.get_mut().extend(diagnostics);
    }
}

#[derive(Debug)]
struct SyntaxLintVisitor<'a> {
    diagnostics: Vec<String>,
    source: &'a str,
}

impl Visitor<'_> for SyntaxLintVisitor<'_> {
    fn visit_string_literal(&mut self, string_literal: &'_ StringLiteral) {
        // A very naive implementation of a "use double quotes" rule.
        let text = &self.source[string_literal.range];

        if text.starts_with('\'') {
            self.diagnostics
                .push("Use double quotes for strings".to_string());
        }
    }
}

#[derive(Debug, Clone)]
pub enum Diagnostics {
    Empty,
    List(Arc<Vec<String>>),
}

impl Diagnostics {
    pub fn as_slice(&self) -> &[String] {
        match self {
            Diagnostics::Empty => &[],
            Diagnostics::List(list) => list.as_slice(),
        }
    }
}

impl Deref for Diagnostics {
    type Target = [String];

    fn deref(&self) -> &Self::Target {
        self.as_slice()
    }
}

impl From<Vec<String>> for Diagnostics {
    fn from(value: Vec<String>) -> Self {
        if value.is_empty() {
            Diagnostics::Empty
        } else {
            Diagnostics::List(Arc::new(value))
        }
    }
}
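
// Design note with a small sketch (illustrative, not part of this commit):
// `Empty` keeps the common "no findings" case allocation-free, and `List`
// wraps an `Arc` so query results can be cloned across threads cheaply.
#[cfg(test)]
mod diagnostics_tests {
    use super::Diagnostics;

    #[test]
    fn empty_and_shared() {
        let none = Diagnostics::from(Vec::new());
        assert!(none.is_empty()); // via `Deref<Target = [String]>`

        let some = Diagnostics::from(vec!["Use double quotes for strings".to_string()]);
        let shared = some.clone(); // clones the `Arc`, not the `Vec`
        assert_eq!(shared.len(), 1);
    }
}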
#[derive(Default, Debug)]
pub struct LintSyntaxStorage(KeyValueCache<FileId, Diagnostics>);

impl Deref for LintSyntaxStorage {
    type Target = KeyValueCache<FileId, Diagnostics>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for LintSyntaxStorage {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

#[derive(Default, Debug)]
pub struct LintSemanticStorage(KeyValueCache<FileId, Diagnostics>);

impl Deref for LintSemanticStorage {
    type Target = KeyValueCache<FileId, Diagnostics>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for LintSemanticStorage {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

crates/red_knot/src/main.rs (new file, 446 lines)
@@ -0,0 +1,446 @@

#![allow(clippy::dbg_macro)]

use std::collections::hash_map::Entry;
use std::path::Path;
use std::sync::Mutex;

use crossbeam::channel as crossbeam_channel;
use rustc_hash::FxHashMap;
use tracing::subscriber::Interest;
use tracing::{Level, Metadata};
use tracing_subscriber::filter::LevelFilter;
use tracing_subscriber::layer::{Context, Filter, SubscriberExt};
use tracing_subscriber::{Layer, Registry};
use tracing_tree::time::Uptime;

use red_knot::db::{
    Database, HasJar, ParallelDatabase, QueryError, SemanticDb, SourceDb, SourceJar,
};
use red_knot::files::FileId;
use red_knot::module::{ModuleSearchPath, ModuleSearchPathKind};
use red_knot::program::check::ExecutionMode;
use red_knot::program::{FileChange, FileChangeKind, Program};
use red_knot::watch::FileWatcher;
use red_knot::Workspace;

#[allow(clippy::print_stdout, clippy::unnecessary_wraps, clippy::print_stderr)]
fn main() -> anyhow::Result<()> {
    setup_tracing();

    let arguments: Vec<_> = std::env::args().collect();

    if arguments.len() < 2 {
        eprintln!("Usage: red_knot <path>");
        return Err(anyhow::anyhow!("Invalid arguments"));
    }

    let entry_point = Path::new(&arguments[1]);

    if !entry_point.exists() {
        eprintln!("The entry point does not exist.");
        return Err(anyhow::anyhow!("Invalid arguments"));
    }

    if !entry_point.is_file() {
        eprintln!("The entry point is not a file.");
        return Err(anyhow::anyhow!("Invalid arguments"));
    }

    let workspace_folder = entry_point.parent().unwrap();
    let workspace = Workspace::new(workspace_folder.to_path_buf());

    let workspace_search_path = ModuleSearchPath::new(
        workspace.root().to_path_buf(),
        ModuleSearchPathKind::FirstParty,
    );
    let mut program = Program::new(workspace);
    program.set_module_search_paths(vec![workspace_search_path]);

    let entry_id = program.file_id(entry_point);
    program.workspace_mut().open_file(entry_id);

    let (main_loop, main_loop_cancellation_token) = MainLoop::new();

    // Listen to Ctrl+C and abort the watch mode.
    let main_loop_cancellation_token = Mutex::new(Some(main_loop_cancellation_token));
    ctrlc::set_handler(move || {
        let mut lock = main_loop_cancellation_token.lock().unwrap();

        if let Some(token) = lock.take() {
            token.stop();
        }
    })?;

    let file_changes_notifier = main_loop.file_changes_notifier();

    // Watch for file changes and re-trigger the analysis.
    let mut file_watcher = FileWatcher::new(
        move |changes| {
            file_changes_notifier.notify(changes);
        },
        program.files().clone(),
    )?;

    file_watcher.watch_folder(workspace_folder)?;

    main_loop.run(&mut program);

    let source_jar: &SourceJar = program.jar().unwrap();

    dbg!(source_jar.parsed.statistics());
    dbg!(source_jar.sources.statistics());

    Ok(())
}

struct MainLoop {
    orchestrator_sender: crossbeam_channel::Sender<OrchestratorMessage>,
    main_loop_receiver: crossbeam_channel::Receiver<MainLoopMessage>,
}

impl MainLoop {
    fn new() -> (Self, MainLoopCancellationToken) {
        let (orchestrator_sender, orchestrator_receiver) = crossbeam_channel::bounded(1);
        let (main_loop_sender, main_loop_receiver) = crossbeam_channel::bounded(1);

        let mut orchestrator = Orchestrator {
            receiver: orchestrator_receiver,
            sender: main_loop_sender.clone(),
            revision: 0,
        };

        std::thread::spawn(move || {
            orchestrator.run();
        });

        (
            Self {
                orchestrator_sender,
                main_loop_receiver,
            },
            MainLoopCancellationToken {
                sender: main_loop_sender,
            },
        )
    }

    fn file_changes_notifier(&self) -> FileChangesNotifier {
        FileChangesNotifier {
            sender: self.orchestrator_sender.clone(),
        }
    }

    fn run(self, program: &mut Program) {
        self.orchestrator_sender
            .send(OrchestratorMessage::Run)
            .unwrap();

        for message in &self.main_loop_receiver {
            tracing::trace!("Main Loop: Tick");

            match message {
                MainLoopMessage::CheckProgram { revision } => {
                    {
                        let program = program.snapshot();
                        let sender = self.orchestrator_sender.clone();

                        // Spawn a new task that checks the program. This needs to be done in a separate thread
                        // to prevent blocking the main loop here.
                        rayon::spawn(move || match program.check(ExecutionMode::ThreadPool) {
                            Ok(result) => {
                                sender
                                    .send(OrchestratorMessage::CheckProgramCompleted {
                                        diagnostics: result,
                                        revision,
                                    })
                                    .unwrap();
                            }
                            Err(QueryError::Cancelled) => {}
                        });
                    }

                    if !program.is_cancelled() {
                        let _ = program.format();
                    }
                }
                MainLoopMessage::ApplyChanges(changes) => {
                    // Automatically cancels any pending queries and waits for them to complete.
                    program.apply_changes(changes.iter());
                }
                MainLoopMessage::CheckCompleted(diagnostics) => {
                    dbg!(diagnostics);
                }
                MainLoopMessage::Exit => {
                    return;
                }
            }
        }
    }
}

impl Drop for MainLoop {
    fn drop(&mut self) {
        self.orchestrator_sender
            .send(OrchestratorMessage::Shutdown)
            .unwrap();
    }
}

#[derive(Debug, Clone)]
struct FileChangesNotifier {
    sender: crossbeam_channel::Sender<OrchestratorMessage>,
}

impl FileChangesNotifier {
    fn notify(&self, changes: Vec<FileChange>) {
        self.sender
            .send(OrchestratorMessage::FileChanges(changes))
            .unwrap();
    }
}

#[derive(Debug)]
struct MainLoopCancellationToken {
    sender: crossbeam_channel::Sender<MainLoopMessage>,
}

impl MainLoopCancellationToken {
    fn stop(self) {
        self.sender.send(MainLoopMessage::Exit).unwrap();
    }
}

struct Orchestrator {
    /// Sends messages to the main loop.
    sender: crossbeam_channel::Sender<MainLoopMessage>,
    /// Receives messages from the main loop.
    receiver: crossbeam_channel::Receiver<OrchestratorMessage>,
    revision: usize,
}

impl Orchestrator {
    fn run(&mut self) {
        while let Ok(message) = self.receiver.recv() {
            match message {
                OrchestratorMessage::Run => {
                    self.sender
                        .send(MainLoopMessage::CheckProgram {
                            revision: self.revision,
                        })
                        .unwrap();
                }

                OrchestratorMessage::CheckProgramCompleted {
                    diagnostics,
                    revision,
                } => {
                    // Only take the diagnostics if they are for the latest revision.
                    if self.revision == revision {
                        self.sender
                            .send(MainLoopMessage::CheckCompleted(diagnostics))
                            .unwrap();
                    } else {
                        tracing::debug!("Discarding diagnostics for outdated revision {revision} (current: {}).", self.revision);
                    }
                }

                OrchestratorMessage::FileChanges(changes) => {
                    // Request cancellation, but wait until all analysis tasks have completed to
                    // avoid stale messages in the next main loop.

                    self.revision += 1;
                    self.debounce_changes(changes);
                }
                OrchestratorMessage::Shutdown => {
                    return self.shutdown();
                }
            }
        }
    }

    fn debounce_changes(&self, changes: Vec<FileChange>) {
        let mut aggregated_changes = AggregatedChanges::default();
        aggregated_changes.extend(changes);

        loop {
            // Consume any incoming file change messages before running a new analysis, but don't wait for more than 10 ms.
            crossbeam_channel::select! {
                recv(self.receiver) -> message => {
                    match message {
                        Ok(OrchestratorMessage::Shutdown) => {
                            return self.shutdown();
                        }
                        Ok(OrchestratorMessage::FileChanges(file_changes)) => {
                            aggregated_changes.extend(file_changes);
                        }

                        Ok(OrchestratorMessage::CheckProgramCompleted { .. }) => {
                            // Disregard any outdated completion message.
                        }
                        Ok(OrchestratorMessage::Run) => unreachable!("The orchestrator is already running."),

                        Err(_) => {
                            // There are no more senders, no point in waiting for more messages.
                            return;
                        }
                    }
                },
                default(std::time::Duration::from_millis(10)) => {
                    // No more file changes after 10 ms; send the changes and schedule a new analysis.
                    self.sender.send(MainLoopMessage::ApplyChanges(aggregated_changes)).unwrap();
                    self.sender.send(MainLoopMessage::CheckProgram { revision: self.revision }).unwrap();
                    return;
                }
            }
        }
    }

    #[allow(clippy::unused_self)]
    fn shutdown(&self) {
        tracing::trace!("Shutting down orchestrator.");
    }
}
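
// The debounce loop above is a small, reusable crossbeam pattern: keep
// draining messages until the channel has been quiet for the timeout, then
// flush once. A standalone sketch of just that pattern (illustrative, not
// part of this commit; names are invented):
#[allow(dead_code)]
fn debounce_sketch<T>(
    receiver: &crossbeam_channel::Receiver<T>,
    quiet_period: std::time::Duration,
) -> Option<Vec<T>> {
    // Block for the first event; `None` once every sender hung up.
    let mut batch = vec![receiver.recv().ok()?];

    loop {
        crossbeam_channel::select! {
            recv(receiver) -> message => match message {
                Ok(event) => batch.push(event),
                // Senders are gone; flush what we have.
                Err(_) => return Some(batch),
            },
            // Quiet for long enough: hand the batch to the caller.
            default(quiet_period) => return Some(batch),
        }
    }
}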
/// Message sent from the orchestrator to the main loop.
#[derive(Debug)]
enum MainLoopMessage {
    CheckProgram { revision: usize },
    CheckCompleted(Vec<String>),
    ApplyChanges(AggregatedChanges),
    Exit,
}

#[derive(Debug)]
enum OrchestratorMessage {
    Run,
    Shutdown,

    CheckProgramCompleted {
        diagnostics: Vec<String>,
        revision: usize,
    },

    FileChanges(Vec<FileChange>),
}

#[derive(Default, Debug)]
struct AggregatedChanges {
    changes: FxHashMap<FileId, FileChangeKind>,
}

impl AggregatedChanges {
    fn add(&mut self, change: FileChange) {
        match self.changes.entry(change.file_id()) {
            Entry::Occupied(mut entry) => {
                let merged = entry.get_mut();

                match (merged, change.kind()) {
                    (FileChangeKind::Created, FileChangeKind::Deleted) => {
                        // Deletion after creation means that ruff never saw the file.
                        entry.remove();
                    }
                    (FileChangeKind::Created, FileChangeKind::Modified) => {
                        // No-op: for ruff, modifying a file that it doesn't yet know exists is still a creation.
                    }

                    (FileChangeKind::Modified, FileChangeKind::Created) => {
                        // Uhh, that should probably not happen. Continue considering it a modification.
                    }

                    (FileChangeKind::Modified, FileChangeKind::Deleted) => {
                        *entry.get_mut() = FileChangeKind::Deleted;
                    }

                    (FileChangeKind::Deleted, FileChangeKind::Created) => {
                        *entry.get_mut() = FileChangeKind::Modified;
                    }

                    (FileChangeKind::Deleted, FileChangeKind::Modified) => {
                        // That's weird, but let's consider it a modification.
                        *entry.get_mut() = FileChangeKind::Modified;
                    }

                    (FileChangeKind::Created, FileChangeKind::Created)
                    | (FileChangeKind::Modified, FileChangeKind::Modified)
                    | (FileChangeKind::Deleted, FileChangeKind::Deleted) => {
                        // No-op transitions. Some of them should be impossible, but we handle them anyway.
                    }
                }
            }
            Entry::Vacant(entry) => {
                entry.insert(change.kind());
            }
        }
    }

    fn extend<I>(&mut self, changes: I)
    where
        I: IntoIterator<Item = FileChange>,
        I::IntoIter: ExactSizeIterator,
    {
        let iter = changes.into_iter();
        self.changes.reserve(iter.len());

        for change in iter {
            self.add(change);
        }
    }

    fn iter(&self) -> impl Iterator<Item = FileChange> + '_ {
        self.changes
            .iter()
            .map(|(id, kind)| FileChange::new(*id, *kind))
    }
}
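
// Merge-rule sketch (illustrative, not part of this commit): event storms
// collapse to at most one change per file. `FileId::from_u32` is an *assumed*
// constructor for the `#[newtype_index]` type.
#[cfg(test)]
mod aggregated_changes_tests {
    use super::*;

    #[test]
    fn create_then_delete_cancels_out() {
        let fid = |n: u32| FileId::from_u32(n); // assumed constructor

        let mut changes = AggregatedChanges::default();
        changes.extend([
            FileChange::new(fid(1), FileChangeKind::Created),
            FileChange::new(fid(1), FileChangeKind::Deleted), // never observed
            FileChange::new(fid(2), FileChangeKind::Deleted),
            FileChange::new(fid(2), FileChangeKind::Created), // observed as modified
        ]);

        let merged: Vec<_> = changes.iter().collect();
        assert_eq!(merged.len(), 1);
        assert!(matches!(merged[0].kind(), FileChangeKind::Modified));
    }
}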
fn setup_tracing() {
    let subscriber = Registry::default().with(
        tracing_tree::HierarchicalLayer::default()
            .with_indent_lines(true)
            .with_indent_amount(2)
            .with_bracketed_fields(true)
            .with_thread_ids(true)
            .with_targets(true)
            .with_writer(|| Box::new(std::io::stderr()))
            .with_timer(Uptime::default())
            .with_filter(LoggingFilter {
                trace_level: Level::TRACE,
            }),
    );

    tracing::subscriber::set_global_default(subscriber).unwrap();
}

struct LoggingFilter {
    trace_level: Level,
}

impl LoggingFilter {
    fn is_enabled(&self, meta: &Metadata<'_>) -> bool {
        let filter = if meta.target().starts_with("red_knot") || meta.target().starts_with("ruff") {
            self.trace_level
        } else {
            Level::INFO
        };

        meta.level() <= &filter
    }
}

impl<S> Filter<S> for LoggingFilter {
    fn enabled(&self, meta: &Metadata<'_>, _cx: &Context<'_, S>) -> bool {
        self.is_enabled(meta)
    }

    fn callsite_enabled(&self, meta: &'static Metadata<'static>) -> Interest {
        if self.is_enabled(meta) {
            Interest::always()
        } else {
            Interest::never()
        }
    }

    fn max_level_hint(&self) -> Option<LevelFilter> {
        Some(LevelFilter::from_level(self.trace_level))
    }
}

crates/red_knot/src/module.rs (new file, 1113 lines)
File diff suppressed because it is too large

crates/red_knot/src/parse.rs (new file, 95 lines)
@@ -0,0 +1,95 @@

use std::ops::{Deref, DerefMut};
use std::sync::Arc;

use ruff_python_ast as ast;
use ruff_python_parser::{Mode, ParseError};
use ruff_text_size::{Ranged, TextRange};

use crate::cache::KeyValueCache;
use crate::db::{HasJar, QueryResult, SourceDb, SourceJar};
use crate::files::FileId;

#[derive(Debug, Clone, PartialEq)]
pub struct Parsed {
    inner: Arc<ParsedInner>,
}

#[derive(Debug, PartialEq)]
struct ParsedInner {
    ast: ast::ModModule,
    errors: Vec<ParseError>,
}

impl Parsed {
    fn new(ast: ast::ModModule, errors: Vec<ParseError>) -> Self {
        Self {
            inner: Arc::new(ParsedInner { ast, errors }),
        }
    }

    pub(crate) fn from_text(text: &str) -> Self {
        let result = ruff_python_parser::parse(text, Mode::Module);

        let (module, errors) = match result {
            Ok(ast::Mod::Module(module)) => (module, vec![]),
            Ok(ast::Mod::Expression(expression)) => (
                ast::ModModule {
                    range: expression.range(),
                    body: vec![ast::Stmt::Expr(ast::StmtExpr {
                        range: expression.range(),
                        value: expression.body,
                    })],
                },
                vec![],
            ),
            Err(errors) => (
                ast::ModModule {
                    range: TextRange::default(),
                    body: Vec::new(),
                },
                vec![errors],
            ),
        };

        Parsed::new(module, errors)
    }

    pub fn ast(&self) -> &ast::ModModule {
        &self.inner.ast
    }

    pub fn errors(&self) -> &[ParseError] {
        &self.inner.errors
    }
}
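
// Behavior sketch (illustrative, not part of this commit): `from_text` never
// fails; parse errors travel alongside a best-effort (possibly empty) module,
// so downstream queries always have an AST to work with.
#[cfg(test)]
mod parsed_tests {
    use super::Parsed;

    #[test]
    fn errors_travel_with_the_ast() {
        let ok = Parsed::from_text("x = 1\n");
        assert!(ok.errors().is_empty());
        assert_eq!(ok.ast().body.len(), 1);

        let broken = Parsed::from_text("def f(:\n");
        assert!(!broken.errors().is_empty()); // AST may be empty, but it's usable
    }
}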
#[tracing::instrument(level = "debug", skip(db))]
pub(crate) fn parse<Db>(db: &Db, file_id: FileId) -> QueryResult<Parsed>
where
    Db: SourceDb + HasJar<SourceJar>,
{
    let parsed = db.jar()?;

    parsed.parsed.get(&file_id, |file_id| {
        let source = db.source(*file_id)?;

        Ok(Parsed::from_text(source.text()))
    })
}

#[derive(Debug, Default)]
pub struct ParsedStorage(KeyValueCache<FileId, Parsed>);

impl Deref for ParsedStorage {
    type Target = KeyValueCache<FileId, Parsed>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for ParsedStorage {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

crates/red_knot/src/program/check.rs (new file, 425 lines)
@@ -0,0 +1,425 @@

use rayon::{current_num_threads, yield_local};
use rustc_hash::FxHashSet;

use crate::db::{Database, LintDb, QueryError, QueryResult, SemanticDb};
use crate::files::FileId;
use crate::format::{FormatDb, FormatError};
use crate::lint::Diagnostics;
use crate::program::Program;
use crate::symbols::Dependency;

impl Program {
    /// Checks all open files in the workspace and its dependencies.
    #[tracing::instrument(level = "debug", skip_all)]
    pub fn check(&self, mode: ExecutionMode) -> QueryResult<Vec<String>> {
        self.cancelled()?;

        let mut context = CheckContext::new(self);

        match mode {
            ExecutionMode::SingleThreaded => SingleThreadedExecutor.run(&mut context)?,
            ExecutionMode::ThreadPool => ThreadPoolExecutor.run(&mut context)?,
        };

        Ok(context.finish())
    }

    #[tracing::instrument(level = "debug", skip(self, context))]
    fn check_file(&self, file: FileId, context: &CheckFileContext) -> QueryResult<Diagnostics> {
        self.cancelled()?;

        let symbol_table = self.symbol_table(file)?;
        let dependencies = symbol_table.dependencies();

        if !dependencies.is_empty() {
            let module = self.file_to_module(file)?;

            // TODO scheduling all dependencies here is wasteful if we don't infer any types on them,
            // but I think that's unlikely, so it is okay?
            // Anyway, we need to figure out a way to retrieve the dependencies of a module
            // from the persistent cache. So maybe it should be a separate query after all.
            for dependency in dependencies {
                let dependency_name = match dependency {
                    Dependency::Module(name) => Some(name.clone()),
                    Dependency::Relative { .. } => match &module {
                        Some(module) => module.resolve_dependency(self, dependency)?,
                        None => None,
                    },
                };

                if let Some(dependency_name) = dependency_name {
                    // TODO We may want to have a different check function for non-first-party
                    // files because we only need to index them and not check them.
                    // Supporting non-first-party code also requires supporting typing stubs.
                    if let Some(dependency) = self.resolve_module(dependency_name)? {
                        if dependency.path(self)?.root().kind().is_first_party() {
                            context.schedule_dependency(dependency.path(self)?.file());
                        }
                    }
                }
            }
        }

        let mut diagnostics = Vec::new();

        if self.workspace().is_file_open(file) {
            diagnostics.extend_from_slice(&self.lint_syntax(file)?);
            diagnostics.extend_from_slice(&self.lint_semantic(file)?);

            match self.check_file_formatted(file) {
                Ok(format_diagnostics) => {
                    diagnostics.extend_from_slice(&format_diagnostics);
                }
                Err(FormatError::Query(err)) => {
                    return Err(err);
                }
                Err(FormatError::Format(error)) => {
                    diagnostics.push(format!("Error formatting file: {error}"));
                }
            }
        }

        Ok(Diagnostics::from(diagnostics))
    }
}

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ExecutionMode {
    SingleThreaded,
    ThreadPool,
}

/// Context that stores state information about the entire check operation.
struct CheckContext<'a> {
    /// IDs of the files that have been queued for checking.
    ///
    /// Used to avoid queuing the same file twice.
    scheduled_files: FxHashSet<FileId>,

    /// Reference to the program that is checked.
    program: &'a Program,

    /// The aggregated diagnostics.
    diagnostics: Vec<String>,
}

impl<'a> CheckContext<'a> {
    fn new(program: &'a Program) -> Self {
        Self {
            scheduled_files: FxHashSet::default(),
            program,
            diagnostics: Vec::new(),
        }
    }

    /// Returns the tasks to check all open files in the workspace.
    fn check_open_files(&mut self) -> Vec<CheckOpenFileTask> {
        self.scheduled_files
            .extend(self.program.workspace().open_files());

        self.program
            .workspace()
            .open_files()
            .map(|file_id| CheckOpenFileTask { file_id })
            .collect()
    }

    /// Returns the task to check a dependency.
    fn check_dependency(&mut self, file_id: FileId) -> Option<CheckDependencyTask> {
        if self.scheduled_files.insert(file_id) {
            Some(CheckDependencyTask { file_id })
        } else {
            None
        }
    }

    /// Pushes the result for a single file check operation.
    fn push_diagnostics(&mut self, diagnostics: &Diagnostics) {
        self.diagnostics.extend_from_slice(diagnostics);
    }

    /// Returns a reference to the program that is being checked.
    fn program(&self) -> &'a Program {
        self.program
    }

    /// Creates a task context that is used to check a single file.
    fn task_context<'b, S>(&self, dependency_scheduler: &'b S) -> CheckTaskContext<'a, 'b, S>
    where
        S: ScheduleDependency,
    {
        CheckTaskContext {
            program: self.program,
            dependency_scheduler,
        }
    }

    fn finish(self) -> Vec<String> {
        self.diagnostics
    }
}

/// Trait that abstracts away how a dependency of a file gets scheduled for checking.
trait ScheduleDependency {
    /// Schedules the file with the given ID for checking.
    fn schedule(&self, file_id: FileId);
}

impl<T> ScheduleDependency for T
where
    T: Fn(FileId),
{
    fn schedule(&self, file_id: FileId) {
        let f = self;
        f(file_id);
    }
}
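
// Design note with a small sketch (illustrative, not part of this commit):
// the blanket impl makes every `Fn(FileId)` closure a scheduler, which is how
// `SingleThreadedExecutor` passes a no-op and `ThreadPoolExecutor` passes a
// channel send without defining dedicated types.
#[allow(dead_code)]
fn schedule_dependency_sketch(files: &[FileId]) {
    let noop = |_file_id: FileId| {};
    let log = |file_id: FileId| tracing::trace!("schedule {}", file_id.as_u32());

    // Both closures can be handed out as `&dyn ScheduleDependency`.
    for scheduler in [&noop as &dyn ScheduleDependency, &log] {
        for file in files {
            scheduler.schedule(*file);
        }
    }
}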
|
||||
|
||||
/// Context that is used to run a single file check task.
|
||||
///
|
||||
/// The task is generic over `S` because it is passed across thread boundaries and
|
||||
/// we don't want to add the requirement that [`ScheduleDependency`] must be [`Send`].
|
||||
struct CheckTaskContext<'a, 'scheduler, S>
|
||||
where
|
||||
S: ScheduleDependency,
|
||||
{
|
||||
dependency_scheduler: &'scheduler S,
|
||||
program: &'a Program,
|
||||
}
|
||||
|
||||
impl<'a, 'scheduler, S> CheckTaskContext<'a, 'scheduler, S>
|
||||
where
|
||||
S: ScheduleDependency,
|
||||
{
|
||||
fn as_file_context(&self) -> CheckFileContext<'scheduler> {
|
||||
CheckFileContext {
|
||||
dependency_scheduler: self.dependency_scheduler,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Context passed when checking a single file.
|
||||
///
|
||||
/// This is a trimmed down version of [`CheckTaskContext`] with the type parameter `S` erased
|
||||
/// to avoid monomorphization of [`Program:check_file`].
|
||||
struct CheckFileContext<'a> {
|
||||
dependency_scheduler: &'a dyn ScheduleDependency,
|
||||
}
|
||||
|
||||
impl<'a> CheckFileContext<'a> {
|
||||
fn schedule_dependency(&self, file_id: FileId) {
|
||||
self.dependency_scheduler.schedule(file_id);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum CheckFileTask {
|
||||
OpenFile(CheckOpenFileTask),
|
||||
Dependency(CheckDependencyTask),
|
||||
}
|
||||
|
||||
impl CheckFileTask {
|
||||
/// Runs the task and returns the results for checking this file.
|
||||
fn run<S>(&self, context: &CheckTaskContext<S>) -> QueryResult<Diagnostics>
|
||||
where
|
||||
S: ScheduleDependency,
|
||||
{
|
||||
match self {
|
||||
Self::OpenFile(task) => task.run(context),
|
||||
Self::Dependency(task) => task.run(context),
|
||||
}
|
||||
}
|
||||
|
||||
fn file_id(&self) -> FileId {
|
||||
match self {
|
||||
CheckFileTask::OpenFile(task) => task.file_id,
|
||||
CheckFileTask::Dependency(task) => task.file_id,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Task to check an open file.
|
||||
|
||||
#[derive(Debug)]
|
||||
struct CheckOpenFileTask {
|
||||
file_id: FileId,
|
||||
}
|
||||
|
||||
impl CheckOpenFileTask {
|
||||
fn run<S>(&self, context: &CheckTaskContext<S>) -> QueryResult<Diagnostics>
|
||||
where
|
||||
S: ScheduleDependency,
|
||||
{
|
||||
context
|
||||
.program
|
||||
.check_file(self.file_id, &context.as_file_context())
|
||||
}
|
||||
}
|
||||
|
||||
/// Task to check a dependency file.
|
||||
#[derive(Debug)]
|
||||
struct CheckDependencyTask {
|
||||
file_id: FileId,
|
||||
}
|
||||
|
||||
impl CheckDependencyTask {
|
||||
fn run<S>(&self, context: &CheckTaskContext<S>) -> QueryResult<Diagnostics>
|
||||
where
|
||||
S: ScheduleDependency,
|
||||
{
|
||||
context
|
||||
.program
|
||||
.check_file(self.file_id, &context.as_file_context())
|
||||
}
|
||||
}
|
||||
|
||||
/// Executor that schedules the checking of individual program files.
|
||||
trait CheckExecutor {
|
||||
fn run(self, context: &mut CheckContext) -> QueryResult<()>;
|
||||
}
|
||||
|
||||
/// Executor that runs all check operations on the current thread.
|
||||
///
|
||||
/// The executor does not schedule dependencies for checking.
|
||||
/// The main motivation for scheduling dependencies
|
||||
/// in a multithreaded environment is to parse and index the dependencies concurrently.
|
||||
/// However, that doesn't make sense in a single threaded environment, because the dependencies then compute
|
||||
/// with checking the open files. Checking dependencies in a single threaded environment is more likely
|
||||
/// to hurt performance because we end up analyzing files in their entirety, even if we only need to type check parts of them.
|
||||
#[derive(Debug, Default)]
|
||||
struct SingleThreadedExecutor;
|
||||
|
||||
impl CheckExecutor for SingleThreadedExecutor {
|
||||
fn run(self, context: &mut CheckContext) -> QueryResult<()> {
|
||||
let mut queue = context.check_open_files();
|
||||
|
||||
let noop_schedule_dependency = |_| {};
|
||||
|
||||
while let Some(file) = queue.pop() {
|
||||
context.program().cancelled()?;
|
||||
|
||||
let task_context = context.task_context(&noop_schedule_dependency);
|
||||
context.push_diagnostics(&file.run(&task_context)?);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Executor that runs the check operations on a thread pool.
|
||||
///
|
||||
/// The executor runs each check operation as its own task using a thread pool.
|
||||
///
|
||||
/// Other than [`SingleThreadedExecutor`], this executor schedules dependencies for checking. It
|
||||
/// even schedules dependencies for checking when the thread pool size is 1 for a better debugging experience.
|
||||
#[derive(Debug, Default)]
|
||||
struct ThreadPoolExecutor;
|
||||
|
||||
impl CheckExecutor for ThreadPoolExecutor {
|
||||
fn run(self, context: &mut CheckContext) -> QueryResult<()> {
|
||||
let num_threads = current_num_threads();
|
||||
let single_threaded = num_threads == 1;
|
||||
let span = tracing::trace_span!("ThreadPoolExecutor::run", num_threads);
|
||||
let _ = span.enter();
|
||||
|
||||
let mut queue: Vec<_> = context
|
||||
.check_open_files()
|
||||
.into_iter()
|
||||
.map(CheckFileTask::OpenFile)
|
||||
.collect();
|
||||
|
||||
let (sender, receiver) = if single_threaded {
|
||||
// Use an unbounded queue for single threaded execution to prevent deadlocks
|
||||
// when a single file schedules multiple dependencies.
|
||||
crossbeam::channel::unbounded()
|
||||
} else {
|
||||
// Use a bounded queue to apply backpressure when the orchestration thread isn't able to keep
|
||||
// up processing messages from the worker threads.
|
||||
crossbeam::channel::bounded(num_threads)
|
||||
};
|
||||
|
||||
let schedule_sender = sender.clone();
|
||||
let schedule_dependency = move |file_id| {
|
||||
schedule_sender
|
||||
.send(ThreadPoolMessage::ScheduleDependency(file_id))
|
||||
.unwrap();
|
||||
};
|
||||
|
||||
let result = rayon::in_place_scope(|scope| {
|
||||
let mut pending = 0usize;
|
||||
|
||||
loop {
|
||||
context.program().cancelled()?;
|
||||
|
||||
// 1. Try to get a queued message to ensure that we have always remaining space in the channel to prevent blocking the worker threads.
|
||||
// 2. Try to process a queued file
|
||||
// 3. If there's no queued file wait for the next incoming message.
|
||||
// 4. Exit if there are no more messages and no senders.
|
||||
let message = if let Ok(message) = receiver.try_recv() {
|
||||
message
|
||||
} else if let Some(task) = queue.pop() {
|
||||
pending += 1;
|
||||
|
||||
let task_context = context.task_context(&schedule_dependency);
|
||||
let sender = sender.clone();
|
||||
let task_span = tracing::trace_span!(
|
||||
parent: &span,
|
||||
"CheckFileTask::run",
|
||||
file_id = task.file_id().as_u32(),
|
||||
);
|
||||
|
||||
scope.spawn(move |_| {
|
||||
task_span.in_scope(|| match task.run(&task_context) {
|
||||
Ok(result) => {
|
||||
sender.send(ThreadPoolMessage::Completed(result)).unwrap();
|
||||
}
|
||||
Err(err) => sender.send(ThreadPoolMessage::Errored(err)).unwrap(),
|
||||
});
|
||||
});
|
||||
|
||||
// If this is a single threaded rayon thread pool, yield the current thread
|
||||
// or we never start processing the work items.
|
||||
if single_threaded {
|
||||
yield_local();
|
||||
}
|
||||
|
||||
continue;
|
||||
} else if let Ok(message) = receiver.recv() {
|
||||
message
|
||||
} else {
|
||||
break;
|
||||
};
|
||||
|
||||
match message {
|
||||
ThreadPoolMessage::ScheduleDependency(dependency) => {
|
||||
if let Some(task) = context.check_dependency(dependency) {
|
||||
queue.push(CheckFileTask::Dependency(task));
|
||||
}
|
||||
}
|
||||
ThreadPoolMessage::Completed(diagnostics) => {
|
||||
context.push_diagnostics(&diagnostics);
|
||||
pending -= 1;
|
||||
|
||||
if pending == 0 && queue.is_empty() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
ThreadPoolMessage::Errored(err) => {
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
});
|
||||
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum ThreadPoolMessage {
|
||||
ScheduleDependency(FileId),
|
||||
Completed(Diagnostics),
|
||||
Errored(QueryError),
|
||||
}
|
||||
44
crates/red_knot/src/program/format.rs
Normal file
44
crates/red_knot/src/program/format.rs
Normal file
@@ -0,0 +1,44 @@
|
||||
use crate::db::{QueryResult, SourceDb};
|
||||
use crate::format::{FormatDb, FormatError, FormattedFile};
|
||||
use crate::program::Program;
|
||||
|
||||
impl Program {
|
||||
#[tracing::instrument(level = "trace", skip(self))]
|
||||
pub fn format(&mut self) -> QueryResult<()> {
|
||||
// Formats all open files
|
||||
|
||||
// TODO make `Executor` from `check` reusable.
|
||||
for file in self.workspace.open_files() {
|
||||
match self.format_file(file) {
|
||||
Ok(FormattedFile::Formatted(content)) => {
|
||||
let path = self.file_path(file);
|
||||
|
||||
// TODO: This is problematic because it immediately re-triggers the file watcher.
|
||||
// A possible solution is to track the self "inflicted" changes inside of programs
|
||||
// by tracking the file revision right after the write. It could then use the revision
|
||||
// to determine which changes are safe to ignore (and in which context).
|
||||
// An other alternative is to not write as part of the `format` command and instead
|
||||
// return a Vec with the format results and leave the writing to the caller.
|
||||
// I think that's undesired because a) we still need a way to tell the formatter
|
||||
// that it won't be necessary to format the content again and
|
||||
// b) it would reduce concurrency because the writing would need to wait for the file
|
||||
// formatting to be complete, unless we use some form of communication channel.
|
||||
std::fs::write(path, content).expect("Unable to write file");
|
||||
}
|
||||
Ok(FormattedFile::Unchanged) => {
|
||||
// No op
|
||||
}
|
||||
Err(FormatError::Query(error)) => {
|
||||
return Err(error);
|
||||
}
|
||||
Err(FormatError::Format(error)) => {
|
||||
// TODO proper error handling. We should either propagate this error or
|
||||
// emit a diagnostic (probably this).
|
||||
tracing::warn!("Failed to format file: {}", error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
252
crates/red_knot/src/program/mod.rs
Normal file
252
crates/red_knot/src/program/mod.rs
Normal file
@@ -0,0 +1,252 @@
use ruff_formatter::PrintedRange;
use ruff_text_size::TextRange;
use std::path::Path;
use std::sync::Arc;

use crate::db::{
    Database, Db, DbRuntime, HasJar, HasJars, JarsStorage, LintDb, LintJar, ParallelDatabase,
    QueryResult, SemanticDb, SemanticJar, Snapshot, SourceDb, SourceJar,
};
use crate::files::{FileId, Files};
use crate::format::{
    check_formatted, format_file, format_file_range, FormatDb, FormatError, FormatJar,
    FormattedFile,
};
use crate::lint::{lint_semantic, lint_syntax, Diagnostics};
use crate::module::{
    add_module, file_to_module, path_to_module, resolve_module, set_module_search_paths, Module,
    ModuleData, ModuleName, ModuleSearchPath,
};
use crate::parse::{parse, Parsed};
use crate::source::{source_text, Source};
use crate::symbols::{symbol_table, SymbolId, SymbolTable};
use crate::types::{infer_symbol_type, Type};
use crate::Workspace;

pub mod check;
mod format;

#[derive(Debug)]
pub struct Program {
    jars: JarsStorage<Program>,
    files: Files,
    workspace: Workspace,
}

impl Program {
    pub fn new(workspace: Workspace) -> Self {
        Self {
            jars: JarsStorage::default(),
            files: Files::default(),
            workspace,
        }
    }

    pub fn apply_changes<I>(&mut self, changes: I)
    where
        I: IntoIterator<Item = FileChange>,
    {
        let (source, semantic, lint, format) = self.jars_mut();
        for change in changes {
            semantic.module_resolver.remove_module(change.id);
            semantic.symbol_tables.remove(&change.id);
            source.sources.remove(&change.id);
            source.parsed.remove(&change.id);
            // TODO: remove all dependent modules as well
            semantic.type_store.remove_module(change.id);
            lint.lint_syntax.remove(&change.id);
            lint.lint_semantic.remove(&change.id);
            format.formatted.remove(&change.id);
        }
    }

    pub fn files(&self) -> &Files {
        &self.files
    }

    pub fn workspace(&self) -> &Workspace {
        &self.workspace
    }

    pub fn workspace_mut(&mut self) -> &mut Workspace {
        &mut self.workspace
    }
}

impl SourceDb for Program {
    fn file_id(&self, path: &Path) -> FileId {
        self.files.intern(path)
    }

    fn file_path(&self, file_id: FileId) -> Arc<Path> {
        self.files.path(file_id)
    }

    fn source(&self, file_id: FileId) -> QueryResult<Source> {
        source_text(self, file_id)
    }

    fn parse(&self, file_id: FileId) -> QueryResult<Parsed> {
        parse(self, file_id)
    }
}

impl SemanticDb for Program {
    fn resolve_module(&self, name: ModuleName) -> QueryResult<Option<Module>> {
        resolve_module(self, name)
    }

    fn file_to_module(&self, file_id: FileId) -> QueryResult<Option<Module>> {
        file_to_module(self, file_id)
    }

    fn path_to_module(&self, path: &Path) -> QueryResult<Option<Module>> {
        path_to_module(self, path)
    }

    fn symbol_table(&self, file_id: FileId) -> QueryResult<Arc<SymbolTable>> {
        symbol_table(self, file_id)
    }

    fn infer_symbol_type(&self, file_id: FileId, symbol_id: SymbolId) -> QueryResult<Type> {
        infer_symbol_type(self, file_id, symbol_id)
    }

    // Mutations
    fn add_module(&mut self, path: &Path) -> Option<(Module, Vec<Arc<ModuleData>>)> {
        add_module(self, path)
    }

    fn set_module_search_paths(&mut self, paths: Vec<ModuleSearchPath>) {
        set_module_search_paths(self, paths);
    }
}

impl LintDb for Program {
    fn lint_syntax(&self, file_id: FileId) -> QueryResult<Diagnostics> {
        lint_syntax(self, file_id)
    }

    fn lint_semantic(&self, file_id: FileId) -> QueryResult<Diagnostics> {
        lint_semantic(self, file_id)
    }
}

impl FormatDb for Program {
    fn format_file(&self, file_id: FileId) -> Result<FormattedFile, FormatError> {
        format_file(self, file_id)
    }

    fn format_file_range(
        &self,
        file_id: FileId,
        range: TextRange,
    ) -> Result<PrintedRange, FormatError> {
        format_file_range(self, file_id, range)
    }

    fn check_file_formatted(&self, file_id: FileId) -> Result<Diagnostics, FormatError> {
        check_formatted(self, file_id)
    }
}

impl Db for Program {}

impl Database for Program {
    fn runtime(&self) -> &DbRuntime {
        self.jars.runtime()
    }

    fn runtime_mut(&mut self) -> &mut DbRuntime {
        self.jars.runtime_mut()
    }
}

impl ParallelDatabase for Program {
    fn snapshot(&self) -> Snapshot<Self> {
        Snapshot::new(Self {
            jars: self.jars.snapshot(),
            files: self.files.clone(),
            workspace: self.workspace.clone(),
        })
    }
}

impl HasJars for Program {
    type Jars = (SourceJar, SemanticJar, LintJar, FormatJar);

    fn jars(&self) -> QueryResult<&Self::Jars> {
        self.jars.jars()
    }

    fn jars_mut(&mut self) -> &mut Self::Jars {
        self.jars.jars_mut()
    }
}

impl HasJar<SourceJar> for Program {
    fn jar(&self) -> QueryResult<&SourceJar> {
        Ok(&self.jars()?.0)
    }

    fn jar_mut(&mut self) -> &mut SourceJar {
        &mut self.jars_mut().0
    }
}

impl HasJar<SemanticJar> for Program {
    fn jar(&self) -> QueryResult<&SemanticJar> {
        Ok(&self.jars()?.1)
    }

    fn jar_mut(&mut self) -> &mut SemanticJar {
        &mut self.jars_mut().1
    }
}

impl HasJar<LintJar> for Program {
    fn jar(&self) -> QueryResult<&LintJar> {
        Ok(&self.jars()?.2)
    }

    fn jar_mut(&mut self) -> &mut LintJar {
        &mut self.jars_mut().2
    }
}

impl HasJar<FormatJar> for Program {
    fn jar(&self) -> QueryResult<&FormatJar> {
        Ok(&self.jars()?.3)
    }

    fn jar_mut(&mut self) -> &mut FormatJar {
        &mut self.jars_mut().3
    }
}

#[derive(Copy, Clone, Debug)]
pub struct FileChange {
    id: FileId,
    kind: FileChangeKind,
}

impl FileChange {
    pub fn new(file_id: FileId, kind: FileChangeKind) -> Self {
        Self { id: file_id, kind }
    }

    pub fn file_id(&self) -> FileId {
        self.id
    }

    pub fn kind(&self) -> FileChangeKind {
        self.kind
    }
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum FileChangeKind {
    Created,
    Modified,
    Deleted,
}
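A minimal sketch of how a caller might combine the pieces above: invalidate every cached value derived from a changed file, then re-run a query through the `Program` database. It assumes the items and traits from the file above are in scope; error handling and the surrounding event loop are elided.

fn on_file_modified(program: &mut Program, change: FileChange) -> QueryResult<Diagnostics> {
    // Drop cached sources, parses, symbol tables, types, lint results,
    // and format results for the changed file...
    program.apply_changes([change]);
    // ...then recompute on demand; `lint_syntax` re-reads and re-parses lazily.
    program.lint_syntax(change.file_id())
}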
crates/red_knot/src/source.rs (new file, 96 lines)
@@ -0,0 +1,96 @@
use crate::cache::KeyValueCache;
use crate::db::{HasJar, QueryResult, SourceDb, SourceJar};
use ruff_notebook::Notebook;
use ruff_python_ast::PySourceType;
use std::ops::{Deref, DerefMut};
use std::sync::Arc;

use crate::files::FileId;

#[tracing::instrument(level = "debug", skip(db))]
pub(crate) fn source_text<Db>(db: &Db, file_id: FileId) -> QueryResult<Source>
where
    Db: SourceDb + HasJar<SourceJar>,
{
    let sources = &db.jar()?.sources;

    sources.get(&file_id, |file_id| {
        let path = db.file_path(*file_id);

        let source_text = std::fs::read_to_string(&path).unwrap_or_else(|err| {
            tracing::error!("Failed to read file '{path:?}': {err}. Falling back to empty text");
            String::new()
        });

        let python_ty = PySourceType::from(&path);

        let kind = match python_ty {
            PySourceType::Python => SourceKind::Python(Arc::from(source_text)),
            PySourceType::Stub => SourceKind::Stub(Arc::from(source_text)),
            PySourceType::Ipynb => {
                let notebook = Notebook::from_source_code(&source_text).unwrap_or_else(|err| {
                    // TODO should this be changed to never fail?
                    // Or should we instead add a diagnostic somewhere? But what would we return in this case?
                    tracing::error!(
                        "Failed to parse notebook '{path:?}': {err}. Falling back to an empty notebook"
                    );
                    Notebook::from_source_code("").unwrap()
                });

                SourceKind::IpyNotebook(Arc::new(notebook))
            }
        };

        Ok(Source { kind })
    })
}

#[derive(Debug, Clone, PartialEq)]
pub enum SourceKind {
    Python(Arc<str>),
    Stub(Arc<str>),
    IpyNotebook(Arc<Notebook>),
}

#[derive(Debug, Clone, PartialEq)]
pub struct Source {
    kind: SourceKind,
}

impl Source {
    pub fn python<T: Into<Arc<str>>>(source: T) -> Self {
        Self {
            kind: SourceKind::Python(source.into()),
        }
    }

    pub fn kind(&self) -> &SourceKind {
        &self.kind
    }

    pub fn text(&self) -> &str {
        match &self.kind {
            SourceKind::Python(text) => text,
            SourceKind::Stub(text) => text,
            SourceKind::IpyNotebook(notebook) => notebook.source_code(),
        }
    }
}

#[derive(Debug, Default)]
pub struct SourceStorage(pub(crate) KeyValueCache<FileId, Source>);

impl Deref for SourceStorage {
    type Target = KeyValueCache<FileId, Source>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for SourceStorage {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
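A small usage sketch of the `Source` API above. The point of `text()` is that downstream queries never need to care which `SourceKind` they are looking at; for notebooks it delegates to `Notebook::source_code()`.

fn main() {
    let source = Source::python("x = 1\n");
    // `kind()` exposes the variant, `text()` a uniform `&str` view.
    assert!(matches!(source.kind(), SourceKind::Python(_)));
    assert_eq!(source.text(), "x = 1\n");
}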
crates/red_knot/src/symbols.rs (new file, 939 lines)
@@ -0,0 +1,939 @@
#![allow(dead_code)]

use std::hash::{Hash, Hasher};
use std::iter::{Copied, DoubleEndedIterator, FusedIterator};
use std::num::NonZeroU32;
use std::ops::{Deref, DerefMut};
use std::sync::Arc;

use bitflags::bitflags;
use hashbrown::hash_map::{Keys, RawEntryMut};
use rustc_hash::{FxHashMap, FxHasher};

use ruff_index::{newtype_index, IndexVec};
use ruff_python_ast as ast;
use ruff_python_ast::visitor::preorder::PreorderVisitor;

use crate::ast_ids::TypedNodeKey;
use crate::cache::KeyValueCache;
use crate::db::{HasJar, QueryResult, SemanticDb, SemanticJar};
use crate::files::FileId;
use crate::module::ModuleName;
use crate::Name;

#[allow(unreachable_pub)]
#[tracing::instrument(level = "debug", skip(db))]
pub fn symbol_table<Db>(db: &Db, file_id: FileId) -> QueryResult<Arc<SymbolTable>>
where
    Db: SemanticDb + HasJar<SemanticJar>,
{
    let jar = db.jar()?;

    jar.symbol_tables.get(&file_id, |_| {
        let parsed = db.parse(file_id)?;
        Ok(Arc::from(SymbolTable::from_ast(parsed.ast())))
    })
}

type Map<K, V> = hashbrown::HashMap<K, V, ()>;

#[newtype_index]
pub(crate) struct ScopeId;

impl ScopeId {
    pub(crate) fn scope(self, table: &SymbolTable) -> &Scope {
        &table.scopes_by_id[self]
    }
}

#[newtype_index]
pub struct SymbolId;

impl SymbolId {
    pub(crate) fn symbol(self, table: &SymbolTable) -> &Symbol {
        &table.symbols_by_id[self]
    }
}

#[derive(Copy, Clone, Debug, PartialEq)]
pub(crate) enum ScopeKind {
    Module,
    Annotation,
    Class,
    Function,
}

#[derive(Debug)]
pub(crate) struct Scope {
    name: Name,
    kind: ScopeKind,
    child_scopes: Vec<ScopeId>,
    // symbol IDs, hashed by symbol name
    symbols_by_name: Map<SymbolId, ()>,
}

impl Scope {
    pub(crate) fn name(&self) -> &str {
        self.name.as_str()
    }

    pub(crate) fn kind(&self) -> ScopeKind {
        self.kind
    }
}

#[derive(Debug)]
pub(crate) enum Kind {
    FreeVar,
    CellVar,
    CellVarAssigned,
    ExplicitGlobal,
    ImplicitGlobal,
}

bitflags! {
    #[derive(Copy, Clone, Debug)]
    pub(crate) struct SymbolFlags: u8 {
        const IS_USED = 1 << 0;
        const IS_DEFINED = 1 << 1;
        /// TODO: This flag is not yet set by anything
        const MARKED_GLOBAL = 1 << 2;
        /// TODO: This flag is not yet set by anything
        const MARKED_NONLOCAL = 1 << 3;
    }
}

#[derive(Debug)]
pub(crate) struct Symbol {
    name: Name,
    flags: SymbolFlags,
    // kind: Kind,
}

impl Symbol {
    pub(crate) fn name(&self) -> &str {
        self.name.as_str()
    }

    /// Is the symbol used in its containing scope?
    pub(crate) fn is_used(&self) -> bool {
        self.flags.contains(SymbolFlags::IS_USED)
    }

    /// Is the symbol defined in its containing scope?
    pub(crate) fn is_defined(&self) -> bool {
        self.flags.contains(SymbolFlags::IS_DEFINED)
    }

    // TODO: implement Symbol.kind 2-pass analysis to categorize as: free-var, cell-var,
    // explicit-global, implicit-global and implement Symbol.kind by modifying the preorder
    // traversal code
}

// TODO storing TypedNodeKey for definitions means we have to search to find them again in the AST;
// this is at best O(log n). If looking up definitions is a bottleneck we should look for
// alternatives here.
#[derive(Clone, Debug)]
pub(crate) enum Definition {
    // For the import cases, we don't need a reference to any arbitrary AST subtrees (annotations,
    // RHS), and referencing just the import statement node is imprecise (a single import statement
    // can assign many symbols, we'd have to re-search for the one we care about), so we just copy
    // the small amount of information we need from the AST.
    Import(ImportDefinition),
    ImportFrom(ImportFromDefinition),
    ClassDef(TypedNodeKey<ast::StmtClassDef>),
    FunctionDef(TypedNodeKey<ast::StmtFunctionDef>),
    Assignment(TypedNodeKey<ast::StmtAssign>),
    AnnotatedAssignment(TypedNodeKey<ast::StmtAnnAssign>),
    // TODO with statements, except handlers, function args...
}

#[derive(Clone, Debug)]
pub(crate) struct ImportDefinition {
    pub(crate) module: ModuleName,
}

#[derive(Clone, Debug)]
pub(crate) struct ImportFromDefinition {
    pub(crate) module: Option<ModuleName>,
    pub(crate) name: Name,
    pub(crate) level: u32,
}

impl ImportFromDefinition {
    pub(crate) fn module(&self) -> Option<&ModuleName> {
        self.module.as_ref()
    }

    pub(crate) fn name(&self) -> &Name {
        &self.name
    }

    pub(crate) fn level(&self) -> u32 {
        self.level
    }
}

#[derive(Debug, Clone)]
pub enum Dependency {
    Module(ModuleName),
    Relative {
        level: NonZeroU32,
        module: Option<ModuleName>,
    },
}

/// Table of all symbols in all scopes for a module.
#[derive(Debug)]
pub struct SymbolTable {
    scopes_by_id: IndexVec<ScopeId, Scope>,
    symbols_by_id: IndexVec<SymbolId, Symbol>,
    defs: FxHashMap<SymbolId, Vec<Definition>>,
    dependencies: Vec<Dependency>,
}

impl SymbolTable {
    pub(crate) fn from_ast(module: &ast::ModModule) -> Self {
        let root_scope_id = SymbolTable::root_scope_id();
        let mut builder = SymbolTableBuilder {
            table: SymbolTable::new(),
            scopes: vec![root_scope_id],
            current_definition: None,
        };
        builder.visit_body(&module.body);
        builder.table
    }

    pub(crate) fn new() -> Self {
        let mut table = SymbolTable {
            scopes_by_id: IndexVec::new(),
            symbols_by_id: IndexVec::new(),
            defs: FxHashMap::default(),
            dependencies: Vec::new(),
        };
        table.scopes_by_id.push(Scope {
            name: Name::new("<module>"),
            kind: ScopeKind::Module,
            child_scopes: Vec::new(),
            symbols_by_name: Map::default(),
        });
        table
    }

    pub(crate) fn dependencies(&self) -> &[Dependency] {
        &self.dependencies
    }

    pub(crate) const fn root_scope_id() -> ScopeId {
        ScopeId::from_usize(0)
    }

    pub(crate) fn root_scope(&self) -> &Scope {
        &self.scopes_by_id[SymbolTable::root_scope_id()]
    }

    pub(crate) fn symbol_ids_for_scope(&self, scope_id: ScopeId) -> Copied<Keys<SymbolId, ()>> {
        self.scopes_by_id[scope_id].symbols_by_name.keys().copied()
    }

    pub(crate) fn symbols_for_scope(
        &self,
        scope_id: ScopeId,
    ) -> SymbolIterator<Copied<Keys<SymbolId, ()>>> {
        SymbolIterator {
            table: self,
            ids: self.symbol_ids_for_scope(scope_id),
        }
    }

    pub(crate) fn root_symbol_ids(&self) -> Copied<Keys<SymbolId, ()>> {
        self.symbol_ids_for_scope(SymbolTable::root_scope_id())
    }

    pub(crate) fn root_symbols(&self) -> SymbolIterator<Copied<Keys<SymbolId, ()>>> {
        self.symbols_for_scope(SymbolTable::root_scope_id())
    }

    pub(crate) fn child_scope_ids_of(&self, scope_id: ScopeId) -> &[ScopeId] {
        &self.scopes_by_id[scope_id].child_scopes
    }

    pub(crate) fn child_scopes_of(&self, scope_id: ScopeId) -> ScopeIterator<&[ScopeId]> {
        ScopeIterator {
            table: self,
            ids: self.child_scope_ids_of(scope_id),
        }
    }

    pub(crate) fn root_child_scope_ids(&self) -> &[ScopeId] {
        self.child_scope_ids_of(SymbolTable::root_scope_id())
    }

    pub(crate) fn root_child_scopes(&self) -> ScopeIterator<&[ScopeId]> {
        self.child_scopes_of(SymbolTable::root_scope_id())
    }

    pub(crate) fn symbol_id_by_name(&self, scope_id: ScopeId, name: &str) -> Option<SymbolId> {
        let scope = &self.scopes_by_id[scope_id];
        let hash = SymbolTable::hash_name(name);
        let name = Name::new(name);
        scope
            .symbols_by_name
            .raw_entry()
            .from_hash(hash, |symid| self.symbols_by_id[*symid].name == name)
            .map(|(symbol_id, ())| *symbol_id)
    }

    pub(crate) fn symbol_by_name(&self, scope_id: ScopeId, name: &str) -> Option<&Symbol> {
        Some(&self.symbols_by_id[self.symbol_id_by_name(scope_id, name)?])
    }

    pub(crate) fn root_symbol_id_by_name(&self, name: &str) -> Option<SymbolId> {
        self.symbol_id_by_name(SymbolTable::root_scope_id(), name)
    }

    pub(crate) fn root_symbol_by_name(&self, name: &str) -> Option<&Symbol> {
        self.symbol_by_name(SymbolTable::root_scope_id(), name)
    }

    pub(crate) fn definitions(&self, symbol_id: SymbolId) -> &[Definition] {
        self.defs
            .get(&symbol_id)
            .map(std::vec::Vec::as_slice)
            .unwrap_or_default()
    }

    pub(crate) fn all_definitions(&self) -> impl Iterator<Item = (SymbolId, &Definition)> + '_ {
        self.defs
            .iter()
            .flat_map(|(sym_id, defs)| defs.iter().map(move |def| (*sym_id, def)))
    }

    fn add_or_update_symbol(
        &mut self,
        scope_id: ScopeId,
        name: &str,
        flags: SymbolFlags,
    ) -> SymbolId {
        let hash = SymbolTable::hash_name(name);
        let scope = &mut self.scopes_by_id[scope_id];
        let name = Name::new(name);

        let entry = scope
            .symbols_by_name
            .raw_entry_mut()
            .from_hash(hash, |existing| self.symbols_by_id[*existing].name == name);

        match entry {
            RawEntryMut::Occupied(entry) => {
                if let Some(symbol) = self.symbols_by_id.get_mut(*entry.key()) {
                    symbol.flags.insert(flags);
                }
                *entry.key()
            }
            RawEntryMut::Vacant(entry) => {
                let id = self.symbols_by_id.push(Symbol { name, flags });
                entry.insert_with_hasher(hash, id, (), |_| hash);
                id
            }
        }
    }

    fn add_child_scope(
        &mut self,
        parent_scope_id: ScopeId,
        name: &str,
        kind: ScopeKind,
    ) -> ScopeId {
        let new_scope_id = self.scopes_by_id.push(Scope {
            name: Name::new(name),
            kind,
            child_scopes: Vec::new(),
            symbols_by_name: Map::default(),
        });
        let parent_scope = &mut self.scopes_by_id[parent_scope_id];
        parent_scope.child_scopes.push(new_scope_id);
        new_scope_id
    }

    fn hash_name(name: &str) -> u64 {
        let mut hasher = FxHasher::default();
        name.hash(&mut hasher);
        hasher.finish()
    }
}

pub(crate) struct SymbolIterator<'a, I> {
    table: &'a SymbolTable,
    ids: I,
}

impl<'a, I> Iterator for SymbolIterator<'a, I>
where
    I: Iterator<Item = SymbolId>,
{
    type Item = &'a Symbol;

    fn next(&mut self) -> Option<Self::Item> {
        let id = self.ids.next()?;
        Some(&self.table.symbols_by_id[id])
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.ids.size_hint()
    }
}

impl<'a, I> FusedIterator for SymbolIterator<'a, I> where
    I: Iterator<Item = SymbolId> + FusedIterator
{
}

impl<'a, I> DoubleEndedIterator for SymbolIterator<'a, I>
where
    I: Iterator<Item = SymbolId> + DoubleEndedIterator,
{
    fn next_back(&mut self) -> Option<Self::Item> {
        let id = self.ids.next_back()?;
        Some(&self.table.symbols_by_id[id])
    }
}

pub(crate) struct ScopeIterator<'a, I> {
    table: &'a SymbolTable,
    ids: I,
}

impl<'a, I> Iterator for ScopeIterator<'a, I>
where
    I: Iterator<Item = ScopeId>,
{
    type Item = &'a Scope;

    fn next(&mut self) -> Option<Self::Item> {
        let id = self.ids.next()?;
        Some(&self.table.scopes_by_id[id])
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.ids.size_hint()
    }
}

impl<'a, I> FusedIterator for ScopeIterator<'a, I> where I: Iterator<Item = ScopeId> + FusedIterator {}

impl<'a, I> DoubleEndedIterator for ScopeIterator<'a, I>
where
    I: Iterator<Item = ScopeId> + DoubleEndedIterator,
{
    fn next_back(&mut self) -> Option<Self::Item> {
        let id = self.ids.next_back()?;
        Some(&self.table.scopes_by_id[id])
    }
}

struct SymbolTableBuilder {
    table: SymbolTable,
    scopes: Vec<ScopeId>,
    /// the definition whose target(s) we are currently walking
    current_definition: Option<Definition>,
}

impl SymbolTableBuilder {
    fn add_or_update_symbol(&mut self, identifier: &str, flags: SymbolFlags) -> SymbolId {
        self.table
            .add_or_update_symbol(self.cur_scope(), identifier, flags)
    }

    fn add_or_update_symbol_with_def(
        &mut self,
        identifier: &str,
        definition: Definition,
    ) -> SymbolId {
        let symbol_id = self.add_or_update_symbol(identifier, SymbolFlags::IS_DEFINED);
        self.table
            .defs
            .entry(symbol_id)
            .or_default()
            .push(definition);
        symbol_id
    }

    fn push_scope(&mut self, child_of: ScopeId, name: &str, kind: ScopeKind) -> ScopeId {
        let scope_id = self.table.add_child_scope(child_of, name, kind);
        self.scopes.push(scope_id);
        scope_id
    }

    fn pop_scope(&mut self) -> ScopeId {
        self.scopes
            .pop()
            .expect("Scope stack should never be empty")
    }

    fn cur_scope(&self) -> ScopeId {
        *self
            .scopes
            .last()
            .expect("Scope stack should never be empty")
    }

    fn with_type_params(
        &mut self,
        name: &str,
        params: &Option<Box<ast::TypeParams>>,
        nested: impl FnOnce(&mut Self),
    ) {
        if let Some(type_params) = params {
            self.push_scope(self.cur_scope(), name, ScopeKind::Annotation);
            for type_param in &type_params.type_params {
                let name = match type_param {
                    ast::TypeParam::TypeVar(ast::TypeParamTypeVar { name, .. }) => name,
                    ast::TypeParam::ParamSpec(ast::TypeParamParamSpec { name, .. }) => name,
                    ast::TypeParam::TypeVarTuple(ast::TypeParamTypeVarTuple { name, .. }) => name,
                };
                self.add_or_update_symbol(name, SymbolFlags::IS_DEFINED);
            }
        }
        nested(self);
        if params.is_some() {
            self.pop_scope();
        }
    }
}

impl PreorderVisitor<'_> for SymbolTableBuilder {
    fn visit_expr(&mut self, expr: &ast::Expr) {
        if let ast::Expr::Name(ast::ExprName { id, ctx, .. }) = expr {
            let flags = match ctx {
                ast::ExprContext::Load => SymbolFlags::IS_USED,
                ast::ExprContext::Store => SymbolFlags::IS_DEFINED,
                ast::ExprContext::Del => SymbolFlags::IS_DEFINED,
                ast::ExprContext::Invalid => SymbolFlags::empty(),
            };
            self.add_or_update_symbol(id, flags);
            if flags.contains(SymbolFlags::IS_DEFINED) {
                if let Some(curdef) = self.current_definition.clone() {
                    self.add_or_update_symbol_with_def(id, curdef);
                }
            }
        }
        ast::visitor::preorder::walk_expr(self, expr);
    }

    fn visit_stmt(&mut self, stmt: &ast::Stmt) {
        // TODO need to capture more definition statements here
        match stmt {
            ast::Stmt::ClassDef(node) => {
                let def = Definition::ClassDef(TypedNodeKey::from_node(node));
                self.add_or_update_symbol_with_def(&node.name, def);
                self.with_type_params(&node.name, &node.type_params, |builder| {
                    builder.push_scope(builder.cur_scope(), &node.name, ScopeKind::Class);
                    ast::visitor::preorder::walk_stmt(builder, stmt);
                    builder.pop_scope();
                });
            }
            ast::Stmt::FunctionDef(node) => {
                let def = Definition::FunctionDef(TypedNodeKey::from_node(node));
                self.add_or_update_symbol_with_def(&node.name, def);
                self.with_type_params(&node.name, &node.type_params, |builder| {
                    builder.push_scope(builder.cur_scope(), &node.name, ScopeKind::Function);
                    ast::visitor::preorder::walk_stmt(builder, stmt);
                    builder.pop_scope();
                });
            }
            ast::Stmt::Import(ast::StmtImport { names, .. }) => {
                for alias in names {
                    let symbol_name = if let Some(asname) = &alias.asname {
                        asname.id.as_str()
                    } else {
                        alias.name.id.split('.').next().unwrap()
                    };

                    let module = ModuleName::new(&alias.name.id);

                    let def = Definition::Import(ImportDefinition {
                        module: module.clone(),
                    });
                    self.add_or_update_symbol_with_def(symbol_name, def);
                    self.table.dependencies.push(Dependency::Module(module));
                }
            }
            ast::Stmt::ImportFrom(ast::StmtImportFrom {
                module,
                names,
                level,
                ..
            }) => {
                let module = module.as_ref().map(|m| ModuleName::new(&m.id));

                for alias in names {
                    let symbol_name = if let Some(asname) = &alias.asname {
                        asname.id.as_str()
                    } else {
                        alias.name.id.as_str()
                    };
                    let def = Definition::ImportFrom(ImportFromDefinition {
                        module: module.clone(),
                        name: Name::new(&alias.name.id),
                        level: *level,
                    });
                    self.add_or_update_symbol_with_def(symbol_name, def);
                }

                let dependency = if let Some(module) = module {
                    match NonZeroU32::new(*level) {
                        Some(level) => Dependency::Relative {
                            level,
                            module: Some(module),
                        },
                        None => Dependency::Module(module),
                    }
                } else {
                    Dependency::Relative {
                        level: NonZeroU32::new(*level)
                            .expect("Import without a module to have a level > 0"),
                        module,
                    }
                };

                self.table.dependencies.push(dependency);
            }
            ast::Stmt::Assign(node) => {
                debug_assert!(self.current_definition.is_none());
                self.current_definition =
                    Some(Definition::Assignment(TypedNodeKey::from_node(node)));
                ast::visitor::preorder::walk_stmt(self, stmt);
                self.current_definition = None;
            }
            _ => {
                ast::visitor::preorder::walk_stmt(self, stmt);
            }
        }
    }
}

#[derive(Debug, Default)]
pub struct SymbolTablesStorage(KeyValueCache<FileId, Arc<SymbolTable>>);

impl Deref for SymbolTablesStorage {
    type Target = KeyValueCache<FileId, Arc<SymbolTable>>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for SymbolTablesStorage {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

#[cfg(test)]
mod tests {
    use textwrap::dedent;

    use crate::parse::Parsed;
    use crate::symbols::ScopeKind;

    use super::{SymbolFlags, SymbolId, SymbolIterator, SymbolTable};

    mod from_ast {
        use super::*;

        fn parse(code: &str) -> Parsed {
            Parsed::from_text(&dedent(code))
        }

        fn names<I>(it: SymbolIterator<I>) -> Vec<&str>
        where
            I: Iterator<Item = SymbolId>,
        {
            let mut symbols: Vec<_> = it.map(|sym| sym.name.as_str()).collect();
            symbols.sort_unstable();
            symbols
        }

        #[test]
        fn empty() {
            let parsed = parse("");
            let table = SymbolTable::from_ast(parsed.ast());
            assert_eq!(names(table.root_symbols()).len(), 0);
        }

        #[test]
        fn simple() {
            let parsed = parse("x");
            let table = SymbolTable::from_ast(parsed.ast());
            assert_eq!(names(table.root_symbols()), vec!["x"]);
            assert_eq!(
                table
                    .definitions(table.root_symbol_id_by_name("x").unwrap())
                    .len(),
                0
            );
        }

        #[test]
        fn annotation_only() {
            let parsed = parse("x: int");
            let table = SymbolTable::from_ast(parsed.ast());
            assert_eq!(names(table.root_symbols()), vec!["int", "x"]);
            // TODO record definition
        }

        #[test]
        fn import() {
            let parsed = parse("import foo");
            let table = SymbolTable::from_ast(parsed.ast());
            assert_eq!(names(table.root_symbols()), vec!["foo"]);
            assert_eq!(
                table
                    .definitions(table.root_symbol_id_by_name("foo").unwrap())
                    .len(),
                1
            );
        }

        #[test]
        fn import_sub() {
            let parsed = parse("import foo.bar");
            let table = SymbolTable::from_ast(parsed.ast());
            assert_eq!(names(table.root_symbols()), vec!["foo"]);
        }

        #[test]
        fn import_as() {
            let parsed = parse("import foo.bar as baz");
            let table = SymbolTable::from_ast(parsed.ast());
            assert_eq!(names(table.root_symbols()), vec!["baz"]);
        }

        #[test]
        fn import_from() {
            let parsed = parse("from bar import foo");
            let table = SymbolTable::from_ast(parsed.ast());
            assert_eq!(names(table.root_symbols()), vec!["foo"]);
            assert_eq!(
                table
                    .definitions(table.root_symbol_id_by_name("foo").unwrap())
                    .len(),
                1
            );
            assert!(
                table.root_symbol_id_by_name("foo").is_some_and(|sid| {
                    let s = sid.symbol(&table);
                    s.is_defined() || !s.is_used()
                }),
                "symbols that are defined get the defined flag"
            );
        }

        #[test]
        fn assign() {
            let parsed = parse("x = foo");
            let table = SymbolTable::from_ast(parsed.ast());
            assert_eq!(names(table.root_symbols()), vec!["foo", "x"]);
            assert_eq!(
                table
                    .definitions(table.root_symbol_id_by_name("x").unwrap())
                    .len(),
                1
            );
            assert!(
                table.root_symbol_id_by_name("foo").is_some_and(|sid| {
                    let s = sid.symbol(&table);
                    !s.is_defined() && s.is_used()
                }),
                "a symbol used but not defined in a scope should have only the used flag"
            );
        }

        #[test]
        fn class_scope() {
            let parsed = parse(
                "
                class C:
                    x = 1
                y = 2
                ",
            );
            let table = SymbolTable::from_ast(parsed.ast());
            assert_eq!(names(table.root_symbols()), vec!["C", "y"]);
            let scopes = table.root_child_scope_ids();
            assert_eq!(scopes.len(), 1);
            let c_scope = scopes[0].scope(&table);
            assert_eq!(c_scope.kind(), ScopeKind::Class);
            assert_eq!(c_scope.name(), "C");
            assert_eq!(names(table.symbols_for_scope(scopes[0])), vec!["x"]);
            assert_eq!(
                table
                    .definitions(table.root_symbol_id_by_name("C").unwrap())
                    .len(),
                1
            );
        }

        #[test]
        fn func_scope() {
            let parsed = parse(
                "
                def func():
                    x = 1
                y = 2
                ",
            );
            let table = SymbolTable::from_ast(parsed.ast());
            assert_eq!(names(table.root_symbols()), vec!["func", "y"]);
            let scopes = table.root_child_scope_ids();
            assert_eq!(scopes.len(), 1);
            let func_scope = scopes[0].scope(&table);
            assert_eq!(func_scope.kind(), ScopeKind::Function);
            assert_eq!(func_scope.name(), "func");
            assert_eq!(names(table.symbols_for_scope(scopes[0])), vec!["x"]);
            assert_eq!(
                table
                    .definitions(table.root_symbol_id_by_name("func").unwrap())
                    .len(),
                1
            );
        }

        #[test]
        fn dupes() {
            let parsed = parse(
                "
                def func():
                    x = 1
                def func():
                    y = 2
                ",
            );
            let table = SymbolTable::from_ast(parsed.ast());
            assert_eq!(names(table.root_symbols()), vec!["func"]);
            let scopes = table.root_child_scope_ids();
            assert_eq!(scopes.len(), 2);
            let func_scope_1 = scopes[0].scope(&table);
            let func_scope_2 = scopes[1].scope(&table);
            assert_eq!(func_scope_1.kind(), ScopeKind::Function);
            assert_eq!(func_scope_1.name(), "func");
            assert_eq!(func_scope_2.kind(), ScopeKind::Function);
            assert_eq!(func_scope_2.name(), "func");
            assert_eq!(names(table.symbols_for_scope(scopes[0])), vec!["x"]);
            assert_eq!(names(table.symbols_for_scope(scopes[1])), vec!["y"]);
            assert_eq!(
                table
                    .definitions(table.root_symbol_id_by_name("func").unwrap())
                    .len(),
                2
            );
        }

        #[test]
        fn generic_func() {
            let parsed = parse(
                "
                def func[T]():
                    x = 1
                ",
            );
            let table = SymbolTable::from_ast(parsed.ast());
            assert_eq!(names(table.root_symbols()), vec!["func"]);
            let scopes = table.root_child_scope_ids();
            assert_eq!(scopes.len(), 1);
            let ann_scope_id = scopes[0];
            let ann_scope = ann_scope_id.scope(&table);
            assert_eq!(ann_scope.kind(), ScopeKind::Annotation);
            assert_eq!(ann_scope.name(), "func");
            assert_eq!(names(table.symbols_for_scope(ann_scope_id)), vec!["T"]);
            let scopes = table.child_scope_ids_of(ann_scope_id);
            assert_eq!(scopes.len(), 1);
            let func_scope_id = scopes[0];
            let func_scope = func_scope_id.scope(&table);
            assert_eq!(func_scope.kind(), ScopeKind::Function);
            assert_eq!(func_scope.name(), "func");
            assert_eq!(names(table.symbols_for_scope(func_scope_id)), vec!["x"]);
        }

        #[test]
        fn generic_class() {
            let parsed = parse(
                "
                class C[T]:
                    x = 1
                ",
            );
            let table = SymbolTable::from_ast(parsed.ast());
            assert_eq!(names(table.root_symbols()), vec!["C"]);
            let scopes = table.root_child_scope_ids();
            assert_eq!(scopes.len(), 1);
            let ann_scope_id = scopes[0];
            let ann_scope = ann_scope_id.scope(&table);
            assert_eq!(ann_scope.kind(), ScopeKind::Annotation);
            assert_eq!(ann_scope.name(), "C");
            assert_eq!(names(table.symbols_for_scope(ann_scope_id)), vec!["T"]);
            assert!(
                table
                    .symbol_by_name(ann_scope_id, "T")
                    .is_some_and(|s| s.is_defined() && !s.is_used()),
                "type parameters are defined by the scope that introduces them"
            );
            let scopes = table.child_scope_ids_of(ann_scope_id);
            assert_eq!(scopes.len(), 1);
            let func_scope_id = scopes[0];
            let func_scope = func_scope_id.scope(&table);
            assert_eq!(func_scope.kind(), ScopeKind::Class);
            assert_eq!(func_scope.name(), "C");
            assert_eq!(names(table.symbols_for_scope(func_scope_id)), vec!["x"]);
        }
    }

    #[test]
    fn insert_same_name_symbol_twice() {
        let mut table = SymbolTable::new();
        let root_scope_id = SymbolTable::root_scope_id();
        let symbol_id_1 = table.add_or_update_symbol(root_scope_id, "foo", SymbolFlags::IS_DEFINED);
        let symbol_id_2 = table.add_or_update_symbol(root_scope_id, "foo", SymbolFlags::IS_USED);
        assert_eq!(symbol_id_1, symbol_id_2);
        assert!(symbol_id_1.symbol(&table).is_used(), "flags must merge");
        assert!(symbol_id_1.symbol(&table).is_defined(), "flags must merge");
    }

    #[test]
    fn insert_different_named_symbols() {
        let mut table = SymbolTable::new();
        let root_scope_id = SymbolTable::root_scope_id();
        let symbol_id_1 = table.add_or_update_symbol(root_scope_id, "foo", SymbolFlags::empty());
        let symbol_id_2 = table.add_or_update_symbol(root_scope_id, "bar", SymbolFlags::empty());
        assert_ne!(symbol_id_1, symbol_id_2);
    }

    #[test]
    fn add_child_scope_with_symbol() {
        let mut table = SymbolTable::new();
        let root_scope_id = SymbolTable::root_scope_id();
        let foo_symbol_top = table.add_or_update_symbol(root_scope_id, "foo", SymbolFlags::empty());
        let c_scope = table.add_child_scope(root_scope_id, "C", ScopeKind::Class);
        let foo_symbol_inner = table.add_or_update_symbol(c_scope, "foo", SymbolFlags::empty());
        assert_ne!(foo_symbol_top, foo_symbol_inner);
    }

    #[test]
    fn scope_from_id() {
        let table = SymbolTable::new();
        let root_scope_id = SymbolTable::root_scope_id();
        let scope = root_scope_id.scope(&table);
        assert_eq!(scope.name.as_str(), "<module>");
        assert_eq!(scope.kind, ScopeKind::Module);
    }

    #[test]
    fn symbol_from_id() {
        let mut table = SymbolTable::new();
        let root_scope_id = SymbolTable::root_scope_id();
        let foo_symbol_id = table.add_or_update_symbol(root_scope_id, "foo", SymbolFlags::empty());
        let symbol = foo_symbol_id.symbol(&table);
        assert_eq!(symbol.name.as_str(), "foo");
    }
}
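The `symbols_by_name: Map<SymbolId, ()>` trick above is worth a standalone illustration: the map stores only IDs with unit values, and lookups hash the name held in a side arena, so each name is stored exactly once. A minimal sketch under the same hashbrown raw-entry API the file uses; the `intern` helper, its `Vec<String>` arena, and `hash_str` are illustrative names, not part of the diff.

use std::hash::{Hash, Hasher};

use hashbrown::hash_map::RawEntryMut;
use rustc_hash::FxHasher;

fn hash_str(s: &str) -> u64 {
    let mut hasher = FxHasher::default();
    s.hash(&mut hasher);
    hasher.finish()
}

/// Returns the index of `name` in `arena`, inserting it on first sight.
fn intern(arena: &mut Vec<String>, ids: &mut hashbrown::HashMap<usize, (), ()>, name: &str) -> usize {
    let hash = hash_str(name);
    match ids.raw_entry_mut().from_hash(hash, |&id| arena[id] == name) {
        RawEntryMut::Occupied(entry) => *entry.key(),
        RawEntryMut::Vacant(entry) => {
            let id = arena.len();
            arena.push(name.to_string());
            // The map never stores the string itself; hashes are recomputed
            // from the arena if the table needs to rehash.
            entry.insert_with_hasher(hash, id, (), |&id| hash_str(&arena[id]));
            id
        }
    }
}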
crates/red_knot/src/types.rs (new file, 560 lines)
@@ -0,0 +1,560 @@
#![allow(dead_code)]
use crate::ast_ids::NodeKey;
use crate::files::FileId;
use crate::symbols::SymbolId;
use crate::{FxDashMap, FxIndexSet, Name};
use ruff_index::{newtype_index, IndexVec};
use rustc_hash::FxHashMap;

pub(crate) mod infer;

pub(crate) use infer::infer_symbol_type;

/// unique ID for a type
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum Type {
    /// the dynamic or gradual type: a statically-unknown set of values
    Any,
    /// the empty set of values
    Never,
    /// unknown type (no annotation)
    /// equivalent to Any, or to object in strict mode
    Unknown,
    /// name is not bound to any value
    Unbound,
    /// a specific function object
    Function(FunctionTypeId),
    /// a specific class object
    Class(ClassTypeId),
    /// the set of Python objects with the given class in their __class__'s method resolution order
    Instance(ClassTypeId),
    Union(UnionTypeId),
    Intersection(IntersectionTypeId),
    // TODO protocols, callable types, overloads, generics, type vars
}

impl Type {
    fn display<'a>(&'a self, store: &'a TypeStore) -> DisplayType<'a> {
        DisplayType { ty: self, store }
    }

    pub const fn is_unbound(&self) -> bool {
        matches!(self, Type::Unbound)
    }

    pub const fn is_unknown(&self) -> bool {
        matches!(self, Type::Unknown)
    }
}

impl From<FunctionTypeId> for Type {
    fn from(id: FunctionTypeId) -> Self {
        Type::Function(id)
    }
}

impl From<UnionTypeId> for Type {
    fn from(id: UnionTypeId) -> Self {
        Type::Union(id)
    }
}

impl From<IntersectionTypeId> for Type {
    fn from(id: IntersectionTypeId) -> Self {
        Type::Intersection(id)
    }
}

// TODO: currently calling `get_function` et al and holding on to the `FunctionTypeRef` will lock a
// shard of this dashmap, for as long as you hold the reference. This may be a problem. We could
// switch to having all the arenas hold Arc, or we could see if we can split up ModuleTypeStore,
// and/or give it inner mutability and finer-grained internal locking.
#[derive(Debug, Default)]
pub struct TypeStore {
    modules: FxDashMap<FileId, ModuleTypeStore>,
}

impl TypeStore {
    pub fn remove_module(&mut self, file_id: FileId) {
        self.modules.remove(&file_id);
    }

    pub fn cache_symbol_type(&self, file_id: FileId, symbol_id: SymbolId, ty: Type) {
        self.add_or_get_module(file_id)
            .symbol_types
            .insert(symbol_id, ty);
    }

    pub fn cache_node_type(&self, file_id: FileId, node_key: NodeKey, ty: Type) {
        self.add_or_get_module(file_id)
            .node_types
            .insert(node_key, ty);
    }

    pub fn get_cached_symbol_type(&self, file_id: FileId, symbol_id: SymbolId) -> Option<Type> {
        self.try_get_module(file_id)?
            .symbol_types
            .get(&symbol_id)
            .copied()
    }

    pub fn get_cached_node_type(&self, file_id: FileId, node_key: &NodeKey) -> Option<Type> {
        self.try_get_module(file_id)?
            .node_types
            .get(node_key)
            .copied()
    }

    fn add_or_get_module(&self, file_id: FileId) -> ModuleStoreRefMut {
        self.modules
            .entry(file_id)
            .or_insert_with(|| ModuleTypeStore::new(file_id))
    }

    fn get_module(&self, file_id: FileId) -> ModuleStoreRef {
        self.try_get_module(file_id).expect("module should exist")
    }

    fn try_get_module(&self, file_id: FileId) -> Option<ModuleStoreRef> {
        self.modules.get(&file_id)
    }

    fn add_function(&self, file_id: FileId, name: &str) -> FunctionTypeId {
        self.add_or_get_module(file_id).add_function(name)
    }

    fn add_class(&self, file_id: FileId, name: &str, bases: Vec<Type>) -> ClassTypeId {
        self.add_or_get_module(file_id).add_class(name, bases)
    }

    fn add_union(&mut self, file_id: FileId, elems: &[Type]) -> UnionTypeId {
        self.add_or_get_module(file_id).add_union(elems)
    }

    fn add_intersection(
        &mut self,
        file_id: FileId,
        positive: &[Type],
        negative: &[Type],
    ) -> IntersectionTypeId {
        self.add_or_get_module(file_id)
            .add_intersection(positive, negative)
    }

    fn get_function(&self, id: FunctionTypeId) -> FunctionTypeRef {
        FunctionTypeRef {
            module_store: self.get_module(id.file_id),
            function_id: id.func_id,
        }
    }

    fn get_class(&self, id: ClassTypeId) -> ClassTypeRef {
        ClassTypeRef {
            module_store: self.get_module(id.file_id),
            class_id: id.class_id,
        }
    }

    fn get_union(&self, id: UnionTypeId) -> UnionTypeRef {
        UnionTypeRef {
            module_store: self.get_module(id.file_id),
            union_id: id.union_id,
        }
    }

    fn get_intersection(&self, id: IntersectionTypeId) -> IntersectionTypeRef {
        IntersectionTypeRef {
            module_store: self.get_module(id.file_id),
            intersection_id: id.intersection_id,
        }
    }
}

type ModuleStoreRef<'a> = dashmap::mapref::one::Ref<
    'a,
    FileId,
    ModuleTypeStore,
    std::hash::BuildHasherDefault<rustc_hash::FxHasher>,
>;

type ModuleStoreRefMut<'a> = dashmap::mapref::one::RefMut<
    'a,
    FileId,
    ModuleTypeStore,
    std::hash::BuildHasherDefault<rustc_hash::FxHasher>,
>;

#[derive(Debug)]
pub(crate) struct FunctionTypeRef<'a> {
    module_store: ModuleStoreRef<'a>,
    function_id: ModuleFunctionTypeId,
}

impl<'a> std::ops::Deref for FunctionTypeRef<'a> {
    type Target = FunctionType;

    fn deref(&self) -> &Self::Target {
        self.module_store.get_function(self.function_id)
    }
}

#[derive(Debug)]
pub(crate) struct ClassTypeRef<'a> {
    module_store: ModuleStoreRef<'a>,
    class_id: ModuleClassTypeId,
}

impl<'a> std::ops::Deref for ClassTypeRef<'a> {
    type Target = ClassType;

    fn deref(&self) -> &Self::Target {
        self.module_store.get_class(self.class_id)
    }
}

#[derive(Debug)]
pub(crate) struct UnionTypeRef<'a> {
    module_store: ModuleStoreRef<'a>,
    union_id: ModuleUnionTypeId,
}

impl<'a> std::ops::Deref for UnionTypeRef<'a> {
    type Target = UnionType;

    fn deref(&self) -> &Self::Target {
        self.module_store.get_union(self.union_id)
    }
}

#[derive(Debug)]
pub(crate) struct IntersectionTypeRef<'a> {
    module_store: ModuleStoreRef<'a>,
    intersection_id: ModuleIntersectionTypeId,
}

impl<'a> std::ops::Deref for IntersectionTypeRef<'a> {
    type Target = IntersectionType;

    fn deref(&self) -> &Self::Target {
        self.module_store.get_intersection(self.intersection_id)
    }
}

#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub struct FunctionTypeId {
    file_id: FileId,
    func_id: ModuleFunctionTypeId,
}

#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub struct ClassTypeId {
    file_id: FileId,
    class_id: ModuleClassTypeId,
}

#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub struct UnionTypeId {
    file_id: FileId,
    union_id: ModuleUnionTypeId,
}

#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub struct IntersectionTypeId {
    file_id: FileId,
    intersection_id: ModuleIntersectionTypeId,
}

#[newtype_index]
struct ModuleFunctionTypeId;

#[newtype_index]
struct ModuleClassTypeId;

#[newtype_index]
struct ModuleUnionTypeId;

#[newtype_index]
struct ModuleIntersectionTypeId;

#[derive(Debug)]
struct ModuleTypeStore {
    file_id: FileId,
    /// arena of all function types defined in this module
    functions: IndexVec<ModuleFunctionTypeId, FunctionType>,
    /// arena of all class types defined in this module
    classes: IndexVec<ModuleClassTypeId, ClassType>,
    /// arena of all union types created in this module
    unions: IndexVec<ModuleUnionTypeId, UnionType>,
    /// arena of all intersection types created in this module
    intersections: IndexVec<ModuleIntersectionTypeId, IntersectionType>,
    /// cached types of symbols in this module
    symbol_types: FxHashMap<SymbolId, Type>,
    /// cached types of AST nodes in this module
    node_types: FxHashMap<NodeKey, Type>,
}

impl ModuleTypeStore {
    fn new(file_id: FileId) -> Self {
        Self {
            file_id,
            functions: IndexVec::default(),
            classes: IndexVec::default(),
            unions: IndexVec::default(),
            intersections: IndexVec::default(),
            symbol_types: FxHashMap::default(),
            node_types: FxHashMap::default(),
        }
    }

    fn add_function(&mut self, name: &str) -> FunctionTypeId {
        let func_id = self.functions.push(FunctionType {
            name: Name::new(name),
        });
        FunctionTypeId {
            file_id: self.file_id,
            func_id,
        }
    }

    fn add_class(&mut self, name: &str, bases: Vec<Type>) -> ClassTypeId {
        let class_id = self.classes.push(ClassType {
            name: Name::new(name),
            // TODO: if no bases are given, that should imply [object]
            bases,
        });
        ClassTypeId {
            file_id: self.file_id,
            class_id,
        }
    }

    fn add_union(&mut self, elems: &[Type]) -> UnionTypeId {
        let union_id = self.unions.push(UnionType {
            elements: elems.iter().copied().collect(),
        });
        UnionTypeId {
            file_id: self.file_id,
            union_id,
        }
    }

    fn add_intersection(&mut self, positive: &[Type], negative: &[Type]) -> IntersectionTypeId {
        let intersection_id = self.intersections.push(IntersectionType {
            positive: positive.iter().copied().collect(),
            negative: negative.iter().copied().collect(),
        });
        IntersectionTypeId {
            file_id: self.file_id,
            intersection_id,
        }
    }

    fn get_function(&self, func_id: ModuleFunctionTypeId) -> &FunctionType {
        &self.functions[func_id]
    }

    fn get_class(&self, class_id: ModuleClassTypeId) -> &ClassType {
        &self.classes[class_id]
    }

    fn get_union(&self, union_id: ModuleUnionTypeId) -> &UnionType {
        &self.unions[union_id]
    }

    fn get_intersection(&self, intersection_id: ModuleIntersectionTypeId) -> &IntersectionType {
        &self.intersections[intersection_id]
    }
}

#[derive(Copy, Clone, Debug)]
struct DisplayType<'a> {
    ty: &'a Type,
    store: &'a TypeStore,
}

impl std::fmt::Display for DisplayType<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self.ty {
            Type::Any => f.write_str("Any"),
            Type::Never => f.write_str("Never"),
            Type::Unknown => f.write_str("Unknown"),
            Type::Unbound => f.write_str("Unbound"),
            // TODO functions and classes should display using a fully qualified name
            Type::Class(class_id) => {
                f.write_str("Literal[")?;
                f.write_str(self.store.get_class(*class_id).name())?;
                f.write_str("]")
            }
            Type::Instance(class_id) => f.write_str(self.store.get_class(*class_id).name()),
            Type::Function(func_id) => f.write_str(self.store.get_function(*func_id).name()),
            Type::Union(union_id) => self
                .store
                .get_module(union_id.file_id)
                .get_union(union_id.union_id)
                .display(f, self.store),
            Type::Intersection(int_id) => self
                .store
                .get_module(int_id.file_id)
                .get_intersection(int_id.intersection_id)
                .display(f, self.store),
        }
    }
}

#[derive(Debug)]
pub(crate) struct ClassType {
    name: Name,
    bases: Vec<Type>,
}

impl ClassType {
    fn name(&self) -> &str {
        self.name.as_str()
    }

    fn bases(&self) -> &[Type] {
        self.bases.as_slice()
    }
}

#[derive(Debug)]
pub(crate) struct FunctionType {
    name: Name,
}

impl FunctionType {
    fn name(&self) -> &str {
        self.name.as_str()
    }
}

#[derive(Debug)]
pub(crate) struct UnionType {
    // the union type includes values in any of these types
    elements: FxIndexSet<Type>,
}

impl UnionType {
    fn display(&self, f: &mut std::fmt::Formatter<'_>, store: &TypeStore) -> std::fmt::Result {
        f.write_str("(")?;
        let mut first = true;
        for ty in &self.elements {
            if !first {
                f.write_str(" | ")?;
            }
            first = false;
            write!(f, "{}", ty.display(store))?;
        }
        f.write_str(")")
    }
}

// Negation types aren't expressible in annotations, and are most likely to arise from type
// narrowing along with intersections (e.g. `if not isinstance(...)`), so we represent them
// directly in intersections rather than as a separate type. This sacrifices some efficiency in the
// case where a Not appears outside an intersection (unclear when that could even happen, but we'd
// have to represent it as a single-element intersection if it did) in exchange for better
// efficiency in the not-within-intersection case.
#[derive(Debug)]
pub(crate) struct IntersectionType {
    // the intersection type includes only values in all of these types
    positive: FxIndexSet<Type>,
    // negated elements of the intersection, e.g. the `~C3` in `C1 & C2 & ~C3`
    negative: FxIndexSet<Type>,
}

impl IntersectionType {
    fn display(&self, f: &mut std::fmt::Formatter<'_>, store: &TypeStore) -> std::fmt::Result {
        f.write_str("(")?;
        let mut first = true;
        for (neg, ty) in self
            .positive
            .iter()
            .map(|ty| (false, ty))
            .chain(self.negative.iter().map(|ty| (true, ty)))
        {
            if !first {
                f.write_str(" & ")?;
            }
            first = false;
            if neg {
                f.write_str("~")?;
            }
            write!(f, "{}", ty.display(store))?;
        }
        f.write_str(")")
    }
}

#[cfg(test)]
mod tests {
    use crate::files::Files;
    use crate::types::{Type, TypeStore};
    use crate::FxIndexSet;
    use std::path::Path;

    #[test]
    fn add_class() {
        let store = TypeStore::default();
        let files = Files::default();
        let file_id = files.intern(Path::new("/foo"));
        let id = store.add_class(file_id, "C", Vec::new());
        assert_eq!(store.get_class(id).name(), "C");
        let inst = Type::Instance(id);
        assert_eq!(format!("{}", inst.display(&store)), "C");
    }

    #[test]
    fn add_function() {
        let store = TypeStore::default();
        let files = Files::default();
        let file_id = files.intern(Path::new("/foo"));
        let id = store.add_function(file_id, "func");
        assert_eq!(store.get_function(id).name(), "func");
        let func = Type::Function(id);
        assert_eq!(format!("{}", func.display(&store)), "func");
    }

    #[test]
    fn add_union() {
        let mut store = TypeStore::default();
        let files = Files::default();
        let file_id = files.intern(Path::new("/foo"));
        let c1 = store.add_class(file_id, "C1", Vec::new());
        let c2 = store.add_class(file_id, "C2", Vec::new());
        let elems = vec![Type::Instance(c1), Type::Instance(c2)];
        let id = store.add_union(file_id, &elems);
        assert_eq!(
            store.get_union(id).elements,
            elems.into_iter().collect::<FxIndexSet<_>>()
        );
        let union = Type::Union(id);
        assert_eq!(format!("{}", union.display(&store)), "(C1 | C2)");
    }

    #[test]
    fn add_intersection() {
        let mut store = TypeStore::default();
        let files = Files::default();
        let file_id = files.intern(Path::new("/foo"));
        let c1 = store.add_class(file_id, "C1", Vec::new());
        let c2 = store.add_class(file_id, "C2", Vec::new());
        let c3 = store.add_class(file_id, "C3", Vec::new());
        let pos = vec![Type::Instance(c1), Type::Instance(c2)];
        let neg = vec![Type::Instance(c3)];
        let id = store.add_intersection(file_id, &pos, &neg);
        assert_eq!(
            store.get_intersection(id).positive,
            pos.into_iter().collect::<FxIndexSet<_>>()
        );
        assert_eq!(
            store.get_intersection(id).negative,
            neg.into_iter().collect::<FxIndexSet<_>>()
        );
        let intersection = Type::Intersection(id);
        assert_eq!(
            format!("{}", intersection.display(&store)),
            "(C1 & C2 & ~C3)"
        );
    }
}
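The TODO on `TypeStore` above (holding a `FunctionTypeRef` keeps a dashmap shard locked for as long as the reference lives) is easy to reproduce in isolation. A minimal sketch of the hazard, independent of this crate:

use dashmap::DashMap;

fn main() {
    let map: DashMap<u32, String> = DashMap::new();
    map.insert(1, "one".to_string());

    let guard = map.get(&1).unwrap(); // read-locks the shard holding key 1
    // map.insert(1, "uno".to_string()); // would deadlock here: insert needs a
    //                                   // write lock on the same shard
    println!("{}", *guard);
    drop(guard); // release the shard lock before mutating
    map.insert(1, "uno".to_string());
}

The proposed mitigations in the comment (arenas of `Arc`, splitting `ModuleTypeStore`, or finer-grained internal locking) all amount to shrinking how long such guards stay alive.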
crates/red_knot/src/types/infer.rs (new file, 217 lines)
@@ -0,0 +1,217 @@
#![allow(dead_code)]

use ruff_python_ast::AstNode;

use crate::db::{HasJar, QueryResult, SemanticDb, SemanticJar};
use crate::module::ModuleName;
use crate::symbols::{Definition, ImportFromDefinition, SymbolId};
use crate::types::Type;
use crate::FileId;
use ruff_python_ast as ast;

// FIXME: Figure out proper dead-lock free synchronisation now that this takes `&db` instead of `&mut db`.
#[tracing::instrument(level = "trace", skip(db))]
pub fn infer_symbol_type<Db>(db: &Db, file_id: FileId, symbol_id: SymbolId) -> QueryResult<Type>
where
    Db: SemanticDb + HasJar<SemanticJar>,
{
    let symbols = db.symbol_table(file_id)?;
    let defs = symbols.definitions(symbol_id);

    if let Some(ty) = db
        .jar()?
        .type_store
        .get_cached_symbol_type(file_id, symbol_id)
    {
        return Ok(ty);
    }

    // TODO handle multiple defs, conditional defs...
    assert_eq!(defs.len(), 1);
    let type_store = &db.jar()?.type_store;

    let ty = match &defs[0] {
        Definition::ImportFrom(ImportFromDefinition {
            module,
            name,
            level,
        }) => {
            // TODO relative imports
            assert!(matches!(level, 0));
            let module_name = ModuleName::new(module.as_ref().expect("TODO relative imports"));
            if let Some(module) = db.resolve_module(module_name)? {
                let remote_file_id = module.path(db)?.file();
                let remote_symbols = db.symbol_table(remote_file_id)?;
                if let Some(remote_symbol_id) = remote_symbols.root_symbol_id_by_name(name) {
                    db.infer_symbol_type(remote_file_id, remote_symbol_id)?
                } else {
                    Type::Unknown
                }
            } else {
                Type::Unknown
            }
        }
        Definition::ClassDef(node_key) => {
            if let Some(ty) = type_store.get_cached_node_type(file_id, node_key.erased()) {
                ty
            } else {
                let parsed = db.parse(file_id)?;
                let ast = parsed.ast();
                let node = node_key.resolve_unwrap(ast.as_any_node_ref());

                let mut bases = Vec::with_capacity(node.bases().len());

                for base in node.bases() {
                    bases.push(infer_expr_type(db, file_id, base)?);
                }

                let ty = Type::Class(type_store.add_class(file_id, &node.name.id, bases));
                type_store.cache_node_type(file_id, *node_key.erased(), ty);
                ty
            }
        }
        Definition::FunctionDef(node_key) => {
            if let Some(ty) = type_store.get_cached_node_type(file_id, node_key.erased()) {
                ty
            } else {
                let parsed = db.parse(file_id)?;
                let ast = parsed.ast();
                let node = node_key
                    .resolve(ast.as_any_node_ref())
                    .expect("node key should resolve");

                let ty = type_store.add_function(file_id, &node.name.id).into();
                type_store.cache_node_type(file_id, *node_key.erased(), ty);
                ty
            }
        }
        Definition::Assignment(node_key) => {
            let parsed = db.parse(file_id)?;
            let ast = parsed.ast();
            let node = node_key.resolve_unwrap(ast.as_any_node_ref());
            // TODO handle unpacking assignment correctly
            infer_expr_type(db, file_id, &node.value)?
        }
        _ => todo!("other kinds of definitions"),
    };

    type_store.cache_symbol_type(file_id, symbol_id, ty);

    // TODO record dependencies
    Ok(ty)
}

fn infer_expr_type<Db>(db: &Db, file_id: FileId, expr: &ast::Expr) -> QueryResult<Type>
where
    Db: SemanticDb + HasJar<SemanticJar>,
{
    // TODO cache the resolution of the type on the node
    let symbols = db.symbol_table(file_id)?;
    match expr {
        ast::Expr::Name(name) => {
            if let Some(symbol_id) = symbols.root_symbol_id_by_name(&name.id) {
                db.infer_symbol_type(file_id, symbol_id)
            } else {
                Ok(Type::Unknown)
            }
        }
        _ => todo!("full expression type resolution"),
    }
}

#[cfg(test)]
mod tests {
    use crate::db::tests::TestDb;
    use crate::db::{HasJar, SemanticDb, SemanticJar};
    use crate::module::{ModuleName, ModuleSearchPath, ModuleSearchPathKind};
    use crate::types::Type;

    // TODO with virtual filesystem we shouldn't have to write files to disk for these
    // tests

    struct TestCase {
        temp_dir: tempfile::TempDir,
        db: TestDb,

        src: ModuleSearchPath,
    }

    fn create_test() -> std::io::Result<TestCase> {
        let temp_dir = tempfile::tempdir()?;

        let src = temp_dir.path().join("src");
        std::fs::create_dir(&src)?;
        let src = ModuleSearchPath::new(src.canonicalize()?, ModuleSearchPathKind::FirstParty);

        let roots = vec![src.clone()];

        let mut db = TestDb::default();
        db.set_module_search_paths(roots);

        Ok(TestCase { temp_dir, db, src })
    }

    #[test]
    fn follow_import_to_class() -> anyhow::Result<()> {
        let case = create_test()?;
        let db = &case.db;

        let a_path = case.src.path().join("a.py");
        let b_path = case.src.path().join("b.py");
        std::fs::write(a_path, "from b import C as D; E = D")?;
        std::fs::write(b_path, "class C: pass")?;
        let a_file = db
            .resolve_module(ModuleName::new("a"))?
            .expect("module should be found")
            .path(db)?
            .file();
        let a_syms = db.symbol_table(a_file)?;
        let e_sym = a_syms
            .root_symbol_id_by_name("E")
            .expect("E symbol should be found");

        let ty = db.infer_symbol_type(a_file, e_sym)?;

        let jar = HasJar::<SemanticJar>::jar(db)?;
        assert!(matches!(ty, Type::Class(_)));
        assert_eq!(format!("{}", ty.display(&jar.type_store)), "Literal[C]");

        Ok(())
    }

    #[test]
    fn resolve_base_class_by_name() -> anyhow::Result<()> {
        let case = create_test()?;
        let db = &case.db;

        let path = case.src.path().join("mod.py");
        std::fs::write(path, "class Base: pass\nclass Sub(Base): pass")?;
        let file = db
            .resolve_module(ModuleName::new("mod"))?
            .expect("module should be found")
            .path(db)?
            .file();
        let syms = db.symbol_table(file)?;
        let sym = syms
            .root_symbol_id_by_name("Sub")
            .expect("Sub symbol should be found");

        let ty = db.infer_symbol_type(file, sym)?;

        let Type::Class(class_id) = ty else {
            panic!("Sub is not a Class")
        };
        let jar = HasJar::<SemanticJar>::jar(db)?;
        let base_names: Vec<_> = jar
            .type_store
            .get_class(class_id)
            .bases()
            .iter()
            .map(|base_ty| format!("{}", base_ty.display(&jar.type_store)))
            .collect();

        assert_eq!(base_names, vec!["Literal[Base]"]);

        Ok(())
    }
}
78 crates/red_knot/src/watch.rs Normal file
@@ -0,0 +1,78 @@
use anyhow::Context;
use std::path::Path;

use crate::files::Files;
use crate::program::{FileChange, FileChangeKind};
use notify::event::{CreateKind, RemoveKind};
use notify::{recommended_watcher, Event, EventKind, RecommendedWatcher, RecursiveMode, Watcher};

pub struct FileWatcher {
    watcher: RecommendedWatcher,
}

pub trait EventHandler: Send + 'static {
    fn handle(&self, changes: Vec<FileChange>);
}

impl<F> EventHandler for F
where
    F: Fn(Vec<FileChange>) + Send + 'static,
{
    fn handle(&self, changes: Vec<FileChange>) {
        let f = self;
        f(changes);
    }
}

impl FileWatcher {
    pub fn new<E>(handler: E, files: Files) -> anyhow::Result<Self>
    where
        E: EventHandler,
    {
        Self::from_handler(Box::new(handler), files)
    }

    fn from_handler(handler: Box<dyn EventHandler>, files: Files) -> anyhow::Result<Self> {
        let watcher = recommended_watcher(move |changes: notify::Result<Event>| {
            match changes {
                Ok(event) => {
                    // TODO verify that this handles all events correctly
                    let change_kind = match event.kind {
                        EventKind::Create(CreateKind::File) => FileChangeKind::Created,
                        EventKind::Modify(_) => FileChangeKind::Modified,
                        EventKind::Remove(RemoveKind::File) => FileChangeKind::Deleted,
                        _ => {
                            return;
                        }
                    };

                    let mut changes = Vec::new();

                    for path in event.paths {
                        if path.is_file() {
                            let id = files.intern(&path);
                            changes.push(FileChange::new(id, change_kind));
                        }
                    }

                    if !changes.is_empty() {
                        handler.handle(changes);
                    }
                }
                // TODO proper error handling
                Err(err) => {
                    panic!("Error: {err}");
                }
            }
        })
        .context("Failed to create file watcher.")?;

        Ok(Self { watcher })
    }

    pub fn watch_folder(&mut self, path: &Path) -> anyhow::Result<()> {
        self.watcher.watch(path, RecursiveMode::Recursive)?;

        Ok(())
    }
}
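For orientation, a minimal sketch of how the watcher above might be wired up, relying on the blanket `EventHandler` impl for closures. `FileWatcher`, `Files`, and `FileChange` are the types from this diff; the function itself is illustrative and not part of the commit.

// Illustrative sketch, not commit code: connect a closure handler to the
// FileWatcher API introduced above.
fn watch_src(files: Files) -> anyhow::Result<()> {
    let mut watcher = FileWatcher::new(
        |changes: Vec<FileChange>| {
            // The handler receives batched FileChange events.
            eprintln!("{} file(s) changed", changes.len());
        },
        files,
    )?;
    watcher.watch_folder(std::path::Path::new("src"))?;
    Ok(())
}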
@@ -1,6 +1,6 @@
[package]
name = "ruff"
version = "0.3.4"
version = "0.4.2"
publish = false
authors = { workspace = true }
edition = { workspace = true }

@@ -190,7 +190,7 @@ pub struct CheckCommand {
    pub output_format: Option<SerializationFormat>,

    /// Specify file to write the linter output to (default: stdout).
    #[arg(short, long)]
    #[arg(short, long, env = "RUFF_OUTPUT_FILE")]
    pub output_file: Option<PathBuf>,
    /// The minimum Python version that should be supported.
    #[arg(long, value_enum)]

@@ -375,15 +375,17 @@ pub(crate) fn init(path: &Path) -> Result<()> {
    fs::create_dir_all(path.join(VERSION))?;

    // Add the CACHEDIR.TAG.
    if !cachedir::is_tagged(path)? {
        cachedir::add_tag(path)?;
    }
    cachedir::ensure_tag(path)?;

    // Add the .gitignore.
    let gitignore_path = path.join(".gitignore");
    if !gitignore_path.exists() {
        let mut file = fs::File::create(gitignore_path)?;
        file.write_all(b"# Automatically created by ruff.\n*\n")?;
    match fs::OpenOptions::new()
        .write(true)
        .create_new(true)
        .open(path.join(".gitignore"))
    {
        Ok(mut file) => file.write_all(b"# Automatically created by ruff.\n*\n")?,
        Err(err) if err.kind() == io::ErrorKind::AlreadyExists => (),
        Err(err) => return Err(err.into()),
    }

    Ok(())
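The `.gitignore` hunk above replaces an `exists()` check plus `File::create` with a single `OpenOptions::create_new` call, making check-and-create atomic and closing the race where another process creates the file in between. A self-contained sketch of that pattern, assuming nothing beyond the standard library:

use std::fs::OpenOptions;
use std::io::{self, Write};
use std::path::Path;

// Create the file only if it does not exist yet; `create_new(true)` fails
// with `AlreadyExists` instead of truncating a file someone else created.
fn write_if_absent(path: &Path, contents: &[u8]) -> io::Result<()> {
    match OpenOptions::new().write(true).create_new(true).open(path) {
        Ok(mut file) => file.write_all(contents),
        Err(err) if err.kind() == io::ErrorKind::AlreadyExists => Ok(()),
        Err(err) => Err(err),
    }
}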
@@ -149,6 +149,13 @@ pub fn run(
    #[cfg(windows)]
    assert!(colored::control::set_virtual_terminal(true).is_ok());

    // support FORCE_COLOR env var
    if let Some(force_color) = std::env::var_os("FORCE_COLOR") {
        if force_color.len() > 0 {
            colored::control::set_override(true);
        }
    }

    set_up_logging(global_options.log_level())?;

    if let Some(deprecated_alias_warning) = deprecated_alias_warning {
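The new block above enables color whenever `FORCE_COLOR` is set to a non-empty value. The same check in isolation, as an illustrative sketch (the helper name is made up here, not ruff's API):

// Any non-empty value of the variable counts as "force color on".
fn color_forced(var: &str) -> bool {
    std::env::var_os(var).is_some_and(|value| !value.is_empty())
}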
@@ -70,7 +70,7 @@ pub(crate) fn version() -> VersionInfo {

#[cfg(test)]
mod tests {
    use insta::{assert_display_snapshot, assert_json_snapshot};
    use insta::{assert_json_snapshot, assert_snapshot};

    use super::{CommitInfo, VersionInfo};

@@ -80,7 +80,7 @@ mod tests {
            version: "0.0.0".to_string(),
            commit_info: None,
        };
        assert_display_snapshot!(version);
        assert_snapshot!(version);
    }

    #[test]
@@ -95,7 +95,7 @@ mod tests {
                commits_since_last_tag: 0,
            }),
        };
        assert_display_snapshot!(version);
        assert_snapshot!(version);
    }

    #[test]
@@ -110,7 +110,7 @@ mod tests {
                commits_since_last_tag: 24,
            }),
        };
        assert_display_snapshot!(version);
        assert_snapshot!(version);
    }

    #[test]

@@ -523,7 +523,7 @@ from module import =
    ----- stdout -----

    ----- stderr -----
    error: Failed to parse main.py:2:20: Unexpected token '='
    error: Failed to parse main.py:2:20: Expected an import name
    "###);

    Ok(())

@@ -731,11 +731,11 @@ fn stdin_parse_error() {
    success: false
    exit_code: 1
    ----- stdout -----
    -:1:17: E999 SyntaxError: Unexpected token '='
    -:1:17: E999 SyntaxError: Expected an import name
    Found 1 error.

    ----- stderr -----
    error: Failed to parse at 1:17: Unexpected token '='
    error: Failed to parse at 1:17: Expected an import name
    "###);
}

@@ -1168,3 +1168,165 @@ def func():

    Ok(())
}

/// Per-file selects via ! negation in per-file-ignores
#[test]
fn negated_per_file_ignores() -> Result<()> {
    let tempdir = TempDir::new()?;
    let ruff_toml = tempdir.path().join("ruff.toml");
    fs::write(
        &ruff_toml,
        r#"
[lint.per-file-ignores]
"!selected.py" = ["RUF"]
"#,
    )?;
    let selected = tempdir.path().join("selected.py");
    fs::write(selected, "")?;
    let ignored = tempdir.path().join("ignored.py");
    fs::write(ignored, "")?;

    assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
        .args(STDIN_BASE_OPTIONS)
        .arg("--config")
        .arg(&ruff_toml)
        .arg("--select")
        .arg("RUF901")
        .current_dir(&tempdir)
        , @r###"
    success: false
    exit_code: 1
    ----- stdout -----
    selected.py:1:1: RUF901 [*] Hey this is a stable test rule with a safe fix.
    Found 1 error.
    [*] 1 fixable with the `--fix` option.

    ----- stderr -----
    "###);
    Ok(())
}

#[test]
fn negated_per_file_ignores_absolute() -> Result<()> {
    let tempdir = TempDir::new()?;
    let ruff_toml = tempdir.path().join("ruff.toml");
    fs::write(
        &ruff_toml,
        r#"
[lint.per-file-ignores]
"!src/**.py" = ["RUF"]
"#,
    )?;
    let src_dir = tempdir.path().join("src");
    fs::create_dir(&src_dir)?;
    let selected = src_dir.join("selected.py");
    fs::write(selected, "")?;
    let ignored = tempdir.path().join("ignored.py");
    fs::write(ignored, "")?;

    insta::with_settings!({filters => vec![
        // Replace windows paths
        (r"\\", "/"),
    ]}, {
        assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
            .args(STDIN_BASE_OPTIONS)
            .arg("--config")
            .arg(&ruff_toml)
            .arg("--select")
            .arg("RUF901")
            .current_dir(&tempdir)
            , @r###"
        success: false
        exit_code: 1
        ----- stdout -----
        src/selected.py:1:1: RUF901 [*] Hey this is a stable test rule with a safe fix.
        Found 1 error.
        [*] 1 fixable with the `--fix` option.

        ----- stderr -----
        "###);
    });
    Ok(())
}

/// patterns are additive, can't use negative patterns to "un-ignore"
#[test]
fn negated_per_file_ignores_overlap() -> Result<()> {
    let tempdir = TempDir::new()?;
    let ruff_toml = tempdir.path().join("ruff.toml");
    fs::write(
        &ruff_toml,
        r#"
[lint.per-file-ignores]
"*.py" = ["RUF"]
"!foo.py" = ["RUF"]
"#,
    )?;
    let foo_file = tempdir.path().join("foo.py");
    fs::write(foo_file, "")?;
    let bar_file = tempdir.path().join("bar.py");
    fs::write(bar_file, "")?;

    assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
        .args(STDIN_BASE_OPTIONS)
        .arg("--config")
        .arg(&ruff_toml)
        .arg("--select")
        .arg("RUF901")
        .current_dir(&tempdir)
        , @r###"
    success: true
    exit_code: 0
    ----- stdout -----
    All checks passed!

    ----- stderr -----
    "###);
    Ok(())
}

#[test]
fn unused_interaction() -> Result<()> {
    let tempdir = TempDir::new()?;
    let ruff_toml = tempdir.path().join("ruff.toml");
    fs::write(
        &ruff_toml,
        r#"
[lint]
select = ["F"]
"#,
    )?;

    insta::with_settings!({
        filters => vec![(tempdir_filter(&tempdir).as_str(), "[TMP]/")]
    }, {
        assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
            .args(STDIN_BASE_OPTIONS)
            .arg("--config")
            .arg(&ruff_toml)
            .args(["--stdin-filename", "test.py"])
            .arg("--fix")
            .arg("-")
            .pass_stdin(r#"
import os  # F401

def function():
    import os  # F811
    print(os.name)
"#), @r###"
        success: true
        exit_code: 0
        ----- stdout -----

        import os  # F401

        def function():
            print(os.name)

        ----- stderr -----
        Found 1 error (1 fixed, 0 remaining).
        "###);
    });

    Ok(())
}
@@ -50,8 +50,10 @@ file_resolver.exclude = [
    "venv",
]
file_resolver.extend_exclude = [
    "crates/ruff/resources/",
    "crates/ruff_linter/resources/",
    "crates/ruff_python_formatter/resources/",
    "crates/ruff_python_parser/resources/",
]
file_resolver.force_exclude = false
file_resolver.include = [
@@ -67,128 +69,128 @@ file_resolver.project_root = "[BASEPATH]"
linter.exclude = []
linter.project_root = "[BASEPATH]"
linter.rules.enabled = [
    MultipleImportsOnOneLine,
    ModuleImportNotAtTopOfFile,
    MultipleStatementsOnOneLineColon,
    MultipleStatementsOnOneLineSemicolon,
    UselessSemicolon,
    NoneComparison,
    TrueFalseComparison,
    NotInTest,
    NotIsTest,
    TypeComparison,
    BareExcept,
    LambdaAssignment,
    AmbiguousVariableName,
    AmbiguousClassName,
    AmbiguousFunctionName,
    IOError,
    SyntaxError,
    UnusedImport,
    ImportShadowedByLoopVar,
    UndefinedLocalWithImportStar,
    LateFutureImport,
    UndefinedLocalWithImportStarUsage,
    UndefinedLocalWithNestedImportStarUsage,
    FutureFeatureNotDefined,
    PercentFormatInvalidFormat,
    PercentFormatExpectedMapping,
    PercentFormatExpectedSequence,
    PercentFormatExtraNamedArguments,
    PercentFormatMissingArgument,
    PercentFormatMixedPositionalAndNamed,
    PercentFormatPositionalCountMismatch,
    PercentFormatStarRequiresSequence,
    PercentFormatUnsupportedFormatCharacter,
    StringDotFormatInvalidFormat,
    StringDotFormatExtraNamedArguments,
    StringDotFormatExtraPositionalArguments,
    StringDotFormatMissingArguments,
    StringDotFormatMixingAutomatic,
    FStringMissingPlaceholders,
    MultiValueRepeatedKeyLiteral,
    MultiValueRepeatedKeyVariable,
    ExpressionsInStarAssignment,
    MultipleStarredExpressions,
    AssertTuple,
    IsLiteral,
    InvalidPrintSyntax,
    IfTuple,
    BreakOutsideLoop,
    ContinueOutsideLoop,
    YieldOutsideFunction,
    ReturnOutsideFunction,
    DefaultExceptNotLast,
    ForwardAnnotationSyntaxError,
    RedefinedWhileUnused,
    UndefinedName,
    UndefinedExport,
    UndefinedLocal,
    UnusedVariable,
    UnusedAnnotation,
    RaiseNotImplemented,
    multiple-imports-on-one-line (E401),
    module-import-not-at-top-of-file (E402),
    multiple-statements-on-one-line-colon (E701),
    multiple-statements-on-one-line-semicolon (E702),
    useless-semicolon (E703),
    none-comparison (E711),
    true-false-comparison (E712),
    not-in-test (E713),
    not-is-test (E714),
    type-comparison (E721),
    bare-except (E722),
    lambda-assignment (E731),
    ambiguous-variable-name (E741),
    ambiguous-class-name (E742),
    ambiguous-function-name (E743),
    io-error (E902),
    syntax-error (E999),
    unused-import (F401),
    import-shadowed-by-loop-var (F402),
    undefined-local-with-import-star (F403),
    late-future-import (F404),
    undefined-local-with-import-star-usage (F405),
    undefined-local-with-nested-import-star-usage (F406),
    future-feature-not-defined (F407),
    percent-format-invalid-format (F501),
    percent-format-expected-mapping (F502),
    percent-format-expected-sequence (F503),
    percent-format-extra-named-arguments (F504),
    percent-format-missing-argument (F505),
    percent-format-mixed-positional-and-named (F506),
    percent-format-positional-count-mismatch (F507),
    percent-format-star-requires-sequence (F508),
    percent-format-unsupported-format-character (F509),
    string-dot-format-invalid-format (F521),
    string-dot-format-extra-named-arguments (F522),
    string-dot-format-extra-positional-arguments (F523),
    string-dot-format-missing-arguments (F524),
    string-dot-format-mixing-automatic (F525),
    f-string-missing-placeholders (F541),
    multi-value-repeated-key-literal (F601),
    multi-value-repeated-key-variable (F602),
    expressions-in-star-assignment (F621),
    multiple-starred-expressions (F622),
    assert-tuple (F631),
    is-literal (F632),
    invalid-print-syntax (F633),
    if-tuple (F634),
    break-outside-loop (F701),
    continue-outside-loop (F702),
    yield-outside-function (F704),
    return-outside-function (F706),
    default-except-not-last (F707),
    forward-annotation-syntax-error (F722),
    redefined-while-unused (F811),
    undefined-name (F821),
    undefined-export (F822),
    undefined-local (F823),
    unused-variable (F841),
    unused-annotation (F842),
    raise-not-implemented (F901),
]
linter.rules.should_fix = [
    MultipleImportsOnOneLine,
    ModuleImportNotAtTopOfFile,
    MultipleStatementsOnOneLineColon,
    MultipleStatementsOnOneLineSemicolon,
    UselessSemicolon,
    NoneComparison,
    TrueFalseComparison,
    NotInTest,
    NotIsTest,
    TypeComparison,
    BareExcept,
    LambdaAssignment,
    AmbiguousVariableName,
    AmbiguousClassName,
    AmbiguousFunctionName,
    IOError,
    SyntaxError,
    UnusedImport,
    ImportShadowedByLoopVar,
    UndefinedLocalWithImportStar,
    LateFutureImport,
    UndefinedLocalWithImportStarUsage,
    UndefinedLocalWithNestedImportStarUsage,
    FutureFeatureNotDefined,
    PercentFormatInvalidFormat,
    PercentFormatExpectedMapping,
    PercentFormatExpectedSequence,
    PercentFormatExtraNamedArguments,
    PercentFormatMissingArgument,
    PercentFormatMixedPositionalAndNamed,
    PercentFormatPositionalCountMismatch,
    PercentFormatStarRequiresSequence,
    PercentFormatUnsupportedFormatCharacter,
    StringDotFormatInvalidFormat,
    StringDotFormatExtraNamedArguments,
    StringDotFormatExtraPositionalArguments,
    StringDotFormatMissingArguments,
    StringDotFormatMixingAutomatic,
    FStringMissingPlaceholders,
    MultiValueRepeatedKeyLiteral,
    MultiValueRepeatedKeyVariable,
    ExpressionsInStarAssignment,
    MultipleStarredExpressions,
    AssertTuple,
    IsLiteral,
    InvalidPrintSyntax,
    IfTuple,
    BreakOutsideLoop,
    ContinueOutsideLoop,
    YieldOutsideFunction,
    ReturnOutsideFunction,
    DefaultExceptNotLast,
    ForwardAnnotationSyntaxError,
    RedefinedWhileUnused,
    UndefinedName,
    UndefinedExport,
    UndefinedLocal,
    UnusedVariable,
    UnusedAnnotation,
    RaiseNotImplemented,
    multiple-imports-on-one-line (E401),
    module-import-not-at-top-of-file (E402),
    multiple-statements-on-one-line-colon (E701),
    multiple-statements-on-one-line-semicolon (E702),
    useless-semicolon (E703),
    none-comparison (E711),
    true-false-comparison (E712),
    not-in-test (E713),
    not-is-test (E714),
    type-comparison (E721),
    bare-except (E722),
    lambda-assignment (E731),
    ambiguous-variable-name (E741),
    ambiguous-class-name (E742),
    ambiguous-function-name (E743),
    io-error (E902),
    syntax-error (E999),
    unused-import (F401),
    import-shadowed-by-loop-var (F402),
    undefined-local-with-import-star (F403),
    late-future-import (F404),
    undefined-local-with-import-star-usage (F405),
    undefined-local-with-nested-import-star-usage (F406),
    future-feature-not-defined (F407),
    percent-format-invalid-format (F501),
    percent-format-expected-mapping (F502),
    percent-format-expected-sequence (F503),
    percent-format-extra-named-arguments (F504),
    percent-format-missing-argument (F505),
    percent-format-mixed-positional-and-named (F506),
    percent-format-positional-count-mismatch (F507),
    percent-format-star-requires-sequence (F508),
    percent-format-unsupported-format-character (F509),
    string-dot-format-invalid-format (F521),
    string-dot-format-extra-named-arguments (F522),
    string-dot-format-extra-positional-arguments (F523),
    string-dot-format-missing-arguments (F524),
    string-dot-format-mixing-automatic (F525),
    f-string-missing-placeholders (F541),
    multi-value-repeated-key-literal (F601),
    multi-value-repeated-key-variable (F602),
    expressions-in-star-assignment (F621),
    multiple-starred-expressions (F622),
    assert-tuple (F631),
    is-literal (F632),
    invalid-print-syntax (F633),
    if-tuple (F634),
    break-outside-loop (F701),
    continue-outside-loop (F702),
    yield-outside-function (F704),
    return-outside-function (F706),
    default-except-not-last (F707),
    forward-annotation-syntax-error (F722),
    redefined-while-unused (F811),
    undefined-name (F821),
    undefined-export (F822),
    undefined-local (F823),
    unused-variable (F841),
    unused-annotation (F842),
    raise-not-implemented (F901),
]
linter.per_file_ignores = {}
linter.safety_table.forced_safe = []
@@ -231,7 +233,7 @@ linter.flake8_bandit.check_typed_exception = false
linter.flake8_bugbear.extend_immutable_calls = []
linter.flake8_builtins.builtins_ignorelist = []
linter.flake8_comprehensions.allow_dict_calls_with_keyword_arguments = false
linter.flake8_copyright.notice_rgx = (?i)Copyright\s+((?:\(C\)|©)\s+)?\d{4}(-\d{4})*
linter.flake8_copyright.notice_rgx = (?i)Copyright\s+((?:\(C\)|©)\s+)?\d{4}((-|,\s)\d{4})*
linter.flake8_copyright.author = none
linter.flake8_copyright.min_file_size = 0
linter.flake8_errmsg.max_string_length = 0
@@ -11,9 +11,9 @@ repository = { workspace = true }
license = { workspace = true }

[dependencies]
itertools = { workspace = true }
glob = { workspace = true }
globset = { workspace = true }
itertools = { workspace = true }
regex = { workspace = true }
filetime = { workspace = true }
seahash = { workspace = true }

@@ -65,7 +65,7 @@ use seahash::SeaHasher;
/// The main reason is that hashes and cache keys have different constraints:
///
/// * Cache keys are less performance sensitive: Hashes must be super fast to compute for performant hashed-collections. That's
///   why some standard types don't implement [`Hash`] where it would be safe to to implement [`CacheKey`], e.g. `HashSet`
///   why some standard types don't implement [`Hash`] where it would be safe to implement [`CacheKey`], e.g. `HashSet`
/// * Cache keys must be deterministic where hash keys do not have this constraint. That's why pointers don't implement [`CacheKey`] but they implement [`Hash`].
/// * Ideally, cache keys are portable
///

@@ -71,6 +71,14 @@ impl Diagnostic {
    }
}

/// Consumes `self` and returns a new `Diagnostic` with the given parent node.
#[inline]
#[must_use]
pub fn with_parent(mut self, parent: TextSize) -> Self {
    self.set_parent(parent);
    self
}

/// Set the location of the diagnostic's parent node.
#[inline]
pub fn set_parent(&mut self, parent: TextSize) {

@@ -25,11 +25,6 @@ pub trait Violation: Debug + PartialEq + Eq {
    /// The message used to describe the violation.
    fn message(&self) -> String;

    /// The explanation used in documentation and elsewhere.
    fn explanation() -> Option<&'static str> {
        None
    }

    // TODO(micha): Move `fix_title` to `Fix`, add new `advice` method that is shown as an advice.
    // Change the `Diagnostic` renderer to show the advice, and render the fix message after the `Suggested fix: <here>`

@@ -50,11 +45,6 @@ pub trait AlwaysFixableViolation: Debug + PartialEq + Eq {
    /// The message used to describe the violation.
    fn message(&self) -> String;

    /// The explanation used in documentation and elsewhere.
    fn explanation() -> Option<&'static str> {
        None
    }

    /// The title displayed for the available fix.
    fn fix_title(&self) -> String;

@@ -71,10 +61,6 @@ impl<V: AlwaysFixableViolation> Violation for V {
        <Self as AlwaysFixableViolation>::message(self)
    }

    fn explanation() -> Option<&'static str> {
        <Self as AlwaysFixableViolation>::explanation()
    }

    fn fix_title(&self) -> Option<String> {
        Some(<Self as AlwaysFixableViolation>::fix_title(self))
    }

@@ -138,7 +138,7 @@ pub const fn empty_line() -> Line {
///
/// # Examples
///
/// The line breaks are emitted as spaces if the enclosing `Group` fits on a a single line:
/// The line breaks are emitted as spaces if the enclosing `Group` fits on a single line:
/// ```
/// use ruff_formatter::{format, format_args};
/// use ruff_formatter::prelude::*;

@@ -1,6 +1,6 @@
[package]
name = "ruff_linter"
version = "0.3.4"
version = "0.4.2"
publish = false
authors = { workspace = true }
edition = { workspace = true }

@@ -17,3 +17,9 @@ urllib.request.URLopener().open(fullurl='http://www.google.com')
urllib.request.URLopener().open('http://www.google.com')
urllib.request.URLopener().open('file:///foo/bar/baz')
urllib.request.URLopener().open(url)

urllib.request.urlopen(url=urllib.request.Request('http://www.google.com'))
urllib.request.urlopen(url=urllib.request.Request('http://www.google.com'), **kwargs)
urllib.request.urlopen(urllib.request.Request('http://www.google.com'))
urllib.request.urlopen(urllib.request.Request('file:///foo/bar/baz'))
urllib.request.urlopen(urllib.request.Request(url))

@@ -124,3 +124,8 @@ try:
    pass
except Exception:
    error("...", exc_info=True)

try:
    ...
except Exception as e:
    raise ValueError from e

@@ -31,8 +31,7 @@ def function(
    kwonly_nonboolvalued_boolhint: bool = 1,
    kwonly_nonboolvalued_boolstrhint: "bool" = 1,
    **kw,
):
    ...
): ...


def used(do):
@@ -131,4 +130,27 @@ class Fit:
    def __post_init__(self, force: bool) -> None:
        print(force)


Fit(force=True)


# https://github.com/astral-sh/ruff/issues/10356
from django.db.models import Case, Q, Value, When


qs.annotate(
    is_foo_or_bar=Case(
        When(Q(is_foo=True) | Q(is_bar=True)),
        then=Value(True),
    ),
    default=Value(False),
)


# https://github.com/astral-sh/ruff/issues/10485
from pydantic import Field
from pydantic_settings import BaseSettings


class Settings(BaseSettings):
    foo: bool = Field(True, exclude=True)

@@ -6,6 +6,25 @@ def this_is_a_bug():
    print("Ooh, callable! Or is it?")


def still_a_bug():
    import builtins
    o = object()
    if builtins.hasattr(o, "__call__"):
        print("B U G")
    if builtins.getattr(o, "__call__", False):
        print("B U G")


def trickier_fix_for_this_one():
    o = object()

    def callable(x):
        return True

    if hasattr(o, "__call__"):
        print("STILL a bug!")


def this_is_fine():
    o = object()
    if callable(o):
20 crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B006_8.py vendored Normal file
@@ -0,0 +1,20 @@
def foo(a: list = []):
    raise NotImplementedError("")


def bar(a: dict = {}):
    """ This one also has a docstring"""
    raise NotImplementedError("and has some text in here")


def baz(a: list = []):
    """This one raises a different exception"""
    raise IndexError()


def qux(a: list = []):
    raise NotImplementedError


def quux(a: list = []):
    raise NotImplemented
@@ -23,3 +23,15 @@ def okay(data: custom.ImmutableTypeA = foo()):

def error_due_to_missing_import(data: List[str] = Depends(None)):
    ...


class Class:
    pass


def okay(obj=Class()):
    ...


def error(obj=OtherClass()):
    ...

@@ -64,3 +64,6 @@ setattr(*foo, "bar", None)
# Regression test for: https://github.com/astral-sh/ruff/issues/7455#issuecomment-1739800901
getattr(self.
        registration.registry, '__name__')

import builtins
builtins.getattr(foo, "bar")

@@ -115,25 +115,25 @@ class non_keyword_abcmeta_2(abc.ABCMeta):  # safe


# very invalid code, but that's up to mypy et al to check
class keyword_abc_1(metaclass=ABC):  # safe
class keyword_abc_1(metaclass=ABC):  # incorrect but outside scope of this check
    def method(self):
        foo()


class keyword_abc_2(metaclass=abc.ABC):  # safe
class keyword_abc_2(metaclass=abc.ABC):  # incorrect but outside scope of this check
    def method(self):
        foo()


class abc_set_class_variable_1(ABC):  # safe
class abc_set_class_variable_1(ABC):  # safe (abstract attribute)
    foo: int


class abc_set_class_variable_2(ABC):  # safe
class abc_set_class_variable_2(ABC):  # error (not an abstract attribute)
    foo = 2


class abc_set_class_variable_3(ABC):  # safe
class abc_set_class_variable_3(ABC):  # error (not an abstract attribute)
    foo: int = 2


@@ -174,6 +174,49 @@ for (_key1, _key2), (_value1, _value2) in groupby(
        collect_shop_items("Jane", group[1])
        collect_shop_items("Joe", group[1])

# Shouldn't trigger the warning when there is a continue, break statement.
for _section, section_items in groupby(items, key=lambda p: p[1]):
    if _section == "greens":
        collect_shop_items(shopper, section_items)
        continue
    elif _section == "frozen items":
        collect_shop_items(shopper, section_items)
        break
    collect_shop_items(shopper, section_items)

# Shouldn't trigger the warning when there is a return statement.
for _section, section_items in groupby(items, key=lambda p: p[1]):
    if _section == "greens":
        collect_shop_items(shopper, section_items)
        return
    elif _section == "frozen items":
        return section_items
    collect_shop_items(shopper, section_items)

# Should trigger the warning for duplicate access, even if is a return statement after.
for _section, section_items in groupby(items, key=lambda p: p[1]):
    if _section == "greens":
        collect_shop_items(shopper, section_items)
        collect_shop_items(shopper, section_items)
        return

# Should trigger the warning for duplicate access, even if is a return in another branch.
for _section, section_items in groupby(items, key=lambda p: p[1]):
    if _section == "greens":
        collect_shop_items(shopper, section_items)
        return
    elif _section == "frozen items":
        collect_shop_items(shopper, section_items)
        collect_shop_items(shopper, section_items)

# Should trigger, since only one branch has a return statement.
for _section, section_items in groupby(items, key=lambda p: p[1]):
    if _section == "greens":
        collect_shop_items(shopper, section_items)
        return
    elif _section == "frozen items":
        collect_shop_items(shopper, section_items)
    collect_shop_items(shopper, section_items)  # B031

# Let's redefine the `groupby` function to make sure we pick up the correct one.
# NOTE: This should always be at the end of the file.

@@ -23,3 +23,7 @@ zip([1, 2, 3], repeat(1, times=None))
# Errors (limited iterators).
zip([1, 2, 3], repeat(1, 1))
zip([1, 2, 3], repeat(1, times=4))

import builtins
# Still an error even though it uses the qualified name
builtins.zip([1, 2, 3])
160 crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B909.py vendored Normal file
@@ -0,0 +1,160 @@
"""
Should emit:
B909 - on lines 11, 25, 26, 40, 46
"""

# lists

some_list = [1, 2, 3]
some_other_list = [1, 2, 3]
for elem in some_list:
    # errors
    some_list.remove(0)
    del some_list[2]
    some_list.append(elem)
    some_list.sort()
    some_list.reverse()
    some_list.clear()
    some_list.extend([1, 2])
    some_list.insert(1, 1)
    some_list.pop(1)
    some_list.pop()

    # conditional break should error
    if elem == 2:
        some_list.remove(0)
        if elem == 3:
            break

    # non-errors
    some_other_list.remove(elem)
    del some_list
    del some_other_list
    found_idx = some_list.index(elem)
    some_list = 3

    # unconditional break should not error
    if elem == 2:
        some_list.remove(elem)
        break


# dicts
mydicts = {"a": {"foo": 1, "bar": 2}}

for elem in mydicts:
    # errors
    mydicts.popitem()
    mydicts.setdefault("foo", 1)
    mydicts.update({"foo": "bar"})

    # no errors
    elem.popitem()
    elem.setdefault("foo", 1)
    elem.update({"foo": "bar"})

# sets

myset = {1, 2, 3}

for _ in myset:
    # errors
    myset.update({4, 5})
    myset.intersection_update({4, 5})
    myset.difference_update({4, 5})
    myset.symmetric_difference_update({4, 5})
    myset.add(4)
    myset.discard(3)

    # no errors
    del myset


# members
class A:
    some_list: list

    def __init__(self, ls):
        self.some_list = list(ls)


a = A((1, 2, 3))
# ensure member accesses are handled as errors
for elem in a.some_list:
    a.some_list.remove(0)
    del a.some_list[2]


# Augassign should error

foo = [1, 2, 3]
bar = [4, 5, 6]
for _ in foo:
    foo *= 2
    foo += bar
    foo[1] = 9
    foo[1:2] = bar
    foo[1:2:3] = bar

foo = {1, 2, 3}
bar = {4, 5, 6}
for _ in foo:  # should error
    foo |= bar
    foo &= bar
    foo -= bar
    foo ^= bar


# more tests for unconditional breaks - should not error
for _ in foo:
    foo.remove(1)
    for _ in bar:
        bar.remove(1)
        break
    break

# should not error
for _ in foo:
    foo.remove(1)
    for _ in bar:
        ...
    break

# should error (?)
for _ in foo:
    foo.remove(1)
    if bar:
        bar.remove(1)
        break
    break

# should error
for _ in foo:
    if bar:
        pass
    else:
        foo.remove(1)

# should error
for elem in some_list:
    if some_list.pop() == 2:
        pass

# should not error
for elem in some_list:
    if some_list.pop() == 2:
        break

# should error
for elem in some_list:
    if some_list.pop() == 2:
        pass
    else:
        break

# should not error
for elem in some_list:
    del some_list[elem]
    some_list[elem] = 1
    some_list.remove(elem)
    some_list.discard(elem)
@@ -1,20 +1,30 @@
x = set(x for x in range(3))
x = set(x for x in range(3))
y = f"{set(a if a < 6 else 0 for a in range(3))}"
_ = "{}".format(set(a if a < 6 else 0 for a in range(3)))
print(f"Hello {set(a for a in range(3))} World")

# Cannot combine with C416. Should use set comprehension here.
even_nums = set(2 * x for x in range(3))
odd_nums = set(
    2 * x + 1 for x in range(3)
)
small_nums = f"{set(a if a < 6 else 0 for a in range(3))}"

def f(x):
    return x


print(f'Hello {set(a for a in "abc")} World')
print(f"Hello {set(a for a in 'abc')} World")
print(f"Hello {set(f(a) for a in 'abc')} World")
print(f"Hello { set(f(a) for a in 'abc') } World")


# Short-circuit case, combine with C416 and should produce x = set(range(3))
x = set(x for x in range(3))
x = set(
    x for x in range(3)
)
print(f"Hello {set(a for a in range(3))} World")
print(f"{set(a for a in 'abc') - set(a for a in 'ab')}")
print(f"{ set(a for a in 'abc') - set(a for a in 'ab') }")

# The fix generated for this diagnostic is incorrect, as we add additional space
# around the set comprehension.
print(f"{ {set(a for a in 'abc')} }")

# Not built-in set.
def set(*args, **kwargs):
    return None

set(2 * x for x in range(3))
set(x for x in range(3))
@@ -13,6 +13,10 @@ all(x.id for x in bar)
all(x.id for x in bar)
any(x.id for x in bar)
all((x.id for x in bar))
# we don't lint on these in stable yet
sum([x.val for x in bar])
min([x.val for x in bar])
max([x.val for x in bar])


async def f() -> bool:
8 crates/ruff_linter/resources/test/fixtures/flake8_comprehensions/C419_1.py vendored Normal file
@@ -0,0 +1,8 @@
sum([x.val for x in bar])
min([x.val for x in bar])
max([x.val for x in bar])

# Ok
sum(x.val for x in bar)
min(x.val for x in bar)
max(x.val for x in bar)
3 crates/ruff_linter/resources/test/fixtures/flake8_comprehensions/C419_2.py vendored Normal file
@@ -0,0 +1,3 @@
# no lint if shadowed
def all(x): pass
all([x.id for x in bar])
@@ -33,3 +33,9 @@ from datetime import datetime

# no replace or astimezone unqualified
datetime.strptime("something", "something")

# F-strings
datetime.strptime("something", f"%Y-%m-%dT%H:%M:%S{('.%f' if millis else '')}%z")
datetime.strptime("something", f"%Y-%m-%d %H:%M:%S%z")
# F-string is implicitly concatenated to another string
datetime.strptime("something", f"%Y-%m-%dT%H:%M:%S{('.%f' if millis else '')}" "%z")

@@ -21,6 +21,7 @@ def unconventional_aliases():
    import tkinter as tkr
    import networkx as nxy


def conventional_aliases():
    import altair as alt
    import matplotlib.pyplot as plt
10 crates/ruff_linter/resources/test/fixtures/flake8_import_conventions/same_name.py vendored Normal file
@@ -0,0 +1,10 @@
def no_alias():
    from django.conf import settings


def conventional_alias():
    from django.conf import settings as settings


def unconventional_alias():
    from django.conf import settings as s
@@ -7,6 +7,9 @@ logging.log(logging.INFO, f"Hello {name}")
_LOGGER = logging.getLogger()
_LOGGER.info(f"{__name__}")

logging.getLogger().info(f"{name}")

from logging import info

info(f"{name}")
info(f"{__name__}")

@@ -1,6 +1,9 @@
# PIE808
range(0, 10)

import builtins
builtins.range(0, 10)

# OK
range(x, 10)
range(-15, 10)
14 crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI025_2.py vendored Normal file
@@ -0,0 +1,14 @@
"""Tests to ensure we correctly rename references inside `__all__`"""

from collections.abc import Set

__all__ = ["Set"]

if True:
    __all__ += [r'''Set''']

if 1:
    __all__ += ["S" "e" "t"]

if not False:
    __all__ += ["Se" 't']
14 crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI025_2.pyi vendored Normal file
@@ -0,0 +1,14 @@
"""Tests to ensure we correctly rename references inside `__all__`"""

from collections.abc import Set

__all__ = ["Set"]

if True:
    __all__ += [r'''Set''']

if 1:
    __all__ += ["S" "e" "t"]

if not False:
    __all__ += ["Se" 't']
6 crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI025_3.py vendored Normal file
@@ -0,0 +1,6 @@
"""
Tests for PYI025 where the import is marked as re-exported
through usage of a "redundant" `import Set as Set` alias
"""

from collections.abc import Set as Set  # PYI025 triggered but fix is not marked as safe
6 crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI025_3.pyi vendored Normal file
@@ -0,0 +1,6 @@
|
||||
"""
|
||||
Tests for PYI025 where the import is marked as re-exported
|
||||
through usage of a "redundant" `import Set as Set` alias
|
||||
"""
|
||||
|
||||
from collections.abc import Set as Set # PYI025 triggered but fix is not marked as safe
|
||||
@@ -14,6 +14,15 @@ IntOrStr: TypeAlias = int | str
|
||||
IntOrFloat: Foo = int | float
|
||||
AliasNone: typing.TypeAlias = None
|
||||
|
||||
# these are ok
|
||||
class NotAnEnum:
|
||||
NOT_A_STUB_SO_THIS_IS_FINE = None
|
||||
|
||||
from enum import Enum
|
||||
|
||||
class FooEnum(Enum): ...
|
||||
|
||||
class BarEnum(FooEnum):
|
||||
BAR = None
|
||||
|
||||
VarAlias = str
|
||||
AliasFoo = Foo
|
||||
|
||||
@@ -13,6 +13,16 @@ IntOrStr: TypeAlias = int | str
|
||||
IntOrFloat: Foo = int | float
|
||||
AliasNone: typing.TypeAlias = None
|
||||
|
||||
class NotAnEnum:
|
||||
FLAG_THIS = None
|
||||
|
||||
# these are ok
|
||||
from enum import Enum
|
||||
|
||||
class FooEnum(Enum): ...
|
||||
|
||||
class BarEnum(FooEnum):
|
||||
BAR = None
|
||||
|
||||
VarAlias = str
|
||||
AliasFoo = Foo
|
||||
|
||||
@@ -195,6 +195,13 @@ class BadAsyncIterator(collections.abc.AsyncIterator[str]):
|
||||
def __aiter__(self) -> typing.AsyncIterator[str]:
|
||||
... # Y034 "__aiter__" methods in classes like "BadAsyncIterator" usually return "self" at runtime. Consider using "typing_extensions.Self" in "BadAsyncIterator.__aiter__", e.g. "def __aiter__(self) -> Self: ..." # Y022 Use "collections.abc.AsyncIterator[T]" instead of "typing.AsyncIterator[T]" (PEP 585 syntax)
|
||||
|
||||
class SubclassOfBadIterator3(BadIterator3):
|
||||
def __iter__(self) -> Iterator[int]: # Y034
|
||||
...
|
||||
|
||||
class SubclassOfBadAsyncIterator(BadAsyncIterator):
|
||||
def __aiter__(self) -> collections.abc.AsyncIterator[str]: # Y034
|
||||
...
|
||||
|
||||
class AsyncIteratorReturningAsyncIterable:
|
||||
def __aiter__(self) -> AsyncIterable[str]:
|
||||
@@ -225,6 +232,11 @@ class MetaclassInWhichSelfCannotBeUsed4(ABCMeta):
|
||||
async def __aenter__(self) -> MetaclassInWhichSelfCannotBeUsed4: ...
|
||||
def __isub__(self, other: MetaclassInWhichSelfCannotBeUsed4) -> MetaclassInWhichSelfCannotBeUsed4: ...
|
||||
|
||||
class SubclassOfMetaclassInWhichSelfCannotBeUsed(MetaclassInWhichSelfCannotBeUsed4):
|
||||
def __new__(cls) -> SubclassOfMetaclassInWhichSelfCannotBeUsed: ...
|
||||
def __enter__(self) -> SubclassOfMetaclassInWhichSelfCannotBeUsed: ...
|
||||
async def __aenter__(self) -> SubclassOfMetaclassInWhichSelfCannotBeUsed: ...
|
||||
def __isub__(self, other: SubclassOfMetaclassInWhichSelfCannotBeUsed) -> SubclassOfMetaclassInWhichSelfCannotBeUsed: ...
|
||||
|
||||
class Abstract(Iterator[str]):
|
||||
@abstractmethod
|
||||
|
||||
@@ -3,7 +3,7 @@ import types
|
||||
import typing
|
||||
from collections.abc import Awaitable
|
||||
from types import TracebackType
|
||||
from typing import Any, Type
|
||||
from typing import Any, Type, overload
|
||||
|
||||
import _typeshed
|
||||
import typing_extensions
|
||||
@@ -73,3 +73,97 @@ class BadFive:
|
||||
class BadSix:
|
||||
def __exit__(self, typ, exc, tb, weird_extra_arg, extra_arg2 = None) -> None: ... # PYI036: Extra arg must have default
|
||||
async def __aexit__(self, typ, exc, tb, *, weird_extra_arg) -> None: ... # PYI036: kwargs must have default
|
||||
|
||||
class AllPositionalOnlyArgs:
|
||||
def __exit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None, /) -> None: ...
|
||||
async def __aexit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None, /) -> None: ...
|
||||
|
||||
class BadAllPositionalOnlyArgs:
|
||||
def __exit__(self, typ: type[Exception] | None, exc: BaseException | None, tb: TracebackType | None, /) -> None: ...
|
||||
async def __aexit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType, /) -> None: ...
|
||||
|
||||
# Definitions not in a class scope can do whatever, we don't care
|
||||
def __exit__(self, *args: bool) -> None: ...
|
||||
async def __aexit__(self, *, go_crazy: bytes) -> list[str]: ...
|
||||
|
||||
# Here come the overloads...
|
||||
|
||||
class AcceptableOverload1:
|
||||
@overload
|
||||
def __exit__(self, exc_typ: None, exc: None, exc_tb: None) -> None: ...
|
||||
@overload
|
||||
def __exit__(self, exc_typ: type[BaseException], exc: BaseException, exc_tb: TracebackType) -> None: ...
|
||||
def __exit__(self, exc_typ: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None) -> None: ...
|
||||
|
||||
# Using `object` or `Unused` in an overload definition is kinda strange,
|
||||
# but let's allow it to be on the safe side
|
||||
class AcceptableOverload2:
|
||||
@overload
|
||||
def __exit__(self, exc_typ: None, exc: None, exc_tb: object) -> None: ...
|
||||
@overload
|
||||
def __exit__(self, exc_typ: Unused, exc: BaseException, exc_tb: object) -> None: ...
|
||||
def __exit__(self, exc_typ: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None) -> None: ...
|
||||
|
||||
class AcceptableOverload3:
|
||||
# Just ignore any overloads that don't have exactly 3 annotated non-self parameters.
|
||||
# We don't have the ability (yet) to do arbitrary checking
|
||||
# of whether one function definition is a subtype of another...
|
||||
@overload
|
||||
def __exit__(self, exc_typ: bool, exc: bool, exc_tb: bool, weird_extra_arg: bool) -> None: ...
|
||||
@overload
|
||||
def __exit__(self, *args: object) -> None: ...
|
||||
def __exit__(self, *args: object) -> None: ...
|
||||
@overload
|
||||
async def __aexit__(self, exc_typ: bool, /, exc: bool, exc_tb: bool, *, keyword_only: str) -> None: ...
|
||||
@overload
|
||||
async def __aexit__(self, *args: object) -> None: ...
|
||||
async def __aexit__(self, *args: object) -> None: ...
|
||||
|
||||
class AcceptableOverload4:
|
||||
# Same as above
|
||||
@overload
|
||||
def __exit__(self, exc_typ: type[Exception], exc: type[Exception], exc_tb: types.TracebackType) -> None: ...
|
||||
@overload
|
||||
def __exit__(self, *args: object) -> None: ...
|
||||
def __exit__(self, *args: object) -> None: ...
|
||||
@overload
|
||||
async def __aexit__(self, exc_typ: type[Exception], exc: type[Exception], exc_tb: types.TracebackType, *, extra: str = "foo") -> None: ...
|
||||
@overload
|
||||
async def __aexit__(self, exc_typ: None, exc: None, tb: None) -> None: ...
|
||||
async def __aexit__(self, *args: object) -> None: ...
|
||||
|
||||
class StrangeNumberOfOverloads:
|
||||
# Only one overload? Type checkers will emit an error, but we should just ignore it
|
||||
@overload
|
||||
def __exit__(self, exc_typ: bool, exc: bool, tb: bool) -> None: ...
|
||||
def __exit__(self, exc_typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None) -> None: ...
|
||||
# More than two overloads? Anything could be going on; again, just ignore all the overloads
|
||||
@overload
|
||||
async def __aexit__(self, arg: bool) -> None: ...
|
||||
@overload
|
||||
async def __aexit__(self, arg: None, arg2: None, arg3: None) -> None: ...
|
||||
@overload
|
||||
async def __aexit__(self, arg: bool, arg2: bool, arg3: bool) -> None: ...
|
||||
async def __aexit__(self, exc_typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None) -> None: ...
|
||||
|
||||
# TODO: maybe we should emit an error on this one as well?
|
||||
class BizarreAsyncSyncOverloadMismatch:
|
||||
@overload
|
||||
def __exit__(self, exc_typ: bool, exc: bool, tb: bool) -> None: ...
|
||||
@overload
|
||||
async def __exit__(self, exc_typ: bool, exc: bool, tb: bool) -> None: ...
|
||||
def __exit__(self, *args: object) -> None: ...
|
||||
|
||||
class UnacceptableOverload1:
|
||||
@overload
|
||||
def __exit__(self, exc_typ: None, exc: None, tb: None) -> None: ... # Okay
|
||||
@overload
|
||||
def __exit__(self, exc_typ: Exception, exc: Exception, tb: TracebackType) -> None: ... # PYI036
|
||||
def __exit__(self, exc_typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None) -> None: ...
|
||||
|
||||
class UnacceptableOverload2:
|
||||
@overload
|
||||
def __exit__(self, exc_typ: type[BaseException] | None, exc: None, tb: None) -> None: ... # PYI036
|
||||
@overload
|
||||
def __exit__(self, exc_typ: object, exc: Exception, tb: builtins.TracebackType) -> None: ... # PYI036
|
||||
def __exit__(self, exc_typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None) -> None: ...
|
||||
|
||||
@@ -3,7 +3,7 @@ import types
|
||||
import typing
|
||||
from collections.abc import Awaitable
|
||||
from types import TracebackType
|
||||
from typing import Any, Type
|
||||
from typing import Any, Type, overload
|
||||
|
||||
import _typeshed
|
||||
import typing_extensions
|
||||
@@ -73,3 +73,93 @@ class BadFive:
class BadSix:
    def __exit__(self, typ, exc, tb, weird_extra_arg, extra_arg2 = None) -> None: ... # PYI036: Extra arg must have default
    async def __aexit__(self, typ, exc, tb, *, weird_extra_arg) -> None: ... # PYI036: kwargs must have default


def isolated_scope():
    from builtins import type as Type

    class ShouldNotError:
        def __exit__(self, typ: Type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None) -> None: ...

class AllPositionalOnlyArgs:
    def __exit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None, /) -> None: ...
    async def __aexit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None, /) -> None: ...

class BadAllPositionalOnlyArgs:
    def __exit__(self, typ: type[Exception] | None, exc: BaseException | None, tb: TracebackType | None, /) -> None: ...
    async def __aexit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType, /) -> None: ...

# Definitions not in a class scope can do whatever, we don't care
def __exit__(self, *args: bool) -> None: ...
async def __aexit__(self, *, go_crazy: bytes) -> list[str]: ...

# Here come the overloads...

class AcceptableOverload1:
    @overload
    def __exit__(self, exc_typ: None, exc: None, exc_tb: None) -> None: ...
    @overload
    def __exit__(self, exc_typ: type[BaseException], exc: BaseException, exc_tb: TracebackType) -> None: ...

# Using `object` or `Unused` in an overload definition is kinda strange,
# but let's allow it to be on the safe side
class AcceptableOverload2:
    @overload
    def __exit__(self, exc_typ: None, exc: None, exc_tb: object) -> None: ...
    @overload
    def __exit__(self, exc_typ: Unused, exc: BaseException, exc_tb: object) -> None: ...

class AcceptableOverload3:
    # Just ignore any overloads that don't have exactly 3 annotated non-self parameters.
    # We don't have the ability (yet) to do arbitrary checking
    # of whether one function definition is a subtype of another...
    @overload
    def __exit__(self, exc_typ: bool, exc: bool, exc_tb: bool, weird_extra_arg: bool) -> None: ...
    @overload
    def __exit__(self, *args: object) -> None: ...
    @overload
    async def __aexit__(self, exc_typ: bool, /, exc: bool, exc_tb: bool, *, keyword_only: str) -> None: ...
    @overload
    async def __aexit__(self, *args: object) -> None: ...

class AcceptableOverload4:
    # Same as above
    @overload
    def __exit__(self, exc_typ: type[Exception], exc: type[Exception], exc_tb: types.TracebackType) -> None: ...
    @overload
    def __exit__(self, *args: object) -> None: ...
    @overload
    async def __aexit__(self, exc_typ: type[Exception], exc: type[Exception], exc_tb: types.TracebackType, *, extra: str = "foo") -> None: ...
    @overload
    async def __aexit__(self, exc_typ: None, exc: None, tb: None) -> None: ...

class StrangeNumberOfOverloads:
    # Only one overload? Type checkers will emit an error, but we should just ignore it
    @overload
    def __exit__(self, exc_typ: bool, exc: bool, tb: bool) -> None: ...
    # More than two overloads? Anything could be going on; again, just ignore all the overloads
    @overload
    async def __aexit__(self, arg: bool) -> None: ...
    @overload
    async def __aexit__(self, arg: None, arg2: None, arg3: None) -> None: ...
    @overload
    async def __aexit__(self, arg: bool, arg2: bool, arg3: bool) -> None: ...

# TODO: maybe we should emit an error on this one as well?
class BizarreAsyncSyncOverloadMismatch:
    @overload
    def __exit__(self, exc_typ: bool, exc: bool, tb: bool) -> None: ...
    @overload
    async def __exit__(self, exc_typ: bool, exc: bool, tb: bool) -> None: ...

class UnacceptableOverload1:
    @overload
    def __exit__(self, exc_typ: None, exc: None, tb: None) -> None: ... # Okay
    @overload
    def __exit__(self, exc_typ: Exception, exc: Exception, tb: TracebackType) -> None: ... # PYI036

class UnacceptableOverload2:
    @overload
    def __exit__(self, exc_typ: type[BaseException] | None, exc: None, tb: None) -> None: ... # PYI036
    @overload
    def __exit__(self, exc_typ: object, exc: Exception, tb: builtins.TracebackType) -> None: ... # PYI036

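Why the rule insists on those unions: on a clean exit the interpreter calls __exit__ with (None, None, None), and on an error it passes the live exception triple. A rough desugaring of a with statement, using a hypothetical manager named Demo (not part of the fixtures):

from types import TracebackType

class Demo:
    def __enter__(self) -> "Demo":
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        tb: TracebackType | None,
    ) -> bool:
        return False  # falsy: do not suppress the exception

# Roughly what `with Demo(): body` expands to:
cm = Demo()
cm.__enter__()
try:
    pass  # body
except BaseException as e:
    if not cm.__exit__(type(e), e, e.__traceback__):
        raise
else:
    cm.__exit__(None, None, None)  # clean exit passes None three times
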
@@ -80,5 +80,13 @@ def test_single_list_of_lists(param):
@pytest.mark.parametrize("a", [1, 2])
@pytest.mark.parametrize(("b", "c"), ((3, 4), (5, 6)))
@pytest.mark.parametrize("d", [3,])
def test_multiple_decorators(a, b, c):
@pytest.mark.parametrize(
    "d",
    [("3", "4")],
)
@pytest.mark.parametrize(
    "e",
    [("3", "4"),],
)
def test_multiple_decorators(a, b, c, d, e):
    pass

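For context on the stacked decorators above: each parametrize multiplies the run count, so the sketch below (names are illustrative) runs four times, once per combination:

import pytest

# 2 values for a  x  2 pairs for (b, c)  =  4 test runs
@pytest.mark.parametrize("a", [1, 2])
@pytest.mark.parametrize(("b", "c"), [(3, 4), (5, 6)])
def test_cross_product(a, b, c):
    assert a in (1, 2) and (b, c) in ((3, 4), (5, 6))
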
@@ -51,3 +51,8 @@ def test_error_parentheses_trailing_comma(x):
@pytest.mark.parametrize("x", [1, 2])
def test_ok(x):
    ...


@pytest.mark.parametrize('data, spec', [(1.0, 1.0), (1.0, 1.0)])
def test_numbers(data, spec):
    ...

crates/ruff_linter/resources/test/fixtures/flake8_quotes/doubles_all.py (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
"""This is a docstring."""

this_is_an_inline_string = "double quote string"

this_is_a_multiline_string = """
double quote string
"""
@@ -2,3 +2,8 @@ this_should_be_linted = 'single quote string'
this_should_be_linted = u'double quote string'
this_should_be_linted = f'double quote string'
this_should_be_linted = f'double {"quote"} string'

# https://github.com/astral-sh/ruff/issues/10546
x: "Literal['foo', 'bar']"
# https://github.com/astral-sh/ruff/issues/10761
f"Before {f'x {x}' if y else f'foo {z}'} after"

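Both issue links above concern strings the quote fixer must leave alone. A sketch of the intended behavior, assuming the flake8-quotes rules these fixtures exercise with double quotes preferred:

from typing import Literal

y, x, z = True, "x", "z"

s = 'plain'  # fixable: only the quote characters change, to "plain"
t: "Literal['foo', 'bar']"  # inner quotes kept; flipping them would require escapes
u = f"Before {f'x {x}' if y else f'foo {z}'} after"  # nested f-string quotes kept
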
@@ -406,3 +406,18 @@ def foo():
    with contextlib.suppress(Exception):
        y = 2
    return y


# See: https://github.com/astral-sh/ruff/issues/10732
def func(a: dict[str, int]) -> list[dict[str, int]]:
    services: list[dict[str, int]]
    if "services" in a:
        services = a["services"]
    return services


# See: https://github.com/astral-sh/ruff/issues/10732
def func(a: dict[str, int]) -> list[dict[str, int]]:
    if "services" in a:
        services = a["services"]
    return services

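These two functions appear to exercise flake8-return's RET504 (unnecessary assignment before a return); assuming that rule, the distinction under test is roughly the following sketch:

def bad() -> int:
    result = 40 + 2
    return result  # RET504: assign-then-return; `return 40 + 2` suffices

def ok(a: dict[str, int]) -> int | None:
    value: int | None = None
    if "k" in a:
        value = a["k"]
    return value  # fine: the assignment is conditional, not redundant
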
@@ -52,32 +52,32 @@ def f():
        return False


def f():
    # SIM103
    if a:
        return False
    else:
        return True


def f():
    # OK
    if a:
        return False
    else:
        return False


def f():
    # OK
    if a:
        return True
    else:
        return True


def f():
    # SIM103 (but not fixable)
    if a:
        return False
    else:
        return True


def f():
    # OK
    if a:
        return False
    else:
        return False


def f():
    # OK
    if a:
        return True
    else:
        return True


def f():
    # OK
    def bool():
        return False
    if a:
@@ -86,6 +86,14 @@ def f():
        return False


def f():
    # SIM103
    if keys is not None and notice.key not in keys:
        return False
    else:
        return True


###
# Positive cases (preview)
###

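For context, SIM103 (needless-bool) flags an if/else whose branches only return constant booleans; a minimal before/after, assuming the rule's documented fix:

def before(a: int) -> bool:
    if a:  # SIM103
        return False
    else:
        return True

def after(a: int) -> bool:
    return not a  # the suggested rewrite
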
@@ -58,3 +58,8 @@ for key in (
    .keys()
):
    continue

from builtins import dict as SneakyDict

d = SneakyDict()
key in d.keys()  # SIM118

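The SneakyDict lines above test that the rule sees through a rebinding of builtins.dict; the underlying SIM118 rewrite itself is simply:

d = {"a": 1}

"a" in d.keys()  # SIM118: builds a keys view only to test membership
"a" in d         # equivalent, and what the fix produces
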
@@ -21,3 +21,5 @@ for k, v in zip(d2.keys(), d2.values()): # SIM911
    ...

items = zip(x.keys(), x.values())  # OK

items.bar = zip(x.keys(), x.values())  # OK

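SIM911 targets zipping a dict's keys() with its values(); items() yields the same pairs in one pass, which is what the fix produces:

d2 = {"a": 1, "b": 2}

for k, v in zip(d2.keys(), d2.values()):  # SIM911
    print(k, v)

for k, v in d2.items():  # equivalent fix
    print(k, v)
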
@@ -11,3 +11,11 @@ from enum import Enum

class Fine(str, Enum):  # Ok
    __slots__ = ["foo"]


class SubEnum(Enum):
    pass


class Ok(str, SubEnum):  # Ok
    pass

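The point of the SLOT rules, for reference: a subclass of str (or tuple, or a namedtuple) silently gains a per-instance __dict__ unless it declares __slots__. A quick demonstration with illustrative names:

class Compact(str):
    __slots__ = ()  # instances stay dict-less, like str itself

class Heavy(str):  # SLOT000: no __slots__
    pass

Heavy("x").cached = True  # allowed: each instance now carries a dict
# Compact("x").cached = True  # would raise AttributeError
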
@@ -19,3 +19,9 @@ class Bad(Tuple[str, int, float]): # SLOT001

class Good(Tuple[str, int, float]):  # OK
    __slots__ = ("foo",)


import builtins

class AlsoBad(builtins.tuple[int, int]):  # SLOT001
    pass

@@ -6,6 +6,14 @@ class Bad(namedtuple("foo", ["str", "int"])): # SLOT002
    pass


class UnusualButStillBad(NamedTuple("foo", [("x", int, "y", int)])):  # SLOT002
    pass


class UnusualButOkay(NamedTuple("foo", [("x", int, "y", int)])):
    __slots__ = ()


class Good(namedtuple("foo", ["str", "int"])):  # OK
    __slots__ = ("foo",)

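One runtime wrinkle worth noting for the namedtuple case: CPython accepts only an empty __slots__ on tuple subclasses (a nonempty one raises TypeError), so the idiomatic guard looks like this sketch (names are illustrative):

from collections import namedtuple

Point = namedtuple("Point", ["x", "y"])

class Frozen(Point):
    __slots__ = ()  # instances stay plain tuples, no per-instance dict

class Leaky(Point):  # SLOT002: no __slots__
    pass

p = Leaky(1, 2)
p.extra = "oops"  # silently allowed, defeating the lightweight design
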
@@ -3,3 +3,5 @@ import ruff
import leading_prefix
import os
from . import leading_prefix
from .. import trailing_prefix
from ruff import check

crates/ruff_linter/resources/test/fixtures/isort/unicode.py (vendored, new file, 6 lines)
@@ -0,0 +1,6 @@
from astropy.constants import hbar as ℏ
from numpy import pi as π
import numpy as ℂℇℊℋℌℍℎℐℑℒℓℕℤΩℨKÅℬℭℯℰℱℹℴ
import numpy as CƐgHHHhIILlNZΩZKÅBCeEFio

h = 2 * π * ℏ
Some files were not shown because too many files have changed in this diff.