Compare commits


1 commit

Micha Reiser · 82f33db5e6 · Inline NodeKey construction and avoid AnyNodeRef · 2024-08-21 19:04:02 +02:00
6532 changed files with 78358 additions and 320619 deletions


@@ -8,7 +8,3 @@ benchmark = "bench -p ruff_benchmark --bench linter --bench formatter --"
# See: https://github.com/astral-sh/ruff/issues/11503
[target.'cfg(all(target_env="msvc", target_os = "windows"))']
rustflags = ["-C", "target-feature=+crt-static"]
[target.'wasm32-unknown-unknown']
# See https://docs.rs/getrandom/latest/getrandom/#webassembly-support
rustflags = ["--cfg", 'getrandom_backend="wasm_js"']


@@ -6,10 +6,3 @@ failure-output = "immediate-final"
fail-fast = false
status-level = "skip"
# Mark tests that take longer than 1s as slow.
# Terminate after 60s as a stop-gap measure to terminate on deadlock.
slow-timeout = { period = "1s", terminate-after = 60 }
# Show slow jobs in the final summary
final-status-level = "slow"


@@ -20,7 +20,7 @@
"extensions": [
"ms-python.python",
"rust-lang.rust-analyzer",
"fill-labs.dependi",
"serayuzgur.crates",
"tamasfe.even-better-toml",
"Swellaby.vscode-rust-test-adapter",
"charliermarsh.ruff"


@@ -17,7 +17,4 @@ indent_size = 4
trim_trailing_whitespace = false
[*.md]
max_line_length = 100
[*.toml]
indent_size = 4
max_line_length = 100

.gitattributes

@@ -14,7 +14,5 @@ crates/ruff_python_parser/resources/invalid/re_lex_logical_token_mac_eol.py text
crates/ruff_python_parser/resources/inline linguist-generated=true
ruff.schema.json -diff linguist-generated=true text=auto eol=lf
crates/ruff_python_ast/src/generated.rs -diff linguist-generated=true text=auto eol=lf
crates/ruff_python_formatter/src/generated.rs -diff linguist-generated=true text=auto eol=lf
ruff.schema.json linguist-generated=true text=auto eol=lf
*.md.snap linguist-language=Markdown

.github/CODEOWNERS

@@ -9,16 +9,13 @@
/crates/ruff_formatter/ @MichaReiser
/crates/ruff_python_formatter/ @MichaReiser
/crates/ruff_python_parser/ @MichaReiser @dhruvmanila
/crates/ruff_annotate_snippets/ @BurntSushi
# flake8-pyi
/crates/ruff_linter/src/rules/flake8_pyi/ @AlexWaygood
# Script for fuzzing the parser/red-knot etc.
/python/py-fuzzer/ @AlexWaygood
# Script for fuzzing the parser
/scripts/fuzz-parser/ @AlexWaygood
# red-knot
/crates/red_knot* @carljm @MichaReiser @AlexWaygood @sharkdp @dcreager
/crates/ruff_db/ @carljm @MichaReiser @AlexWaygood @sharkdp @dcreager
/scripts/knot_benchmark/ @carljm @MichaReiser @AlexWaygood @sharkdp @dcreager
/crates/red_knot_python_semantic @carljm @AlexWaygood @sharkdp @dcreager
/crates/red_knot* @carljm @MichaReiser @AlexWaygood
/crates/ruff_db/ @carljm @MichaReiser @AlexWaygood

.github/ISSUE_TEMPLATE.md (new file)

@@ -0,0 +1,12 @@
<!--
Thank you for taking the time to report an issue! We're glad to have you involved with Ruff.
If you're filing a bug report, please consider including the following information:
* List of keywords you searched for before creating this issue. Write them down here so that others can find this issue more easily and help provide feedback.
e.g. "RUF001", "unused variable", "Jupyter notebook"
* A minimal code snippet that reproduces the bug.
* The command you invoked (e.g., `ruff /path/to/file.py --fix`), ideally including the `--isolated` flag.
* The current Ruff settings (any relevant sections from your `pyproject.toml`).
* The current Ruff version (`ruff --version`).
-->


@@ -1,31 +0,0 @@
name: Bug report
description: Report an error or unexpected behavior
body:
- type: markdown
attributes:
value: |
Thank you for taking the time to report an issue! We're glad to have you involved with Ruff.
**Before reporting, please make sure to search through [existing issues](https://github.com/astral-sh/ruff/issues?q=is:issue+is:open+label:bug) (including [closed](https://github.com/astral-sh/ruff/issues?q=is:issue%20state:closed%20label:bug)).**
- type: textarea
attributes:
label: Summary
description: |
A clear and concise description of the bug, including a minimal reproducible example.
Be sure to include the command you invoked (e.g., `ruff check /path/to/file.py --fix`), ideally including the `--isolated` flag and
the current Ruff settings (e.g., relevant sections from your `pyproject.toml`).
If possible, try to include the [playground](https://play.ruff.rs) link that reproduces this issue.
validations:
required: true
- type: input
attributes:
label: Version
description: What version of ruff are you using? (see `ruff version`)
placeholder: e.g., ruff 0.9.3 (90589372d 2025-01-23)
validations:
required: false


@@ -1,10 +0,0 @@
name: Rule request
description: Anything related to lint rules (proposing new rules, changes to existing rules, auto-fixes, etc.)
body:
- type: textarea
attributes:
label: Summary
description: |
A clear and concise description of the relevant request. If applicable, please describe the current behavior as well.
validations:
required: true


@@ -1,18 +0,0 @@
name: Question
description: Ask a question about Ruff
labels: ["question"]
body:
- type: textarea
attributes:
label: Question
description: Describe your question in detail.
validations:
required: true
- type: input
attributes:
label: Version
description: What version of ruff are you using? (see `ruff version`)
placeholder: e.g., ruff 0.9.3 (90589372d 2025-01-23)
validations:
required: false


@@ -1,8 +0,0 @@
blank_issues_enabled: true
contact_links:
- name: Documentation
url: https://docs.astral.sh/ruff
about: Please consult the documentation before creating an issue.
- name: Community
url: https://discord.com/invite/astral-sh
about: Join our Discord community to ask questions and collaborate.


@@ -1,10 +0,0 @@
# Configuration for the actionlint tool, which we run via pre-commit
# to verify the correctness of the syntax in our GitHub Actions workflows.
self-hosted-runner:
# Various runners we use that aren't recognized out-of-the-box by actionlint:
labels:
- depot-ubuntu-latest-8
- depot-ubuntu-22.04-16
- github-windows-2025-x86_64-8
- github-windows-2025-x86_64-16


@@ -40,23 +40,12 @@
enabled: true,
},
packageRules: [
// Pin GitHub Actions to immutable SHAs.
{
matchDepTypes: ["action"],
pinDigests: true,
},
// Annotate GitHub Actions SHAs with a SemVer version.
{
extends: ["helpers:pinGitHubActionDigests"],
extractVersion: "^(?<version>v?\\d+\\.\\d+\\.\\d+)$",
versioning: "regex:^v?(?<major>\\d+)(\\.(?<minor>\\d+)\\.(?<patch>\\d+))?$",
},
{
// Group upload/download artifact updates, the versions are dependent
groupName: "Artifact GitHub Actions dependencies",
matchManagers: ["github-actions"],
matchDatasources: ["gitea-tags", "github-tags"],
matchPackageNames: ["actions/.*-artifact"],
matchPackagePatterns: ["actions/.*-artifact"],
description: "Weekly update of artifact-related GitHub Actions dependencies",
},
{
@@ -72,7 +61,7 @@
{
// Disable updates of `zip-rs`; intentionally pinned for now due to ownership change
// See: https://github.com/astral-sh/uv/issues/3642
matchPackageNames: ["zip"],
matchPackagePatterns: ["zip"],
matchManagers: ["cargo"],
enabled: false,
},
@@ -81,7 +70,7 @@
// with `mkdocs-material-insider`.
// See: https://squidfunk.github.io/mkdocs-material/insiders/upgrade/
matchManagers: ["pip_requirements"],
matchPackageNames: ["mkdocs-material"],
matchPackagePatterns: ["mkdocs-material"],
enabled: false,
},
{
@@ -98,15 +87,22 @@
{
groupName: "Monaco",
matchManagers: ["npm"],
matchPackageNames: ["monaco"],
matchPackagePatterns: ["monaco"],
description: "Weekly update of the Monaco editor",
},
{
groupName: "strum",
matchManagers: ["cargo"],
matchPackageNames: ["strum"],
matchPackagePatterns: ["strum"],
description: "Weekly update of strum dependencies",
}
},
{
groupName: "ESLint",
matchManagers: ["npm"],
matchPackageNames: ["eslint"],
allowedVersions: "<9",
description: "Constraint ESLint to version 8 until TypeScript-eslint supports ESLint 9", // https://github.com/typescript-eslint/typescript-eslint/issues/8211
},
],
vulnerabilityAlerts: {
commitMessageSuffix: "",
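
The pinDigests and pinGitHubActionDigests rules above are what produce the SHA-pinned uses: lines seen throughout the workflow hunks below. A minimal sketch of the two forms of a checkout step, as they appear side by side in this comparison (the SHA and version comment are copied from the hunks below):

# Floating tag form: `v4` is mutable and can be re-pointed upstream.
- uses: actions/checkout@v4

# Pinned form produced by the rules above: an immutable commit SHA, with the
# SemVer tag kept as a trailing comment for readability (and for Renovate's
# extractVersion regex to keep updated).
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2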

.github/workflows/build-binaries.yml

@@ -23,12 +23,10 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions: {}
env:
PACKAGE_NAME: ruff
MODULE_NAME: ruff
PYTHON_VERSION: "3.13"
PYTHON_VERSION: "3.11"
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
@@ -39,52 +37,50 @@ jobs:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
submodules: recursive
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build sdist"
uses: PyO3/maturin-action@aef21716ff3dcae8a1c301d23ec3e4446972a6e3 # v1.49.1
uses: PyO3/maturin-action@v1
with:
command: sdist
args: --out dist
- name: "Test sdist"
run: |
pip install dist/"${PACKAGE_NAME}"-*.tar.gz --force-reinstall
"${MODULE_NAME}" --help
python -m "${MODULE_NAME}" --help
pip install dist/${{ env.PACKAGE_NAME }}-*.tar.gz --force-reinstall
${{ env.MODULE_NAME }} --help
python -m ${{ env.MODULE_NAME }} --help
- name: "Upload sdist"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: wheels-sdist
path: dist
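
The sdist test step appears above in two forms: one that splices ${{ env.* }} values directly into the script text, and one that passes them through the environment and quotes them in bash. A minimal sketch of both, assuming the workflow-level PACKAGE_NAME: ruff defined at the top of this file (the step-level env block below is added only to keep the sketch self-contained):

# Interpolated form: GitHub substitutes the value into the script before bash
# runs, so shell metacharacters in the value would be interpreted.
- name: "Test sdist"
  run: |
    pip install dist/${{ env.PACKAGE_NAME }}-*.tar.gz --force-reinstall

# Environment-variable form: the value reaches bash as an ordinary variable
# and is quoted at the point of use, which is injection-safe.
- name: "Test sdist"
  env:
    PACKAGE_NAME: ruff
  run: |
    pip install dist/"${PACKAGE_NAME}"-*.tar.gz --force-reinstall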
macos-x86_64:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: macos-14
runs-on: macos-12
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
submodules: recursive
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels - x86_64"
uses: PyO3/maturin-action@aef21716ff3dcae8a1c301d23ec3e4446972a6e3 # v1.49.1
uses: PyO3/maturin-action@v1
with:
target: x86_64
args: --release --locked --out dist
- name: "Upload wheels"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: wheels-macos-x86_64
path: dist
@@ -99,7 +95,7 @@ jobs:
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: artifacts-macos-x86_64
path: |
@@ -110,28 +106,27 @@ jobs:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: macos-14
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
submodules: recursive
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: arm64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels - aarch64"
uses: PyO3/maturin-action@aef21716ff3dcae8a1c301d23ec3e4446972a6e3 # v1.49.1
uses: PyO3/maturin-action@v1
with:
target: aarch64
args: --release --locked --out dist
- name: "Test wheel - aarch64"
run: |
pip install dist/"${PACKAGE_NAME}"-*.whl --force-reinstall
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
ruff --help
python -m ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: wheels-aarch64-apple-darwin
path: dist
@@ -146,7 +141,7 @@ jobs:
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: artifacts-aarch64-apple-darwin
path: |
@@ -166,18 +161,17 @@ jobs:
- target: aarch64-pc-windows-msvc
arch: x64
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
submodules: recursive
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: ${{ matrix.platform.arch }}
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@aef21716ff3dcae8a1c301d23ec3e4446972a6e3 # v1.49.1
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.platform.target }}
args: --release --locked --out dist
@@ -188,11 +182,11 @@ jobs:
if: ${{ !startsWith(matrix.platform.target, 'aarch64') }}
shell: bash
run: |
python -m pip install dist/"${PACKAGE_NAME}"-*.whl --force-reinstall
"${MODULE_NAME}" --help
python -m "${MODULE_NAME}" --help
python -m pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
${{ env.MODULE_NAME }} --help
python -m ${{ env.MODULE_NAME }} --help
- name: "Upload wheels"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: wheels-${{ matrix.platform.target }}
path: dist
@@ -203,7 +197,7 @@ jobs:
7z a $ARCHIVE_FILE ./target/${{ matrix.platform.target }}/release/ruff.exe
sha256sum $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: artifacts-${{ matrix.platform.target }}
path: |
@@ -219,18 +213,17 @@ jobs:
- x86_64-unknown-linux-gnu
- i686-unknown-linux-gnu
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
submodules: recursive
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@aef21716ff3dcae8a1c301d23ec3e4446972a6e3 # v1.49.1
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.target }}
manylinux: auto
@@ -238,11 +231,11 @@ jobs:
- name: "Test wheel"
if: ${{ startsWith(matrix.target, 'x86_64') }}
run: |
pip install dist/"${PACKAGE_NAME}"-*.whl --force-reinstall
"${MODULE_NAME}" --help
python -m "${MODULE_NAME}" --help
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
${{ env.MODULE_NAME }} --help
python -m ${{ env.MODULE_NAME }} --help
- name: "Upload wheels"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: wheels-${{ matrix.target }}
path: dist
@@ -260,7 +253,7 @@ jobs:
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: artifacts-${{ matrix.target }}
path: |
@@ -294,24 +287,23 @@ jobs:
arch: arm
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
submodules: recursive
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@aef21716ff3dcae8a1c301d23ec3e4446972a6e3 # v1.49.1
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.platform.target }}
manylinux: auto
docker-options: ${{ matrix.platform.maturin_docker_options }}
args: --release --locked --out dist
- uses: uraimo/run-on-arch-action@ac33288c3728ca72563c97b8b88dda5a65a84448 # v2
if: ${{ matrix.platform.arch != 'ppc64' && matrix.platform.arch != 'ppc64le'}}
- uses: uraimo/run-on-arch-action@v2
if: matrix.platform.arch != 'ppc64'
name: Test wheel
with:
arch: ${{ matrix.platform.arch == 'arm' && 'armv6' || matrix.platform.arch }}
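
The arch: line above relies on the && / || chain that GitHub Actions expressions use in place of a ternary operator: 'armv6' when the matrix arch is 'arm', the matrix arch otherwise. Sketched in isolation:

with:
  # `cond && a || b` emulates `cond ? a : b`; GitHub expressions have no
  # ternary operator. Caveat: if the "true" branch is itself empty or false,
  # the "false" branch wins, so the idiom is only safe with non-empty
  # literals such as 'armv6'.
  arch: ${{ matrix.platform.arch == 'arm' && 'armv6' || matrix.platform.arch }}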
@@ -325,7 +317,7 @@ jobs:
pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: wheels-${{ matrix.platform.target }}
path: dist
@@ -343,7 +335,7 @@ jobs:
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: artifacts-${{ matrix.platform.target }}
path: |
@@ -359,18 +351,17 @@ jobs:
- x86_64-unknown-linux-musl
- i686-unknown-linux-musl
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
submodules: recursive
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@aef21716ff3dcae8a1c301d23ec3e4446972a6e3 # v1.49.1
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.target }}
manylinux: musllinux_1_2
@@ -387,7 +378,7 @@ jobs:
.venv/bin/pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
.venv/bin/${{ env.MODULE_NAME }} --help
- name: "Upload wheels"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: wheels-${{ matrix.target }}
path: dist
@@ -405,7 +396,7 @@ jobs:
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: artifacts-${{ matrix.target }}
path: |
@@ -425,23 +416,22 @@ jobs:
arch: armv7
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
submodules: recursive
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@aef21716ff3dcae8a1c301d23ec3e4446972a6e3 # v1.49.1
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.platform.target }}
manylinux: musllinux_1_2
args: --release --locked --out dist
docker-options: ${{ matrix.platform.maturin_docker_options }}
- uses: uraimo/run-on-arch-action@ac33288c3728ca72563c97b8b88dda5a65a84448 # v2
- uses: uraimo/run-on-arch-action@v2
name: Test wheel
with:
arch: ${{ matrix.platform.arch }}
@@ -454,7 +444,7 @@ jobs:
.venv/bin/pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
.venv/bin/${{ env.MODULE_NAME }} --help
- name: "Upload wheels"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: wheels-${{ matrix.platform.target }}
path: dist
@@ -472,7 +462,7 @@ jobs:
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: artifacts-${{ matrix.platform.target }}
path: |

.github/workflows/build-docker.yml

@@ -17,283 +17,52 @@ on:
paths:
- .github/workflows/build-docker.yml
env:
RUFF_BASE_IMG: ghcr.io/${{ github.repository_owner }}/ruff
jobs:
docker-build:
name: Build Docker image (ghcr.io/astral-sh/ruff) for ${{ matrix.platform }}
docker-publish:
name: Build Docker image (ghcr.io/astral-sh/ruff)
runs-on: ubuntu-latest
environment:
name: release
strategy:
fail-fast: false
matrix:
platform:
- linux/amd64
- linux/arm64
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
submodules: recursive
persist-credentials: false
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
- uses: docker/setup-buildx-action@v3
- uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
- uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/astral-sh/ruff
- name: Check tag consistency
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
env:
TAG: ${{ inputs.plan != '' && fromJson(inputs.plan).announcement_tag || 'dry-run' }}
run: |
version=$(grep -m 1 "^version = " pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g')
if [ "${TAG}" != "${version}" ]; then
version=$(grep "version = " pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g')
if [ "${{ fromJson(inputs.plan).announcement_tag }}" != "${version}" ]; then
echo "The input tag does not match the version from pyproject.toml:" >&2
echo "${TAG}" >&2
echo "${{ fromJson(inputs.plan).announcement_tag }}" >&2
echo "${version}" >&2
exit 1
else
echo "Releasing ${version}"
fi
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
with:
images: ${{ env.RUFF_BASE_IMG }}
# Defining this makes sure the org.opencontainers.image.version OCI label becomes the actual release version and not the branch name
tags: |
type=raw,value=dry-run,enable=${{ inputs.plan == '' || fromJson(inputs.plan).announcement_tag_is_implicit }}
type=pep440,pattern={{ version }},value=${{ inputs.plan != '' && fromJson(inputs.plan).announcement_tag || 'dry-run' }},enable=${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
- name: Normalize Platform Pair (replace / with -)
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_TUPLE=${platform//\//-}" >> "$GITHUB_ENV"
# Adapted from https://docs.docker.com/build/ci/github-actions/multi-platform/
- name: Build and push by digest
id: build
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6
with:
context: .
platforms: ${{ matrix.platform }}
cache-from: type=gha,scope=ruff-${{ env.PLATFORM_TUPLE }}
cache-to: type=gha,mode=min,scope=ruff-${{ env.PLATFORM_TUPLE }}
labels: ${{ steps.meta.outputs.labels }}
outputs: type=image,name=${{ env.RUFF_BASE_IMG }},push-by-digest=true,name-canonical=true,push=${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
- name: Export digests
env:
digest: ${{ steps.build.outputs.digest }}
run: |
mkdir -p /tmp/digests
touch "/tmp/digests/${digest#sha256:}"
- name: Upload digests
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: digests-${{ env.PLATFORM_TUPLE }}
path: /tmp/digests/*
if-no-files-found: error
retention-days: 1
docker-publish:
name: Publish Docker image (ghcr.io/astral-sh/ruff)
runs-on: ubuntu-latest
environment:
name: release
needs:
- docker-build
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
steps:
- name: Download digests
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
with:
path: /tmp/digests
pattern: digests-*
merge-multiple: true
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
with:
images: ${{ env.RUFF_BASE_IMG }}
# Order is on purpose such that the label org.opencontainers.image.version has the first pattern with the full version
tags: |
type=pep440,pattern={{ version }},value=${{ fromJson(inputs.plan).announcement_tag }}
type=pep440,pattern={{ major }}.{{ minor }},value=${{ fromJson(inputs.plan).announcement_tag }}
- uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
# Adapted from https://docs.docker.com/build/ci/github-actions/multi-platform/
- name: Create manifest list and push
working-directory: /tmp/digests
# The jq command expands the docker/metadata json "tags" array entry to `-t tag1 -t tag2 ...` for each tag in the array
# The printf will expand the base image with the `<RUFF_BASE_IMG>@sha256:<sha256> ...` for each sha256 in the directory
# The final command becomes `docker buildx imagetools create -t tag1 -t tag2 ... <RUFF_BASE_IMG>@sha256:<sha256_1> <RUFF_BASE_IMG>@sha256:<sha256_2> ...`
run: |
# shellcheck disable=SC2046
docker buildx imagetools create \
$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf "${RUFF_BASE_IMG}@sha256:%s " *)
docker-publish-extra:
name: Publish additional Docker image based on ${{ matrix.image-mapping }}
runs-on: ubuntu-latest
environment:
name: release
needs:
- docker-publish
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
strategy:
fail-fast: false
matrix:
# Mapping of base image followed by a comma followed by one or more base tags (comma separated)
# Note, org.opencontainers.image.version label will use the first base tag (use the most specific tag first)
image-mapping:
- alpine:3.21,alpine3.21,alpine
- debian:bookworm-slim,bookworm-slim,debian-slim
- buildpack-deps:bookworm,bookworm,debian
steps:
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
- uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Generate Dynamic Dockerfile Tags
shell: bash
env:
TAG_VALUE: ${{ fromJson(inputs.plan).announcement_tag }}
run: |
set -euo pipefail
# Extract the image and tags from the matrix variable
IFS=',' read -r BASE_IMAGE BASE_TAGS <<< "${{ matrix.image-mapping }}"
# Generate Dockerfile content
cat <<EOF > Dockerfile
FROM ${BASE_IMAGE}
COPY --from=${RUFF_BASE_IMG}:latest /ruff /usr/local/bin/ruff
ENTRYPOINT []
CMD ["/usr/local/bin/ruff"]
EOF
# Initialize a variable to store all tag docker metadata patterns
TAG_PATTERNS=""
# Loop through all base tags and append its docker metadata pattern to the list
# Order is on purpose such that the label org.opencontainers.image.version has the first pattern with the full version
IFS=','; for TAG in ${BASE_TAGS}; do
TAG_PATTERNS="${TAG_PATTERNS}type=pep440,pattern={{ version }},suffix=-${TAG},value=${TAG_VALUE}\n"
TAG_PATTERNS="${TAG_PATTERNS}type=pep440,pattern={{ major }}.{{ minor }},suffix=-${TAG},value=${TAG_VALUE}\n"
TAG_PATTERNS="${TAG_PATTERNS}type=raw,value=${TAG}\n"
done
# Remove the trailing newline from the pattern list
TAG_PATTERNS="${TAG_PATTERNS%\\n}"
# Export image cache name
echo "IMAGE_REF=${BASE_IMAGE//:/-}" >> "$GITHUB_ENV"
# Export tag patterns using the multiline env var syntax
{
echo "TAG_PATTERNS<<EOF"
echo -e "${TAG_PATTERNS}"
echo EOF
} >> "$GITHUB_ENV"
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
# ghcr.io prefers index level annotations
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: index
with:
images: ${{ env.RUFF_BASE_IMG }}
flavor: |
latest=false
tags: |
${{ env.TAG_PATTERNS }}
- name: Build and push
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6
- name: "Build and push Docker image"
uses: docker/build-push-action@v6
with:
context: .
platforms: linux/amd64,linux/arm64
# We do not really need to cache here as the Dockerfile is tiny
#cache-from: type=gha,scope=ruff-${{ env.IMAGE_REF }}
#cache-to: type=gha,mode=min,scope=ruff-${{ env.IMAGE_REF }}
push: true
tags: ${{ steps.meta.outputs.tags }}
# Reuse the builder
cache-from: type=gha
cache-to: type=gha,mode=max
push: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
tags: ghcr.io/astral-sh/ruff:latest,ghcr.io/astral-sh/ruff:${{ (inputs.plan != '' && fromJson(inputs.plan).announcement_tag) || 'dry-run' }}
labels: ${{ steps.meta.outputs.labels }}
annotations: ${{ steps.meta.outputs.annotations }}
# This is effectively a duplicate of `docker-publish` to make https://github.com/astral-sh/ruff/pkgs/container/ruff
# show the ruff base image first since GitHub always shows the last updated image digests
# This works by annotating the original digests (previously non-annotated) which triggers an update to ghcr.io
docker-republish:
name: Annotate Docker image (ghcr.io/astral-sh/ruff)
runs-on: ubuntu-latest
environment:
name: release
needs:
- docker-publish-extra
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
steps:
- name: Download digests
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
with:
path: /tmp/digests
pattern: digests-*
merge-multiple: true
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: index
with:
images: ${{ env.RUFF_BASE_IMG }}
# Order is on purpose such that the label org.opencontainers.image.version has the first pattern with the full version
tags: |
type=pep440,pattern={{ version }},value=${{ fromJson(inputs.plan).announcement_tag }}
type=pep440,pattern={{ major }}.{{ minor }},value=${{ fromJson(inputs.plan).announcement_tag }}
- uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
# Adapted from https://docs.docker.com/build/ci/github-actions/multi-platform/
- name: Create manifest list and push
working-directory: /tmp/digests
# The readarray part is used to make sure the quoting and special characters are preserved on expansion (e.g. spaces)
# The jq command expands the docker/metadata json "tags" array entry to `-t tag1 -t tag2 ...` for each tag in the array
# The printf will expand the base image with the `<RUFF_BASE_IMG>@sha256:<sha256> ...` for each sha256 in the directory
# The final command becomes `docker buildx imagetools create -t tag1 -t tag2 ... <RUFF_BASE_IMG>@sha256:<sha256_1> <RUFF_BASE_IMG>@sha256:<sha256_2> ...`
run: |
readarray -t lines <<< "$DOCKER_METADATA_OUTPUT_ANNOTATIONS"; annotations=(); for line in "${lines[@]}"; do annotations+=(--annotation "$line"); done
# shellcheck disable=SC2046
docker buildx imagetools create \
"${annotations[@]}" \
$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf "${RUFF_BASE_IMG}@sha256:%s " *)

.github/workflows/ci.yaml

@@ -1,7 +1,5 @@
name: CI
permissions: {}
on:
push:
branches: [main]
@@ -18,7 +16,7 @@ env:
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
PACKAGE_NAME: ruff
PYTHON_VERSION: "3.13"
PYTHON_VERSION: "3.11"
jobs:
determine_changes:
@@ -26,179 +24,73 @@ jobs:
runs-on: ubuntu-latest
outputs:
# Flag that is raised when any code that affects parser is changed
parser: ${{ steps.check_parser.outputs.changed }}
parser: ${{ steps.changed.outputs.parser_any_changed }}
# Flag that is raised when any code that affects linter is changed
linter: ${{ steps.check_linter.outputs.changed }}
linter: ${{ steps.changed.outputs.linter_any_changed }}
# Flag that is raised when any code that affects formatter is changed
formatter: ${{ steps.check_formatter.outputs.changed }}
formatter: ${{ steps.changed.outputs.formatter_any_changed }}
# Flag that is raised when any code is changed
# This is superset of the linter and formatter
code: ${{ steps.check_code.outputs.changed }}
# Flag that is raised when any code that affects the fuzzer is changed
fuzz: ${{ steps.check_fuzzer.outputs.changed }}
# Flag that is set to "true" when code related to red-knot changes.
red_knot: ${{ steps.check_red_knot.outputs.changed }}
# Flag that is set to "true" when code related to the playground changes.
playground: ${{ steps.check_playground.outputs.changed }}
code: ${{ steps.changed.outputs.code_any_changed }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Determine merge base
id: merge_base
env:
BASE_REF: ${{ github.event.pull_request.base.ref || 'main' }}
run: |
sha=$(git merge-base HEAD "origin/${BASE_REF}")
echo "sha=${sha}" >> "$GITHUB_OUTPUT"
- uses: tj-actions/changed-files@v44
id: changed
with:
files_yaml: |
parser:
- Cargo.toml
- Cargo.lock
- crates/ruff_python_trivia/**
- crates/ruff_source_file/**
- crates/ruff_text_size/**
- crates/ruff_python_ast/**
- crates/ruff_python_parser/**
- scripts/fuzz-parser/**
- .github/workflows/ci.yaml
- name: Check if the parser code changed
id: check_parser
env:
MERGE_BASE: ${{ steps.merge_base.outputs.sha }}
run: |
if git diff --quiet "${MERGE_BASE}...HEAD" -- \
':Cargo.toml' \
':Cargo.lock' \
':crates/ruff_python_trivia/**' \
':crates/ruff_source_file/**' \
':crates/ruff_text_size/**' \
':crates/ruff_python_ast/**' \
':crates/ruff_python_parser/**' \
':python/py-fuzzer/**' \
':.github/workflows/ci.yaml' \
; then
echo "changed=false" >> "$GITHUB_OUTPUT"
else
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
linter:
- Cargo.toml
- Cargo.lock
- crates/**
- "!crates/ruff_python_formatter/**"
- "!crates/ruff_formatter/**"
- "!crates/ruff_dev/**"
- scripts/*
- python/**
- .github/workflows/ci.yaml
- name: Check if the linter code changed
id: check_linter
env:
MERGE_BASE: ${{ steps.merge_base.outputs.sha }}
run: |
if git diff --quiet "${MERGE_BASE}...HEAD" -- ':Cargo.toml' \
':Cargo.lock' \
':crates/**' \
':!crates/red_knot*/**' \
':!crates/ruff_python_formatter/**' \
':!crates/ruff_formatter/**' \
':!crates/ruff_dev/**' \
':!crates/ruff_db/**' \
':scripts/*' \
':python/**' \
':.github/workflows/ci.yaml' \
; then
echo "changed=false" >> "$GITHUB_OUTPUT"
else
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
formatter:
- Cargo.toml
- Cargo.lock
- crates/ruff_python_formatter/**
- crates/ruff_formatter/**
- crates/ruff_python_trivia/**
- crates/ruff_python_ast/**
- crates/ruff_source_file/**
- crates/ruff_python_index/**
- crates/ruff_text_size/**
- crates/ruff_python_parser/**
- crates/ruff_dev/**
- scripts/*
- python/**
- .github/workflows/ci.yaml
- name: Check if the formatter code changed
id: check_formatter
env:
MERGE_BASE: ${{ steps.merge_base.outputs.sha }}
run: |
if git diff --quiet "${MERGE_BASE}...HEAD" -- ':Cargo.toml' \
':Cargo.lock' \
':crates/ruff_python_formatter/**' \
':crates/ruff_formatter/**' \
':crates/ruff_python_trivia/**' \
':crates/ruff_python_ast/**' \
':crates/ruff_source_file/**' \
':crates/ruff_python_index/**' \
':crates/ruff_python_index/**' \
':crates/ruff_text_size/**' \
':crates/ruff_python_parser/**' \
':scripts/*' \
':python/**' \
':.github/workflows/ci.yaml' \
; then
echo "changed=false" >> "$GITHUB_OUTPUT"
else
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
- name: Check if the fuzzer code changed
id: check_fuzzer
env:
MERGE_BASE: ${{ steps.merge_base.outputs.sha }}
run: |
if git diff --quiet "${MERGE_BASE}...HEAD" -- ':Cargo.toml' \
':Cargo.lock' \
':fuzz/fuzz_targets/**' \
':.github/workflows/ci.yaml' \
; then
echo "changed=false" >> "$GITHUB_OUTPUT"
else
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
- name: Check if there was any code related change
id: check_code
env:
MERGE_BASE: ${{ steps.merge_base.outputs.sha }}
run: |
if git diff --quiet "${MERGE_BASE}...HEAD" -- ':**' \
':!**/*.md' \
':crates/red_knot_python_semantic/resources/mdtest/**/*.md' \
':!docs/**' \
':!assets/**' \
':.github/workflows/ci.yaml' \
; then
echo "changed=false" >> "$GITHUB_OUTPUT"
else
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
- name: Check if there was any playground related change
id: check_playground
env:
MERGE_BASE: ${{ steps.merge_base.outputs.sha }}
run: |
if git diff --quiet "${MERGE_BASE}...HEAD" -- \
':playground/**' \
; then
echo "changed=false" >> "$GITHUB_OUTPUT"
else
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
- name: Check if the red-knot code changed
id: check_red_knot
env:
MERGE_BASE: ${{ steps.merge_base.outputs.sha }}
run: |
if git diff --quiet "${MERGE_BASE}...HEAD" -- \
':Cargo.toml' \
':Cargo.lock' \
':crates/red_knot*/**' \
':crates/ruff_db/**' \
':crates/ruff_annotate_snippets/**' \
':crates/ruff_python_ast/**' \
':crates/ruff_python_parser/**' \
':crates/ruff_python_trivia/**' \
':crates/ruff_source_file/**' \
':crates/ruff_text_size/**' \
':.github/workflows/ci.yaml' \
; then
echo "changed=false" >> "$GITHUB_OUTPUT"
else
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
code:
- "**/*"
- "!**/*.md"
- "!docs/**"
- "!assets/**"
cargo-fmt:
name: "cargo fmt"
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup component add rustfmt
- run: cargo fmt --all --check
@@ -210,14 +102,12 @@ jobs:
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: |
rustup component add clippy
rustup target add wasm32-unknown-unknown
- uses: Swatinem/rust-cache@v2
- name: "Clippy"
run: cargo clippy --workspace --all-targets --all-features --locked -- -D warnings
- name: "Clippy (wasm)"
@@ -225,35 +115,25 @@ jobs:
cargo-test-linux:
name: "cargo test (linux)"
runs-on: depot-ubuntu-22.04-16
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@v1
- name: "Install cargo nextest"
uses: taiki-e/install-action@be7c31b6745feec79dec5eb79178466c0670bb2d # v2
uses: taiki-e/install-action@v2
with:
tool: cargo-nextest
- name: "Install cargo insta"
uses: taiki-e/install-action@be7c31b6745feec79dec5eb79178466c0670bb2d # v2
uses: taiki-e/install-action@v2
with:
tool: cargo-insta
- name: Red-knot mdtests (GitHub annotations)
if: ${{ needs.determine_changes.outputs.red_knot == 'true' }}
env:
NO_COLOR: 1
MDTEST_GITHUB_ANNOTATIONS_FORMAT: 1
# Ignore errors if this step fails; we want to continue to later steps in the workflow anyway.
# This step is just to get nice GitHub annotations on the PR diff in the files-changed tab.
run: cargo test -p red_knot_python_semantic --test mdtest || true
- uses: Swatinem/rust-cache@v2
- name: "Run tests"
shell: bash
env:
@@ -268,65 +148,33 @@ jobs:
# sync, not just public items. Eventually we should do this for all
# crates; for now add crates here as they are warning-clean to prevent
# regression.
- run: cargo doc --no-deps -p red_knot_python_semantic -p red_knot -p red_knot_test -p ruff_db --document-private-items
- run: cargo doc --no-deps -p red_knot_python_semantic -p red_knot -p ruff_db --document-private-items
env:
# Setting RUSTDOCFLAGS because `cargo doc --check` isn't yet implemented (https://github.com/rust-lang/cargo/issues/10025).
RUSTDOCFLAGS: "-D warnings"
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
- uses: actions/upload-artifact@v4
with:
name: ruff
path: target/debug/ruff
cargo-test-linux-release:
name: "cargo test (linux, release)"
runs-on: depot-ubuntu-22.04-16
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
timeout-minutes: 20
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@v1
- name: "Install cargo nextest"
uses: taiki-e/install-action@be7c31b6745feec79dec5eb79178466c0670bb2d # v2
with:
tool: cargo-nextest
- name: "Install cargo insta"
uses: taiki-e/install-action@be7c31b6745feec79dec5eb79178466c0670bb2d # v2
with:
tool: cargo-insta
- name: "Run tests"
shell: bash
env:
NEXTEST_PROFILE: "ci"
run: cargo insta test --release --all-features --unreferenced reject --test-runner nextest
cargo-test-windows:
name: "cargo test (windows)"
runs-on: github-windows-2025-x86_64-16
runs-on: windows-latest
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup show
- name: "Install cargo nextest"
uses: taiki-e/install-action@be7c31b6745feec79dec5eb79178466c0670bb2d # v2
uses: taiki-e/install-action@v2
with:
tool: cargo-nextest
- uses: Swatinem/rust-cache@v2
- name: "Run tests"
shell: bash
env:
NEXTEST_PROFILE: "ci"
# Workaround for <https://github.com/nextest-rs/nextest/issues/1493>.
RUSTUP_WINDOWS_PATH_ADD_BIN: 1
run: |
@@ -337,23 +185,19 @@ jobs:
name: "cargo test (wasm)"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 10
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown
- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4.3.0
- uses: actions/setup-node@v4
with:
node-version: 20
node-version: 18
cache: "npm"
cache-dependency-path: playground/package-lock.json
- uses: jetli/wasm-pack-action@0d096b08b4e5a7de8c28de67e11e945404e9eefa # v0.4.0
with:
version: v0.13.1
- uses: jetli/wasm-pack-action@v0.4.0
- uses: Swatinem/rust-cache@v2
- name: "Test ruff_wasm"
run: |
cd crates/ruff_wasm
@@ -366,72 +210,64 @@ jobs:
cargo-build-release:
name: "cargo build (release)"
runs-on: macos-latest
if: ${{ github.ref == 'refs/heads/main' }}
needs: determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@v1
- uses: Swatinem/rust-cache@v2
- name: "Build"
run: cargo build --release --locked
cargo-build-msrv:
name: "cargo build (msrv)"
runs-on: depot-ubuntu-latest-8
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: SebRollen/toml-action@b1b3628f55fc3a28208d4203ada8b737e9687876 # v1.2.0
- uses: actions/checkout@v4
- uses: SebRollen/toml-action@v1.2.0
id: msrv
with:
file: "Cargo.toml"
field: "workspace.package.rust-version"
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: "Install Rust toolchain"
env:
MSRV: ${{ steps.msrv.outputs.value }}
run: rustup default "${MSRV}"
run: rustup default ${{ steps.msrv.outputs.value }}
- name: "Install mold"
uses: rui314/setup-mold@v1
- name: "Install cargo nextest"
uses: taiki-e/install-action@be7c31b6745feec79dec5eb79178466c0670bb2d # v2
uses: taiki-e/install-action@v2
with:
tool: cargo-nextest
- name: "Install cargo insta"
uses: taiki-e/install-action@be7c31b6745feec79dec5eb79178466c0670bb2d # v2
uses: taiki-e/install-action@v2
with:
tool: cargo-insta
- uses: Swatinem/rust-cache@v2
- name: "Run tests"
shell: bash
env:
NEXTEST_PROFILE: "ci"
MSRV: ${{ steps.msrv.outputs.value }}
run: cargo "+${MSRV}" insta test --all-features --unreferenced reject --test-runner nextest
run: cargo +${{ steps.msrv.outputs.value }} insta test --all-features --unreferenced reject --test-runner nextest
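
The MSRV job reads workspace.package.rust-version out of Cargo.toml and feeds it to both rustup and cargo, so the minimum supported Rust version is declared in exactly one place. Condensed from the quoted variant in the hunk above:

- uses: SebRollen/toml-action@v1.2.0
  id: msrv
  with:
    file: "Cargo.toml"
    field: "workspace.package.rust-version"  # single source of truth for MSRV
- name: "Install Rust toolchain"
  env:
    MSRV: ${{ steps.msrv.outputs.value }}
  run: rustup default "${MSRV}"
- name: "Run tests"
  env:
    MSRV: ${{ steps.msrv.outputs.value }}
  # `cargo +<toolchain>` pins this single invocation to the MSRV toolchain.
  run: cargo "+${MSRV}" insta test --all-features --unreferenced reject --test-runner nextest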
cargo-fuzz-build:
name: "cargo fuzz build"
cargo-fuzz:
name: "cargo fuzz"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ github.ref == 'refs/heads/main' || needs.determine_changes.outputs.fuzz == 'true' || needs.determine_changes.outputs.code == 'true' }}
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 10
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
with:
workspaces: "fuzz -> target"
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v2
with:
workspaces: "fuzz -> target"
- name: "Install cargo-binstall"
uses: cargo-bins/cargo-binstall@main
with:
@@ -442,63 +278,49 @@ jobs:
- run: cargo fuzz build -s none
fuzz-parser:
name: "fuzz parser"
name: "Fuzz the parser"
runs-on: ubuntu-latest
needs:
- cargo-test-linux
- determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && needs.determine_changes.outputs.parser == 'true' }}
if: ${{ needs.determine_changes.outputs.parser == 'true' }}
timeout-minutes: 20
env:
FORCE_COLOR: 1
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
persist-credentials: false
- uses: astral-sh/setup-uv@0c5e2b8115b80b4c7c5ddf6ffdd634974642d182 # v5.4.1
- uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
python-version: ${{ env.PYTHON_VERSION }}
- name: Install uv
run: curl -LsSf https://astral.sh/uv/install.sh | sh
- name: Install Python requirements
run: uv pip install -r scripts/fuzz-parser/requirements.txt --system
- uses: actions/download-artifact@v4
name: Download Ruff binary to test
id: download-cached-binary
with:
name: ruff
path: ruff-to-test
- name: Fuzz
env:
DOWNLOAD_PATH: ${{ steps.download-cached-binary.outputs.download-path }}
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x "${DOWNLOAD_PATH}/ruff"
chmod +x ${{ steps.download-cached-binary.outputs.download-path }}/ruff
(
uvx \
--python="${PYTHON_VERSION}" \
--from=./python/py-fuzzer \
fuzz \
--test-executable="${DOWNLOAD_PATH}/ruff" \
--bin=ruff \
0-500
)
python scripts/fuzz-parser/fuzz.py 0-500 --test-executable ${{ steps.download-cached-binary.outputs.download-path }}/ruff
scripts:
name: "test scripts"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 5
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup component add rustfmt
# Run all code generation scripts, and verify that the current output is
# already checked into git.
- run: python crates/ruff_python_ast/generate.py
- run: python crates/ruff_python_formatter/generate.py
- run: test -z "$(git status --porcelain)"
# Verify that adding a plugin or rule produces clean code.
- run: ./scripts/add_rule.py --name DoTheThing --prefix F --code 999 --linter pyflakes
- uses: Swatinem/rust-cache@v2
- run: ./scripts/add_rule.py --name DoTheThing --prefix PL --code C0999 --linter pylint
- run: cargo check
- run: cargo fmt --all --check
- run: |
@@ -509,30 +331,28 @@ jobs:
ecosystem:
name: "ecosystem"
runs-on: depot-ubuntu-latest-8
runs-on: ubuntu-latest
needs:
- cargo-test-linux
- determine_changes
# Only runs on pull requests, since that is the only way we can find the base version for comparison.
# Ecosystem check needs linter and/or formatter changes.
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && github.event_name == 'pull_request' && needs.determine_changes.outputs.code == 'true' }}
if: ${{ github.event_name == 'pull_request' && needs.determine_changes.outputs.code == 'true' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
- uses: actions/download-artifact@v4
name: Download comparison Ruff binary
id: ruff-target
with:
name: ruff
path: target/debug
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
- uses: dawidd6/action-download-artifact@v6
name: Download baseline Ruff binary
with:
name: ruff
@@ -546,72 +366,64 @@ jobs:
- name: Run `ruff check` stable ecosystem check
if: ${{ needs.determine_changes.outputs.linter == 'true' }}
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
chmod +x ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff
# Set pipefail to avoid hiding errors with tee
set -eo pipefail
ruff-ecosystem check ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown | tee ecosystem-result-check-stable
ruff-ecosystem check ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff --cache ./checkouts --output-format markdown | tee ecosystem-result-check-stable
cat ecosystem-result-check-stable > "$GITHUB_STEP_SUMMARY"
cat ecosystem-result-check-stable > $GITHUB_STEP_SUMMARY
echo "### Linter (stable)" > ecosystem-result
cat ecosystem-result-check-stable >> ecosystem-result
echo "" >> ecosystem-result
- name: Run `ruff check` preview ecosystem check
if: ${{ needs.determine_changes.outputs.linter == 'true' }}
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
chmod +x ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff
# Set pipefail to avoid hiding errors with tee
set -eo pipefail
ruff-ecosystem check ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-check-preview
ruff-ecosystem check ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-check-preview
cat ecosystem-result-check-preview > "$GITHUB_STEP_SUMMARY"
cat ecosystem-result-check-preview > $GITHUB_STEP_SUMMARY
echo "### Linter (preview)" >> ecosystem-result
cat ecosystem-result-check-preview >> ecosystem-result
echo "" >> ecosystem-result
- name: Run `ruff format` stable ecosystem check
if: ${{ needs.determine_changes.outputs.formatter == 'true' }}
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
chmod +x ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff
# Set pipefail to avoid hiding errors with tee
set -eo pipefail
ruff-ecosystem format ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown | tee ecosystem-result-format-stable
ruff-ecosystem format ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff --cache ./checkouts --output-format markdown | tee ecosystem-result-format-stable
cat ecosystem-result-format-stable > "$GITHUB_STEP_SUMMARY"
cat ecosystem-result-format-stable > $GITHUB_STEP_SUMMARY
echo "### Formatter (stable)" >> ecosystem-result
cat ecosystem-result-format-stable >> ecosystem-result
echo "" >> ecosystem-result
- name: Run `ruff format` preview ecosystem check
if: ${{ needs.determine_changes.outputs.formatter == 'true' }}
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
chmod +x ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff
# Set pipefail to avoid hiding errors with tee
set -eo pipefail
ruff-ecosystem format ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-format-preview
ruff-ecosystem format ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-format-preview
cat ecosystem-result-format-preview > "$GITHUB_STEP_SUMMARY"
cat ecosystem-result-format-preview > $GITHUB_STEP_SUMMARY
echo "### Formatter (preview)" >> ecosystem-result
cat ecosystem-result-format-preview >> ecosystem-result
echo "" >> ecosystem-result
@@ -620,13 +432,13 @@ jobs:
run: |
echo ${{ github.event.number }} > pr-number
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
- uses: actions/upload-artifact@v4
name: Upload PR Number
with:
name: pr-number
path: pr-number
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
- uses: actions/upload-artifact@v4
name: Upload Results
with:
name: ecosystem-result
@@ -638,9 +450,7 @@ jobs:
needs: determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/checkout@v4
- uses: cargo-bins/cargo-binstall@main
- run: cargo binstall --no-confirm cargo-shear
- run: cargo shear
@@ -649,25 +459,22 @@ jobs:
name: "python package"
runs-on: ubuntu-latest
timeout-minutes: 20
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- uses: Swatinem/rust-cache@v2
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@aef21716ff3dcae8a1c301d23ec3e4446972a6e3 # v1.49.1
uses: PyO3/maturin-action@v1
with:
args: --out dist
- name: "Test wheel"
run: |
pip install --force-reinstall --find-links dist "${PACKAGE_NAME}"
pip install --force-reinstall --find-links dist ${{ env.PACKAGE_NAME }}
ruff --help
python -m ruff --help
- name: "Remove wheels from cache"
@@ -675,28 +482,32 @@ jobs:
pre-commit:
name: "pre-commit"
runs-on: depot-ubuntu-22.04-16
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
persist-credentials: false
- uses: astral-sh/setup-uv@0c5e2b8115b80b4c7c5ddf6ffdd634974642d182 # v5.4.1
python-version: ${{ env.PYTHON_VERSION }}
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v2
- name: "Install pre-commit"
run: pip install pre-commit
- name: "Cache pre-commit"
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
uses: actions/cache@v4
with:
path: ~/.cache/pre-commit
key: pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
- name: "Run pre-commit"
run: |
echo '```console' > "$GITHUB_STEP_SUMMARY"
echo '```console' > $GITHUB_STEP_SUMMARY
# Enable color output for pre-commit and remove it for the summary
# Use --hook-stage=manual to enable slower pre-commit hooks that are skipped by default
SKIP=cargo-fmt,clippy,dev-generate-all uvx --python="${PYTHON_VERSION}" pre-commit run --all-files --show-diff-on-failure --color=always --hook-stage=manual | \
tee >(sed -E 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})*)?[mGK]//g' >> "$GITHUB_STEP_SUMMARY") >&1
exit_code="${PIPESTATUS[0]}"
echo '```' >> "$GITHUB_STEP_SUMMARY"
exit "$exit_code"
SKIP=cargo-fmt,clippy,dev-generate-all pre-commit run --all-files --show-diff-on-failure --color=always | \
tee >(sed -E 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})*)?[mGK]//g' >> $GITHUB_STEP_SUMMARY) >&1
exit_code=${PIPESTATUS[0]}
echo '```' >> $GITHUB_STEP_SUMMARY
exit $exit_code
docs:
name: "mkdocs"
@@ -705,28 +516,22 @@ jobs:
env:
MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: "3.13"
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
- name: "Add SSH key"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
uses: webfactory/ssh-agent@a6f90b1f127823b31d4d4a8d96047790581349bd # v0.9.1
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY }}
- name: "Install Rust toolchain"
run: rustup show
- name: Install uv
uses: astral-sh/setup-uv@0c5e2b8115b80b4c7c5ddf6ffdd634974642d182 # v5.4.1
- uses: Swatinem/rust-cache@v2
- name: "Install Insiders dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: uv pip install -r docs/requirements-insiders.txt --system
run: pip install -r docs/requirements-insiders.txt
- name: "Install dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: uv pip install -r docs/requirements.txt --system
run: pip install -r docs/requirements.txt
- name: "Update README File"
run: python scripts/transform_readme.py --target mkdocs
- name: "Generate docs"
@@ -744,21 +549,20 @@ jobs:
name: "formatter instabilities and black similarity"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.formatter == 'true' || github.ref == 'refs/heads/main') }}
if: needs.determine_changes.outputs.formatter == 'true' || github.ref == 'refs/heads/main'
timeout-minutes: 10
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup show
- name: "Run checks"
- name: "Cache rust"
uses: Swatinem/rust-cache@v2
- name: "Formatter progress"
run: scripts/formatter_ecosystem_checks.sh
- name: "Github step summary"
run: cat target/formatter-ecosystem/stats.txt > "$GITHUB_STEP_SUMMARY"
run: cat target/progress_projects_stats.txt > $GITHUB_STEP_SUMMARY
- name: "Remove checkouts from cache"
run: rm -r target/formatter-ecosystem
run: rm -r target/progress_projects
check-ruff-lsp:
name: "test ruff-lsp"
@@ -767,24 +571,22 @@ jobs:
needs:
- cargo-test-linux
- determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
steps:
- uses: extractions/setup-just@dd310ad5a97d8e7b41793f8ef055398d51ad4de6 # v2
- uses: extractions/setup-just@v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
name: "Download ruff-lsp source"
with:
persist-credentials: false
repository: "astral-sh/ruff-lsp"
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@v5
with:
# installation fails on 3.13 and newer
python-version: "3.12"
python-version: ${{ env.PYTHON_VERSION }}
- uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
- uses: actions/download-artifact@v4
name: Download development ruff binary
id: ruff-target
with:
@@ -796,76 +598,39 @@ jobs:
just install
- name: Run ruff-lsp tests
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: |
# Setup development binary
pip uninstall --yes ruff
chmod +x "${DOWNLOAD_PATH}/ruff"
export PATH="${DOWNLOAD_PATH}:${PATH}"
chmod +x ${{ steps.ruff-target.outputs.download-path }}/ruff
export PATH=${{ steps.ruff-target.outputs.download-path }}:$PATH
ruff version
just test
check-playground:
name: "check playground"
runs-on: ubuntu-latest
timeout-minutes: 5
needs:
- determine_changes
if: ${{ (needs.determine_changes.outputs.playground == 'true') }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4.3.0
with:
node-version: 22
cache: "npm"
cache-dependency-path: playground/package-lock.json
- uses: jetli/wasm-bindgen-action@20b33e20595891ab1a0ed73145d8a21fc96e7c29 # v0.2.0
- name: "Install Node dependencies"
run: npm ci
working-directory: playground
- name: "Build playgrounds"
run: npm run dev:wasm
working-directory: playground
- name: "Run TypeScript checks"
run: npm run check
working-directory: playground
- name: "Check formatting"
run: npm run fmt:check
working-directory: playground
benchmarks:
runs-on: ubuntu-24.04
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ github.repository == 'astral-sh/ruff' && !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
if: ${{ github.repository == 'astral-sh/ruff' && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
timeout-minutes: 20
steps:
- name: "Checkout Branch"
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup show
- name: "Install codspeed"
uses: taiki-e/install-action@be7c31b6745feec79dec5eb79178466c0670bb2d # v2
uses: taiki-e/install-action@v2
with:
tool: cargo-codspeed
- uses: Swatinem/rust-cache@v2
- name: "Build benchmarks"
run: cargo codspeed build --features codspeed -p ruff_benchmark
- name: "Run benchmarks"
uses: CodSpeedHQ/action@0010eb0ca6e89b80c88e8edaaa07cfe5f3e6664d # v3.5.0
uses: CodSpeedHQ/action@v3
with:
run: cargo codspeed run
token: ${{ secrets.CODSPEED_TOKEN }}


@@ -31,31 +31,25 @@ jobs:
# Don't run the cron job on forks:
if: ${{ github.repository == 'astral-sh/ruff' || github.event_name != 'schedule' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
persist-credentials: false
- uses: astral-sh/setup-uv@0c5e2b8115b80b4c7c5ddf6ffdd634974642d182 # v5.4.1
python-version: "3.12"
- name: Install uv
run: curl -LsSf https://astral.sh/uv/install.sh | sh
- name: Install Python requirements
run: uv pip install -r scripts/fuzz-parser/requirements.txt --system
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@v1
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- uses: Swatinem/rust-cache@v2
- name: Build ruff
# A debug build means the script runs slower once it gets started,
# but this is outweighed by the fact that a release build takes *much* longer to compile in CI
run: cargo build --locked
- name: Fuzz
run: |
# shellcheck disable=SC2046
(
uvx \
--python=3.12 \
--from=./python/py-fuzzer \
fuzz \
--test-executable=target/debug/ruff \
--bin=ruff \
$(shuf -i 0-9999999999999999999 -n 1000)
)
run: python scripts/fuzz-parser/fuzz.py $(shuf -i 0-9999999999999999999 -n 1000) --test-executable target/debug/ruff
create-issue-on-failure:
name: Create an issue if the daily fuzz surfaced any bugs
@@ -65,7 +59,7 @@ jobs:
permissions:
issues: write
steps:
- uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
- uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
@@ -73,6 +67,6 @@ jobs:
owner: "astral-sh",
repo: "ruff",
title: `Daily parser fuzz failed on ${new Date().toDateString()}`,
body: "Run listed here: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
body: "Runs listed here: https://github.com/astral-sh/ruff/actions/workflows/daily_fuzz.yml",
labels: ["bug", "parser", "fuzzer"],
})


@@ -1,72 +0,0 @@
name: Daily property test run
on:
workflow_dispatch:
schedule:
- cron: "0 12 * * *"
pull_request:
paths:
- ".github/workflows/daily_property_tests.yaml"
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
FORCE_COLOR: 1
jobs:
property_tests:
name: Property tests
runs-on: ubuntu-latest
timeout-minutes: 20
# Don't run the cron job on forks:
if: ${{ github.repository == 'astral-sh/ruff' || github.event_name != 'schedule' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@v1
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: Build Red Knot
# A release build takes longer (2 min vs 1 min), but the property tests run much faster in release
# mode (1.5 min vs 14 min), so the overall time is shorter with a release build.
run: cargo build --locked --release --package red_knot_python_semantic --tests
- name: Run property tests
shell: bash
run: |
export QUICKCHECK_TESTS=100000
for _ in {1..5}; do
cargo test --locked --release --package red_knot_python_semantic -- --ignored list::property_tests
cargo test --locked --release --package red_knot_python_semantic -- --ignored types::property_tests::stable
done
create-issue-on-failure:
name: Create an issue if the daily property test run surfaced any bugs
runs-on: ubuntu-latest
needs: property_tests
if: ${{ github.repository == 'astral-sh/ruff' && always() && github.event_name == 'schedule' && needs.property_tests.result == 'failure' }}
permissions:
issues: write
steps:
- uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
await github.rest.issues.create({
owner: "astral-sh",
repo: "ruff",
title: `Daily property test run failed on ${new Date().toDateString()}`,
body: "Run listed here: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
labels: ["bug", "red-knot", "testing"],
})


@@ -1,93 +0,0 @@
name: Run mypy_primer
permissions: {}
on:
pull_request:
paths:
- "crates/red_knot*/**"
- "crates/ruff_db"
- "crates/ruff_python_ast"
- "crates/ruff_python_parser"
- ".github/workflows/mypy_primer.yaml"
- ".github/workflows/mypy_primer_comment.yaml"
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
jobs:
mypy_primer:
name: Run mypy_primer
runs-on: depot-ubuntu-22.04-16
timeout-minutes: 20
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: ruff
fetch-depth: 0
persist-credentials: false
- name: Install the latest version of uv
uses: astral-sh/setup-uv@0c5e2b8115b80b4c7c5ddf6ffdd634974642d182 # v5.4.1
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
with:
workspaces: "ruff"
- name: Install Rust toolchain
run: rustup show
- name: Install mypy_primer
run: |
uv tool install "git+https://github.com/astral-sh/mypy_primer.git@add-red-knot-support-v5"
- name: Run mypy_primer
shell: bash
run: |
cd ruff
echo "new commit"
git rev-list --format=%s --max-count=1 "$GITHUB_SHA"
MERGE_BASE="$(git merge-base "$GITHUB_SHA" "origin/$GITHUB_BASE_REF")"
git checkout -b base_commit "$MERGE_BASE"
echo "base commit"
git rev-list --format=%s --max-count=1 base_commit
cd ..
# Allow the exit code to be 0 or 1, only fail for actual mypy_primer crashes/bugs
uvx mypy_primer \
--repo ruff \
--type-checker knot \
--old base_commit \
--new "$GITHUB_SHA" \
--project-selector '/(mypy_primer|black|pyp|git-revise|zipp|arrow|isort|itsdangerous|rich|packaging|pybind11|pyinstrument|typeshed-stats|scrapy|werkzeug|bidict|async-utils|python-chess|dacite|python-htmlgen|paroxython|porcupine|psycopg)$' \
--output concise \
--debug > mypy_primer.diff || [ $? -eq 1 ]
# Output diff with ANSI color codes
cat mypy_primer.diff
# Remove ANSI color codes before uploading
sed -ie 's/\x1b\[[0-9;]*m//g' mypy_primer.diff
echo ${{ github.event.number }} > pr-number
- name: Upload diff
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: mypy_primer_diff
path: mypy_primer.diff
- name: Upload pr-number
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: pr-number
path: pr-number


@@ -1,97 +0,0 @@
name: PR comment (mypy_primer)
on: # zizmor: ignore[dangerous-triggers]
workflow_run:
workflows: [Run mypy_primer]
types: [completed]
workflow_dispatch:
inputs:
workflow_run_id:
description: The mypy_primer workflow that triggers the workflow run
required: true
jobs:
comment:
runs-on: ubuntu-24.04
permissions:
pull-requests: write
steps:
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: Download PR number
with:
name: pr-number
run_id: ${{ github.event.workflow_run.id || github.event.inputs.workflow_run_id }}
if_no_artifact_found: ignore
allow_forks: true
- name: Parse pull request number
id: pr-number
run: |
if [[ -f pr-number ]]
then
echo "pr-number=$(<pr-number)" >> "$GITHUB_OUTPUT"
fi
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: "Download mypy_primer results"
id: download-mypy_primer_diff
if: steps.pr-number.outputs.pr-number
with:
name: mypy_primer_diff
workflow: mypy_primer.yaml
pr: ${{ steps.pr-number.outputs.pr-number }}
path: pr/mypy_primer_diff
workflow_conclusion: completed
if_no_artifact_found: ignore
allow_forks: true
- name: Generate comment content
id: generate-comment
if: steps.download-mypy_primer_diff.outputs.found_artifact == 'true'
run: |
# Guard against malicious mypy_primer results that symlink to a secret
# file on this runner
if [[ -L pr/mypy_primer_diff/mypy_primer.diff ]]
then
echo "Error: mypy_primer.diff cannot be a symlink"
exit 1
fi
# Note this identifier is used to find the comment to update on
# subsequent runs
echo '<!-- generated-comment mypy_primer -->' >> comment.txt
echo '## `mypy_primer` results' >> comment.txt
if [ -s "pr/mypy_primer_diff/mypy_primer.diff" ]; then
echo '<details>' >> comment.txt
echo '<summary>Changes were detected when running on open source projects</summary>' >> comment.txt
echo '' >> comment.txt
echo '```diff' >> comment.txt
cat pr/mypy_primer_diff/mypy_primer.diff >> comment.txt
echo '```' >> comment.txt
echo '</details>' >> comment.txt
else
echo 'No ecosystem changes detected ✅' >> comment.txt
fi
echo 'comment<<EOF' >> "$GITHUB_OUTPUT"
cat comment.txt >> "$GITHUB_OUTPUT"
echo 'EOF' >> "$GITHUB_OUTPUT"
- name: Find existing comment
uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3
if: steps.generate-comment.outcome == 'success'
id: find-comment
with:
issue-number: ${{ steps.pr-number.outputs.pr-number }}
comment-author: "github-actions[bot]"
body-includes: "<!-- generated-comment mypy_primer -->"
- name: Create or update comment
if: steps.find-comment.outcome == 'success'
uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4
with:
comment-id: ${{ steps.find-comment.outputs.comment-id }}
issue-number: ${{ steps.pr-number.outputs.pr-number }}
body-path: comment.txt
edit-mode: replace


@@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: "Update pre-commit mirror"
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
uses: actions/github-script@v7
with:
github-token: ${{ secrets.RUFF_PRE_COMMIT_PAT }}
script: |


@@ -10,13 +10,14 @@ on:
description: The ecosystem workflow that triggers the workflow run
required: true
permissions:
pull-requests: write
jobs:
comment:
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
- uses: dawidd6/action-download-artifact@v6
name: Download pull request number
with:
name: pr-number
@@ -29,10 +30,10 @@ jobs:
run: |
if [[ -f pr-number ]]
then
echo "pr-number=$(<pr-number)" >> "$GITHUB_OUTPUT"
echo "pr-number=$(<pr-number)" >> $GITHUB_OUTPUT
fi
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
- uses: dawidd6/action-download-artifact@v6
name: "Download ecosystem results"
id: download-ecosystem-result
if: steps.pr-number.outputs.pr-number
@@ -65,12 +66,12 @@ jobs:
cat pr/ecosystem/ecosystem-result >> comment.txt
echo "" >> comment.txt
echo 'comment<<EOF' >> "$GITHUB_OUTPUT"
cat comment.txt >> "$GITHUB_OUTPUT"
echo 'EOF' >> "$GITHUB_OUTPUT"
echo 'comment<<EOF' >> $GITHUB_OUTPUT
cat comment.txt >> $GITHUB_OUTPUT
echo 'EOF' >> $GITHUB_OUTPUT
- name: Find existing comment
uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3
uses: peter-evans/find-comment@v3
if: steps.generate-comment.outcome == 'success'
id: find-comment
with:
@@ -80,7 +81,7 @@ jobs:
- name: Create or update comment
if: steps.find-comment.outcome == 'success'
uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4
uses: peter-evans/create-or-update-comment@v4
with:
comment-id: ${{ steps.find-comment.outputs.comment-id }}
issue-number: ${{ steps.pr-number.outputs.pr-number }}


@@ -23,52 +23,52 @@ jobs:
env:
MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
ref: ${{ inputs.ref }}
persist-credentials: true
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
- uses: actions/setup-python@v5
with:
python-version: 3.12
- name: "Set docs version"
env:
version: ${{ (inputs.plan != '' && fromJson(inputs.plan).announcement_tag) || inputs.ref }}
run: |
# if version is missing, use 'latest'
if [ -z "$version" ]; then
echo "Using 'latest' as version"
version="latest"
version="${{ (inputs.plan != '' && fromJson(inputs.plan).announcement_tag) || inputs.ref }}"
# if version is missing, exit with error
if [[ -z "$version" ]]; then
echo "Can't build docs without a version."
exit 1
fi
# Use version as display name for now
display_name="$version"
echo "version=$version" >> "$GITHUB_ENV"
echo "display_name=$display_name" >> "$GITHUB_ENV"
echo "version=$version" >> $GITHUB_ENV
echo "display_name=$display_name" >> $GITHUB_ENV
- name: "Set branch name"
run: |
version="${{ env.version }}"
display_name="${{ env.display_name }}"
timestamp="$(date +%s)"
# create branch_display_name from display_name by replacing all
# characters disallowed in git branch names with hyphens
branch_display_name="$(echo "${display_name}" | tr -c '[:alnum:]._' '-' | tr -s '-')"
branch_display_name="$(echo "$display_name" | tr -c '[:alnum:]._' '-' | tr -s '-')"
echo "branch_name=update-docs-$branch_display_name-$timestamp" >> "$GITHUB_ENV"
echo "timestamp=$timestamp" >> "$GITHUB_ENV"
echo "branch_name=update-docs-$branch_display_name-$timestamp" >> $GITHUB_ENV
echo "timestamp=$timestamp" >> $GITHUB_ENV
- name: "Add SSH key"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
uses: webfactory/ssh-agent@a6f90b1f127823b31d4d4a8d96047790581349bd # v0.9.1
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY }}
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- uses: Swatinem/rust-cache@v2
- name: "Install Insiders dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
@@ -92,7 +92,9 @@ jobs:
run: mkdocs build --strict -f mkdocs.public.yml
- name: "Clone docs repo"
run: git clone https://${{ secrets.ASTRAL_DOCS_PAT }}@github.com/astral-sh/docs.git astral-docs
run: |
version="${{ env.version }}"
git clone https://${{ secrets.ASTRAL_DOCS_PAT }}@github.com/astral-sh/docs.git astral-docs
- name: "Copy docs"
run: rm -rf astral-docs/site/ruff && mkdir -p astral-docs/site && cp -r site/ruff astral-docs/site/
@@ -100,10 +102,12 @@ jobs:
- name: "Commit docs"
working-directory: astral-docs
run: |
branch_name="${{ env.branch_name }}"
git config user.name "astral-docs-bot"
git config user.email "176161322+astral-docs-bot@users.noreply.github.com"
git checkout -b "${branch_name}"
git checkout -b $branch_name
git add site/ruff
git commit -m "Update ruff documentation for $version"
@@ -112,8 +116,12 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.ASTRAL_DOCS_PAT }}
run: |
version="${{ env.version }}"
display_name="${{ env.display_name }}"
branch_name="${{ env.branch_name }}"
# set the PR title
pull_request_title="Update ruff documentation for ${display_name}"
pull_request_title="Update ruff documentation for $display_name"
# Delete any existing pull requests that are open for this version
# by checking against pull_request_title because the new PR will
@@ -122,15 +130,13 @@ jobs:
xargs -I {} gh pr close {}
# push the branch to GitHub
git push origin "${branch_name}"
git push origin $branch_name
# create the PR
gh pr create \
--base=main \
--head="${branch_name}" \
--title="${pull_request_title}" \
--body="Automated documentation update for ${display_name}" \
--label="documentation"
gh pr create --base main --head $branch_name \
--title "$pull_request_title" \
--body "Automated documentation update for $display_name" \
--label "documentation"
- name: "Merge Pull Request"
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
@@ -138,7 +144,8 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.ASTRAL_DOCS_PAT }}
run: |
branch_name="${{ env.branch_name }}"
# auto-merge the PR if the build was triggered by a release. Manual builds should be reviewed by a human.
# give the PR a few seconds to be created before trying to auto-merge it
sleep 10
gh pr merge --squash "${branch_name}"
gh pr merge --squash $branch_name


@@ -1,58 +0,0 @@
# Publish the Red Knot playground.
name: "[Knot Playground] Release"
permissions: {}
on:
push:
branches: [main]
paths:
- "crates/red_knot*/**"
- "crates/ruff_db/**"
- "crates/ruff_python_ast/**"
- "crates/ruff_python_parser/**"
- "playground/**"
- ".github/workflows/publish-knot-playground.yml"
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
jobs:
publish:
runs-on: ubuntu-latest
env:
CF_API_TOKEN_EXISTS: ${{ secrets.CF_API_TOKEN != '' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown
- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4.3.0
with:
node-version: 22
- uses: jetli/wasm-bindgen-action@20b33e20595891ab1a0ed73145d8a21fc96e7c29 # v0.2.0
- name: "Install Node dependencies"
run: npm ci
working-directory: playground
- name: "Run TypeScript checks"
run: npm run check
working-directory: playground
- name: "Build Knot playground"
run: npm run build --workspace knot-playground
working-directory: playground
- name: "Deploy to Cloudflare Pages"
if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
uses: cloudflare/wrangler-action@da0e0dfe58b7a431659754fdf3f186c529afbe65 # v3.14.1
with:
apiToken: ${{ secrets.CF_API_TOKEN }}
accountId: ${{ secrets.CF_ACCOUNT_ID }}
# `github.head_ref` is only set during pull requests and for manual runs or tags we use `main` to deploy to production
command: pages deploy playground/knot/dist --project-name=knot-playground --branch ${{ github.head_ref || 'main' }} --commit-hash ${GITHUB_SHA}


@@ -24,31 +24,32 @@ jobs:
env:
CF_API_TOKEN_EXISTS: ${{ secrets.CF_API_TOKEN != '' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown
- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4.3.0
- uses: actions/setup-node@v4
with:
node-version: 22
node-version: 18
cache: "npm"
cache-dependency-path: playground/package-lock.json
- uses: jetli/wasm-bindgen-action@20b33e20595891ab1a0ed73145d8a21fc96e7c29 # v0.2.0
- uses: jetli/wasm-pack-action@v0.4.0
- uses: jetli/wasm-bindgen-action@v0.2.0
- name: "Run wasm-pack"
run: wasm-pack build --target web --out-dir ../../playground/src/pkg crates/ruff_wasm
- name: "Install Node dependencies"
run: npm ci
working-directory: playground
- name: "Run TypeScript checks"
run: npm run check
working-directory: playground
- name: "Build Ruff playground"
run: npm run build --workspace ruff-playground
- name: "Build JavaScript bundle"
run: npm run build
working-directory: playground
- name: "Deploy to Cloudflare Pages"
if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
uses: cloudflare/wrangler-action@da0e0dfe58b7a431659754fdf3f186c529afbe65 # v3.14.1
uses: cloudflare/wrangler-action@v3.7.0
with:
apiToken: ${{ secrets.CF_API_TOKEN }}
accountId: ${{ secrets.CF_ACCOUNT_ID }}
# `github.head_ref` is only set during pull requests and for manual runs or tags we use `main` to deploy to production
command: pages deploy playground/ruff/dist --project-name=ruff-playground --branch ${{ github.head_ref || 'main' }} --commit-hash ${GITHUB_SHA}
command: pages deploy playground/dist --project-name=ruff-playground --branch ${{ github.head_ref || 'main' }} --commit-hash ${GITHUB_SHA}


@@ -21,12 +21,14 @@ jobs:
# For PyPI's trusted publishing.
id-token: write
steps:
- name: "Install uv"
uses: astral-sh/setup-uv@0c5e2b8115b80b4c7c5ddf6ffdd634974642d182 # v5.4.1
- uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
- uses: actions/download-artifact@v4
with:
pattern: wheels-*
path: wheels
merge-multiple: true
- name: Publish to PyPi
run: uv publish -v wheels/*
uses: pypa/gh-action-pypi-publish@release/v1
with:
skip-existing: true
packages-dir: wheels
verbose: true


@@ -29,15 +29,11 @@ jobs:
target: [web, bundler, nodejs]
fail-fast: false
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown
- uses: jetli/wasm-pack-action@0d096b08b4e5a7de8c28de67e11e945404e9eefa # v0.4.0
with:
version: v0.13.1
- uses: jetli/wasm-bindgen-action@20b33e20595891ab1a0ed73145d8a21fc96e7c29 # v0.2.0
- uses: jetli/wasm-pack-action@v0.4.0
- uses: jetli/wasm-bindgen-action@v0.2.0
- name: "Run wasm-pack build"
run: wasm-pack build --target ${{ matrix.target }} crates/ruff_wasm
- name: "Rename generated package"
@@ -45,9 +41,9 @@ jobs:
jq '.name="@astral-sh/ruff-wasm-${{ matrix.target }}"' crates/ruff_wasm/pkg/package.json > /tmp/package.json
mv /tmp/package.json crates/ruff_wasm/pkg
- run: cp LICENSE crates/ruff_wasm/pkg # wasm-pack does not put the LICENSE file in the pkg
- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4.3.0
- uses: actions/setup-node@v4
with:
node-version: 20
node-version: 18
registry-url: "https://registry.npmjs.org"
- name: "Publish (dry-run)"
if: ${{ inputs.plan == '' || fromJson(inputs.plan).announcement_tag_is_implicit }}


@@ -1,13 +1,10 @@
# This file was autogenerated by dist: https://github.com/astral-sh/cargo-dist
#
# Copyright 2022-2024, axodotdev
# Copyright 2025 Astral Software Inc.
# SPDX-License-Identifier: MIT or Apache-2.0
#
# CI that:
#
# * checks for a Git Tag that looks like a release
# * builds artifacts with dist (archives, installers, hashes)
# * builds artifacts with cargo-dist (archives, installers, hashes)
# * uploads those artifacts to temporary workflow zip
# * on success, uploads the artifacts to a GitHub Release
#
@@ -25,10 +22,10 @@ permissions:
# must be a Cargo-style SemVer Version (must have at least major.minor.patch).
#
# If PACKAGE_NAME is specified, then the announcement will be for that
# package (erroring out if it doesn't have the given version or isn't dist-able).
# package (erroring out if it doesn't have the given version or isn't cargo-dist-able).
#
# If PACKAGE_NAME isn't specified, then the announcement will be for all
# (dist-able) packages in the workspace with that version (this mode is
# (cargo-dist-able) packages in the workspace with that version (this mode is
# intended for workspaces with only one dist-able package, or with all dist-able
# packages versioned/released in lockstep).
#
@@ -49,9 +46,9 @@ on:
type: string
jobs:
# Run 'dist plan' (or host) to determine what tasks we need to do
# Run 'cargo dist plan' (or host) to determine what tasks we need to do
plan:
runs-on: "depot-ubuntu-latest-4"
runs-on: "ubuntu-20.04"
outputs:
val: ${{ steps.plan.outputs.manifest }}
tag: ${{ (inputs.tag != 'dry-run' && inputs.tag) || '' }}
@@ -60,20 +57,19 @@ jobs:
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@v4
with:
persist-credentials: false
submodules: recursive
- name: Install dist
- name: Install cargo-dist
# we specify bash to get pipefail; it guards against the `curl` command
# failing. otherwise `sh` won't catch that `curl` returned non-0
shell: bash
run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/astral-sh/cargo-dist/releases/download/v0.28.4-prerelease.1/cargo-dist-installer.sh | sh"
- name: Cache dist
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.18.0/cargo-dist-installer.sh | sh"
- name: Cache cargo-dist
uses: actions/upload-artifact@v4
with:
name: cargo-dist-cache
path: ~/.cargo/bin/dist
path: ~/.cargo/bin/cargo-dist
# sure would be cool if github gave us proper conditionals...
# so here's a doubly-nested ternary-via-truthiness to try to provide the best possible
# functionality based on whether this is a pull_request, and whether it's from a fork.
@@ -81,12 +77,12 @@ jobs:
# but also really annoying to build CI around when it needs secrets to work right.)
- id: plan
run: |
dist ${{ (inputs.tag && inputs.tag != 'dry-run' && format('host --steps=create --tag={0}', inputs.tag)) || 'plan' }} --output-format=json > plan-dist-manifest.json
echo "dist ran successfully"
cargo dist ${{ (inputs.tag && inputs.tag != 'dry-run' && format('host --steps=create --tag={0}', inputs.tag)) || 'plan' }} --output-format=json > plan-dist-manifest.json
echo "cargo dist ran successfully"
cat plan-dist-manifest.json
echo "manifest=$(jq -c "." plan-dist-manifest.json)" >> "$GITHUB_OUTPUT"
- name: "Upload dist-manifest.json"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@v4
with:
name: artifacts-plan-dist-manifest
path: plan-dist-manifest.json
@@ -118,24 +114,23 @@ jobs:
- plan
- custom-build-binaries
- custom-build-docker
runs-on: "depot-ubuntu-latest-4"
runs-on: "ubuntu-20.04"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BUILD_MANIFEST_NAME: target/distrib/global-dist-manifest.json
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@v4
with:
persist-credentials: false
submodules: recursive
- name: Install cached dist
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e
- name: Install cached cargo-dist
uses: actions/download-artifact@v4
with:
name: cargo-dist-cache
path: ~/.cargo/bin/
- run: chmod +x ~/.cargo/bin/dist
- run: chmod +x ~/.cargo/bin/cargo-dist
# Get all the local artifacts for the global tasks to use (for e.g. checksums)
- name: Fetch local artifacts
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e
uses: actions/download-artifact@v4
with:
pattern: artifacts-*
path: target/distrib/
@@ -143,8 +138,8 @@ jobs:
- id: cargo-dist
shell: bash
run: |
dist build ${{ needs.plan.outputs.tag-flag }} --output-format=json "--artifacts=global" > dist-manifest.json
echo "dist ran successfully"
cargo dist build ${{ needs.plan.outputs.tag-flag }} --output-format=json "--artifacts=global" > dist-manifest.json
echo "cargo dist ran successfully"
# Parse out what we just built and upload it to scratch storage
echo "paths<<EOF" >> "$GITHUB_OUTPUT"
@@ -153,7 +148,7 @@ jobs:
cp dist-manifest.json "$BUILD_MANIFEST_NAME"
- name: "Upload artifacts"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@v4
with:
name: artifacts-build-global
path: |
@@ -170,23 +165,22 @@ jobs:
if: ${{ always() && needs.plan.outputs.publishing == 'true' && (needs.build-global-artifacts.result == 'skipped' || needs.build-global-artifacts.result == 'success') && (needs.custom-build-binaries.result == 'skipped' || needs.custom-build-binaries.result == 'success') && (needs.custom-build-docker.result == 'skipped' || needs.custom-build-docker.result == 'success') }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
runs-on: "depot-ubuntu-latest-4"
runs-on: "ubuntu-20.04"
outputs:
val: ${{ steps.host.outputs.manifest }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@v4
with:
persist-credentials: false
submodules: recursive
- name: Install cached dist
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e
- name: Install cached cargo-dist
uses: actions/download-artifact@v4
with:
name: cargo-dist-cache
path: ~/.cargo/bin/
- run: chmod +x ~/.cargo/bin/dist
- run: chmod +x ~/.cargo/bin/cargo-dist
# Fetch artifacts from scratch-storage
- name: Fetch artifacts
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e
uses: actions/download-artifact@v4
with:
pattern: artifacts-*
path: target/distrib/
@@ -195,12 +189,12 @@ jobs:
- id: host
shell: bash
run: |
dist host ${{ needs.plan.outputs.tag-flag }} --steps=upload --steps=release --output-format=json > dist-manifest.json
cargo dist host ${{ needs.plan.outputs.tag-flag }} --steps=upload --steps=release --output-format=json > dist-manifest.json
echo "artifacts uploaded and released successfully"
cat dist-manifest.json
echo "manifest=$(jq -c "." dist-manifest.json)" >> "$GITHUB_OUTPUT"
- name: "Upload dist-manifest.json"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@v4
with:
# Overwrite the previous copy
name: artifacts-dist-manifest
@@ -246,17 +240,16 @@ jobs:
# still allowing individual publish jobs to skip themselves (for prereleases).
# "host" however must run to completion, no skipping allowed!
if: ${{ always() && needs.host.result == 'success' && (needs.custom-publish-pypi.result == 'skipped' || needs.custom-publish-pypi.result == 'success') && (needs.custom-publish-wasm.result == 'skipped' || needs.custom-publish-wasm.result == 'success') }}
runs-on: "depot-ubuntu-latest-4"
runs-on: "ubuntu-20.04"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- uses: actions/checkout@v4
with:
persist-credentials: false
submodules: recursive
# Create a GitHub Release while uploading all files to it
- name: "Download GitHub Artifacts"
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e
uses: actions/download-artifact@v4
with:
pattern: artifacts-*
path: artifacts


@@ -21,17 +21,15 @@ jobs:
contents: write
pull-requests: write
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
name: Checkout Ruff
with:
path: ruff
persist-credentials: true
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
name: Checkout typeshed
with:
repository: python/typeshed
path: typeshed
persist-credentials: false
- name: Setup git
run: |
git config --global user.name typeshedbot
@@ -39,13 +37,13 @@ jobs:
- name: Sync typeshed
id: sync
run: |
rm -rf ruff/crates/red_knot_vendored/vendor/typeshed
mkdir ruff/crates/red_knot_vendored/vendor/typeshed
cp typeshed/README.md ruff/crates/red_knot_vendored/vendor/typeshed
cp typeshed/LICENSE ruff/crates/red_knot_vendored/vendor/typeshed
cp -r typeshed/stdlib ruff/crates/red_knot_vendored/vendor/typeshed/stdlib
rm -rf ruff/crates/red_knot_vendored/vendor/typeshed/stdlib/@tests
git -C typeshed rev-parse HEAD > ruff/crates/red_knot_vendored/vendor/typeshed/source_commit.txt
rm -rf ruff/crates/red_knot_python_semantic/vendor/typeshed
mkdir ruff/crates/red_knot_python_semantic/vendor/typeshed
cp typeshed/README.md ruff/crates/red_knot_python_semantic/vendor/typeshed
cp typeshed/LICENSE ruff/crates/red_knot_python_semantic/vendor/typeshed
cp -r typeshed/stdlib ruff/crates/red_knot_python_semantic/vendor/typeshed/stdlib
rm -rf ruff/crates/red_knot_python_semantic/vendor/typeshed/stdlib/@tests
git -C typeshed rev-parse HEAD > ruff/crates/red_knot_python_semantic/vendor/typeshed/source_commit.txt
- name: Commit the changes
id: commit
if: ${{ steps.sync.outcome == 'success' }}
@@ -59,7 +57,7 @@ jobs:
run: |
cd ruff
git push --force origin typeshedbot/sync-typeshed
gh pr list --repo "$GITHUB_REPOSITORY" --head typeshedbot/sync-typeshed --json id --jq length | grep 1 && exit 0 # exit if there is existing pr
gh pr list --repo $GITHUB_REPOSITORY --head typeshedbot/sync-typeshed --json id --jq length | grep 1 && exit 0 # exit if there is existing pr
gh pr create --title "Sync vendored typeshed stubs" --body "Close and reopen this PR to trigger CI" --label "internal"
create-issue-on-failure:
@@ -70,7 +68,7 @@ jobs:
permissions:
issues: write
steps:
- uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
- uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
@@ -78,6 +76,5 @@ jobs:
owner: "astral-sh",
repo: "ruff",
title: `Automated typeshed sync failed on ${new Date().toDateString()}`,
body: "Run listed here: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
labels: ["bug", "red-knot"],
body: "Runs are listed here: https://github.com/astral-sh/ruff/actions/workflows/sync_typeshed.yaml",
})

.github/zizmor.yml vendored

@@ -1,19 +0,0 @@
# Configuration for the zizmor static analysis tool, run via pre-commit in CI
# https://woodruffw.github.io/zizmor/configuration/
#
# TODO: can we remove the ignores here so that our workflows are more secure?
rules:
dangerous-triggers:
ignore:
- pr-comment.yaml
cache-poisoning:
ignore:
- build-docker.yml
- publish-playground.yml
excessive-permissions:
# it's hard to test what the impact of removing these ignores would be
# without actually running the release workflow...
ignore:
- build-docker.yml
- publish-playground.yml
- publish-docs.yml

.gitignore vendored

@@ -29,10 +29,6 @@ tracing.folded
tracing-flamechart.svg
tracing-flamegraph.svg
# insta
*.rs.pending-snap
###
# Rust.gitignore
###


@@ -1 +0,0 @@
!/.github/


@@ -21,11 +21,3 @@ MD014: false
MD024:
# Allow when nested under different parents e.g. CHANGELOG.md
siblings_only: true
# MD046/code-block-style
#
# Ignore this because it conflicts with the code block style used in content
# tabs of mkdocs-material which is to add a blank line after the content title.
#
# Ref: https://github.com/astral-sh/ruff/pull/15011#issuecomment-2544790854
MD046: false


@@ -1,11 +1,9 @@
fail_fast: false
fail_fast: true
exclude: |
(?x)^(
.github/workflows/release.yml|
crates/red_knot_vendored/vendor/.*|
crates/red_knot_project/resources/.*|
crates/ruff_benchmark/resources/.*|
crates/red_knot_python_semantic/vendor/.*|
crates/red_knot_workspace/resources/.*|
crates/ruff_linter/resources/.*|
crates/ruff_linter/src/rules/.*/snapshots/.*|
crates/ruff_notebook/resources/.*|
@@ -18,23 +16,18 @@ exclude: |
)$
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: check-merge-conflict
- repo: https://github.com/abravalheri/validate-pyproject
rev: v0.24.1
rev: v0.19
hooks:
- id: validate-pyproject
- repo: https://github.com/executablebooks/mdformat
rev: 0.7.22
rev: 0.7.17
hooks:
- id: mdformat
additional_dependencies:
- mdformat-mkdocs==4.0.0
- mdformat-footnote==0.1.1
- mdformat-mkdocs
- mdformat-admon
exclude: |
(?x)^(
docs/formatter/black\.md
@@ -42,7 +35,7 @@ repos:
)$
- repo: https://github.com/igorshubovych/markdownlint-cli
rev: v0.44.0
rev: v0.41.0
hooks:
- id: markdownlint-fix
exclude: |
@@ -51,21 +44,8 @@ repos:
| docs/\w+\.md
)$
- repo: https://github.com/adamchainz/blacken-docs
rev: 1.19.1
hooks:
- id: blacken-docs
args: ["--pyi", "--line-length", "130"]
files: '^crates/.*/resources/mdtest/.*\.md'
exclude: |
(?x)^(
.*?invalid(_.+)*_syntax\.md
)$
additional_dependencies:
- black==25.1.0
- repo: https://github.com/crate-ci/typos
rev: v1.31.1
rev: v1.23.6
hooks:
- id: typos
@@ -79,7 +59,7 @@ repos:
pass_filenames: false # This makes it a lot faster
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.11.5
rev: v0.6.1
hooks:
- id: ruff-format
- id: ruff
@@ -88,43 +68,11 @@ repos:
require_serial: true
# Prettier
- repo: https://github.com/rbubley/mirrors-prettier
rev: v3.5.3
- repo: https://github.com/pre-commit/mirrors-prettier
rev: v3.1.0
hooks:
- id: prettier
types: [yaml]
# zizmor detects security vulnerabilities in GitHub Actions workflows.
# Additional configuration for the tool is found in `.github/zizmor.yml`
- repo: https://github.com/woodruffw/zizmor-pre-commit
rev: v1.5.2
hooks:
- id: zizmor
- repo: https://github.com/python-jsonschema/check-jsonschema
rev: 0.33.0
hooks:
- id: check-github-workflows
# `actionlint` hook, for verifying correct syntax in GitHub Actions workflows.
# Some additional configuration for `actionlint` can be found in `.github/actionlint.yaml`.
- repo: https://github.com/rhysd/actionlint
rev: v1.7.7
hooks:
- id: actionlint
stages:
# This hook is disabled by default, since it's quite slow.
# To run all hooks *including* this hook, use `uvx pre-commit run -a --hook-stage=manual`.
# To run *just* this hook, use `uvx pre-commit run -a actionlint --hook-stage=manual`.
- manual
args:
- "-ignore=SC2129" # ignorable stylistic lint from shellcheck
- "-ignore=SC2016" # another shellcheck lint: seems to have false positives?
additional_dependencies:
# actionlint has a shellcheck integration which extracts shell scripts in `run:` steps from GitHub Actions
# and checks these with shellcheck. This is arguably its most useful feature,
# but the integration only works if shellcheck is installed
- "github.com/wasilibs/go-shellcheck/cmd/shellcheck@v0.10.0"
ci:
skip: [cargo-fmt, dev-generate-all]


@@ -1,97 +1,5 @@
# Breaking Changes
## 0.11.0
This is a follow-up to release 0.10.0. Because of a mistake in the release process, the `requires-python` inference changes were not included in that release. Ruff 0.11.0 now includes this change as well as the stabilization of the preview behavior for `PGH004`.
- **Changes to how the Python version is inferred when a `target-version` is not specified** ([#16319](https://github.com/astral-sh/ruff/pull/16319))
In previous versions of Ruff, you could specify your Python version with:
- The `target-version` option in a `ruff.toml` file or the `[tool.ruff]` section of a `pyproject.toml` file.
- The `project.requires-python` field in a `pyproject.toml` file with a `[tool.ruff]` section.
These options worked well in most cases, and are still recommended for fine control of the Python version. However, because of the way Ruff discovers config files, `pyproject.toml` files without a `[tool.ruff]` section would be ignored, including the `requires-python` setting. Ruff would then use the default Python version (3.9 as of this writing) instead, which is surprising when you've attempted to request another version.
In v0.10, config discovery has been updated to address this issue:
- If Ruff finds a `ruff.toml` file without a `target-version`, it will check
for a `pyproject.toml` file in the same directory and respect its
`requires-python` version, even if it does not contain a `[tool.ruff]`
section.
- If Ruff finds a user-level configuration, the `requires-python` field of the closest `pyproject.toml` in a parent directory will take precedence.
- If there is no config file (`ruff.toml` or `pyproject.toml` with a
`[tool.ruff]` section) in the directory of the file being checked, Ruff will
search for the closest `pyproject.toml` in the parent directories and use its
`requires-python` setting.
## 0.10.0
- **Changes to how the Python version is inferred when a `target-version` is not specified** ([#16319](https://github.com/astral-sh/ruff/pull/16319))
Because of a mistake in the release process, the `requires-python` inference changes were not included in this release and instead shipped as part of 0.11.0.
You can find a description of this change in the 0.11.0 section.
- **Updated `TYPE_CHECKING` behavior** ([#16669](https://github.com/astral-sh/ruff/pull/16669))
Previously, Ruff only recognized type-checking blocks that tested the `typing.TYPE_CHECKING` symbol. Now, Ruff recognizes any local variable named `TYPE_CHECKING`. This release also removes support for the legacy `if 0:` and `if False:` type-checking checks. Use a local `TYPE_CHECKING` variable instead.
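As an illustration (a minimal sketch, not taken from the release notes themselves), the newly recognized pattern looks roughly like this:
```python
TYPE_CHECKING = False  # any local variable named TYPE_CHECKING is now recognized

if TYPE_CHECKING:
    import collections.abc  # imports here are treated as type-checking-only

def first(items: "collections.abc.Sequence[int]") -> int:
    return items[0]
```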
- **More robust noqa parsing** ([#16483](https://github.com/astral-sh/ruff/pull/16483))
The syntax for both file-level and in-line suppression comments has been unified and made more robust to certain errors. In most cases, this will result in more suppression comments being read by Ruff, but there are a few instances where previously read comments will now log an error to the user instead. Please refer to the documentation on [_Error suppression_](https://docs.astral.sh/ruff/linter/#error-suppression) for the full specification.
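For reference, the two suppression forms look like this (a minimal sketch; the linked documentation has the full specification):
```python
# ruff: noqa: E501
# ^ file-level suppression: disables E501 for the entire file.

import os  # noqa: F401
# ^ in-line suppression: silences F401 (unused import) on this line only.
```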
- **Avoid unnecessary parentheses around with statements with a single context manager and a trailing comment** ([#14005](https://github.com/astral-sh/ruff/pull/14005))
This change fixes a bug in the formatter where it introduced unnecessary parentheses around with statements with a single context manager and a trailing comment. This change may result in a change in formatting for some users.
- **Bump alpine default tag to 3.21 for derived Docker images** ([#16456](https://github.com/astral-sh/ruff/pull/16456))
Alpine 3.21 was released in Dec 2024 and is used in the official Alpine-based Python images. Now the ruff:alpine image will use 3.21 instead of 3.20 and ruff:alpine3.20 will no longer be updated.
- **\[`unsafe-markup-use`\]: `RUF035` has been recoded to `S704`** ([#15957](https://github.com/astral-sh/ruff/pull/15957))
## 0.9.0
Ruff now formats your code according to the 2025 style guide. As a result, your code might now get formatted differently. See the [changelog](./CHANGELOG.md#090) for a detailed list of changes.
## 0.8.0
- **Default to Python 3.9**
Ruff now defaults to Python 3.9 instead of 3.8 if no explicit Python version is configured using [`ruff.target-version`](https://docs.astral.sh/ruff/settings/#target-version) or [`project.requires-python`](https://packaging.python.org/en/latest/guides/writing-pyproject-toml/#python-requires) ([#13896](https://github.com/astral-sh/ruff/pull/13896))
- **Changed location of `pydoclint` diagnostics**
[`pydoclint`](https://docs.astral.sh/ruff/rules/#pydoclint-doc) diagnostics now point to the first line of the problematic docstring. Previously, this was not the case.
If you've opted into these preview rules but have them suppressed using
[`noqa`](https://docs.astral.sh/ruff/linter/#error-suppression) comments in
some places, this change may mean that you need to move the `noqa` suppression
comments. Most users should be unaffected by this change.
- **Use XDG (i.e. `~/.local/bin`) instead of the Cargo home directory in the standalone installer**
Previously, Ruff's installer used `$CARGO_HOME` or `~/.cargo/bin` for its target install directory. Now, Ruff will be installed into `$XDG_BIN_HOME`, `$XDG_DATA_HOME/../bin`, or `~/.local/bin` (in that order).
This change is only relevant to users of the standalone Ruff installer (using the shell or PowerShell script). If you installed Ruff using uv or pip, you should be unaffected.
- **Changes to the line width calculation**
Ruff now uses a new version of the [unicode-width](https://github.com/unicode-rs/unicode-width) Rust crate to calculate the line width. In very rare cases, this may lead to lines containing Unicode characters being reformatted, or being considered too long when they were not before ([`E501`](https://docs.astral.sh/ruff/rules/line-too-long/)).
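For example (an illustrative snippet, not a case from the changelog), a line containing East Asian full-width characters is now measured by display width rather than by character count:
```python
# Full-width characters occupy two columns under unicode-width, so this
# string is 8 characters long but 16 columns wide for E501 purposes:
greeting = "こんにちは、世界"
```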
## 0.7.0
- The pytest rules `PT001` and `PT023` now default to omitting the decorator parentheses when there are no arguments
([#12838](https://github.com/astral-sh/ruff/pull/12838), [#13292](https://github.com/astral-sh/ruff/pull/13292)).
This was a change that we attempted to make in Ruff v0.6.0, but only partially made due to an error on our part.
See the [blog post](https://astral.sh/blog/ruff-v0.7.0) for more details.
- The `useless-try-except` rule (in our `tryceratops` category) has been recoded from `TRY302` to
`TRY203` ([#13502](https://github.com/astral-sh/ruff/pull/13502)). This ensures Ruff's code is consistent with
the same rule in the [`tryceratops`](https://github.com/guilatrova/tryceratops) linter.
- The `lint.allow-unused-imports` setting has been removed ([#13677](https://github.com/astral-sh/ruff/pull/13677)). Use
[`lint.pyflakes.allow-unused-imports`](https://docs.astral.sh/ruff/settings/#lint_pyflakes_allowed-unused-imports)
instead.
## 0.6.0
- Detect imports in `src` layouts by default for `isort` rules ([#12848](https://github.com/astral-sh/ruff/pull/12848))
@@ -246,7 +154,7 @@ flag or `unsafe-fixes` configuration option can be used to enable unsafe fixes.
See the [docs](https://docs.astral.sh/ruff/configuration/#fix-safety) for details.
### Remove formatter-conflicting rules from the default rule set ([#7900](https://github.com/astral-sh/ruff/pull/7900))
Previously, Ruff enabled all implemented rules in Pycodestyle (`E`) by default. Ruff now only includes the
Pycodestyle prefixes `E4`, `E7`, and `E9` to exclude rules that conflict with automatic formatters. Consequently,
@@ -259,8 +167,8 @@ This change only affects those using Ruff under its default rule set. Users that
### Remove support for emoji identifiers ([#7212](https://github.com/astral-sh/ruff/pull/7212))
Previously, Ruff supported non-standards-compliant emoji identifiers such as `📦 = 1`.
We decided to remove this non-standard language extension. Ruff now reports syntax errors for invalid emoji identifiers in your code, the same as CPython.
Previously, Ruff supported the non-standard compliant emoji identifiers e.g. `📦 = 1`.
We decided to remove this non-standard language extension, and Ruff now reports syntax errors for emoji identifiers in your code, the same as CPython.
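To illustrate (a hypothetical snippet, not from the changelog):
```python
# Previously accepted by Ruff as a non-standard language extension:
#   📦 = 1
# Ruff now reports a syntax error for such identifiers, matching CPython;
# use a standard identifier instead:
package = 1
```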
### Improved GitLab fingerprints ([#7203](https://github.com/astral-sh/ruff/pull/7203))

File diff suppressed because it is too large.


@@ -29,14 +29,16 @@ You'll also need [Insta](https://insta.rs/docs/) to update snapshot tests:
cargo install cargo-insta
```
You'll need [uv](https://docs.astral.sh/uv/getting-started/installation/) (or `pipx` and `pip`) to
run Python utility commands.
And you'll need pre-commit to run some validation checks:
```shell
pipx install pre-commit # or `pip install pre-commit` if you have a virtualenv
```
You can optionally install pre-commit hooks to automatically run the validation checks
when making a commit:
```shell
uv tool install pre-commit
pre-commit install
```
@@ -64,7 +66,7 @@ and that it passes both the lint and test validation checks:
```shell
cargo clippy --workspace --all-targets --all-features -- -D warnings # Rust linting
RUFF_UPDATE_SCHEMA=1 cargo test # Rust testing and updating ruff.schema.json
uvx pre-commit run --all-files --show-diff-on-failure # Rust and Python formatting, Markdown and Python linting, etc.
pre-commit run --all-files --show-diff-on-failure # Rust and Python formatting, Markdown and Python linting, etc.
```
These checks will run on GitHub Actions when you open your pull request, but running them locally
@@ -139,7 +141,7 @@ At a high level, the steps involved in adding a new lint rule are as follows:
1. Create a file for your rule (e.g., `crates/ruff_linter/src/rules/flake8_bugbear/rules/assert_false.rs`).
1. In that file, define a violation struct (e.g., `pub struct AssertFalse`). You can grep for
`#[derive(ViolationMetadata)]` to see examples.
`#[violation]` to see examples.
1. In that file, define a function that adds the violation to the diagnostic list as appropriate
(e.g., `pub(crate) fn assert_false`) based on whatever inputs are required for the rule (e.g.,
@@ -265,20 +267,26 @@ To preview any changes to the documentation locally:
1. Install the [Rust toolchain](https://www.rust-lang.org/tools/install).
1. Install MkDocs and Material for MkDocs with:
```shell
pip install -r docs/requirements.txt
```
1. Generate the MkDocs site with:
```shell
uv run --no-project --isolated --with-requirements docs/requirements.txt scripts/generate_mkdocs.py
python scripts/generate_mkdocs.py
```
1. Run the development server with:
```shell
# For contributors.
uvx --with-requirements docs/requirements.txt -- mkdocs serve -f mkdocs.public.yml
mkdocs serve -f mkdocs.public.yml
# For members of the Astral org, which has access to MkDocs Insiders via sponsorship.
uvx --with-requirements docs/requirements-insiders.txt -- mkdocs serve -f mkdocs.insiders.yml
mkdocs serve -f mkdocs.insiders.yml
```
The documentation should then be available locally at
@@ -360,8 +368,9 @@ GitHub Actions will run your changes against a number of real-world projects fro
report on any linter or formatter differences. You can also run those checks locally via:
```shell
uvx --from ./python/ruff-ecosystem ruff-ecosystem check ruff "./target/debug/ruff"
uvx --from ./python/ruff-ecosystem ruff-ecosystem format ruff "./target/debug/ruff"
pip install -e ./python/ruff-ecosystem
ruff-ecosystem check ruff "./target/debug/ruff"
ruff-ecosystem format ruff "./target/debug/ruff"
```
See the [ruff-ecosystem package](https://github.com/astral-sh/ruff/tree/main/python/ruff-ecosystem) for more details.
@@ -388,18 +397,12 @@ which makes it a good target for benchmarking.
git clone --branch 3.10 https://github.com/python/cpython.git crates/ruff_linter/resources/test/cpython
```
Install `hyperfine`:
```shell
cargo install hyperfine
```
To benchmark the release build:
```shell
cargo build --release && hyperfine --warmup 10 \
"./target/release/ruff check ./crates/ruff_linter/resources/test/cpython/ --no-cache -e" \
"./target/release/ruff check ./crates/ruff_linter/resources/test/cpython/ -e"
"./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache -e" \
"./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ -e"
Benchmark 1: ./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache
Time (mean ± σ): 293.8 ms ± 3.2 ms [User: 2384.6 ms, System: 90.3 ms]
@@ -418,7 +421,7 @@ To benchmark against the ecosystem's existing tools:
```shell
hyperfine --ignore-failure --warmup 5 \
"./target/release/ruff check ./crates/ruff_linter/resources/test/cpython/ --no-cache" \
"./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache" \
"pyflakes crates/ruff_linter/resources/test/cpython" \
"autoflake --recursive --expand-star-imports --remove-all-unused-imports --remove-unused-variables --remove-duplicate-keys resources/test/cpython" \
"pycodestyle crates/ruff_linter/resources/test/cpython" \
@@ -464,10 +467,10 @@ To benchmark a subset of rules, e.g. `LineTooLong` and `DocLineTooLong`:
```shell
cargo build --release && hyperfine --warmup 10 \
"./target/release/ruff check ./crates/ruff_linter/resources/test/cpython/ --no-cache -e --select W505,E501"
"./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache -e --select W505,E501"
```
You can run `uv venv --project ./scripts/benchmarks`, activate the venv and then run `uv sync --project ./scripts/benchmarks` to create a working environment for the
You can run `poetry install` from `./scripts/benchmarks` to create a working environment for the
above. All reported benchmarks were computed using the versions specified by
`./scripts/benchmarks/pyproject.toml` on Python 3.11.
@@ -521,12 +524,10 @@ You can run the benchmarks with
cargo benchmark
```
`cargo benchmark` is an alias for `cargo bench -p ruff_benchmark --bench linter --bench formatter --`
#### Benchmark-driven Development
Ruff uses [Criterion.rs](https://bheisler.github.io/criterion.rs/book/) for benchmarks. You can use
`--save-baseline=<name>` to store an initial baseline benchmark (e.g., on `main`) and then use
`--save-baseline=<name>` to store an initial baseline benchmark (e.g. on `main`) and then use
`--benchmark=<name>` to compare against that benchmark. Criterion will print a message telling you
if the benchmark improved/regressed compared to that baseline.
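For instance, a typical two-step session might look like this (a sketch; `main` is an arbitrary baseline name, using Criterion's `--save-baseline`/`--baseline` flags):

```shell
# On the main branch: record a baseline named "main".
cargo bench -p ruff_benchmark --bench linter -- --save-baseline=main
# On your feature branch: compare against the saved baseline.
cargo bench -p ruff_benchmark --bench linter -- --baseline=main
```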
@@ -561,7 +562,7 @@ cargo install critcmp
#### Tips
- Use `cargo bench -p ruff_benchmark <filter>` to only run specific benchmarks. For example: `cargo bench -p ruff_benchmark lexer`
- Use `cargo bench -p ruff_benchmark <filter>` to only run specific benchmarks. For example: `cargo benchmark lexer`
to only run the lexer benchmarks.
- Use `cargo bench -p ruff_benchmark -- --quiet` for a more cleaned up output (without statistical relevance)
- Use `cargo bench -p ruff_benchmark -- --quick` to get faster results (more prone to noise)
@@ -678,9 +679,9 @@ utils with it:
23 Newline 24
```
- `cargo dev print-cst <file>`: Print the CST of a Python file using
- `cargo dev print-cst <file>`: Print the CST of a python file using
[LibCST](https://github.com/Instagram/LibCST), which is used in addition to the RustPython parser
in Ruff. For example, for `if True: pass # comment`, everything, including the whitespace, is represented:
in Ruff. E.g. for `if True: pass # comment` everything including the whitespace is represented:
```text
Module {
@@ -863,7 +864,7 @@ each configuration file.
The package root is used to determine a file's "module path". Consider, again, `baz.py`. In that
case, `./my_project/src/foo` was identified as the package root, so the module path for `baz.py`
would resolve to `foo.bar.baz` — as computed by taking the relative path from the package root
(inclusive of the root itself). The module path can be thought of as "the path you would use to
import the module" (e.g., `import foo.bar.baz`).

Cargo.lock (generated, 2648 changed lines): diff suppressed because it is too large.


@@ -4,7 +4,7 @@ resolver = "2"
[workspace.package]
edition = "2021"
rust-version = "1.84"
rust-version = "1.76"
homepage = "https://docs.astral.sh/ruff"
documentation = "https://docs.astral.sh/ruff"
repository = "https://github.com/astral-sh/ruff"
@@ -13,12 +13,10 @@ license = "MIT"
[workspace.dependencies]
ruff = { path = "crates/ruff" }
ruff_annotate_snippets = { path = "crates/ruff_annotate_snippets" }
ruff_cache = { path = "crates/ruff_cache" }
ruff_db = { path = "crates/ruff_db", default-features = false }
ruff_db = { path = "crates/ruff_db" }
ruff_diagnostics = { path = "crates/ruff_diagnostics" }
ruff_formatter = { path = "crates/ruff_formatter" }
ruff_graph = { path = "crates/ruff_graph" }
ruff_index = { path = "crates/ruff_index" }
ruff_linter = { path = "crates/ruff_linter" }
ruff_macros = { path = "crates/ruff_macros" }
@@ -35,66 +33,52 @@ ruff_python_trivia = { path = "crates/ruff_python_trivia" }
ruff_server = { path = "crates/ruff_server" }
ruff_source_file = { path = "crates/ruff_source_file" }
ruff_text_size = { path = "crates/ruff_text_size" }
red_knot_vendored = { path = "crates/red_knot_vendored" }
ruff_workspace = { path = "crates/ruff_workspace" }
red_knot_ide = { path = "crates/red_knot_ide" }
red_knot_project = { path = "crates/red_knot_project", default-features = false }
red_knot_python_semantic = { path = "crates/red_knot_python_semantic" }
red_knot_server = { path = "crates/red_knot_server" }
red_knot_test = { path = "crates/red_knot_test" }
red_knot_workspace = { path = "crates/red_knot_workspace" }
aho-corasick = { version = "1.1.3" }
anstream = { version = "0.6.18" }
anstyle = { version = "1.0.10" }
annotate-snippets = { version = "0.9.2", features = ["color"] }
anyhow = { version = "1.0.80" }
assert_fs = { version = "1.1.0" }
argfile = { version = "0.2.0" }
bincode = { version = "1.3.3" }
bitflags = { version = "2.5.0" }
bstr = { version = "1.9.1" }
cachedir = { version = "0.3.1" }
camino = { version = "1.1.7" }
chrono = { version = "0.4.35", default-features = false, features = ["clock"] }
clap = { version = "4.5.3", features = ["derive"] }
clap_complete_command = { version = "0.6.0" }
clearscreen = { version = "4.0.0" }
clearscreen = { version = "3.0.0" }
codspeed-criterion-compat = { version = "2.6.0", default-features = false }
colored = { version = "3.0.0" }
colored = { version = "2.1.0" }
console_error_panic_hook = { version = "0.1.7" }
console_log = { version = "1.0.0" }
countme = { version = "3.0.1" }
compact_str = "0.9.0"
compact_str = "0.8.0"
criterion = { version = "0.5.1", default-features = false }
crossbeam = { version = "0.8.4" }
dashmap = { version = "6.0.1" }
dir-test = { version = "0.4.0" }
dunce = { version = "1.0.5" }
drop_bomb = { version = "0.1.5" }
env_logger = { version = "0.11.0" }
etcetera = { version = "0.10.0" }
fern = { version = "0.7.0" }
etcetera = { version = "0.8.0" }
fern = { version = "0.6.1" }
filetime = { version = "0.2.23" }
getrandom = { version = "0.3.1" }
glob = { version = "0.3.1" }
globset = { version = "0.4.14" }
globwalk = { version = "0.9.1" }
hashbrown = { version = "0.15.0", default-features = false, features = [
"raw-entry",
"equivalent",
"inline-more",
] }
hashbrown = "0.14.3"
ignore = { version = "0.4.22" }
imara-diff = { version = "0.1.5" }
imperative = { version = "1.0.4" }
indexmap = { version = "2.6.0" }
indicatif = { version = "0.17.8" }
indoc = { version = "2.0.4" }
insta = { version = "1.35.1" }
insta-cmd = { version = "0.6.0" }
is-macro = { version = "0.3.5" }
is-wsl = { version = "0.4.0" }
itertools = { version = "0.14.0" }
jiff = { version = "0.2.0" }
itertools = { version = "0.13.0" }
js-sys = { version = "0.3.69" }
jod-thread = { version = "0.1.2" }
libc = { version = "0.2.153" }
@@ -102,29 +86,29 @@ libcst = { version = "1.1.0", default-features = false }
log = { version = "0.4.17" }
lsp-server = { version = "0.7.6" }
lsp-types = { git = "https://github.com/astral-sh/lsp-types.git", rev = "3512a9f", features = [
"proposed",
"proposed",
] }
matchit = { version = "0.8.1" }
memchr = { version = "2.7.1" }
mimalloc = { version = "0.1.39" }
natord = { version = "1.0.9" }
notify = { version = "8.0.0" }
notify = { version = "6.1.1" }
once_cell = { version = "1.19.0" }
ordermap = { version = "0.5.0" }
path-absolutize = { version = "3.1.1" }
path-slash = { version = "0.2.1" }
pathdiff = { version = "0.2.1" }
pep440_rs = { version = "0.7.1" }
pep440_rs = { version = "0.6.0", features = ["serde"] }
pretty_assertions = "1.3.0"
proc-macro2 = { version = "1.0.79" }
pyproject-toml = { version = "0.13.4" }
quick-junit = { version = "0.5.0" }
pyproject-toml = { version = "0.9.0" }
quick-junit = { version = "0.4.0" }
quote = { version = "1.0.23" }
rand = { version = "0.9.0" }
rand = { version = "0.8.5" }
rayon = { version = "1.10.0" }
regex = { version = "1.10.2" }
rustc-hash = { version = "2.0.0" }
# When updating salsa, make sure to also update the revision in `fuzz/Cargo.toml`
salsa = { git = "https://github.com/salsa-rs/salsa.git", rev = "87bf6b6c2d5f6479741271da73bd9d30c2580c26" }
salsa = { git = "https://github.com/salsa-rs/salsa.git", rev = "f608ff8b24f07706492027199f51132244034f29" }
schemars = { version = "0.8.16" }
seahash = { version = "4.1.0" }
serde = { version = "1.0.197", features = ["derive"] }
@@ -132,48 +116,38 @@ serde-wasm-bindgen = { version = "0.6.4" }
serde_json = { version = "1.0.113" }
serde_test = { version = "1.0.152" }
serde_with = { version = "3.6.0", default-features = false, features = [
"macros",
"macros",
] }
shellexpand = { version = "3.0.0" }
similar = { version = "2.4.0", features = ["inline"] }
smallvec = { version = "1.13.2" }
snapbox = { version = "0.6.0", features = [
"diff",
"term-svg",
"cmd",
"examples",
] }
static_assertions = "1.1.0"
strum = { version = "0.27.0", features = ["strum_macros"] }
strum_macros = { version = "0.27.0" }
strum = { version = "0.26.0", features = ["strum_macros"] }
strum_macros = { version = "0.26.0" }
syn = { version = "2.0.55" }
tempfile = { version = "3.9.0" }
test-case = { version = "3.3.1" }
thiserror = { version = "2.0.0" }
thiserror = { version = "1.0.58" }
tikv-jemallocator = { version = "0.6.0" }
toml = { version = "0.8.11" }
tracing = { version = "0.1.40" }
tracing-flame = { version = "0.2.0" }
tracing-indicatif = { version = "0.3.6" }
tracing-log = { version = "0.2.0" }
tracing-subscriber = { version = "0.3.18", default-features = false, features = [
"env-filter",
"fmt",
] }
tracing-subscriber = { version = "0.3.18", default-features = false, features = ["env-filter", "fmt"] }
tracing-tree = { version = "0.4.0" }
tryfn = { version = "0.2.1" }
typed-arena = { version = "2.0.2" }
unic-ucd-category = { version = "0.9" }
unicode-ident = { version = "1.0.12" }
unicode-width = { version = "0.2.0" }
unicode-width = { version = "0.1.11" }
unicode_names2 = { version = "1.2.2" }
unicode-normalization = { version = "0.1.23" }
ureq = { version = "2.9.6" }
url = { version = "2.5.0" }
uuid = { version = "1.6.1", features = [
"v4",
"fast-rng",
"macro-diagnostics",
"js",
"v4",
"fast-rng",
"macro-diagnostics",
"js",
] }
walkdir = { version = "2.3.2" }
wasm-bindgen = { version = "0.2.92" }
@@ -181,17 +155,10 @@ wasm-bindgen-test = { version = "0.3.42" }
wild = { version = "2" }
zip = { version = "0.6.6", default-features = false }
[workspace.metadata.cargo-shear]
ignored = ["getrandom"]
[workspace.lints.rust]
unsafe_code = "warn"
unreachable_pub = "warn"
unexpected_cfgs = { level = "warn", check-cfg = [
"cfg(fuzzing)",
"cfg(codspeed)",
] }
unexpected_cfgs = { level = "warn", check-cfg = ["cfg(fuzzing)", "cfg(codspeed)"] }
[workspace.lints.clippy]
pedantic = { level = "warn", priority = -2 }
@@ -207,10 +174,8 @@ missing_panics_doc = "allow"
module_name_repetitions = "allow"
must_use_candidate = "allow"
similar_names = "allow"
single_match_else = "allow"
too_many_lines = "allow"
needless_continue = "allow" # An explicit continue can be more readable, especially if the alternative is an empty block.
# Without the hashes we run into a `rustfmt` bug in some snapshot tests, see #13250
# To allow `#[allow(clippy::all)]` in `crates/ruff_python_parser/src/python.rs`.
needless_raw_string_hashes = "allow"
# Disallowed restriction lints
print_stdout = "warn"
@@ -223,13 +188,6 @@ get_unwrap = "warn"
rc_buffer = "warn"
rc_mutex = "warn"
rest_pat_in_fully_bound_structs = "warn"
# nursery rules
redundant_clone = "warn"
debug_assert_with_mut_call = "warn"
unused_peekable = "warn"
# Diagnostics are not actionable: Enable once https://github.com/rust-lang/rust-clippy/issues/13774 is resolved.
large_stack_arrays = "allow"
[profile.release]
# Note that we set these explicitly, and these values
@@ -269,12 +227,12 @@ debug = 1
[profile.dist]
inherits = "release"
# Config for 'dist'
# Config for 'cargo dist'
[workspace.metadata.dist]
# The preferred dist version to use in CI (Cargo.toml SemVer syntax)
cargo-dist-version = "0.28.4-prerelease.1"
# The preferred cargo-dist version to use in CI (Cargo.toml SemVer syntax)
cargo-dist-version = "0.18.0"
# CI backends to support
ci = "github"
ci = ["github"]
# The installers to generate for each app
installers = ["shell", "powershell"]
# The archive format to use for windows builds (defaults .zip)
@@ -283,33 +241,33 @@ windows-archive = ".zip"
unix-archive = ".tar.gz"
# Target platforms to build apps for (Rust target-triple syntax)
targets = [
"aarch64-apple-darwin",
"aarch64-pc-windows-msvc",
"aarch64-unknown-linux-gnu",
"aarch64-unknown-linux-musl",
"arm-unknown-linux-musleabihf",
"armv7-unknown-linux-gnueabihf",
"armv7-unknown-linux-musleabihf",
"i686-pc-windows-msvc",
"i686-unknown-linux-gnu",
"i686-unknown-linux-musl",
"powerpc64-unknown-linux-gnu",
"powerpc64le-unknown-linux-gnu",
"s390x-unknown-linux-gnu",
"x86_64-apple-darwin",
"x86_64-pc-windows-msvc",
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"aarch64-apple-darwin",
"aarch64-pc-windows-msvc",
"aarch64-unknown-linux-gnu",
"aarch64-unknown-linux-musl",
"arm-unknown-linux-musleabihf",
"armv7-unknown-linux-gnueabihf",
"armv7-unknown-linux-musleabihf",
"i686-pc-windows-msvc",
"i686-unknown-linux-gnu",
"i686-unknown-linux-musl",
"powerpc64-unknown-linux-gnu",
"powerpc64le-unknown-linux-gnu",
"s390x-unknown-linux-gnu",
"x86_64-apple-darwin",
"x86_64-pc-windows-msvc",
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
]
# Whether to auto-include files like READMEs, LICENSEs, and CHANGELOGs (default true)
auto-includes = false
# Whether dist should create a Github Release or use an existing draft
# Whether cargo-dist should create a GitHub Release or use an existing draft
create-release = true
# Which actions to run on pull requests
# Publish jobs to run in CI
pr-run-mode = "skip"
# Whether CI should trigger releases with dispatches instead of tag pushes
dispatch-releases = true
# Which phase dist should use to create the GitHub release
# The stage during which the GitHub Release should be created
github-release = "announce"
# Whether CI should include auto-generated code to build local artifacts
build-local-artifacts = false
@@ -317,24 +275,9 @@ build-local-artifacts = false
local-artifacts-jobs = ["./build-binaries", "./build-docker"]
# Publish jobs to run in CI
publish-jobs = ["./publish-pypi", "./publish-wasm"]
# Post-announce jobs to run in CI
post-announce-jobs = [
"./notify-dependents",
"./publish-docs",
"./publish-playground",
]
# Announcement jobs to run in CI
post-announce-jobs = ["./notify-dependents", "./publish-docs", "./publish-playground"]
# Custom permissions for GitHub Jobs
github-custom-job-permissions = { "build-docker" = { packages = "write", contents = "read" }, "publish-wasm" = { contents = "read", id-token = "write", packages = "write" } }
# Whether to install an updater program
install-updater = false
# Path that installers should place binaries in
install-path = ["$XDG_BIN_HOME/", "$XDG_DATA_HOME/../bin", "~/.local/bin"]
[workspace.metadata.dist.github-custom-runners]
global = "depot-ubuntu-latest-4"
[workspace.metadata.dist.github-action-commits]
"actions/checkout" = "11bd71901bbe5b1630ceea73d27597364c9af683" # v4
"actions/upload-artifact" = "ea165f8d65b6e75b540449e92b4886f43607fa02" # v4.6.2
"actions/download-artifact" = "95815c38cf2ff2164869cbab79da8d1f422bc89e" # v4.2.1
"actions/attest-build-provenance" = "c074443f1aee8d4aeeae555aebba3282517141b2" #v2.2.3


@@ -1,4 +1,4 @@
FROM --platform=$BUILDPLATFORM ubuntu AS build
FROM --platform=$BUILDPLATFORM ubuntu as build
ENV HOME="/root"
WORKDIR $HOME


@@ -110,28 +110,15 @@ For more, see the [documentation](https://docs.astral.sh/ruff/).
1. [Who's Using Ruff?](#whos-using-ruff)
1. [License](#license)
## Getting Started<a id="getting-started"></a>
## Getting Started
For more, see the [documentation](https://docs.astral.sh/ruff/).
### Installation
Ruff is available as [`ruff`](https://pypi.org/project/ruff/) on PyPI.
Invoke Ruff directly with [`uvx`](https://docs.astral.sh/uv/):
Ruff is available as [`ruff`](https://pypi.org/project/ruff/) on PyPI:
```shell
uvx ruff check # Lint all files in the current directory.
uvx ruff format # Format all files in the current directory.
```
Or install Ruff with `uv` (recommended), `pip`, or `pipx`:
```shell
# With uv.
uv tool install ruff@latest # Install Ruff globally.
uv add --dev ruff # Or add Ruff to your project.
# With pip.
pip install ruff
@@ -149,8 +136,8 @@ curl -LsSf https://astral.sh/ruff/install.sh | sh
powershell -c "irm https://astral.sh/ruff/install.ps1 | iex"
# For a specific version.
curl -LsSf https://astral.sh/ruff/0.11.6/install.sh | sh
powershell -c "irm https://astral.sh/ruff/0.11.6/install.ps1 | iex"
curl -LsSf https://astral.sh/ruff/0.6.1/install.sh | sh
powershell -c "irm https://astral.sh/ruff/0.6.1/install.ps1 | iex"
```
You can also install Ruff via [Homebrew](https://formulae.brew.sh/formula/ruff), [Conda](https://anaconda.org/conda-forge/ruff),
@@ -183,7 +170,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com/) hook via [`ruff
```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.11.6
rev: v0.6.1
hooks:
# Run the linter.
- id: ruff
@@ -195,7 +182,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com/) hook via [`ruff
Ruff can also be used as a [VS Code extension](https://github.com/astral-sh/ruff-vscode) or with [various other editors](https://docs.astral.sh/ruff/editors/setup).
Ruff can also be used as a [GitHub Action](https://github.com/features/actions) via
[`ruff-action`](https://github.com/astral-sh/ruff-action):
[`ruff-action`](https://github.com/chartboost/ruff-action):
```yaml
name: Ruff
@@ -205,10 +192,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: astral-sh/ruff-action@v3
- uses: chartboost/ruff-action@v1
```
### Configuration<a id="configuration"></a>
### Configuration
Ruff can be configured through a `pyproject.toml`, `ruff.toml`, or `.ruff.toml` file (see:
[_Configuration_](https://docs.astral.sh/ruff/configuration/), or [_Settings_](https://docs.astral.sh/ruff/settings/)
@@ -251,8 +238,8 @@ exclude = [
line-length = 88
indent-width = 4
# Assume Python 3.9
target-version = "py39"
# Assume Python 3.8
target-version = "py38"
[lint]
# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
@@ -304,7 +291,7 @@ features that may change prior to stabilization.
See `ruff help` for more on Ruff's top-level commands, or `ruff help check` and `ruff help format`
for more on the linting and formatting commands, respectively.
## Rules<a id="rules"></a>
## Rules
<!-- Begin section: Rules -->
@@ -380,21 +367,21 @@ quality tools, including:
For a complete enumeration of the supported rules, see [_Rules_](https://docs.astral.sh/ruff/rules/).
## Contributing<a id="contributing"></a>
## Contributing
Contributions are welcome and highly appreciated. To get started, check out the
[**contributing guidelines**](https://docs.astral.sh/ruff/contributing/).
You can also join us on [**Discord**](https://discord.com/invite/astral-sh).
## Support<a id="support"></a>
## Support
Having trouble? Check out the existing issues on [**GitHub**](https://github.com/astral-sh/ruff/issues),
or feel free to [**open a new one**](https://github.com/astral-sh/ruff/issues/new).
You can also ask for help on [**Discord**](https://discord.com/invite/astral-sh).
## Acknowledgements<a id="acknowledgements"></a>
## Acknowledgements
Ruff's linter draws on both the APIs and implementation details of many other
tools in the Python ecosystem, especially [Flake8](https://github.com/PyCQA/flake8), [Pyflakes](https://github.com/PyCQA/pyflakes),
@@ -418,7 +405,7 @@ Ruff is the beneficiary of a large number of [contributors](https://github.com/a
Ruff is released under the MIT license.
## Who's Using Ruff?<a id="whos-using-ruff"></a>
## Who's Using Ruff?
Ruff is used by a number of major open-source projects and companies, including:
@@ -430,7 +417,6 @@ Ruff is used by a number of major open-source projects and companies, including:
- [Babel](https://github.com/python-babel/babel)
- Benchling ([Refac](https://github.com/benchling/refac))
- [Bokeh](https://github.com/bokeh/bokeh)
- CrowdCent ([NumerBlox](https://github.com/crowdcent/numerblox)) <!-- typos: ignore -->
- [Cryptography (PyCA)](https://github.com/pyca/cryptography)
- CERN ([Indico](https://getindico.io/))
- [DVC](https://github.com/iterative/dvc)
@@ -452,7 +438,6 @@ Ruff is used by a number of major open-source projects and companies, including:
- ING Bank ([popmon](https://github.com/ing-bank/popmon), [probatus](https://github.com/ing-bank/probatus))
- [Ibis](https://github.com/ibis-project/ibis)
- [ivy](https://github.com/unifyai/ivy)
- [JAX](https://github.com/jax-ml/jax)
- [Jupyter](https://github.com/jupyter-server/jupyter_server)
- [Kraken Tech](https://kraken.tech/)
- [LangChain](https://github.com/hwchase17/langchain)
@@ -539,7 +524,7 @@ If you're using Ruff, consider adding the Ruff badge to your project's `README.m
<a href="https://github.com/astral-sh/ruff"><img src="https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json" alt="Ruff" style="max-width:100%;"></a>
```
## License<a id="license"></a>
## License
This repository is licensed under the [MIT License](https://github.com/astral-sh/ruff/blob/main/LICENSE)


@@ -1,15 +0,0 @@
# Security policy
## Reporting a vulnerability
If you have found a possible vulnerability, please email `security at astral dot sh`.
## Bug bounties
While we sincerely appreciate and encourage reports of suspected security problems, please note that
Astral does not currently run any bug bounty programs.
## Vulnerability disclosures
Critical vulnerabilities will be disclosed via GitHub's
[security advisory](https://github.com/astral-sh/ruff/security) system.


@@ -1,10 +1,6 @@
[files]
# https://github.com/crate-ci/typos/issues/868
extend-exclude = [
"crates/red_knot_vendored/vendor/**/*",
"**/resources/**/*",
"**/snapshots/**/*",
]
extend-exclude = ["crates/red_knot_python_semantic/vendor/**/*", "**/resources/**/*", "**/snapshots/**/*"]
[default.extend-words]
"arange" = "arange" # e.g. `numpy.arange`
@@ -12,22 +8,14 @@ hel = "hel"
whos = "whos"
spawnve = "spawnve"
ned = "ned"
pn = "pn" # `import panel as pn` is a thing
pn = "pn" # `import panel as pd` is a thing
poit = "poit"
BA = "BA" # acronym for "Bad Allowed", used in testing.
jod = "jod" # e.g., `jod-thread`
Numer = "Numer" # Library name 'NumerBlox' in "Who's Using Ruff?"
[default]
extend-ignore-re = [
# Line ignore with trailing "spellchecker:disable-line"
"(?Rm)^.*#\\s*spellchecker:disable-line$",
"LICENSEs",
# Various third party dependencies uses `typ` as struct field names (e.g., lsp_types::LogMessageParams)
"typ",
# TODO: Remove this once the `TYP` redirects are removed from `rule_redirects.rs`
"TYP",
# Line ignore with trailing "spellchecker:disable-line"
"(?Rm)^.*#\\s*spellchecker:disable-line$",
"LICENSEs",
]
[default.extend-identifiers]
"FrIeNdLy" = "FrIeNdLy"


@@ -1,25 +1,21 @@
doc-valid-idents = [
"..",
"CodeQL",
"FastAPI",
"IPython",
"LangChain",
"LibCST",
"McCabe",
"NumPy",
"SCREAMING_SNAKE_CASE",
"SQLAlchemy",
"StackOverflow",
"PyCharm",
"SNMPv1",
"SNMPv2",
"SNMPv3",
"PyFlakes"
"..",
"CodeQL",
"FastAPI",
"IPython",
"LangChain",
"LibCST",
"McCabe",
"NumPy",
"SCREAMING_SNAKE_CASE",
"SQLAlchemy",
"StackOverflow",
"PyCharm",
]
ignore-interior-mutability = [
# Interned is read-only. The wrapped `Rc` never gets updated.
"ruff_formatter::format_element::Interned",
# The expression is read-only.
"ruff_python_ast::hashable::HashableExpr",
# Interned is read-only. The wrapped `Rc` never gets updated.
"ruff_formatter::format_element::Interned",
# The expression is read-only.
"ruff_python_ast::hashable::HashableExpr",
]


@@ -13,37 +13,28 @@ license.workspace = true
[dependencies]
red_knot_python_semantic = { workspace = true }
red_knot_project = { workspace = true, features = ["zstd"] }
red_knot_workspace = { workspace = true }
red_knot_server = { workspace = true }
ruff_db = { workspace = true, features = ["os", "cache"] }
ruff_python_ast = { workspace = true }
anyhow = { workspace = true }
argfile = { workspace = true }
chrono = { workspace = true }
clap = { workspace = true, features = ["wrap_help"] }
colored = { workspace = true }
countme = { workspace = true, features = ["enable"] }
crossbeam = { workspace = true }
ctrlc = { version = "3.4.4" }
jiff = { workspace = true }
rayon = { workspace = true }
salsa = { workspace = true }
tracing = { workspace = true, features = ["release_max_level_debug"] }
tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] }
tracing-flame = { workspace = true }
tracing-tree = { workspace = true }
wild = { workspace = true }
[dev-dependencies]
ruff_db = { workspace = true, features = ["testing"] }
ruff_python_trivia = { workspace = true }
insta = { workspace = true, features = ["filters"] }
insta-cmd = { workspace = true }
filetime = { workspace = true }
regex = { workspace = true }
tempfile = { workspace = true }
toml = { workspace = true }
[lints]
workspace = true


@@ -1,25 +0,0 @@
# Red Knot
Red Knot is an extremely fast type checker.
Currently, it is a work-in-progress and not ready for user testing.
Red Knot is designed to prioritize good type inference, even in unannotated code,
and aims to avoid false positives.
While Red Knot will produce similar results to mypy and pyright on many codebases,
100% compatibility with these tools is a non-goal.
On some codebases, Red Knot's design decisions lead to different outcomes
than you would get from running one of these more established tools.
## Contributing
Core type checking tests are written as Markdown code blocks.
They can be found in [`red_knot_python_semantic/resources/mdtest`][resources-mdtest].
See [`red_knot_test/README.md`][mdtest-readme] for more information
on the test framework itself.
The list of open issues can be found [here][open-issues].
[mdtest-readme]: ../red_knot_test/README.md
[open-issues]: https://github.com/astral-sh/ruff/issues?q=sort%3Aupdated-desc%20is%3Aissue%20is%3Aopen%20label%3Ared-knot
[resources-mdtest]: ../red_knot_python_semantic/resources/mdtest


@@ -1,104 +0,0 @@
use std::{
fs,
path::{Path, PathBuf},
process::Command,
};
fn main() {
// The workspace root directory is not available without walking up the tree
// https://github.com/rust-lang/cargo/issues/3946
let workspace_root = Path::new(&std::env::var("CARGO_MANIFEST_DIR").unwrap())
.join("..")
.join("..");
commit_info(&workspace_root);
#[allow(clippy::disallowed_methods)]
let target = std::env::var("TARGET").unwrap();
println!("cargo::rustc-env=RUST_HOST_TARGET={target}");
}
fn commit_info(workspace_root: &Path) {
// If not in a git repository, do not attempt to retrieve commit information
let git_dir = workspace_root.join(".git");
if !git_dir.exists() {
return;
}
if let Some(git_head_path) = git_head(&git_dir) {
println!("cargo:rerun-if-changed={}", git_head_path.display());
let git_head_contents = fs::read_to_string(git_head_path);
if let Ok(git_head_contents) = git_head_contents {
// The contents are either a commit or a reference in the following formats
// - "<commit>" when the head is detached
// - "ref <ref>" when working on a branch
// If a commit, checking if the HEAD file has changed is sufficient
// If a ref, we need to add the head file for that ref to rebuild on commit
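// e.g. HEAD contains "ref: refs/heads/main", so ".git/refs/heads/main" is watched as well.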
let mut git_ref_parts = git_head_contents.split_whitespace();
git_ref_parts.next();
if let Some(git_ref) = git_ref_parts.next() {
let git_ref_path = git_dir.join(git_ref);
println!("cargo:rerun-if-changed={}", git_ref_path.display());
}
}
}
let output = match Command::new("git")
.arg("log")
.arg("-1")
.arg("--date=short")
.arg("--abbrev=9")
.arg("--format=%H %h %cd %(describe)")
.output()
{
Ok(output) if output.status.success() => output,
_ => return,
};
let stdout = String::from_utf8(output.stdout).unwrap();
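// stdout is a single line of the form "<full-hash> <short-hash> <date> [<describe>]"
// (hypothetical example: "… 4f2d8e1aa 2024-08-21 v0.6.0-12-g4f2d8e1aa").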
let mut parts = stdout.split_whitespace();
let mut next = || parts.next().unwrap();
let _commit_hash = next();
println!("cargo::rustc-env=RED_KNOT_COMMIT_SHORT_HASH={}", next());
println!("cargo::rustc-env=RED_KNOT_COMMIT_DATE={}", next());
// Describe can fail for some commits
// https://git-scm.com/docs/pretty-formats#Documentation/pretty-formats.txt-emdescribeoptionsem
if let Some(describe) = parts.next() {
let mut describe_parts = describe.split('-');
let _last_tag = describe_parts.next().unwrap();
// If this is the tagged commit, this component will be missing
println!(
"cargo::rustc-env=RED_KNOT_LAST_TAG_DISTANCE={}",
describe_parts.next().unwrap_or("0")
);
}
}
fn git_head(git_dir: &Path) -> Option<PathBuf> {
// The typical case is a standard git repository.
let git_head_path = git_dir.join("HEAD");
if git_head_path.exists() {
return Some(git_head_path);
}
if !git_dir.is_file() {
return None;
}
// If `.git/HEAD` doesn't exist and `.git` is actually a file,
// then let's try to attempt to read it as a worktree. If it's
// a worktree, then its contents will look like this, e.g.:
//
// gitdir: /home/andrew/astral/uv/main/.git/worktrees/pr2
//
// And the HEAD file we want to watch will be at:
//
// /home/andrew/astral/uv/main/.git/worktrees/pr2/HEAD
let contents = fs::read_to_string(git_dir).ok()?;
let (label, worktree_path) = contents.split_once(':')?;
if label != "gitdir" {
return None;
}
let worktree_path = worktree_path.trim();
Some(PathBuf::from(worktree_path))
}


@@ -1,61 +0,0 @@
# Running `mypy_primer`
## Basics
For now, we use our own [fork of mypy primer]. It can be run using `uvx --from "…" mypy_primer`. For example, to see the help message, run:
```sh
uvx --from "git+https://github.com/astral-sh/mypy_primer.git@add-red-knot-support" mypy_primer -h
```
Alternatively, you can install the forked version of `mypy_primer` using:
```sh
uv tool install "git+https://github.com/astral-sh/mypy_primer.git@add-red-knot-support"
```
and then run it using `uvx mypy_primer` or just `mypy_primer`, if your `PATH` is set up accordingly (see: [Tool executables]).
## Showing the diagnostics diff between two Git revisions
To show the diagnostics diff between two Git revisions (e.g. your feature branch and `main`), run:
```sh
mypy_primer \
--type-checker knot \
--old origin/main \
--new my/feature \
--debug \
--output concise \
--project-selector '/black$'
```
This will show the diagnostics diff for the `black` project between the `main` branch and your `my/feature` branch. To run the
diff for all projects, you currently need to copy the project-selector regex from the CI pipeline in `.github/workflows/mypy_primer.yaml`.
You can also take a look at the [full list of ecosystem projects]. Note that some of them might still need a `knot_paths` configuration
option to work correctly.
## Avoiding recompilation
If you want to run `mypy_primer` repeatedly, e.g. for different projects, but for the same combination of `--old` and `--new`, you
can set the `MYPY_PRIMER_NO_REBUILD` environment variable to avoid recompilation of Red Knot:
```sh
MYPY_PRIMER_NO_REBUILD=1 mypy_primer …
```
## Running from a local copy of the repository
If you are working on a local branch, you can use `mypy_primer`'s `--repo` option to specify the path to your local copy of the `ruff` repository.
This allows `mypy_primer` to check out local branches:
```sh
mypy_primer --repo /path/to/ruff --old origin/main --new my/local-branch …
```
Note that you might need to clean up `/tmp/mypy_primer` in order for this to work correctly.
[fork of mypy primer]: https://github.com/astral-sh/mypy_primer/tree/add-red-knot-support
[full list of ecosystem projects]: https://github.com/astral-sh/mypy_primer/blob/add-red-knot-support/mypy_primer/projects.py
[tool executables]: https://docs.astral.sh/uv/concepts/tools/#tool-executables


@@ -13,17 +13,12 @@ The CLI supports different verbosity levels.
- `-vv` activates `debug!` and timestamps: This should be enough information to get to the bottom of bug reports. When you're processing many packages or files, you'll get pages and pages of output, but each line is linked to a specific action or state change.
- `-vvv` activates `trace!` (only in debug builds) and shows tracing-spans: At this level, you're logging everything. Most of this is wasted; it's really slow, and we dump e.g. the entire resolution graph. It is only useful to developers, and you almost certainly want to use `RED_KNOT_LOG` to filter it down to the area you're investigating.
## Better logging with `RED_KNOT_LOG` and `RAYON_NUM_THREADS`
## `RED_KNOT_LOG`
By default, the CLI shows messages from the `ruff` and `red_knot` crates. Tracing messages from other crates are not shown.
The `RED_KNOT_LOG` environment variable allows you to customize which messages are shown by specifying one
or more [filter directives](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives).
The `RAYON_NUM_THREADS` environment variable, meanwhile, can be used to control the level of concurrency red-knot uses.
By default, red-knot will attempt to parallelize its work so that multiple files are checked simultaneously,
but this can result in confusing log output where messages from different threads are interleaved.
To switch off concurrency entirely and have more readable logs, use `RAYON_NUM_THREADS=1`.
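For example, to get single-threaded, debug-level logs scoped to the `red_knot` crates (a sketch; the project path and exact invocation are hypothetical):

```bash
RAYON_NUM_THREADS=1 RED_KNOT_LOG=red_knot=debug red_knot check ./my_project
```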
### Examples
#### Show all debug messages
@@ -103,7 +98,7 @@ called **once**.
## Profiling
Red Knot writes a folded stack trace to a file named `tracing.folded` in the current directory when the environment variable `RED_KNOT_LOG_PROFILE` is set to `1` or `true`.
```bash
RED_KNOT_LOG_PROFILE=1 red_knot -- --current-directory=../test -vvv


@@ -1,280 +0,0 @@
use crate::logging::Verbosity;
use crate::python_version::PythonVersion;
use clap::{ArgAction, ArgMatches, Error, Parser};
use red_knot_project::metadata::options::{EnvironmentOptions, Options, TerminalOptions};
use red_knot_project::metadata::value::{RangedValue, RelativePathBuf};
use red_knot_python_semantic::lint;
use ruff_db::system::SystemPathBuf;
#[derive(Debug, Parser)]
#[command(
author,
name = "red-knot",
about = "An extremely fast Python type checker."
)]
#[command(version)]
pub(crate) struct Args {
#[command(subcommand)]
pub(crate) command: Command,
}
#[derive(Debug, clap::Subcommand)]
pub(crate) enum Command {
/// Check a project for type errors.
Check(CheckCommand),
/// Start the language server
Server,
/// Display Red Knot's version
Version,
}
#[derive(Debug, Parser)]
pub(crate) struct CheckCommand {
/// List of files or directories to check.
#[clap(
help = "List of files or directories to check [default: the project root]",
value_name = "PATH"
)]
pub paths: Vec<SystemPathBuf>,
/// Run the command within the given project directory.
///
/// All `pyproject.toml` files will be discovered by walking up the directory tree from the given project directory,
/// as will the project's virtual environment (`.venv`) unless the `venv-path` option is set.
///
/// Other command-line arguments (such as relative paths) will be resolved relative to the current working directory.
#[arg(long, value_name = "PROJECT")]
pub(crate) project: Option<SystemPathBuf>,
/// Path to the Python installation from which Red Knot resolves type information and third-party dependencies.
///
/// If not specified, Red Knot will look at the `VIRTUAL_ENV` environment variable.
///
/// Red Knot will search in the path's `site-packages` directories for type information and
/// third-party imports.
///
/// This option is commonly used to specify the path to a virtual environment.
#[arg(long, value_name = "PATH")]
pub(crate) python: Option<SystemPathBuf>,
/// Custom directory to use for stdlib typeshed stubs.
#[arg(long, value_name = "PATH", alias = "custom-typeshed-dir")]
pub(crate) typeshed: Option<SystemPathBuf>,
/// Additional path to use as a module-resolution source (can be passed multiple times).
#[arg(long, value_name = "PATH")]
pub(crate) extra_search_path: Option<Vec<SystemPathBuf>>,
/// Python version to assume when resolving types.
#[arg(long, value_name = "VERSION", alias = "target-version")]
pub(crate) python_version: Option<PythonVersion>,
/// Target platform to assume when resolving types.
///
/// This is used to specialize the type of `sys.platform` and will affect the visibility
/// of platform-specific functions and attributes. If the value is set to `all`, no
/// assumptions are made about the target platform. If unspecified, the current system's
/// platform will be used.
#[arg(long, value_name = "PLATFORM", alias = "platform")]
pub(crate) python_platform: Option<String>,
#[clap(flatten)]
pub(crate) verbosity: Verbosity,
#[clap(flatten)]
pub(crate) rules: RulesArg,
/// The format to use for printing diagnostic messages.
#[arg(long)]
pub(crate) output_format: Option<OutputFormat>,
/// Control when colored output is used.
#[arg(long, value_name = "WHEN")]
pub(crate) color: Option<TerminalColor>,
/// Use exit code 1 if there are any warning-level diagnostics.
#[arg(long, conflicts_with = "exit_zero", default_missing_value = "true", num_args=0..1)]
pub(crate) error_on_warning: Option<bool>,
/// Always use exit code 0, even when there are error-level diagnostics.
#[arg(long)]
pub(crate) exit_zero: bool,
/// Watch files for changes and recheck files related to the changed files.
#[arg(long, short = 'W')]
pub(crate) watch: bool,
}
impl CheckCommand {
pub(crate) fn into_options(self) -> Options {
let rules = if self.rules.is_empty() {
None
} else {
Some(
self.rules
.into_iter()
.map(|(rule, level)| (RangedValue::cli(rule), RangedValue::cli(level)))
.collect(),
)
};
Options {
environment: Some(EnvironmentOptions {
python_version: self
.python_version
.map(|version| RangedValue::cli(version.into())),
python_platform: self
.python_platform
.map(|platform| RangedValue::cli(platform.into())),
python: self.python.map(RelativePathBuf::cli),
typeshed: self.typeshed.map(RelativePathBuf::cli),
extra_paths: self.extra_search_path.map(|extra_search_paths| {
extra_search_paths
.into_iter()
.map(RelativePathBuf::cli)
.collect()
}),
}),
terminal: Some(TerminalOptions {
output_format: self
.output_format
.map(|output_format| RangedValue::cli(output_format.into())),
error_on_warning: self.error_on_warning,
}),
rules,
..Default::default()
}
}
}
/// A list of rules to enable or disable with a given severity.
///
/// This type is used to parse the `--error`, `--warn`, and `--ignore` arguments
/// while preserving the order in which they were specified (arguments last override previous severities).
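///
/// For example, a hypothetical invocation `--warn unresolved-import --error unresolved-import`
/// leaves `unresolved-import` at severity `error`, because the later argument wins.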
#[derive(Debug)]
pub(crate) struct RulesArg(Vec<(String, lint::Level)>);
impl RulesArg {
fn is_empty(&self) -> bool {
self.0.is_empty()
}
fn into_iter(self) -> impl Iterator<Item = (String, lint::Level)> {
self.0.into_iter()
}
}
impl clap::FromArgMatches for RulesArg {
fn from_arg_matches(matches: &ArgMatches) -> Result<Self, Error> {
let mut rules = Vec::new();
for (level, arg_id) in [
(lint::Level::Ignore, "ignore"),
(lint::Level::Warn, "warn"),
(lint::Level::Error, "error"),
] {
let indices = matches.indices_of(arg_id).into_iter().flatten();
let levels = matches.get_many::<String>(arg_id).into_iter().flatten();
rules.extend(
indices
.zip(levels)
.map(|(index, rule)| (index, rule, level)),
);
}
// Sort by their index so that values specified later override earlier ones.
rules.sort_by_key(|(index, _, _)| *index);
Ok(Self(
rules
.into_iter()
.map(|(_, rule, level)| (rule.to_owned(), level))
.collect(),
))
}
fn update_from_arg_matches(&mut self, matches: &ArgMatches) -> Result<(), Error> {
self.0 = Self::from_arg_matches(matches)?.0;
Ok(())
}
}
impl clap::Args for RulesArg {
fn augment_args(cmd: clap::Command) -> clap::Command {
const HELP_HEADING: &str = "Enabling / disabling rules";
cmd.arg(
clap::Arg::new("error")
.long("error")
.action(ArgAction::Append)
.help("Treat the given rule as having severity 'error'. Can be specified multiple times.")
.value_name("RULE")
.help_heading(HELP_HEADING),
)
.arg(
clap::Arg::new("warn")
.long("warn")
.action(ArgAction::Append)
.help("Treat the given rule as having severity 'warn'. Can be specified multiple times.")
.value_name("RULE")
.help_heading(HELP_HEADING),
)
.arg(
clap::Arg::new("ignore")
.long("ignore")
.action(ArgAction::Append)
.help("Disables the rule. Can be specified multiple times.")
.value_name("RULE")
.help_heading(HELP_HEADING),
)
}
fn augment_args_for_update(cmd: clap::Command) -> clap::Command {
Self::augment_args(cmd)
}
}
/// The diagnostic output format.
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq, PartialOrd, Ord, Default, clap::ValueEnum)]
pub enum OutputFormat {
/// Print diagnostics verbosely, with context and helpful hints.
///
/// Diagnostic messages may include additional context and
/// annotations on the input to help understand the message.
#[default]
#[value(name = "full")]
Full,
/// Print diagnostics concisely, one per line.
///
/// This will guarantee that each diagnostic is printed on
/// a single line. Only the most important or primary aspects
/// of the diagnostic are included. Contextual information is
/// dropped.
#[value(name = "concise")]
Concise,
}
impl From<OutputFormat> for ruff_db::diagnostic::DiagnosticFormat {
fn from(format: OutputFormat) -> ruff_db::diagnostic::DiagnosticFormat {
match format {
OutputFormat::Full => Self::Full,
OutputFormat::Concise => Self::Concise,
}
}
}
/// Control when colored output is used.
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq, PartialOrd, Ord, Default, clap::ValueEnum)]
pub(crate) enum TerminalColor {
/// Display colors if the output goes to an interactive terminal.
#[default]
Auto,
/// Always display colors.
Always,
/// Never display colors.
Never,
}


@@ -190,8 +190,8 @@ where
let ansi = writer.has_ansi_escapes();
if self.display_timestamp {
let timestamp = jiff::Zoned::now()
.strftime("%Y-%m-%d %H:%M:%S.%f")
let timestamp = chrono::Local::now()
.format("%Y-%m-%d %H:%M:%S.%f")
.to_string();
if ansi {
write!(writer, "{} ", timestamp.dimmed())?;
@@ -199,7 +199,7 @@ where
write!(
writer,
"{} ",
jiff::Zoned::now().strftime("%Y-%m-%d %H:%M:%S.%f")
chrono::Local::now().format("%Y-%m-%d %H:%M:%S.%f")
)?;
}
}


@@ -1,28 +1,96 @@
use std::io::{self, stdout, BufWriter, Write};
use std::process::{ExitCode, Termination};
use anyhow::Result;
use std::sync::Mutex;
use crate::args::{Args, CheckCommand, Command, TerminalColor};
use crate::logging::setup_tracing;
use anyhow::{anyhow, Context};
use clap::Parser;
use colored::Colorize;
use crossbeam::channel as crossbeam_channel;
use red_knot_project::metadata::options::Options;
use red_knot_project::watch::ProjectWatcher;
use red_knot_project::{watch, Db};
use red_knot_project::{ProjectDatabase, ProjectMetadata};
use red_knot_server::run_server;
use ruff_db::diagnostic::{Diagnostic, DisplayDiagnosticConfig, Severity};
use ruff_db::system::{OsSystem, SystemPath, SystemPathBuf};
use salsa::plumbing::ZalsaDatabase;
mod args;
use red_knot_python_semantic::{ProgramSettings, SearchPathSettings};
use red_knot_server::run_server;
use red_knot_workspace::db::RootDatabase;
use red_knot_workspace::site_packages::VirtualEnvironment;
use red_knot_workspace::watch;
use red_knot_workspace::watch::WorkspaceWatcher;
use red_knot_workspace::workspace::WorkspaceMetadata;
use ruff_db::system::{OsSystem, System, SystemPath, SystemPathBuf};
use target_version::TargetVersion;
use crate::logging::{setup_tracing, Verbosity};
mod logging;
mod python_version;
mod version;
mod target_version;
mod verbosity;
#[derive(Debug, Parser)]
#[command(
author,
name = "red-knot",
about = "An extremely fast Python type checker."
)]
#[command(version)]
struct Args {
#[command(subcommand)]
pub(crate) command: Option<Command>,
#[arg(
long,
help = "Changes the current working directory.",
long_help = "Changes the current working directory before any specified operations. This affects the workspace and configuration discovery.",
value_name = "PATH"
)]
current_directory: Option<SystemPathBuf>,
#[arg(
long,
help = "Path to the virtual environment the project uses",
long_help = "\
Path to the virtual environment the project uses. \
If provided, red-knot will use the `site-packages` directory of this virtual environment \
to resolve type information for the project's third-party dependencies.",
value_name = "PATH"
)]
venv_path: Option<SystemPathBuf>,
#[arg(
long,
value_name = "DIRECTORY",
help = "Custom directory to use for stdlib typeshed stubs"
)]
custom_typeshed_dir: Option<SystemPathBuf>,
#[arg(
long,
value_name = "PATH",
help = "Additional path to use as a module-resolution source (can be passed multiple times)"
)]
extra_search_path: Vec<SystemPathBuf>,
#[arg(
long,
help = "Python version to assume when resolving types",
default_value_t = TargetVersion::default(),
value_name="VERSION")
]
target_version: TargetVersion,
#[clap(flatten)]
verbosity: Verbosity,
#[arg(
long,
help = "Run in watch mode by re-running whenever files change",
short = 'W'
)]
watch: bool,
}
#[derive(Debug, clap::Subcommand)]
pub enum Command {
/// Start the language server
Server,
}
#[allow(clippy::print_stdout, clippy::unnecessary_wraps, clippy::print_stderr)]
pub fn main() -> ExitStatus {
@@ -39,15 +107,6 @@ pub fn main() -> ExitStatus {
// the configuration it is helpful to chain errors ("resolving configuration failed" ->
// "failed to read file: subdir/pyproject.toml")
for cause in error.chain() {
// Exit "gracefully" on broken pipe errors.
//
// See: https://github.com/BurntSushi/ripgrep/blob/bf63fe8f258afc09bae6caa48f0ae35eaf115005/crates/core/main.rs#L47C1-L61C14
if let Some(ioerr) = cause.downcast_ref::<io::Error>() {
if ioerr.kind() == io::ErrorKind::BrokenPipe {
return ExitStatus::Success;
}
}
writeln!(stderr, " {} {cause}", "Cause:".bold()).ok();
}
@@ -56,83 +115,78 @@ pub fn main() -> ExitStatus {
}
fn run() -> anyhow::Result<ExitStatus> {
let args = wild::args_os();
let args = argfile::expand_args_from(args, argfile::parse_fromfile, argfile::PREFIX)
.context("Failed to read CLI arguments from file")?;
let args = Args::parse_from(args);
let Args {
command,
current_directory,
custom_typeshed_dir,
extra_search_path: extra_paths,
venv_path,
target_version,
verbosity,
watch,
} = Args::parse_from(std::env::args().collect::<Vec<_>>());
match args.command {
Command::Server => run_server().map(|()| ExitStatus::Success),
Command::Check(check_args) => run_check(check_args),
Command::Version => version().map(|()| ExitStatus::Success),
if matches!(command, Some(Command::Server)) {
return run_server().map(|()| ExitStatus::Success);
}
}
pub(crate) fn version() -> Result<()> {
let mut stdout = BufWriter::new(io::stdout().lock());
let version_info = crate::version::version();
writeln!(stdout, "red knot {}", &version_info)?;
Ok(())
}
fn run_check(args: CheckCommand) -> anyhow::Result<ExitStatus> {
set_colored_override(args.color);
let verbosity = args.verbosity.level();
let verbosity = verbosity.level();
countme::enable(verbosity.is_trace());
let _guard = setup_tracing(verbosity)?;
tracing::debug!("Version: {}", version::version());
// The base path to which all CLI arguments are relative to.
let cwd = {
let cli_base_path = {
let cwd = std::env::current_dir().context("Failed to get the current working directory")?;
SystemPathBuf::from_path_buf(cwd)
.map_err(|path| {
anyhow!(
"The current working directory `{}` contains non-Unicode characters. Red Knot only supports Unicode paths.",
"The current working directory '{}' contains non-unicode characters. Red Knot only supports unicode paths.",
path.display()
)
})?
};
let project_path = args
.project
.as_ref()
.map(|project| {
if project.as_std_path().is_dir() {
Ok(SystemPath::absolute(project, &cwd))
let cwd = current_directory
.map(|cwd| {
if cwd.as_std_path().is_dir() {
Ok(SystemPath::absolute(&cwd, &cli_base_path))
} else {
Err(anyhow!(
"Provided project path `{project}` is not a directory"
"Provided current-directory path '{cwd}' is not a directory."
))
}
})
.transpose()?
.unwrap_or_else(|| cwd.clone());
.unwrap_or_else(|| cli_base_path.clone());
let check_paths: Vec<_> = args
.paths
.iter()
.map(|path| SystemPath::absolute(path, &cwd))
.collect();
let system = OsSystem::new(cwd.clone());
let workspace_metadata = WorkspaceMetadata::from_path(system.current_directory(), &system)?;
let system = OsSystem::new(cwd);
let watch = args.watch;
let exit_zero = args.exit_zero;
// TODO: Verify the remaining search path settings eagerly.
let site_packages = venv_path
.map(|path| {
VirtualEnvironment::new(path, &OsSystem::new(cli_base_path))
.and_then(|venv| venv.site_packages_directories(&system))
})
.transpose()?
.unwrap_or_default();
let cli_options = args.into_options();
let mut project_metadata = ProjectMetadata::discover(&project_path, &system)?;
project_metadata.apply_cli_options(cli_options.clone());
project_metadata.apply_configuration_files(&system)?;
// TODO: Respect the settings from the workspace metadata when resolving the program settings.
let program_settings = ProgramSettings {
target_version: target_version.into(),
search_paths: SearchPathSettings {
extra_paths,
src_root: workspace_metadata.root().to_path_buf(),
custom_typeshed: custom_typeshed_dir,
site_packages,
},
};
let mut db = ProjectDatabase::new(project_metadata, system)?;
// TODO: Use the `program_settings` to compute the key for the database's persistent
// cache and load the cache if it exists.
let mut db = RootDatabase::new(workspace_metadata, program_settings, system)?;
if !check_paths.is_empty() {
db.project().set_included_paths(&mut db, check_paths);
}
let (main_loop, main_loop_cancellation_token) = MainLoop::new(cli_options);
let (main_loop, main_loop_cancellation_token) = MainLoop::new();
// Listen to Ctrl+C and abort the watch mode.
let main_loop_cancellation_token = Mutex::new(Some(main_loop_cancellation_token));
@@ -147,18 +201,14 @@ fn run_check(args: CheckCommand) -> anyhow::Result<ExitStatus> {
let exit_status = if watch {
main_loop.watch(&mut db)?
} else {
main_loop.run(&mut db)?
main_loop.run(&mut db)
};
tracing::trace!("Counts for entire CLI run:\n{}", countme::get_all());
std::mem::forget(db);
if exit_zero {
Ok(ExitStatus::Success)
} else {
Ok(exit_status)
}
Ok(exit_status)
}
#[derive(Copy, Clone)]
@@ -187,13 +237,11 @@ struct MainLoop {
receiver: crossbeam_channel::Receiver<MainLoopMessage>,
/// The file system watcher, if running in watch mode.
watcher: Option<ProjectWatcher>,
cli_options: Options,
watcher: Option<WorkspaceWatcher>,
}
impl MainLoop {
fn new(cli_options: Options) -> (Self, MainLoopCancellationToken) {
fn new() -> (Self, MainLoopCancellationToken) {
let (sender, receiver) = crossbeam_channel::bounded(10);
(
@@ -201,27 +249,26 @@ impl MainLoop {
sender: sender.clone(),
receiver,
watcher: None,
cli_options,
},
MainLoopCancellationToken { sender },
)
}
fn watch(mut self, db: &mut ProjectDatabase) -> Result<ExitStatus> {
fn watch(mut self, db: &mut RootDatabase) -> anyhow::Result<ExitStatus> {
tracing::debug!("Starting watch mode");
let sender = self.sender.clone();
let watcher = watch::directory_watcher(move |event| {
sender.send(MainLoopMessage::ApplyChanges(event)).unwrap();
})?;
self.watcher = Some(ProjectWatcher::new(watcher, db));
self.watcher = Some(WorkspaceWatcher::new(watcher, db));
self.run(db)?;
self.run(db);
Ok(ExitStatus::Success)
}
fn run(mut self, db: &mut ProjectDatabase) -> Result<ExitStatus> {
fn run(mut self, db: &mut RootDatabase) -> ExitStatus {
self.sender.send(MainLoopMessage::CheckWorkspace).unwrap();
let result = self.main_loop(db);
@@ -231,7 +278,7 @@ impl MainLoop {
result
}
fn main_loop(&mut self, db: &mut ProjectDatabase) -> Result<ExitStatus> {
fn main_loop(&mut self, db: &mut RootDatabase) -> ExitStatus {
// Schedule the first check.
tracing::debug!("Starting main loop");
@@ -240,10 +287,10 @@ impl MainLoop {
while let Ok(message) = self.receiver.recv() {
match message {
MainLoopMessage::CheckWorkspace => {
let db = db.clone();
let db = db.snapshot();
let sender = self.sender.clone();
// Spawn a new task that checks the project. This needs to be done in a separate thread
// Spawn a new task that checks the workspace. This needs to be done in a separate thread
// to prevent blocking the main loop here.
rayon::spawn(move || {
if let Ok(result) = db.check() {
@@ -259,54 +306,10 @@ impl MainLoop {
result,
revision: check_revision,
} => {
let terminal_settings = db.project().settings(db).terminal();
let display_config = DisplayDiagnosticConfig::default()
.format(terminal_settings.output_format)
.color(colored::control::SHOULD_COLORIZE.should_colorize());
let min_error_severity = if terminal_settings.error_on_warning {
Severity::Warning
} else {
Severity::Error
};
let has_diagnostics = !result.is_empty();
if check_revision == revision {
if db.project().files(db).is_empty() {
tracing::warn!("No python files found under the given path(s)");
}
let mut stdout = stdout().lock();
if result.is_empty() {
writeln!(stdout, "All checks passed!")?;
if self.watcher.is_none() {
return Ok(ExitStatus::Success);
}
} else {
let mut failed = false;
let diagnostics_count = result.len();
for diagnostic in result {
write!(stdout, "{}", diagnostic.display(db, &display_config))?;
failed |= diagnostic.severity() >= min_error_severity;
}
writeln!(
stdout,
"Found {} diagnostic{}",
diagnostics_count,
if diagnostics_count > 1 { "s" } else { "" }
)?;
if self.watcher.is_none() {
return Ok(if failed {
ExitStatus::Failure
} else {
ExitStatus::Success
});
}
for diagnostic in result {
tracing::error!("{}", diagnostic);
}
} else {
tracing::debug!(
@@ -314,13 +317,21 @@ impl MainLoop {
);
}
if self.watcher.is_none() {
return if has_diagnostics {
ExitStatus::Failure
} else {
ExitStatus::Success
};
}
tracing::trace!("Counts after last check:\n{}", countme::get_all());
}
MainLoopMessage::ApplyChanges(changes) => {
revision += 1;
// Automatically cancels any pending queries and waits for them to complete.
db.apply_changes(changes, Some(&self.cli_options));
db.apply_changes(changes);
if let Some(watcher) = self.watcher.as_mut() {
watcher.update(db);
}
@@ -331,14 +342,14 @@ impl MainLoop {
// TODO: Don't use Salsa internal APIs
// [Zulip-Thread](https://salsa.zulipchat.com/#narrow/stream/333573-salsa-3.2E0/topic/Expose.20an.20API.20to.20cancel.20other.20queries)
let _ = db.zalsa_mut();
return Ok(ExitStatus::Success);
return ExitStatus::Success;
}
}
tracing::debug!("Waiting for next main loop message.");
}
Ok(ExitStatus::Success)
ExitStatus::Success
}
}
@@ -357,29 +368,7 @@ impl MainLoopCancellationToken {
#[derive(Debug)]
enum MainLoopMessage {
CheckWorkspace,
CheckCompleted {
/// The diagnostics that were found during the check.
result: Vec<Diagnostic>,
revision: u64,
},
CheckCompleted { result: Vec<String>, revision: u64 },
ApplyChanges(Vec<watch::ChangeEvent>),
Exit,
}
fn set_colored_override(color: Option<TerminalColor>) {
let Some(color) = color else {
return;
};
match color {
TerminalColor::Auto => {
colored::control::unset_override();
}
TerminalColor::Always => {
colored::control::set_override(true);
}
TerminalColor::Never => {
colored::control::set_override(false);
}
}
}


@@ -1,68 +0,0 @@
/// Enumeration of all supported Python versions
///
/// TODO: unify with the `PythonVersion` enum in the linter/formatter crates?
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq, PartialOrd, Ord, Default, clap::ValueEnum)]
pub enum PythonVersion {
#[value(name = "3.7")]
Py37,
#[value(name = "3.8")]
Py38,
#[default]
#[value(name = "3.9")]
Py39,
#[value(name = "3.10")]
Py310,
#[value(name = "3.11")]
Py311,
#[value(name = "3.12")]
Py312,
#[value(name = "3.13")]
Py313,
}
impl PythonVersion {
const fn as_str(self) -> &'static str {
match self {
Self::Py37 => "3.7",
Self::Py38 => "3.8",
Self::Py39 => "3.9",
Self::Py310 => "3.10",
Self::Py311 => "3.11",
Self::Py312 => "3.12",
Self::Py313 => "3.13",
}
}
}
impl std::fmt::Display for PythonVersion {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(self.as_str())
}
}
impl From<PythonVersion> for ruff_python_ast::PythonVersion {
fn from(value: PythonVersion) -> Self {
match value {
PythonVersion::Py37 => Self::PY37,
PythonVersion::Py38 => Self::PY38,
PythonVersion::Py39 => Self::PY39,
PythonVersion::Py310 => Self::PY310,
PythonVersion::Py311 => Self::PY311,
PythonVersion::Py312 => Self::PY312,
PythonVersion::Py313 => Self::PY313,
}
}
}
#[cfg(test)]
mod tests {
use crate::python_version::PythonVersion;
#[test]
fn same_default_as_python_version() {
assert_eq!(
ruff_python_ast::PythonVersion::from(PythonVersion::default()),
ruff_python_ast::PythonVersion::default()
);
}
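// A hedged extra check, not part of the original diff: `Display` should
// agree with the clap value names declared on the enum above.
#[test]
fn display_matches_clap_value_names() {
assert_eq!(PythonVersion::Py37.to_string(), "3.7");
assert_eq!(PythonVersion::default().to_string(), "3.9");
}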
}

View File

@@ -0,0 +1,48 @@
/// Enumeration of all supported Python versions
///
/// TODO: unify with the `PythonVersion` enum in the linter/formatter crates?
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq, PartialOrd, Ord, Default, clap::ValueEnum)]
pub enum TargetVersion {
Py37,
#[default]
Py38,
Py39,
Py310,
Py311,
Py312,
Py313,
}
impl TargetVersion {
const fn as_str(self) -> &'static str {
match self {
Self::Py37 => "py37",
Self::Py38 => "py38",
Self::Py39 => "py39",
Self::Py310 => "py310",
Self::Py311 => "py311",
Self::Py312 => "py312",
Self::Py313 => "py313",
}
}
}
impl std::fmt::Display for TargetVersion {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(self.as_str())
}
}
impl From<TargetVersion> for red_knot_python_semantic::PythonVersion {
fn from(value: TargetVersion) -> Self {
match value {
TargetVersion::Py37 => Self::PY37,
TargetVersion::Py38 => Self::PY38,
TargetVersion::Py39 => Self::PY39,
TargetVersion::Py310 => Self::PY310,
TargetVersion::Py311 => Self::PY311,
TargetVersion::Py312 => Self::PY312,
TargetVersion::Py313 => Self::PY313,
}
}
}
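// A hedged sketch, not present in the original file: a smoke test keeping the
// default target version and its display form in sync.
#[cfg(test)]
mod tests {
use super::TargetVersion;
#[test]
fn default_displays_as_py38() {
assert_eq!(TargetVersion::default().to_string(), "py38");
}
}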

View File

@@ -0,0 +1 @@

View File

@@ -1,105 +0,0 @@
//! Code for representing Red Knot's release version number.
use std::fmt;
/// Information about the git repository where Red Knot was built from.
pub(crate) struct CommitInfo {
short_commit_hash: String,
commit_date: String,
commits_since_last_tag: u32,
}
/// Red Knot's version.
pub(crate) struct VersionInfo {
/// Red Knot's version, such as "0.5.1"
version: String,
/// Information about the git commit we may have been built from.
///
/// `None` if not built from a git repo or if retrieval failed.
commit_info: Option<CommitInfo>,
}
impl fmt::Display for VersionInfo {
/// Formatted version information: `<version>[+<commits>] (<commit> <date>)`
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.version)?;
if let Some(ref ci) = self.commit_info {
if ci.commits_since_last_tag > 0 {
write!(f, "+{}", ci.commits_since_last_tag)?;
}
write!(f, " ({} {})", ci.short_commit_hash, ci.commit_date)?;
}
Ok(())
}
}
/// Returns information about Red Knot's version.
pub(crate) fn version() -> VersionInfo {
// Environment variables are only read at compile-time
macro_rules! option_env_str {
($name:expr) => {
option_env!($name).map(|s| s.to_string())
};
}
// This version is pulled from Cargo.toml and set by Cargo
let version = option_env_str!("CARGO_PKG_VERSION").unwrap();
// Commit info is pulled from git and set by `build.rs`
let commit_info =
option_env_str!("RED_KNOT_COMMIT_SHORT_HASH").map(|short_commit_hash| CommitInfo {
short_commit_hash,
commit_date: option_env_str!("RED_KNOT_COMMIT_DATE").unwrap(),
commits_since_last_tag: option_env_str!("RED_KNOT_LAST_TAG_DISTANCE")
.as_deref()
.map_or(0, |value| value.parse::<u32>().unwrap_or(0)),
});
VersionInfo {
version,
commit_info,
}
}
#[cfg(test)]
mod tests {
use insta::assert_snapshot;
use super::{CommitInfo, VersionInfo};
#[test]
fn version_formatting() {
let version = VersionInfo {
version: "0.0.0".to_string(),
commit_info: None,
};
assert_snapshot!(version, @"0.0.0");
}
#[test]
fn version_formatting_with_commit_info() {
let version = VersionInfo {
version: "0.0.0".to_string(),
commit_info: Some(CommitInfo {
short_commit_hash: "53b0f5d92".to_string(),
commit_date: "2023-10-19".to_string(),
commits_since_last_tag: 0,
}),
};
assert_snapshot!(version, @"0.0.0 (53b0f5d92 2023-10-19)");
}
#[test]
fn version_formatting_with_commits_since_last_tag() {
let version = VersionInfo {
version: "0.0.0".to_string(),
commit_info: Some(CommitInfo {
short_commit_hash: "53b0f5d92".to_string(),
commit_date: "2023-10-19".to_string(),
commits_since_last_tag: 24,
}),
};
assert_snapshot!(version, @"0.0.0+24 (53b0f5d92 2023-10-19)");
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,31 +0,0 @@
[package]
name = "red_knot_ide"
version = "0.0.0"
publish = false
authors = { workspace = true }
edition = { workspace = true }
rust-version = { workspace = true }
homepage = { workspace = true }
documentation = { workspace = true }
repository = { workspace = true }
license = { workspace = true }
[dependencies]
ruff_db = { workspace = true }
ruff_python_ast = { workspace = true }
ruff_python_parser = { workspace = true }
ruff_text_size = { workspace = true }
red_knot_python_semantic = { workspace = true }
rustc-hash = { workspace = true }
salsa = { workspace = true }
smallvec = { workspace = true }
tracing = { workspace = true }
[dev-dependencies]
red_knot_vendored = { workspace = true }
insta = { workspace = true, features = ["filters"] }
[lints]
workspace = true

View File

@@ -1,134 +0,0 @@
use red_knot_python_semantic::Db as SemanticDb;
use ruff_db::{Db as SourceDb, Upcast};
#[salsa::db]
pub trait Db: SemanticDb + Upcast<dyn SemanticDb> + Upcast<dyn SourceDb> {}
#[cfg(test)]
pub(crate) mod tests {
use std::sync::Arc;
use super::Db;
use red_knot_python_semantic::lint::{LintRegistry, RuleSelection};
use red_knot_python_semantic::{default_lint_registry, Db as SemanticDb};
use ruff_db::files::{File, Files};
use ruff_db::system::{DbWithTestSystem, System, TestSystem};
use ruff_db::vendored::VendoredFileSystem;
use ruff_db::{Db as SourceDb, Upcast};
#[salsa::db]
#[derive(Clone)]
pub(crate) struct TestDb {
storage: salsa::Storage<Self>,
files: Files,
system: TestSystem,
vendored: VendoredFileSystem,
events: Arc<std::sync::Mutex<Vec<salsa::Event>>>,
rule_selection: Arc<RuleSelection>,
}
#[allow(dead_code)]
impl TestDb {
pub(crate) fn new() -> Self {
Self {
storage: salsa::Storage::default(),
system: TestSystem::default(),
vendored: red_knot_vendored::file_system().clone(),
events: Arc::default(),
files: Files::default(),
rule_selection: Arc::new(RuleSelection::from_registry(default_lint_registry())),
}
}
/// Takes the salsa events.
///
/// ## Panics
/// If there are any pending salsa snapshots.
pub(crate) fn take_salsa_events(&mut self) -> Vec<salsa::Event> {
let inner = Arc::get_mut(&mut self.events).expect("no pending salsa snapshots");
let events = inner.get_mut().unwrap();
std::mem::take(&mut *events)
}
/// Clears the salsa events.
///
/// ## Panics
/// If there are any pending salsa snapshots.
pub(crate) fn clear_salsa_events(&mut self) {
self.take_salsa_events();
}
}
impl DbWithTestSystem for TestDb {
fn test_system(&self) -> &TestSystem {
&self.system
}
fn test_system_mut(&mut self) -> &mut TestSystem {
&mut self.system
}
}
#[salsa::db]
impl SourceDb for TestDb {
fn vendored(&self) -> &VendoredFileSystem {
&self.vendored
}
fn system(&self) -> &dyn System {
&self.system
}
fn files(&self) -> &Files {
&self.files
}
}
impl Upcast<dyn SourceDb> for TestDb {
fn upcast(&self) -> &(dyn SourceDb + 'static) {
self
}
fn upcast_mut(&mut self) -> &mut (dyn SourceDb + 'static) {
self
}
}
impl Upcast<dyn SemanticDb> for TestDb {
fn upcast(&self) -> &(dyn SemanticDb + 'static) {
self
}
fn upcast_mut(&mut self) -> &mut dyn SemanticDb {
self
}
}
#[salsa::db]
impl SemanticDb for TestDb {
fn is_file_open(&self, file: File) -> bool {
!file.path(self).is_vendored_path()
}
fn rule_selection(&self) -> Arc<RuleSelection> {
self.rule_selection.clone()
}
fn lint_registry(&self) -> &LintRegistry {
default_lint_registry()
}
}
#[salsa::db]
impl Db for TestDb {}
#[salsa::db]
impl salsa::Database for TestDb {
fn salsa_event(&self, event: &dyn Fn() -> salsa::Event) {
let event = event();
tracing::trace!("event: {event:?}");
let mut events = self.events.lock().unwrap();
events.push(event);
}
}
}

View File

@@ -1,106 +0,0 @@
use ruff_python_ast::visitor::source_order::{SourceOrderVisitor, TraversalSignal};
use ruff_python_ast::AnyNodeRef;
use ruff_text_size::{Ranged, TextRange};
use std::fmt;
use std::fmt::Formatter;
/// Returns the node with a minimal range that fully contains `range`.
///
/// If `range` is empty and falls within a parser *synthesized* node generated during error recovery,
/// then the first node with the given range is returned.
///
/// ## Panics
/// Panics if `range` is not contained within `root`.
pub(crate) fn covering_node(root: AnyNodeRef, range: TextRange) -> CoveringNode {
struct Visitor<'a> {
range: TextRange,
found: bool,
ancestors: Vec<AnyNodeRef<'a>>,
}
impl<'a> SourceOrderVisitor<'a> for Visitor<'a> {
fn enter_node(&mut self, node: AnyNodeRef<'a>) -> TraversalSignal {
// If the node fully contains the range, then it is a possible match, but traverse into
// its children to see if there's a node with a narrower range.
if !self.found && node.range().contains_range(self.range) {
self.ancestors.push(node);
TraversalSignal::Traverse
} else {
TraversalSignal::Skip
}
}
fn leave_node(&mut self, node: AnyNodeRef<'a>) {
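// Leaving the most recently pushed node means none of its children covered
// the range (they would have been pushed on top of it), so the minimal
// covering node has been found.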
if !self.found && self.ancestors.last() == Some(&node) {
self.found = true;
}
}
}
assert!(
root.range().contains_range(range),
"Range is not contained within root"
);
let mut visitor = Visitor {
range,
found: false,
ancestors: Vec::new(),
};
root.visit_source_order(&mut visitor);
let minimal = visitor.ancestors.pop().unwrap_or(root);
CoveringNode {
node: minimal,
ancestors: visitor.ancestors,
}
}
/// The node with a minimal range that fully contains the search range.
pub(crate) struct CoveringNode<'a> {
/// The node with a minimal range that fully contains the search range.
node: AnyNodeRef<'a>,
/// The node's ancestors (the spine up to the root).
ancestors: Vec<AnyNodeRef<'a>>,
}
impl<'a> CoveringNode<'a> {
pub(crate) fn node(&self) -> AnyNodeRef<'a> {
self.node
}
/// Returns the node's parent.
pub(crate) fn parent(&self) -> Option<AnyNodeRef<'a>> {
self.ancestors.last().copied()
}
/// Finds the minimal node that fully covers the range and fulfills the given predicate.
pub(crate) fn find(mut self, f: impl Fn(AnyNodeRef<'a>) -> bool) -> Result<Self, Self> {
if f(self.node) {
return Ok(self);
}
match self.ancestors.iter().rposition(|node| f(*node)) {
Some(index) => {
let node = self.ancestors[index];
self.ancestors.truncate(index);
Ok(Self {
node,
ancestors: self.ancestors,
})
}
None => Err(self),
}
}
}
impl fmt::Debug for CoveringNode<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_tuple("CoveringNode")
.field(&self.node)
.finish()
}
}
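// A hedged usage sketch (identifiers assumed, not part of the original diff):
//
// let covering = covering_node(root, token.range());
// match covering.find(|node| node.is_expression()) {
//     Ok(expression) => { /* the minimal covering node that is an expression */ }
//     Err(unchanged) => { /* no covering node satisfied the predicate */ }
// }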

View File

@@ -1,851 +0,0 @@
use crate::find_node::covering_node;
use crate::{Db, HasNavigationTargets, NavigationTargets, RangedValue};
use red_knot_python_semantic::types::Type;
use red_knot_python_semantic::{HasType, SemanticModel};
use ruff_db::files::{File, FileRange};
use ruff_db::parsed::{parsed_module, ParsedModule};
use ruff_python_ast::{self as ast, AnyNodeRef};
use ruff_python_parser::TokenKind;
use ruff_text_size::{Ranged, TextRange, TextSize};
pub fn goto_type_definition(
db: &dyn Db,
file: File,
offset: TextSize,
) -> Option<RangedValue<NavigationTargets>> {
let parsed = parsed_module(db.upcast(), file);
let goto_target = find_goto_target(parsed, offset)?;
let model = SemanticModel::new(db.upcast(), file);
let ty = goto_target.inferred_type(&model)?;
tracing::debug!(
"Inferred type of covering node is {}",
ty.display(db.upcast())
);
let navigation_targets = ty.navigation_targets(db);
Some(RangedValue {
range: FileRange::new(file, goto_target.range()),
value: navigation_targets,
})
}
#[derive(Clone, Copy, Debug)]
pub(crate) enum GotoTarget<'a> {
Expression(ast::ExprRef<'a>),
FunctionDef(&'a ast::StmtFunctionDef),
ClassDef(&'a ast::StmtClassDef),
Parameter(&'a ast::Parameter),
Alias(&'a ast::Alias),
/// Go to on the module name of a `from ... import` statement
/// ```py
/// from foo import bar
/// ^^^
/// ```
ImportedModule(&'a ast::StmtImportFrom),
/// Go to on the exception handler variable
/// ```py
/// try: ...
/// except Exception as e: ...
/// ^
/// ```
ExceptVariable(&'a ast::ExceptHandlerExceptHandler),
/// Go to on a keyword argument
/// ```py
/// test(a = 1)
/// ^
/// ```
KeywordArgument(&'a ast::Keyword),
/// Go to on the rest parameter of a pattern match
///
/// ```py
/// match x:
/// case {"a": a, "b": b, **rest}: ...
/// ^^^^
/// ```
PatternMatchRest(&'a ast::PatternMatchMapping),
/// Go to on a keyword argument of a class pattern
///
/// ```py
/// match Point3D(0, 0, 0):
/// case Point3D(x=0, y=0, z=0): ...
/// ^ ^ ^
/// ```
PatternKeywordArgument(&'a ast::PatternKeyword),
/// Go to on a pattern star argument
///
/// ```py
/// match array:
/// case [*args]: ...
///           ^^^^
/// ```
PatternMatchStarName(&'a ast::PatternMatchStar),
/// Go to on the name of a pattern match as pattern
///
/// ```py
/// match x:
/// case [x] as y: ...
///                 ^
/// ```
PatternMatchAsName(&'a ast::PatternMatchAs),
/// Go to on the name of a type variable
///
/// ```py
/// type Alias[T: int = bool] = list[T]
/// ^
/// ```
TypeParamTypeVarName(&'a ast::TypeParamTypeVar),
/// Go to on the name of a type param spec
///
/// ```py
/// type Alias[**P = [int, str]] = Callable[P, int]
/// ^
/// ```
TypeParamParamSpecName(&'a ast::TypeParamParamSpec),
/// Go to on the name of a type var tuple
///
/// ```py
/// type Alias[*Ts = ()] = tuple[*Ts]
/// ^^
/// ```
TypeParamTypeVarTupleName(&'a ast::TypeParamTypeVarTuple),
NonLocal {
identifier: &'a ast::Identifier,
},
Globals {
identifier: &'a ast::Identifier,
},
}
impl<'db> GotoTarget<'db> {
pub(crate) fn inferred_type(self, model: &SemanticModel<'db>) -> Option<Type<'db>> {
let ty = match self {
GotoTarget::Expression(expression) => expression.inferred_type(model),
GotoTarget::FunctionDef(function) => function.inferred_type(model),
GotoTarget::ClassDef(class) => class.inferred_type(model),
GotoTarget::Parameter(parameter) => parameter.inferred_type(model),
GotoTarget::Alias(alias) => alias.inferred_type(model),
GotoTarget::ExceptVariable(except) => except.inferred_type(model),
GotoTarget::KeywordArgument(argument) => {
// TODO: Pyright resolves the declared type of the matching parameter. This seems more accurate
// than using the inferred value.
argument.value.inferred_type(model)
}
// TODO: Support identifier targets
GotoTarget::PatternMatchRest(_)
| GotoTarget::PatternKeywordArgument(_)
| GotoTarget::PatternMatchStarName(_)
| GotoTarget::PatternMatchAsName(_)
| GotoTarget::ImportedModule(_)
| GotoTarget::TypeParamTypeVarName(_)
| GotoTarget::TypeParamParamSpecName(_)
| GotoTarget::TypeParamTypeVarTupleName(_)
| GotoTarget::NonLocal { .. }
| GotoTarget::Globals { .. } => return None,
};
Some(ty)
}
}
impl Ranged for GotoTarget<'_> {
fn range(&self) -> TextRange {
match self {
GotoTarget::Expression(expression) => expression.range(),
GotoTarget::FunctionDef(function) => function.name.range,
GotoTarget::ClassDef(class) => class.name.range,
GotoTarget::Parameter(parameter) => parameter.name.range,
GotoTarget::Alias(alias) => alias.name.range,
GotoTarget::ImportedModule(module) => module.module.as_ref().unwrap().range,
GotoTarget::ExceptVariable(except) => except.name.as_ref().unwrap().range,
GotoTarget::KeywordArgument(keyword) => keyword.arg.as_ref().unwrap().range,
GotoTarget::PatternMatchRest(rest) => rest.rest.as_ref().unwrap().range,
GotoTarget::PatternKeywordArgument(keyword) => keyword.attr.range,
GotoTarget::PatternMatchStarName(star) => star.name.as_ref().unwrap().range,
GotoTarget::PatternMatchAsName(as_name) => as_name.name.as_ref().unwrap().range,
GotoTarget::TypeParamTypeVarName(type_var) => type_var.name.range,
GotoTarget::TypeParamParamSpecName(spec) => spec.name.range,
GotoTarget::TypeParamTypeVarTupleName(tuple) => tuple.name.range,
GotoTarget::NonLocal { identifier, .. } => identifier.range,
GotoTarget::Globals { identifier, .. } => identifier.range,
}
}
}
pub(crate) fn find_goto_target(parsed: &ParsedModule, offset: TextSize) -> Option<GotoTarget> {
let token = parsed
.tokens()
.at_offset(offset)
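// `at_offset` can yield two tokens when the offset sits exactly on a token
// boundary; prefer the one that can actually be a goto target (a name or a
// literal) over punctuation.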
.max_by_key(|token| match token.kind() {
TokenKind::Name
| TokenKind::String
| TokenKind::Complex
| TokenKind::Float
| TokenKind::Int => 1,
_ => 0,
})?;
let covering_node = covering_node(parsed.syntax().into(), token.range())
.find(|node| node.is_identifier() || node.is_expression())
.ok()?;
tracing::trace!("Covering node is of kind {:?}", covering_node.node().kind());
match covering_node.node() {
AnyNodeRef::Identifier(identifier) => match covering_node.parent() {
Some(AnyNodeRef::StmtFunctionDef(function)) => Some(GotoTarget::FunctionDef(function)),
Some(AnyNodeRef::StmtClassDef(class)) => Some(GotoTarget::ClassDef(class)),
Some(AnyNodeRef::Parameter(parameter)) => Some(GotoTarget::Parameter(parameter)),
Some(AnyNodeRef::Alias(alias)) => Some(GotoTarget::Alias(alias)),
Some(AnyNodeRef::StmtImportFrom(from)) => Some(GotoTarget::ImportedModule(from)),
Some(AnyNodeRef::ExceptHandlerExceptHandler(handler)) => {
Some(GotoTarget::ExceptVariable(handler))
}
Some(AnyNodeRef::Keyword(keyword)) => Some(GotoTarget::KeywordArgument(keyword)),
Some(AnyNodeRef::PatternMatchMapping(mapping)) => {
Some(GotoTarget::PatternMatchRest(mapping))
}
Some(AnyNodeRef::PatternKeyword(keyword)) => {
Some(GotoTarget::PatternKeywordArgument(keyword))
}
Some(AnyNodeRef::PatternMatchStar(star)) => {
Some(GotoTarget::PatternMatchStarName(star))
}
Some(AnyNodeRef::PatternMatchAs(as_pattern)) => {
Some(GotoTarget::PatternMatchAsName(as_pattern))
}
Some(AnyNodeRef::TypeParamTypeVar(var)) => Some(GotoTarget::TypeParamTypeVarName(var)),
Some(AnyNodeRef::TypeParamParamSpec(bound)) => {
Some(GotoTarget::TypeParamParamSpecName(bound))
}
Some(AnyNodeRef::TypeParamTypeVarTuple(var_tuple)) => {
Some(GotoTarget::TypeParamTypeVarTupleName(var_tuple))
}
Some(AnyNodeRef::ExprAttribute(attribute)) => {
Some(GotoTarget::Expression(attribute.into()))
}
Some(AnyNodeRef::StmtNonlocal(_)) => Some(GotoTarget::NonLocal { identifier }),
Some(AnyNodeRef::StmtGlobal(_)) => Some(GotoTarget::Globals { identifier }),
None => None,
Some(parent) => {
tracing::debug!(
"Missing `GotoTarget` for identifier with parent {:?}",
parent.kind()
);
None
}
},
node => node.as_expr_ref().map(GotoTarget::Expression),
}
}
#[cfg(test)]
mod tests {
use crate::tests::{cursor_test, CursorTest, IntoDiagnostic};
use crate::{goto_type_definition, NavigationTarget};
use insta::assert_snapshot;
use ruff_db::diagnostic::{
Annotation, Diagnostic, DiagnosticId, LintName, Severity, Span, SubDiagnostic,
};
use ruff_db::files::FileRange;
use ruff_text_size::Ranged;
#[test]
fn goto_type_of_expression_with_class_type() {
let test = cursor_test(
r#"
class Test: ...
a<CURSOR>b = Test()
"#,
);
assert_snapshot!(test.goto_type_definition(), @r###"
info: lint:goto-type-definition: Type definition
--> /main.py:2:19
|
2 | class Test: ...
| ^^^^
3 |
4 | ab = Test()
|
info: Source
--> /main.py:4:13
|
2 | class Test: ...
3 |
4 | ab = Test()
| ^^
|
"###);
}
#[test]
fn goto_type_of_expression_with_function_type() {
let test = cursor_test(
r#"
def foo(a, b): ...
ab = foo
a<CURSOR>b
"#,
);
assert_snapshot!(test.goto_type_definition(), @r###"
info: lint:goto-type-definition: Type definition
--> /main.py:2:17
|
2 | def foo(a, b): ...
| ^^^
3 |
4 | ab = foo
|
info: Source
--> /main.py:6:13
|
4 | ab = foo
5 |
6 | ab
| ^^
|
"###);
}
#[test]
fn goto_type_of_expression_with_union_type() {
let test = cursor_test(
r#"
def foo(a, b): ...
def bar(a, b): ...
if random.choice():
a = foo
else:
a = bar
a<CURSOR>
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
info: lint:goto-type-definition: Type definition
--> /main.py:3:17
|
3 | def foo(a, b): ...
| ^^^
4 |
5 | def bar(a, b): ...
|
info: Source
--> /main.py:12:13
|
10 | a = bar
11 |
12 | a
| ^
|
info: lint:goto-type-definition: Type definition
--> /main.py:5:17
|
3 | def foo(a, b): ...
4 |
5 | def bar(a, b): ...
| ^^^
6 |
7 | if random.choice():
|
info: Source
--> /main.py:12:13
|
10 | a = bar
11 |
12 | a
| ^
|
");
}
#[test]
fn goto_type_of_expression_with_module() {
let mut test = cursor_test(
r#"
import lib
lib<CURSOR>
"#,
);
test.write_file("lib.py", "a = 10").unwrap();
assert_snapshot!(test.goto_type_definition(), @r"
info: lint:goto-type-definition: Type definition
--> /lib.py:1:1
|
1 | a = 10
| ^^^^^^
|
info: Source
--> /main.py:4:13
|
2 | import lib
3 |
4 | lib
| ^^^
|
");
}
#[test]
fn goto_type_of_expression_with_literal_type() {
let test = cursor_test(
r#"
a: str = "test"
a<CURSOR>
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info: lint:goto-type-definition: Type definition
--> stdlib/builtins.pyi:438:7
|
436 | def __getitem__(self, key: int, /) -> str | int | None: ...
437 |
438 | class str(Sequence[str]):
| ^^^
439 | @overload
440 | def __new__(cls, object: object = ...) -> Self: ...
|
info: Source
--> /main.py:4:13
|
2 | a: str = "test"
3 |
4 | a
| ^
|
"#);
}
#[test]
fn goto_type_of_expression_with_literal_node() {
let test = cursor_test(
r#"
a: str = "te<CURSOR>st"
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info: lint:goto-type-definition: Type definition
--> stdlib/builtins.pyi:438:7
|
436 | def __getitem__(self, key: int, /) -> str | int | None: ...
437 |
438 | class str(Sequence[str]):
| ^^^
439 | @overload
440 | def __new__(cls, object: object = ...) -> Self: ...
|
info: Source
--> /main.py:2:22
|
2 | a: str = "test"
| ^^^^^^
|
"#);
}
#[test]
fn goto_type_of_expression_with_type_var_type() {
let test = cursor_test(
r#"
type Alias[T: int = bool] = list[T<CURSOR>]
"#,
);
assert_snapshot!(test.goto_type_definition(), @r###"
info: lint:goto-type-definition: Type definition
--> /main.py:2:24
|
2 | type Alias[T: int = bool] = list[T]
| ^
|
info: Source
--> /main.py:2:46
|
2 | type Alias[T: int = bool] = list[T]
| ^
|
"###);
}
#[test]
fn goto_type_of_expression_with_type_param_spec() {
let test = cursor_test(
r#"
type Alias[**P = [int, str]] = Callable[P<CURSOR>, int]
"#,
);
// TODO: Goto type definition currently doesn't work for type param specs
// because the inference doesn't support them yet.
// This snapshot should show a single target pointing to `P`.
assert_snapshot!(test.goto_type_definition(), @"No type definitions found");
}
#[test]
fn goto_type_of_expression_with_type_var_tuple() {
let test = cursor_test(
r#"
type Alias[*Ts = ()] = tuple[*Ts<CURSOR>]
"#,
);
// TODO: Goto type definition currently doesn't work for type var tuples
// because the inference doesn't support them yet.
// This snapshot should show a single target pointing to `Ts`.
assert_snapshot!(test.goto_type_definition(), @"No type definitions found");
}
#[test]
fn goto_type_on_keyword_argument() {
let test = cursor_test(
r#"
def test(a: str): ...
test(a<CURSOR>= "123")
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info: lint:goto-type-definition: Type definition
--> stdlib/builtins.pyi:438:7
|
436 | def __getitem__(self, key: int, /) -> str | int | None: ...
437 |
438 | class str(Sequence[str]):
| ^^^
439 | @overload
440 | def __new__(cls, object: object = ...) -> Self: ...
|
info: Source
--> /main.py:4:18
|
2 | def test(a: str): ...
3 |
4 | test(a= "123")
| ^
|
"#);
}
#[test]
fn goto_type_on_incorrectly_typed_keyword_argument() {
let test = cursor_test(
r#"
def test(a: str): ...
test(a<CURSOR>= 123)
"#,
);
// TODO: This should jump to `str` and not `int` because
// the keyword is typed as a string. It's only the passed argument that
// is an `int`. Navigating to `str` would match Pyright's behavior.
assert_snapshot!(test.goto_type_definition(), @r"
info: lint:goto-type-definition: Type definition
--> stdlib/builtins.pyi:231:7
|
229 | _LiteralInteger = _PositiveInteger | _NegativeInteger | Literal[0] # noqa: Y026 # TODO: Use TypeAlias once mypy bugs are fixed
230 |
231 | class int:
| ^^^
232 | @overload
233 | def __new__(cls, x: ConvertibleToInt = ..., /) -> Self: ...
|
info: Source
--> /main.py:4:18
|
2 | def test(a: str): ...
3 |
4 | test(a= 123)
| ^
|
");
}
#[test]
fn goto_type_on_kwargs() {
let test = cursor_test(
r#"
def f(name: str): ...
kwargs = { "name": "test"}
f(**kwargs<CURSOR>)
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info: lint:goto-type-definition: Type definition
--> stdlib/builtins.pyi:1086:7
|
1084 | def __class_getitem__(cls, item: Any, /) -> GenericAlias: ...
1085 |
1086 | class dict(MutableMapping[_KT, _VT]):
| ^^^^
1087 | # __init__ should be kept roughly in line with `collections.UserDict.__init__`, which has similar semantics
1088 | # Also multiprocessing.managers.SyncManager.dict()
|
info: Source
--> /main.py:6:5
|
4 | kwargs = { "name": "test"}
5 |
6 | f(**kwargs)
| ^^^^^^
|
"#);
}
#[test]
fn goto_type_of_expression_with_builtin() {
let test = cursor_test(
r#"
def foo(a: str):
a<CURSOR>
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
info: lint:goto-type-definition: Type definition
--> stdlib/builtins.pyi:438:7
|
436 | def __getitem__(self, key: int, /) -> str | int | None: ...
437 |
438 | class str(Sequence[str]):
| ^^^
439 | @overload
440 | def __new__(cls, object: object = ...) -> Self: ...
|
info: Source
--> /main.py:3:17
|
2 | def foo(a: str):
3 | a
| ^
|
");
}
#[test]
fn goto_type_definition_cursor_between_object_and_attribute() {
let test = cursor_test(
r#"
class X:
def foo(a, b): ...
x = X()
x<CURSOR>.foo()
"#,
);
assert_snapshot!(test.goto_type_definition(), @r###"
info: lint:goto-type-definition: Type definition
--> /main.py:2:19
|
2 | class X:
| ^
3 | def foo(a, b): ...
|
info: Source
--> /main.py:7:13
|
5 | x = X()
6 |
7 | x.foo()
| ^
|
"###);
}
#[test]
fn goto_between_call_arguments() {
let test = cursor_test(
r#"
def foo(a, b): ...
foo<CURSOR>()
"#,
);
assert_snapshot!(test.goto_type_definition(), @r###"
info: lint:goto-type-definition: Type definition
--> /main.py:2:17
|
2 | def foo(a, b): ...
| ^^^
3 |
4 | foo()
|
info: Source
--> /main.py:4:13
|
2 | def foo(a, b): ...
3 |
4 | foo()
| ^^^
|
"###);
}
#[test]
fn goto_type_narrowing() {
let test = cursor_test(
r#"
def foo(a: str | None, b):
if a is not None:
print(a<CURSOR>)
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
info: lint:goto-type-definition: Type definition
--> stdlib/builtins.pyi:438:7
|
436 | def __getitem__(self, key: int, /) -> str | int | None: ...
437 |
438 | class str(Sequence[str]):
| ^^^
439 | @overload
440 | def __new__(cls, object: object = ...) -> Self: ...
|
info: Source
--> /main.py:4:27
|
2 | def foo(a: str | None, b):
3 | if a is not None:
4 | print(a)
| ^
|
");
}
#[test]
fn goto_type_none() {
let test = cursor_test(
r#"
def foo(a: str | None, b):
a<CURSOR>
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
info: lint:goto-type-definition: Type definition
--> stdlib/types.pyi:671:11
|
669 | if sys.version_info >= (3, 10):
670 | @final
671 | class NoneType:
| ^^^^^^^^
672 | def __bool__(self) -> Literal[False]: ...
|
info: Source
--> /main.py:3:17
|
2 | def foo(a: str | None, b):
3 | a
| ^
|
info: lint:goto-type-definition: Type definition
--> stdlib/builtins.pyi:438:7
|
436 | def __getitem__(self, key: int, /) -> str | int | None: ...
437 |
438 | class str(Sequence[str]):
| ^^^
439 | @overload
440 | def __new__(cls, object: object = ...) -> Self: ...
|
info: Source
--> /main.py:3:17
|
2 | def foo(a: str | None, b):
3 | a
| ^
|
");
}
impl CursorTest {
fn goto_type_definition(&self) -> String {
let Some(targets) = goto_type_definition(&self.db, self.file, self.cursor_offset)
else {
return "No goto target found".to_string();
};
if targets.is_empty() {
return "No type definitions found".to_string();
}
let source = targets.range;
self.render_diagnostics(
targets
.into_iter()
.map(|target| GotoTypeDefinitionDiagnostic::new(source, &target)),
)
}
}
struct GotoTypeDefinitionDiagnostic {
source: FileRange,
target: FileRange,
}
impl GotoTypeDefinitionDiagnostic {
fn new(source: FileRange, target: &NavigationTarget) -> Self {
Self {
source,
target: FileRange::new(target.file(), target.focus_range()),
}
}
}
impl IntoDiagnostic for GotoTypeDefinitionDiagnostic {
fn into_diagnostic(self) -> Diagnostic {
let mut source = SubDiagnostic::new(Severity::Info, "Source");
source.annotate(Annotation::primary(
Span::from(self.source.file()).with_range(self.source.range()),
));
let mut main = Diagnostic::new(
DiagnosticId::Lint(LintName::of("goto-type-definition")),
Severity::Info,
"Type definition".to_string(),
);
main.annotate(Annotation::primary(
Span::from(self.target.file()).with_range(self.target.range()),
));
main.sub(source);
main
}
}
}

View File

@@ -1,602 +0,0 @@
use crate::goto::{find_goto_target, GotoTarget};
use crate::{Db, MarkupKind, RangedValue};
use red_knot_python_semantic::types::Type;
use red_knot_python_semantic::SemanticModel;
use ruff_db::files::{File, FileRange};
use ruff_db::parsed::parsed_module;
use ruff_text_size::{Ranged, TextSize};
use std::fmt;
use std::fmt::Formatter;
pub fn hover(db: &dyn Db, file: File, offset: TextSize) -> Option<RangedValue<Hover>> {
let parsed = parsed_module(db.upcast(), file);
let goto_target = find_goto_target(parsed, offset)?;
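// Hovering a literal would only echo the value that is already visible in
// the source, so bail out (see the `hover_literal_*` tests below).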
if let GotoTarget::Expression(expr) = goto_target {
if expr.is_literal_expr() {
return None;
}
}
let model = SemanticModel::new(db.upcast(), file);
let ty = goto_target.inferred_type(&model)?;
tracing::debug!(
"Inferred type of covering node is {}",
ty.display(db.upcast())
);
// TODO: Add documentation of the symbol (not the type's definition).
// TODO: Render the symbol's signature instead of just its type.
let contents = vec![HoverContent::Type(ty)];
Some(RangedValue {
range: FileRange::new(file, goto_target.range()),
value: Hover { contents },
})
}
pub struct Hover<'db> {
contents: Vec<HoverContent<'db>>,
}
impl<'db> Hover<'db> {
/// Renders the hover to a string using the specified markup kind.
pub const fn display<'a>(&'a self, db: &'a dyn Db, kind: MarkupKind) -> DisplayHover<'a> {
DisplayHover {
db,
hover: self,
kind,
}
}
fn iter(&self) -> std::slice::Iter<'_, HoverContent<'db>> {
self.contents.iter()
}
}
impl<'db> IntoIterator for Hover<'db> {
type Item = HoverContent<'db>;
type IntoIter = std::vec::IntoIter<Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.contents.into_iter()
}
}
impl<'a, 'db> IntoIterator for &'a Hover<'db> {
type Item = &'a HoverContent<'db>;
type IntoIter = std::slice::Iter<'a, HoverContent<'db>>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
pub struct DisplayHover<'a> {
db: &'a dyn Db,
hover: &'a Hover<'a>,
kind: MarkupKind,
}
impl fmt::Display for DisplayHover<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
let mut first = true;
for content in &self.hover.contents {
if !first {
self.kind.horizontal_line().fmt(f)?;
}
content.display(self.db, self.kind).fmt(f)?;
first = false;
}
Ok(())
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum HoverContent<'db> {
Type(Type<'db>),
}
impl<'db> HoverContent<'db> {
fn display(&self, db: &'db dyn Db, kind: MarkupKind) -> DisplayHoverContent<'_, 'db> {
DisplayHoverContent {
db,
content: self,
kind,
}
}
}
pub(crate) struct DisplayHoverContent<'a, 'db> {
db: &'db dyn Db,
content: &'a HoverContent<'db>,
kind: MarkupKind,
}
impl fmt::Display for DisplayHoverContent<'_, '_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.content {
HoverContent::Type(ty) => self
.kind
.fenced_code_block(ty.display(self.db.upcast()), "text")
.fmt(f),
}
}
}
#[cfg(test)]
mod tests {
use crate::tests::{cursor_test, CursorTest};
use crate::{hover, MarkupKind};
use insta::assert_snapshot;
use ruff_db::diagnostic::{
Annotation, Diagnostic, DiagnosticFormat, DiagnosticId, DisplayDiagnosticConfig, LintName,
Severity, Span,
};
use ruff_text_size::{Ranged, TextRange};
#[test]
fn hover_basic() {
let test = cursor_test(
r#"
a = 10
a<CURSOR>
"#,
);
assert_snapshot!(test.hover(), @r"
Literal[10]
---------------------------------------------
```text
Literal[10]
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:4:9
|
2 | a = 10
3 |
4 | a
| ^- Cursor offset
| |
| source
|
");
}
#[test]
fn hover_member() {
let test = cursor_test(
r#"
class Foo:
a: int = 10
def __init__(a: int, b: str):
self.a = a
self.b: str = b
foo = Foo()
foo.<CURSOR>a
"#,
);
assert_snapshot!(test.hover(), @r"
int
---------------------------------------------
```text
int
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:10:9
|
9 | foo = Foo()
10 | foo.a
| ^^^^-
| | |
| | Cursor offset
| source
|
");
}
#[test]
fn hover_function_typed_variable() {
let test = cursor_test(
r#"
def foo(a, b): ...
foo<CURSOR>
"#,
);
assert_snapshot!(test.hover(), @r###"
def foo(a, b) -> Unknown
---------------------------------------------
```text
def foo(a, b) -> Unknown
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:4:13
|
2 | def foo(a, b): ...
3 |
4 | foo
| ^^^- Cursor offset
| |
| source
|
"###);
}
#[test]
fn hover_binary_expression() {
let test = cursor_test(
r#"
def foo(a: int, b: int, c: int):
a + b ==<CURSOR> c
"#,
);
assert_snapshot!(test.hover(), @r"
bool
---------------------------------------------
```text
bool
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:3:17
|
2 | def foo(a: int, b: int, c: int):
3 | a + b == c
| ^^^^^^^^-^
| | |
| | Cursor offset
| source
|
");
}
#[test]
fn hover_keyword_parameter() {
let test = cursor_test(
r#"
def test(a: int): ...
test(a<CURSOR>= 123)
"#,
);
// TODO: This should reveal `int` because the user hovers over the parameter and not the value.
assert_snapshot!(test.hover(), @r"
Literal[123]
---------------------------------------------
```text
Literal[123]
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:4:18
|
2 | def test(a: int): ...
3 |
4 | test(a= 123)
| ^- Cursor offset
| |
| source
|
");
}
#[test]
fn hover_union() {
let test = cursor_test(
r#"
def foo(a, b): ...
def bar(a, b): ...
if random.choice([True, False]):
a = foo
else:
a = bar
a<CURSOR>
"#,
);
assert_snapshot!(test.hover(), @r###"
(def foo(a, b) -> Unknown) | (def bar(a, b) -> Unknown)
---------------------------------------------
```text
(def foo(a, b) -> Unknown) | (def bar(a, b) -> Unknown)
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:12:13
|
10 | a = bar
11 |
12 | a
| ^- Cursor offset
| |
| source
|
"###);
}
#[test]
fn hover_module() {
let mut test = cursor_test(
r#"
import lib
li<CURSOR>b
"#,
);
test.write_file("lib.py", "a = 10").unwrap();
assert_snapshot!(test.hover(), @r"
<module 'lib'>
---------------------------------------------
```text
<module 'lib'>
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:4:13
|
2 | import lib
3 |
4 | lib
| ^^-
| | |
| | Cursor offset
| source
|
");
}
#[test]
fn hover_type_of_expression_with_type_var_type() {
let test = cursor_test(
r#"
type Alias[T: int = bool] = list[T<CURSOR>]
"#,
);
assert_snapshot!(test.hover(), @r"
T
---------------------------------------------
```text
T
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:2:46
|
2 | type Alias[T: int = bool] = list[T]
| ^- Cursor offset
| |
| source
|
");
}
#[test]
fn hover_type_of_expression_with_type_param_spec() {
let test = cursor_test(
r#"
type Alias[**P = [int, str]] = Callable[P<CURSOR>, int]
"#,
);
assert_snapshot!(test.hover(), @r"
@Todo
---------------------------------------------
```text
@Todo
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:2:53
|
2 | type Alias[**P = [int, str]] = Callable[P, int]
| ^- Cursor offset
| |
| source
|
");
}
#[test]
fn hover_type_of_expression_with_type_var_tuple() {
let test = cursor_test(
r#"
type Alias[*Ts = ()] = tuple[*Ts<CURSOR>]
"#,
);
assert_snapshot!(test.hover(), @r"
@Todo
---------------------------------------------
```text
@Todo
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:2:43
|
2 | type Alias[*Ts = ()] = tuple[*Ts]
| ^^- Cursor offset
| |
| source
|
");
}
#[test]
fn hover_class_member_declaration() {
let test = cursor_test(
r#"
class Foo:
a<CURSOR>: int
"#,
);
// TODO: This should be `int` and not `Never`; see https://github.com/astral-sh/ruff/issues/17122
assert_snapshot!(test.hover(), @r"
Never
---------------------------------------------
```text
Never
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:3:13
|
2 | class Foo:
3 | a: int
| ^- Cursor offset
| |
| source
|
");
}
#[test]
fn hover_type_narrowing() {
let test = cursor_test(
r#"
def foo(a: str | None, b):
if a is not None:
print(a<CURSOR>)
"#,
);
assert_snapshot!(test.hover(), @r"
str
---------------------------------------------
```text
str
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:4:27
|
2 | def foo(a: str | None, b):
3 | if a is not None:
4 | print(a)
| ^- Cursor offset
| |
| source
|
");
}
#[test]
fn hover_whitespace() {
let test = cursor_test(
r#"
class C:
<CURSOR>
foo: str = 'bar'
"#,
);
assert_snapshot!(test.hover(), @"Hover provided no content");
}
#[test]
fn hover_literal_int() {
let test = cursor_test(
r#"
print(
0 + 1<CURSOR>
)
"#,
);
assert_snapshot!(test.hover(), @"Hover provided no content");
}
#[test]
fn hover_literal_ellipsis() {
let test = cursor_test(
r#"
print(
.<CURSOR>..
)
"#,
);
assert_snapshot!(test.hover(), @"Hover provided no content");
}
#[test]
fn hover_docstring() {
let test = cursor_test(
r#"
def f():
"""Lorem ipsum dolor sit amet.<CURSOR>"""
"#,
);
assert_snapshot!(test.hover(), @"Hover provided no content");
}
impl CursorTest {
fn hover(&self) -> String {
use std::fmt::Write;
let Some(hover) = hover(&self.db, self.file, self.cursor_offset) else {
return "Hover provided no content".to_string();
};
let source = hover.range;
let mut buf = String::new();
write!(
&mut buf,
"{plaintext}{line}{markdown}{line}",
plaintext = hover.display(&self.db, MarkupKind::PlainText),
line = MarkupKind::PlainText.horizontal_line(),
markdown = hover.display(&self.db, MarkupKind::Markdown),
)
.unwrap();
let config = DisplayDiagnosticConfig::default()
.color(false)
.format(DiagnosticFormat::Full);
let mut diagnostic = Diagnostic::new(
DiagnosticId::Lint(LintName::of("hover")),
Severity::Info,
"Hovered content is",
);
diagnostic.annotate(
Annotation::primary(Span::from(source.file()).with_range(source.range()))
.message("source"),
);
diagnostic.annotate(
Annotation::secondary(
Span::from(source.file()).with_range(TextRange::empty(self.cursor_offset)),
)
.message("Cursor offset"),
);
write!(buf, "{}", diagnostic.display(&self.db, &config)).unwrap();
buf
}
}
}

View File

@@ -1,279 +0,0 @@
use crate::Db;
use red_knot_python_semantic::types::Type;
use red_knot_python_semantic::{HasType, SemanticModel};
use ruff_db::files::File;
use ruff_db::parsed::parsed_module;
use ruff_python_ast::visitor::source_order::{self, SourceOrderVisitor, TraversalSignal};
use ruff_python_ast::{AnyNodeRef, Expr, Stmt};
use ruff_text_size::{Ranged, TextRange, TextSize};
use std::fmt;
use std::fmt::Formatter;
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct InlayHint<'db> {
pub position: TextSize,
pub content: InlayHintContent<'db>,
}
impl<'db> InlayHint<'db> {
pub const fn display(&self, db: &'db dyn Db) -> DisplayInlayHint<'_, 'db> {
self.content.display(db)
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum InlayHintContent<'db> {
Type(Type<'db>),
ReturnType(Type<'db>),
}
impl<'db> InlayHintContent<'db> {
pub const fn display(&self, db: &'db dyn Db) -> DisplayInlayHint<'_, 'db> {
DisplayInlayHint { db, hint: self }
}
}
pub struct DisplayInlayHint<'a, 'db> {
db: &'db dyn Db,
hint: &'a InlayHintContent<'db>,
}
impl fmt::Display for DisplayInlayHint<'_, '_> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self.hint {
InlayHintContent::Type(ty) => {
write!(f, ": {}", ty.display(self.db.upcast()))
}
InlayHintContent::ReturnType(ty) => {
write!(f, " -> {}", ty.display(self.db.upcast()))
}
}
}
}
pub fn inlay_hints(db: &dyn Db, file: File, range: TextRange) -> Vec<InlayHint<'_>> {
let mut visitor = InlayHintVisitor::new(db, file, range);
let ast = parsed_module(db.upcast(), file);
visitor.visit_body(ast.suite());
visitor.hints
}
struct InlayHintVisitor<'db> {
model: SemanticModel<'db>,
hints: Vec<InlayHint<'db>>,
in_assignment: bool,
range: TextRange,
}
impl<'db> InlayHintVisitor<'db> {
fn new(db: &'db dyn Db, file: File, range: TextRange) -> Self {
Self {
model: SemanticModel::new(db.upcast(), file),
hints: Vec::new(),
in_assignment: false,
range,
}
}
fn add_type_hint(&mut self, position: TextSize, ty: Type<'db>) {
self.hints.push(InlayHint {
position,
content: InlayHintContent::Type(ty),
});
}
}
impl SourceOrderVisitor<'_> for InlayHintVisitor<'_> {
fn enter_node(&mut self, node: AnyNodeRef<'_>) -> TraversalSignal {
if self.range.intersect(node.range()).is_some() {
TraversalSignal::Traverse
} else {
TraversalSignal::Skip
}
}
fn visit_stmt(&mut self, stmt: &Stmt) {
let node = AnyNodeRef::from(stmt);
if !self.enter_node(node).is_traverse() {
return;
}
match stmt {
Stmt::Assign(assign) => {
self.in_assignment = true;
for target in &assign.targets {
self.visit_expr(target);
}
self.in_assignment = false;
return;
}
// TODO
Stmt::FunctionDef(_) => {}
Stmt::For(_) => {}
Stmt::Expr(_) => {
// Don't traverse into expression statements because we don't show any hints.
return;
}
_ => {}
}
source_order::walk_stmt(self, stmt);
}
fn visit_expr(&mut self, expr: &'_ Expr) {
if !self.in_assignment {
return;
}
match expr {
Expr::Name(name) => {
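// Only hint names in store context, i.e. the assignment targets
// themselves; loaded names don't get a hint.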
if name.ctx.is_store() {
let ty = expr.inferred_type(&self.model);
self.add_type_hint(expr.range().end(), ty);
}
}
_ => {
source_order::walk_expr(self, expr);
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use insta::assert_snapshot;
use ruff_db::{
files::{system_path_to_file, File},
source::source_text,
};
use ruff_text_size::TextSize;
use crate::db::tests::TestDb;
use red_knot_python_semantic::{
Program, ProgramSettings, PythonPath, PythonPlatform, SearchPathSettings,
};
use ruff_db::system::{DbWithWritableSystem, SystemPathBuf};
use ruff_python_ast::PythonVersion;
pub(super) fn inlay_hint_test(source: &str) -> InlayHintTest {
const START: &str = "<START>";
const END: &str = "<END>";
let mut db = TestDb::new();
let start = source.find(START);
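// `<START>` is assumed to precede `<END>`; once both markers are stripped
// below, the END offset must shift back by the START marker's length.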
let end = source
.find(END)
.map(|x| if start.is_some() { x - START.len() } else { x })
.unwrap_or(source.len());
let range = TextRange::new(
TextSize::try_from(start.unwrap_or_default()).unwrap(),
TextSize::try_from(end).unwrap(),
);
let source = source.replace(START, "");
let source = source.replace(END, "");
db.write_file("main.py", source)
.expect("write to memory file system to be successful");
let file = system_path_to_file(&db, "main.py").expect("newly written file to exist");
Program::from_settings(
&db,
ProgramSettings {
python_version: PythonVersion::latest(),
python_platform: PythonPlatform::default(),
search_paths: SearchPathSettings {
extra_paths: vec![],
src_roots: vec![SystemPathBuf::from("/")],
custom_typeshed: None,
python_path: PythonPath::KnownSitePackages(vec![]),
},
},
)
.expect("Default settings to be valid");
InlayHintTest { db, file, range }
}
pub(super) struct InlayHintTest {
pub(super) db: TestDb,
pub(super) file: File,
pub(super) range: TextRange,
}
impl InlayHintTest {
fn inlay_hints(&self) -> String {
let hints = inlay_hints(&self.db, self.file, self.range);
let mut buf = source_text(&self.db, self.file).as_str().to_string();
let mut offset = 0;
for hint in hints {
let end_position = (hint.position.to_u32() as usize) + offset;
let hint_str = format!("[{}]", hint.display(&self.db));
buf.insert_str(end_position, &hint_str);
offset += hint_str.len();
}
buf
}
}
#[test]
fn test_assign_statement() {
let test = inlay_hint_test("x = 1");
assert_snapshot!(test.inlay_hints(), @r"
x[: Literal[1]] = 1
");
}
#[test]
fn test_tuple_assignment() {
let test = inlay_hint_test("x, y = (1, 'abc')");
assert_snapshot!(test.inlay_hints(), @r#"
x[: Literal[1]], y[: Literal["abc"]] = (1, 'abc')
"#);
}
#[test]
fn test_nested_tuple_assignment() {
let test = inlay_hint_test("x, (y, z) = (1, ('abc', 2))");
assert_snapshot!(test.inlay_hints(), @r#"
x[: Literal[1]], (y[: Literal["abc"]], z[: Literal[2]]) = (1, ('abc', 2))
"#);
}
#[test]
fn test_assign_statement_with_type_annotation() {
let test = inlay_hint_test("x: int = 1");
assert_snapshot!(test.inlay_hints(), @r"
x: int = 1
");
}
#[test]
fn test_assign_statement_out_of_range() {
let test = inlay_hint_test("<START>x = 1<END>\ny = 2");
assert_snapshot!(test.inlay_hints(), @r"
x[: Literal[1]] = 1
y = 2
");
}
}

View File

@@ -1,296 +0,0 @@
mod db;
mod find_node;
mod goto;
mod hover;
mod inlay_hints;
mod markup;
pub use db::Db;
pub use goto::goto_type_definition;
pub use hover::hover;
pub use inlay_hints::inlay_hints;
pub use markup::MarkupKind;
use rustc_hash::FxHashSet;
use std::ops::{Deref, DerefMut};
use red_knot_python_semantic::types::{Type, TypeDefinition};
use ruff_db::files::{File, FileRange};
use ruff_text_size::{Ranged, TextRange};
/// Information associated with a text range.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct RangedValue<T> {
pub range: FileRange,
pub value: T,
}
impl<T> RangedValue<T> {
pub fn file_range(&self) -> FileRange {
self.range
}
}
impl<T> Deref for RangedValue<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.value
}
}
impl<T> DerefMut for RangedValue<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.value
}
}
impl<T> IntoIterator for RangedValue<T>
where
T: IntoIterator,
{
type Item = T::Item;
type IntoIter = T::IntoIter;
fn into_iter(self) -> Self::IntoIter {
self.value.into_iter()
}
}
/// Target to which the editor can navigate to.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct NavigationTarget {
file: File,
/// The range that should be focused when navigating to the target.
///
/// This is typically not the full range of the node. For example, it's the range of the class's name in a class definition.
///
/// The `focus_range` must be fully covered by `full_range`.
focus_range: TextRange,
/// The range covering the entire target.
full_range: TextRange,
}
impl NavigationTarget {
pub fn file(&self) -> File {
self.file
}
pub fn focus_range(&self) -> TextRange {
self.focus_range
}
pub fn full_range(&self) -> TextRange {
self.full_range
}
}
#[derive(Debug, Clone)]
pub struct NavigationTargets(smallvec::SmallVec<[NavigationTarget; 1]>);
impl NavigationTargets {
fn single(target: NavigationTarget) -> Self {
Self(smallvec::smallvec![target])
}
fn empty() -> Self {
Self(smallvec::SmallVec::new())
}
fn unique(targets: impl IntoIterator<Item = NavigationTarget>) -> Self {
let unique: FxHashSet<_> = targets.into_iter().collect();
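// The hash set deduplicates the targets; the sort below restores a
// deterministic order, since hash sets iterate in arbitrary order.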
if unique.is_empty() {
Self::empty()
} else {
let mut targets = unique.into_iter().collect::<Vec<_>>();
targets.sort_by_key(|target| (target.file, target.focus_range.start()));
Self(targets.into())
}
}
fn iter(&self) -> std::slice::Iter<'_, NavigationTarget> {
self.0.iter()
}
#[cfg(test)]
fn is_empty(&self) -> bool {
self.0.is_empty()
}
}
impl IntoIterator for NavigationTargets {
type Item = NavigationTarget;
type IntoIter = smallvec::IntoIter<[NavigationTarget; 1]>;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
impl<'a> IntoIterator for &'a NavigationTargets {
type Item = &'a NavigationTarget;
type IntoIter = std::slice::Iter<'a, NavigationTarget>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl FromIterator<NavigationTarget> for NavigationTargets {
fn from_iter<T: IntoIterator<Item = NavigationTarget>>(iter: T) -> Self {
Self::unique(iter)
}
}
pub trait HasNavigationTargets {
fn navigation_targets(&self, db: &dyn Db) -> NavigationTargets;
}
impl HasNavigationTargets for Type<'_> {
fn navigation_targets(&self, db: &dyn Db) -> NavigationTargets {
match self {
Type::Union(union) => union
.iter(db.upcast())
.flat_map(|target| target.navigation_targets(db))
.collect(),
Type::Intersection(intersection) => {
// Only consider the positive elements because the negative elements are mainly from narrowing constraints.
let mut targets = intersection
.iter_positive(db.upcast())
.filter(|ty| !ty.is_unknown());
let Some(first) = targets.next() else {
return NavigationTargets::empty();
};
match targets.next() {
Some(_) => {
// If there are multiple types in the intersection, we can't navigate to a single one
// because the type is the intersection of all those types.
NavigationTargets::empty()
}
None => first.navigation_targets(db),
}
}
ty => ty
.definition(db.upcast())
.map(|definition| definition.navigation_targets(db))
.unwrap_or_else(NavigationTargets::empty),
}
}
}
impl HasNavigationTargets for TypeDefinition<'_> {
fn navigation_targets(&self, db: &dyn Db) -> NavigationTargets {
let full_range = self.full_range(db.upcast());
NavigationTargets::single(NavigationTarget {
file: full_range.file(),
focus_range: self.focus_range(db.upcast()).unwrap_or(full_range).range(),
full_range: full_range.range(),
})
}
}
#[cfg(test)]
mod tests {
use crate::db::tests::TestDb;
use insta::internals::SettingsBindDropGuard;
use red_knot_python_semantic::{
Program, ProgramSettings, PythonPath, PythonPlatform, SearchPathSettings,
};
use ruff_db::diagnostic::{Diagnostic, DiagnosticFormat, DisplayDiagnosticConfig};
use ruff_db::files::{system_path_to_file, File};
use ruff_db::system::{DbWithWritableSystem, SystemPath, SystemPathBuf};
use ruff_python_ast::PythonVersion;
use ruff_text_size::TextSize;
pub(super) fn cursor_test(source: &str) -> CursorTest {
let mut db = TestDb::new();
let cursor_offset = source.find("<CURSOR>").expect(
"`source` should contain a `<CURSOR>` marker, indicating the position of the cursor.",
);
let mut content = source[..cursor_offset].to_string();
content.push_str(&source[cursor_offset + "<CURSOR>".len()..]);
db.write_file("main.py", &content)
.expect("write to memory file system to be successful");
let file = system_path_to_file(&db, "main.py").expect("newly written file to exist");
Program::from_settings(
&db,
ProgramSettings {
python_version: PythonVersion::latest(),
python_platform: PythonPlatform::default(),
search_paths: SearchPathSettings {
extra_paths: vec![],
src_roots: vec![SystemPathBuf::from("/")],
custom_typeshed: None,
python_path: PythonPath::KnownSitePackages(vec![]),
},
},
)
.expect("Default settings to be valid");
let mut insta_settings = insta::Settings::clone_current();
insta_settings.add_filter(r#"\\(\w\w|\s|\.|")"#, "/$1");
// Filter out TODO types because they are different between debug and release builds.
insta_settings.add_filter(r"@Todo\(.+\)", "@Todo");
let insta_settings_guard = insta_settings.bind_to_scope();
CursorTest {
db,
cursor_offset: TextSize::try_from(cursor_offset)
.expect("source to be smaller than 4GB"),
file,
_insta_settings_guard: insta_settings_guard,
}
}
pub(super) struct CursorTest {
pub(super) db: TestDb,
pub(super) cursor_offset: TextSize,
pub(super) file: File,
_insta_settings_guard: SettingsBindDropGuard,
}
impl CursorTest {
pub(super) fn write_file(
&mut self,
path: impl AsRef<SystemPath>,
content: &str,
) -> std::io::Result<()> {
self.db.write_file(path, content)
}
pub(super) fn render_diagnostics<I, D>(&self, diagnostics: I) -> String
where
I: IntoIterator<Item = D>,
D: IntoDiagnostic,
{
use std::fmt::Write;
let mut buf = String::new();
let config = DisplayDiagnosticConfig::default()
.color(false)
.format(DiagnosticFormat::Full);
for diagnostic in diagnostics {
let diag = diagnostic.into_diagnostic();
write!(buf, "{}", diag.display(&self.db, &config)).unwrap();
}
buf
}
}
pub(super) trait IntoDiagnostic {
fn into_diagnostic(self) -> Diagnostic;
}
}

View File

@@ -1,66 +0,0 @@
use std::fmt;
use std::fmt::Formatter;
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum MarkupKind {
PlainText,
Markdown,
}
impl MarkupKind {
pub(crate) const fn fenced_code_block<T>(self, code: T, language: &str) -> FencedCodeBlock<T>
where
T: fmt::Display,
{
FencedCodeBlock {
language,
code,
kind: self,
}
}
pub(crate) const fn horizontal_line(self) -> HorizontalLine {
HorizontalLine { kind: self }
}
}
pub(crate) struct FencedCodeBlock<'a, T> {
language: &'a str,
code: T,
kind: MarkupKind,
}
impl<T> fmt::Display for FencedCodeBlock<'_, T>
where
T: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.kind {
MarkupKind::PlainText => self.code.fmt(f),
MarkupKind::Markdown => write!(
f,
"```{language}\n{code}\n```",
language = self.language,
code = self.code
),
}
}
}
#[derive(Debug, Copy, Clone)]
pub(crate) struct HorizontalLine {
kind: MarkupKind,
}
impl fmt::Display for HorizontalLine {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self.kind {
MarkupKind::PlainText => {
f.write_str("\n---------------------------------------------\n")
}
MarkupKind::Markdown => {
write!(f, "\n---\n")
}
}
}
}
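// A hedged usage sketch, not part of the original file: the same code renders
// verbatim in plain text and wrapped in a fence for Markdown.
//
// assert_eq!(MarkupKind::PlainText.fenced_code_block("int", "text").to_string(), "int");
// assert_eq!(
//     MarkupKind::Markdown.fenced_code_block("int", "text").to_string(),
//     "```text\nint\n```",
// );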

View File

@@ -1,56 +0,0 @@
[package]
name = "red_knot_project"
version = "0.0.0"
edition.workspace = true
rust-version.workspace = true
homepage.workspace = true
documentation.workspace = true
repository.workspace = true
authors.workspace = true
license.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
ruff_cache = { workspace = true }
ruff_db = { workspace = true, features = ["cache", "serde"] }
ruff_macros = { workspace = true }
ruff_python_ast = { workspace = true, features = ["serde"] }
ruff_python_formatter = { workspace = true, optional = true }
ruff_text_size = { workspace = true }
red_knot_ide = { workspace = true }
red_knot_python_semantic = { workspace = true, features = ["serde"] }
red_knot_vendored = { workspace = true }
anyhow = { workspace = true }
crossbeam = { workspace = true }
glob = { workspace = true }
notify = { workspace = true }
pep440_rs = { workspace = true, features = ["version-ranges"] }
rayon = { workspace = true }
rustc-hash = { workspace = true }
salsa = { workspace = true }
schemars = { workspace = true, optional = true }
serde = { workspace = true }
thiserror = { workspace = true }
toml = { workspace = true }
tracing = { workspace = true }
[dev-dependencies]
ruff_db = { workspace = true, features = ["testing"] }
glob = { workspace = true }
insta = { workspace = true, features = ["redactions", "ron"] }
[features]
default = ["zstd"]
deflate = ["red_knot_vendored/deflate"]
schemars = [
"dep:schemars",
"ruff_db/schemars",
"red_knot_python_semantic/schemars",
]
zstd = ["red_knot_vendored/zstd"]
format = ["ruff_python_formatter"]
[lints]
workspace = true

View File

@@ -1 +0,0 @@
../../../../ruff_python_parser/resources/invalid/statements/invalid_assignment_targets.py

View File

@@ -1,2 +0,0 @@
x = 0
(x := x + 1)

View File

@@ -1 +0,0 @@
../../../../ruff_python_parser/resources/invalid/expressions/named/invalid_target.py

View File

@@ -1 +0,0 @@
../../../../ruff_python_parser/resources/invalid/statements/invalid_augmented_assignment_target.py

View File

@@ -1,3 +0,0 @@
x = 0
if x := x + 1:
pass

View File

@@ -1,6 +0,0 @@
while True:
class A:
x: int
break

View File

@@ -1,6 +0,0 @@
while True:
def b():
x: int
break

View File

@@ -1,6 +0,0 @@
for _ in range(1):
class A:
x: int
break

View File

@@ -1,6 +0,0 @@
for _ in range(1):
def b():
x: int
break

View File

@@ -1,11 +0,0 @@
def bool(x) -> bool:
return True
class MyClass: ...
def MyClass() -> MyClass: ...
def x(self) -> x: ...

View File

@@ -1,8 +0,0 @@
# Regression test for https://github.com/astral-sh/ruff/issues/14115
#
# This is invalid syntax, but should not lead to a crash.
def f() -> *int: ...
f()

View File

@@ -1,2 +0,0 @@
def bool(x=bool):
return x

View File

@@ -1,2 +0,0 @@
with foo() as self.bar:
pass

View File

@@ -1 +0,0 @@
../../../../ruff_notebook/resources/test/fixtures/jupyter/unused_variable.ipynb

View File

@@ -1,3 +0,0 @@
match x:
case [1, 0] if x := x[:0]:
y = 1

View File

@@ -1,3 +0,0 @@
match some_int:
case x:=2:
pass

View File

@@ -1,17 +0,0 @@
"""
Regression test that makes sure we do not short-circuit here after
determining that the overall type will be `Never` and still infer
a type for the second tuple element `2`.
Relevant discussion:
https://github.com/astral-sh/ruff/pull/15218#discussion_r1900811073
"""
from typing_extensions import Never
def never() -> Never:
return never()
(never(), 2)

View File

@@ -1,2 +0,0 @@
type foo = int
type ListOrSet[T] = list[T] | set[T]

View File

@@ -1 +0,0 @@
../../../../ruff_python_parser/resources/inline/err/type_param_invalid_bound_expr.py

View File

@@ -1,3 +0,0 @@
msg = "hello"
f"{msg!r:>{10+10}}"

View File

@@ -1 +0,0 @@
x: f"Literal[{1 + 2}]" = 3

View File

@@ -1,3 +0,0 @@
from typing import Union
x: Union[int, str] = 1

View File

@@ -1 +0,0 @@
../../../../ruff_python_parser/resources/inline/err/ann_assign_stmt_invalid_target.py

View File

@@ -1,15 +0,0 @@
# Regression test for https://github.com/astral-sh/ruff/issues/17215
# panicked in commit 1a6a10b30
# error message:
# dependency graph cycle querying all_narrowing_constraints_for_expression(Id(8591))
def f(a: A, b: B, c: C):
    unknown_a: UA = make_unknown()
    unknown_b: UB = make_unknown()
    unknown_c: UC = make_unknown()
    unknown_d: UD = make_unknown()

    if unknown_a and unknown_b:
        if unknown_c:
            if unknown_d:
                return a, b, c

View File

@@ -1,22 +0,0 @@
# Regression test for https://github.com/astral-sh/ruff/issues/17215
# panicked in commit 1a6a10b30
# error message:
# dependency graph cycle querying all_negative_narrowing_constraints_for_expression(Id(859f))
def f(f1: bool, f2: bool, f3: bool, f4: bool):
    o1: UnknownClass = make_o()
    o2: UnknownClass = make_o()
    o3: UnknownClass = make_o()
    o4: UnknownClass = make_o()

    if f1 and f2 and f3 and f4:
        if o1 == o2:
            return None
        if o2 == o3:
            return None
        if o3 == o4:
            return None
        if o4 == o1:
            return None

    return o1, o2, o3, o4

View File

@@ -1,191 +0,0 @@
use std::{collections::HashMap, hash::BuildHasher};
use red_knot_python_semantic::{PythonPath, PythonPlatform};
use ruff_db::system::SystemPathBuf;
use ruff_python_ast::PythonVersion;
/// Combine two values, preferring the values in `self`.
///
/// The logic should follow that of Cargo's `config.toml`:
///
/// > If a key is specified in multiple config files, the values will get merged together.
/// > Numbers, strings, and booleans will use the value in the deeper config directory taking
/// > precedence over ancestor directories, where the home directory is the lowest priority.
/// > Arrays will be joined together with higher precedence items being placed later in the
/// > merged array.
///
/// ## uv Compatibility
///
/// The merging behavior differs from uv in that values with higher precedence in arrays
/// are placed later in the merged array. This is because we want to support overriding
/// earlier values and values from other configurations, including unsetting them.
/// For example: patterns coming last in file inclusion and exclusion patterns
/// allow overriding earlier patterns, matching the `gitignore` behavior.
/// Generally speaking, it feels more intuitive if later values override earlier values
/// than the other way around: `knot --exclude png --exclude "!important.png"`.
///
/// The main downside of this approach is that the ordering can be surprising in cases
/// where the option has a "first match" semantic rather than a "last match" one.
/// One such example is `extra-paths` where the semantics is given by Python:
/// the module on the first matching search path wins.
///
/// ```toml
/// [environment]
/// extra-paths = ["b", "c"]
/// ```
///
/// ```bash
/// knot --extra-paths a
/// ```
///
/// That's why a user might expect that this configuration results in `["a", "b", "c"]`,
/// because the CLI has higher precedence. However, the current implementation results in a
/// resolved extra search path of `["b", "c", "a"]`, which means `a` will be tried last.
///
/// There's an argument here that the user should be able to specify the order of the paths,
/// because only then is the user in full control of where to insert the path when specifying `extra-paths`
/// in multiple sources.
///
/// ## Macro
/// You can automatically derive `Combine` for structs with named fields by using `derive(ruff_macros::Combine)`.
pub trait Combine {
#[must_use]
fn combine(mut self, other: Self) -> Self
where
Self: Sized,
{
self.combine_with(other);
self
}
fn combine_with(&mut self, other: Self);
}
impl<T> Combine for Option<T>
where
T: Combine,
{
fn combine(self, other: Self) -> Self
where
Self: Sized,
{
match (self, other) {
(Some(a), Some(b)) => Some(a.combine(b)),
(None, Some(b)) => Some(b),
(a, _) => a,
}
}
fn combine_with(&mut self, other: Self) {
match (self, other) {
(Some(a), Some(b)) => {
a.combine_with(b);
}
(a @ None, Some(b)) => {
*a = Some(b);
}
_ => {}
}
}
}
impl<T> Combine for Vec<T> {
fn combine_with(&mut self, mut other: Self) {
// `self` takes precedence over `other` but values with higher precedence must be placed after.
// Swap the vectors so that `other` is the one that gets extended, so that the values of `self` come after.
std::mem::swap(self, &mut other);
self.extend(other);
}
}
impl<K, V, S> Combine for HashMap<K, V, S>
where
K: Eq + std::hash::Hash,
S: BuildHasher,
{
fn combine_with(&mut self, mut other: Self) {
// `self` takes precedence over `other` but `extend` overrides existing values.
// Swap the hash maps so that `self` is the one that gets extended.
std::mem::swap(self, &mut other);
self.extend(other);
}
}
/// Implements [`Combine`] for a value that always returns `self` when combined with another value.
macro_rules! impl_noop_combine {
($name:ident) => {
impl Combine for $name {
#[inline(always)]
fn combine_with(&mut self, _other: Self) {}
#[inline(always)]
fn combine(self, _other: Self) -> Self {
self
}
}
};
}
impl_noop_combine!(SystemPathBuf);
impl_noop_combine!(PythonPlatform);
impl_noop_combine!(PythonPath);
impl_noop_combine!(PythonVersion);
// std types
impl_noop_combine!(bool);
impl_noop_combine!(usize);
impl_noop_combine!(u8);
impl_noop_combine!(u16);
impl_noop_combine!(u32);
impl_noop_combine!(u64);
impl_noop_combine!(u128);
impl_noop_combine!(isize);
impl_noop_combine!(i8);
impl_noop_combine!(i16);
impl_noop_combine!(i32);
impl_noop_combine!(i64);
impl_noop_combine!(i128);
impl_noop_combine!(String);
#[cfg(test)]
mod tests {
use crate::combine::Combine;
use std::collections::HashMap;
#[test]
fn combine_option() {
assert_eq!(Some(1).combine(Some(2)), Some(1));
assert_eq!(None.combine(Some(2)), Some(2));
assert_eq!(Some(1).combine(None), Some(1));
}
#[test]
fn combine_vec() {
assert_eq!(None.combine(Some(vec![1, 2, 3])), Some(vec![1, 2, 3]));
assert_eq!(Some(vec![1, 2, 3]).combine(None), Some(vec![1, 2, 3]));
assert_eq!(
Some(vec![1, 2, 3]).combine(Some(vec![4, 5, 6])),
Some(vec![4, 5, 6, 1, 2, 3])
);
}
#[test]
fn combine_map() {
let a: HashMap<u32, _> = HashMap::from_iter([(1, "a"), (2, "a"), (3, "a")]);
let b: HashMap<u32, _> = HashMap::from_iter([(0, "b"), (2, "b"), (5, "b")]);
assert_eq!(None.combine(Some(b.clone())), Some(b.clone()));
assert_eq!(Some(a.clone()).combine(None), Some(a.clone()));
assert_eq!(
Some(a).combine(Some(b)),
Some(HashMap::from_iter([
(0, "b"),
// The value from `a` takes precedence
(1, "a"),
(2, "a"),
(3, "a"),
(5, "b")
]))
);
}
}

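To make the merge semantics concrete, here is a minimal, self-contained sketch of what deriving `Combine` for a struct with named fields would conceptually expand to: each field is combined independently, so scalars keep the higher-precedence value while array contents from the higher-precedence source are appended last. The `EnvOptions` struct and its field names are invented for illustration; the trait and impls are condensed from the file above.

// Condensed restatement of the trait and the impls used below.
trait Combine {
    fn combine(mut self, other: Self) -> Self
    where
        Self: Sized,
    {
        self.combine_with(other);
        self
    }
    fn combine_with(&mut self, other: Self);
}

impl Combine for String {
    fn combine_with(&mut self, _other: Self) {}
}

impl<T: Combine> Combine for Option<T> {
    fn combine_with(&mut self, other: Self) {
        match (self, other) {
            (Some(a), Some(b)) => a.combine_with(b),
            (a @ None, Some(b)) => *a = Some(b),
            _ => {}
        }
    }
}

impl<T> Combine for Vec<T> {
    fn combine_with(&mut self, mut other: Self) {
        // Higher-precedence values go last, matching gitignore-style overrides.
        std::mem::swap(self, &mut other);
        self.extend(other);
    }
}

// What `#[derive(Combine)]` would conceptually generate for a struct
// with named fields (hypothetical struct): combine each field independently.
#[derive(Debug, PartialEq)]
struct EnvOptions {
    python_version: Option<String>,
    extra_paths: Option<Vec<String>>,
}

impl Combine for EnvOptions {
    fn combine_with(&mut self, other: Self) {
        self.python_version.combine_with(other.python_version);
        self.extra_paths.combine_with(other.extra_paths);
    }
}

fn main() {
    let cli = EnvOptions {
        python_version: Some("3.12".to_string()),
        extra_paths: Some(vec!["a".to_string()]),
    };
    let file = EnvOptions {
        python_version: Some("3.10".to_string()),
        extra_paths: Some(vec!["b".to_string(), "c".to_string()]),
    };
    let merged = cli.combine(file);
    // Scalars: the higher-precedence (CLI) value wins.
    assert_eq!(merged.python_version.as_deref(), Some("3.12"));
    // Arrays: higher-precedence values are appended last.
    assert_eq!(
        merged.extra_paths,
        Some(vec!["b".to_string(), "c".to_string(), "a".to_string()])
    );
}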
View File

@@ -1,240 +0,0 @@
use crate::db::{Db, ProjectDatabase};
use crate::metadata::options::Options;
use crate::watch::{ChangeEvent, CreatedKind, DeletedKind};
use crate::{Project, ProjectMetadata};
use std::collections::BTreeSet;
use crate::walk::ProjectFilesWalker;
use red_knot_python_semantic::Program;
use ruff_db::files::{File, Files};
use ruff_db::system::SystemPath;
use ruff_db::Db as _;
use rustc_hash::FxHashSet;
impl ProjectDatabase {
#[tracing::instrument(level = "debug", skip(self, changes, cli_options))]
pub fn apply_changes(&mut self, changes: Vec<ChangeEvent>, cli_options: Option<&Options>) {
let mut project = self.project();
let project_root = project.root(self).to_path_buf();
let program = Program::get(self);
let custom_stdlib_versions_path = program
.custom_stdlib_search_path(self)
.map(|path| path.join("VERSIONS"));
// Are there structural changes to the project?
let mut project_changed = false;
// Changes to a custom stdlib path's VERSIONS
let mut custom_stdlib_change = false;
// Paths that were added
let mut added_paths = FxHashSet::default();
// Deduplicate the `sync` calls. Many file watchers emit multiple events for the same path.
let mut synced_files = FxHashSet::default();
let mut sync_recursively = BTreeSet::default();
let mut sync_path = |db: &mut ProjectDatabase, path: &SystemPath| {
if synced_files.insert(path.to_path_buf()) {
File::sync_path(db, path);
}
};
for change in changes {
tracing::trace!("Handle change: {:?}", change);
if let Some(path) = change.system_path() {
if matches!(
path.file_name(),
Some(".gitignore" | ".ignore" | "knot.toml" | "pyproject.toml")
) {
// Changes to ignore files or settings can change the project structure or add/remove files.
project_changed = true;
continue;
}
if Some(path) == custom_stdlib_versions_path.as_deref() {
custom_stdlib_change = true;
}
}
match change {
ChangeEvent::Changed { path, kind: _ } | ChangeEvent::Opened(path) => {
sync_path(self, &path);
}
ChangeEvent::Created { kind, path } => {
match kind {
CreatedKind::File => sync_path(self, &path),
CreatedKind::Directory | CreatedKind::Any => {
sync_recursively.insert(path.clone());
}
}
// Unlike for other events, it's not only important to update the status of existing
// and known `File`s (`sync_recursively`), it's also important to discover new files
// that were added in the project's root (or any of the paths included for checking).
//
// This is important because `Project::check` iterates over all included files.
// The code below walks the `added_paths` and adds all files that
// should be included in the project. We can skip this check for
// paths that aren't part of the project or shouldn't be included
// when checking the project.
if project.is_path_included(self, &path) {
if self.system().is_file(&path) {
// Add the parent directory because `walkdir` always visits explicitly passed files
// even if they match an exclude filter.
added_paths.insert(path.parent().unwrap().to_path_buf());
} else {
added_paths.insert(path);
}
}
}
ChangeEvent::Deleted { kind, path } => {
let is_file = match kind {
DeletedKind::File => true,
DeletedKind::Directory => {
// file watchers emit an event for every deleted file. No need to scan the entire dir.
continue;
}
DeletedKind::Any => self
.files
.try_system(self, &path)
.is_some_and(|file| file.exists(self)),
};
if is_file {
sync_path(self, &path);
if let Some(file) = self.files().try_system(self, &path) {
project.remove_file(self, file);
}
} else {
sync_recursively.insert(path.clone());
if custom_stdlib_versions_path
.as_ref()
.is_some_and(|versions_path| versions_path.starts_with(&path))
{
custom_stdlib_change = true;
}
if project.is_path_included(self, &path) || path == project_root {
// TODO: Shouldn't it be enough to simply traverse the project files and remove all
// that start with the given path?
tracing::debug!(
"Reload project because of a path that could have been a directory."
);
// Perform a full-reload in case the deleted directory contained the pyproject.toml.
// We may want to make this more clever in the future, e.g. iterate over the
// indexed files and remove the ones that start with the same path, unless
// the deleted path is the project configuration.
project_changed = true;
}
}
}
ChangeEvent::CreatedVirtual(path) | ChangeEvent::ChangedVirtual(path) => {
File::sync_virtual_path(self, &path);
}
ChangeEvent::DeletedVirtual(path) => {
if let Some(virtual_file) = self.files().try_virtual_file(&path) {
virtual_file.close(self);
}
}
ChangeEvent::Rescan => {
project_changed = true;
Files::sync_all(self);
sync_recursively.clear();
break;
}
}
}
let sync_recursively = sync_recursively.into_iter();
let mut last = None;
for path in sync_recursively {
// Avoid re-syncing paths that are sub-paths of each other.
if let Some(last) = &last {
if path.starts_with(last) {
continue;
}
}
Files::sync_recursively(self, &path);
last = Some(path);
}
if project_changed {
match ProjectMetadata::discover(&project_root, self.system()) {
Ok(mut metadata) => {
if let Some(cli_options) = cli_options {
metadata.apply_cli_options(cli_options.clone());
}
if let Err(error) = metadata.apply_configuration_files(self.system()) {
tracing::error!(
"Failed to apply configuration files, continuing without applying them: {error}"
);
}
let program_settings = metadata.to_program_settings(self.system());
let program = Program::get(self);
if let Err(error) = program.update_from_settings(self, program_settings) {
tracing::error!("Failed to update the program settings, keeping the old program settings: {error}");
}
if metadata.root() == project.root(self) {
tracing::debug!("Reloading project after structural change");
project.reload(self, metadata);
} else {
tracing::debug!("Replace project after structural change");
project = Project::from_metadata(self, metadata);
self.project = Some(project);
}
}
Err(error) => {
tracing::error!(
"Failed to load project, keeping old project configuration: {error}"
);
}
}
return;
} else if custom_stdlib_change {
let search_paths = project
.metadata(self)
.to_program_settings(self.system())
.search_paths;
if let Err(error) = program.update_search_paths(self, &search_paths) {
tracing::error!("Failed to set the new search paths: {error}");
}
}
let diagnostics = if let Some(walker) = ProjectFilesWalker::incremental(self, added_paths) {
// Use directory walking to discover newly added files.
let (files, diagnostics) = walker.collect_vec(self);
for file in files {
project.add_file(self, file);
}
diagnostics
} else {
Vec::new()
};
// Note: We simply replace all IO related diagnostics here. This isn't ideal, because
// it removes IO errors that may still be relevant. However, tracking IO errors correctly
// across revisions doesn't feel essential, considering that they're rare. That said, we could
// implement a `BTreeMap` or similar and only prune the diagnostics from paths that we've
// re-scanned (or that were removed etc).
project.replace_index_diagnostics(self, diagnostics);
}
}

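The drain of `sync_recursively` above exploits `BTreeSet`'s sorted iteration order: a parent directory always sorts before its descendants, so comparing each path against only the last kept root is enough to skip nested paths. A standalone sketch of that deduplication pattern, using plain `std::path` instead of `SystemPath` and invented example paths:

use std::collections::BTreeSet;
use std::path::PathBuf;

fn dedup_recursive_roots(paths: BTreeSet<PathBuf>) -> Vec<PathBuf> {
    let mut roots: Vec<PathBuf> = Vec::new();
    for path in paths {
        // Lexicographic component order guarantees a parent directory sorts
        // before its children, so checking the last kept root suffices.
        if let Some(last) = roots.last() {
            if path.starts_with(last) {
                continue;
            }
        }
        roots.push(path);
    }
    roots
}

fn main() {
    let paths: BTreeSet<PathBuf> = ["/app", "/app/src", "/app/src/db", "/lib"]
        .into_iter()
        .map(PathBuf::from)
        .collect();
    // Only the top-most directories remain.
    assert_eq!(
        dedup_recursive_roots(paths),
        vec![PathBuf::from("/app"), PathBuf::from("/lib")]
    );
}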
View File

@@ -1,569 +0,0 @@
#![allow(clippy::ref_option)]
use crate::metadata::options::OptionDiagnostic;
use crate::walk::{ProjectFilesFilter, ProjectFilesWalker};
pub use db::{Db, ProjectDatabase};
use files::{Index, Indexed, IndexedFiles};
use metadata::settings::Settings;
pub use metadata::{ProjectDiscoveryError, ProjectMetadata};
use red_knot_python_semantic::lint::{LintRegistry, LintRegistryBuilder, RuleSelection};
use red_knot_python_semantic::register_lints;
use red_knot_python_semantic::types::check_types;
use ruff_db::diagnostic::{
create_parse_diagnostic, Annotation, Diagnostic, DiagnosticId, Severity, Span,
};
use ruff_db::files::File;
use ruff_db::parsed::parsed_module;
use ruff_db::source::{source_text, SourceTextError};
use ruff_db::system::{SystemPath, SystemPathBuf};
use rustc_hash::FxHashSet;
use salsa::Durability;
use salsa::Setter;
use std::sync::Arc;
use thiserror::Error;
pub mod combine;
mod db;
mod files;
pub mod metadata;
mod walk;
pub mod watch;
pub static DEFAULT_LINT_REGISTRY: std::sync::LazyLock<LintRegistry> =
std::sync::LazyLock::new(default_lints_registry);
pub fn default_lints_registry() -> LintRegistry {
let mut builder = LintRegistryBuilder::default();
register_lints(&mut builder);
builder.build()
}
/// The project as a Salsa ingredient.
///
/// ## How is a project different from a program?
/// There are two (related) motivations:
///
/// 1. Program is defined in `ruff_db` and it can't reference the settings types for the linter and formatter
/// without introducing a cyclic dependency. The project is defined in a higher level crate
/// where it can reference these setting types.
/// 2. Running `ruff check` with different target versions results in different programs (settings) but
/// it remains the same project. That's why program is a narrowed view of the project only
/// holding on to the most fundamental settings required for checking.
#[salsa::input]
pub struct Project {
/// The files that are open in the project.
///
/// Setting the open files to a non-`None` value changes `check` to only check the
/// open files rather than all files in the project.
#[return_ref]
#[default]
open_fileset: Option<Arc<FxHashSet<File>>>,
/// The first-party files of this project.
#[default]
#[return_ref]
file_set: IndexedFiles,
/// The metadata describing the project, including the unresolved options.
#[return_ref]
pub metadata: ProjectMetadata,
/// The resolved project settings.
#[return_ref]
pub settings: Settings,
/// The paths that should be included when checking this project.
///
/// The default (when this list is empty) is to include all files in the project root
/// (that satisfy the configured include and exclude patterns).
/// However, it's sometimes desired to only check a subset of the project, e.g. to see
/// the diagnostics for a single file or a folder.
///
/// This list gets initialized by the paths passed to `knot check <paths>`
///
/// ## How is this different from `open_files`?
///
/// The `included_paths` list is closely related to `open_files`. The main difference is that
/// `open_files` is already a resolved set of files whereas `included_paths` is only a list of paths
/// that are resolved to files by indexing them. The other difference is that
/// new files added to any directory in `included_paths` will be indexed and added to the project
/// whereas `open_files` needs to be updated manually (e.g. by the IDE).
///
/// In short, `open_files` is cheaper in contexts where the set of files is known, like
/// in an IDE when the user only wants to check the open tabs. This could be modeled
/// with `included_paths` too but it would require an explicit walk dir step that's simply unnecessary.
#[default]
#[return_ref]
included_paths_list: Vec<SystemPathBuf>,
/// Diagnostics that were generated when resolving the project settings.
#[return_ref]
settings_diagnostics: Vec<OptionDiagnostic>,
}
#[salsa::tracked]
impl Project {
pub fn from_metadata(db: &dyn Db, metadata: ProjectMetadata) -> Self {
let (settings, settings_diagnostics) = metadata.options().to_settings(db);
Project::builder(metadata, settings, settings_diagnostics)
.durability(Durability::MEDIUM)
.open_fileset_durability(Durability::LOW)
.file_set_durability(Durability::LOW)
.new(db)
}
pub fn root(self, db: &dyn Db) -> &SystemPath {
self.metadata(db).root()
}
pub fn name(self, db: &dyn Db) -> &str {
self.metadata(db).name()
}
/// Returns the resolved linter rules for the project.
///
/// This is a salsa query to prevent re-computing queries if other, unrelated
/// settings change. For example, we don't want that changing the terminal settings
/// invalidates any type checking queries.
#[salsa::tracked]
pub fn rules(self, db: &dyn Db) -> Arc<RuleSelection> {
self.settings(db).to_rules()
}
/// Returns `true` if `path` is both part of the project and included (see `included_paths_list`).
///
/// Unlike [Self::files], this method does not respect `.gitignore` files. It only checks
/// the project's include and exclude settings as well as the paths that were passed to `knot check <paths>`.
/// This means that this method is an over-approximation of `Self::files` and may return `true` for paths
/// that won't be included when checking the project because they're ignored in a `.gitignore` file.
pub fn is_path_included(self, db: &dyn Db, path: &SystemPath) -> bool {
ProjectFilesFilter::from_project(db, self).is_included(path)
}
pub fn reload(self, db: &mut dyn Db, metadata: ProjectMetadata) {
tracing::debug!("Reloading project");
assert_eq!(self.root(db), metadata.root());
if &metadata != self.metadata(db) {
let (settings, settings_diagnostics) = metadata.options().to_settings(db);
if self.settings(db) != &settings {
self.set_settings(db).to(settings);
}
if self.settings_diagnostics(db) != &settings_diagnostics {
self.set_settings_diagnostics(db).to(settings_diagnostics);
}
self.set_metadata(db).to(metadata);
}
self.reload_files(db);
}
/// Checks all open files in the project and its dependencies.
pub(crate) fn check(self, db: &ProjectDatabase) -> Vec<Diagnostic> {
let project_span = tracing::debug_span!("Project::check");
let _span = project_span.enter();
tracing::debug!("Checking project '{name}'", name = self.name(db));
let mut diagnostics: Vec<Diagnostic> = Vec::new();
diagnostics.extend(
self.settings_diagnostics(db)
.iter()
.map(OptionDiagnostic::to_diagnostic),
);
let files = ProjectFiles::new(db, self);
diagnostics.extend(
files
.diagnostics()
.iter()
.map(IOErrorDiagnostic::to_diagnostic),
);
let result = Arc::new(std::sync::Mutex::new(diagnostics));
let inner_result = Arc::clone(&result);
let db = db.clone();
let project_span = project_span.clone();
rayon::scope(move |scope| {
for file in &files {
let result = inner_result.clone();
let db = db.clone();
let project_span = project_span.clone();
scope.spawn(move |_| {
let check_file_span =
tracing::debug_span!(parent: &project_span, "check_file", ?file);
let _entered = check_file_span.entered();
let file_diagnostics = check_file_impl(&db, file);
result.lock().unwrap().extend(file_diagnostics);
});
}
});
Arc::into_inner(result).unwrap().into_inner().unwrap()
}
pub(crate) fn check_file(self, db: &dyn Db, file: File) -> Vec<Diagnostic> {
let mut file_diagnostics: Vec<_> = self
.settings_diagnostics(db)
.iter()
.map(OptionDiagnostic::to_diagnostic)
.collect();
let check_diagnostics = check_file_impl(db, file);
file_diagnostics.extend(check_diagnostics);
file_diagnostics
}
/// Opens a file in the project.
///
/// This changes the behavior of `check` to only check the open files rather than all files in the project.
pub fn open_file(self, db: &mut dyn Db, file: File) {
tracing::debug!("Opening file `{}`", file.path(db));
let mut open_files = self.take_open_files(db);
open_files.insert(file);
self.set_open_files(db, open_files);
}
/// Closes a file in the project.
pub fn close_file(self, db: &mut dyn Db, file: File) -> bool {
tracing::debug!("Closing file `{}`", file.path(db));
let mut open_files = self.take_open_files(db);
let removed = open_files.remove(&file);
if removed {
self.set_open_files(db, open_files);
}
removed
}
pub fn set_included_paths(self, db: &mut dyn Db, paths: Vec<SystemPathBuf>) {
tracing::debug!("Setting included paths: {paths}", paths = paths.len());
self.set_included_paths_list(db).to(paths);
self.reload_files(db);
}
/// Returns the paths that should be checked.
///
/// The default is to check the entire project in which case this method returns
/// the project root. However, users can specify to only check specific sub-folders or
/// even files of a project by using `knot check <paths>`. In that case, this method
/// returns the provided absolute paths.
///
/// Note: The CLI doesn't prohibit users from specifying paths outside the project root.
/// This can be useful to check arbitrary files, but it isn't something we recommend.
/// We should try to support this use case but it's okay if there are some limitations around it.
fn included_paths_or_root(self, db: &dyn Db) -> &[SystemPathBuf] {
match &**self.included_paths_list(db) {
[] => std::slice::from_ref(&self.metadata(db).root),
paths => paths,
}
}
/// Returns the open files in the project or `None` if the entire project should be checked.
pub fn open_files(self, db: &dyn Db) -> Option<&FxHashSet<File>> {
self.open_fileset(db).as_deref()
}
/// Sets the open files in the project.
///
/// This changes the behavior of `check` to only check the open files rather than all files in the project.
#[tracing::instrument(level = "debug", skip(self, db))]
pub fn set_open_files(self, db: &mut dyn Db, open_files: FxHashSet<File>) {
tracing::debug!("Set open project files (count: {})", open_files.len());
self.set_open_fileset(db).to(Some(Arc::new(open_files)));
}
/// This takes the open files from the project and returns them.
///
/// This changes the behavior of `check` to check all files in the project instead of just the open files.
fn take_open_files(self, db: &mut dyn Db) -> FxHashSet<File> {
tracing::debug!("Take open project files");
// Salsa will cancel any pending queries and remove its own reference to `open_files`
// so that the reference counter to `open_files` now drops to 1.
let open_files = self.set_open_fileset(db).to(None);
if let Some(open_files) = open_files {
Arc::try_unwrap(open_files).unwrap()
} else {
FxHashSet::default()
}
}
/// Returns `true` if the file is open in the project.
///
/// A file is considered open when:
/// * It is explicitly set as an open file using [`open_file`](Self::open_file)
/// * It has a [`SystemPath`] and belongs to a package's `src` files
/// * It has a [`SystemVirtualPath`](ruff_db::system::SystemVirtualPath)
pub fn is_file_open(self, db: &dyn Db, file: File) -> bool {
if let Some(open_files) = self.open_files(db) {
open_files.contains(&file)
} else if file.path(db).is_system_path() {
self.contains_file(db, file)
} else {
file.path(db).is_system_virtual_path()
}
}
/// Returns `true` if `file` is a first-party file part of this package.
pub fn contains_file(self, db: &dyn Db, file: File) -> bool {
self.files(db).contains(&file)
}
#[tracing::instrument(level = "debug", skip(self, db))]
pub fn remove_file(self, db: &mut dyn Db, file: File) {
tracing::debug!(
"Removing file `{}` from project `{}`",
file.path(db),
self.name(db)
);
let Some(mut index) = IndexedFiles::indexed_mut(db, self) else {
return;
};
index.remove(file);
}
pub fn add_file(self, db: &mut dyn Db, file: File) {
tracing::debug!(
"Adding file `{}` to project `{}`",
file.path(db),
self.name(db)
);
let Some(mut index) = IndexedFiles::indexed_mut(db, self) else {
return;
};
index.insert(file);
}
/// Replaces the diagnostics from indexing the project files with `diagnostics`.
///
/// This is a no-op if the project files haven't been indexed yet.
pub fn replace_index_diagnostics(self, db: &mut dyn Db, diagnostics: Vec<IOErrorDiagnostic>) {
let Some(mut index) = IndexedFiles::indexed_mut(db, self) else {
return;
};
index.set_diagnostics(diagnostics);
}
/// Returns the files belonging to this project.
pub fn files(self, db: &dyn Db) -> Indexed<'_> {
let files = self.file_set(db);
let indexed = match files.get() {
Index::Lazy(vacant) => {
let _entered =
tracing::debug_span!("Project::index_files", project = %self.name(db))
.entered();
let walker = ProjectFilesWalker::new(db);
let (files, diagnostics) = walker.collect_set(db);
tracing::info!("Indexed {} file(s)", files.len());
vacant.set(files, diagnostics)
}
Index::Indexed(indexed) => indexed,
};
indexed
}
pub fn reload_files(self, db: &mut dyn Db) {
tracing::debug!("Reloading files for project `{}`", self.name(db));
if !self.file_set(db).is_lazy() {
// Force a re-index of the files in the next revision.
self.set_file_set(db).to(IndexedFiles::lazy());
}
}
}
fn check_file_impl(db: &dyn Db, file: File) -> Vec<Diagnostic> {
let mut diagnostics: Vec<Diagnostic> = Vec::new();
// Abort checking if there are IO errors.
let source = source_text(db.upcast(), file);
if let Some(read_error) = source.read_error() {
diagnostics.push(
IOErrorDiagnostic {
file: Some(file),
error: read_error.clone().into(),
}
.to_diagnostic(),
);
return diagnostics;
}
let parsed = parsed_module(db.upcast(), file);
diagnostics.extend(
parsed
.errors()
.iter()
.map(|error| create_parse_diagnostic(file, error)),
);
diagnostics.extend(check_types(db.upcast(), file).into_iter().cloned());
diagnostics.sort_unstable_by_key(|diagnostic| {
diagnostic
.primary_span()
.and_then(|span| span.range())
.unwrap_or_default()
.start()
});
diagnostics
}
#[derive(Debug)]
enum ProjectFiles<'a> {
OpenFiles(&'a FxHashSet<File>),
Indexed(files::Indexed<'a>),
}
impl<'a> ProjectFiles<'a> {
fn new(db: &'a dyn Db, project: Project) -> Self {
if let Some(open_files) = project.open_files(db) {
ProjectFiles::OpenFiles(open_files)
} else {
ProjectFiles::Indexed(project.files(db))
}
}
fn diagnostics(&self) -> &[IOErrorDiagnostic] {
match self {
ProjectFiles::OpenFiles(_) => &[],
ProjectFiles::Indexed(indexed) => indexed.diagnostics(),
}
}
}
impl<'a> IntoIterator for &'a ProjectFiles<'a> {
type Item = File;
type IntoIter = ProjectFilesIter<'a>;
fn into_iter(self) -> Self::IntoIter {
match self {
ProjectFiles::OpenFiles(files) => ProjectFilesIter::OpenFiles(files.iter()),
ProjectFiles::Indexed(indexed) => ProjectFilesIter::Indexed {
files: indexed.into_iter(),
},
}
}
}
enum ProjectFilesIter<'db> {
OpenFiles(std::collections::hash_set::Iter<'db, File>),
Indexed { files: files::IndexedIter<'db> },
}
impl Iterator for ProjectFilesIter<'_> {
type Item = File;
fn next(&mut self) -> Option<Self::Item> {
match self {
ProjectFilesIter::OpenFiles(files) => files.next().copied(),
ProjectFilesIter::Indexed { files } => files.next(),
}
}
}
#[derive(Debug, Clone)]
pub struct IOErrorDiagnostic {
file: Option<File>,
error: IOErrorKind,
}
impl IOErrorDiagnostic {
fn to_diagnostic(&self) -> Diagnostic {
let mut diag = Diagnostic::new(DiagnosticId::Io, Severity::Error, &self.error);
if let Some(file) = self.file {
diag.annotate(Annotation::primary(Span::from(file)));
}
diag
}
}
#[derive(Error, Debug, Clone)]
enum IOErrorKind {
#[error(transparent)]
Walk(#[from] walk::WalkError),
#[error(transparent)]
SourceText(#[from] SourceTextError),
}
#[cfg(test)]
mod tests {
use crate::db::tests::TestDb;
use crate::{check_file_impl, ProjectMetadata};
use red_knot_python_semantic::types::check_types;
use ruff_db::files::system_path_to_file;
use ruff_db::source::source_text;
use ruff_db::system::{DbWithTestSystem, DbWithWritableSystem as _, SystemPath, SystemPathBuf};
use ruff_db::testing::assert_function_query_was_not_run;
use ruff_python_ast::name::Name;
#[test]
fn check_file_skips_type_checking_when_file_cant_be_read() -> ruff_db::system::Result<()> {
let project = ProjectMetadata::new(Name::new_static("test"), SystemPathBuf::from("/"));
let mut db = TestDb::new(project);
let path = SystemPath::new("test.py");
db.write_file(path, "x = 10")?;
let file = system_path_to_file(&db, path).unwrap();
// Now the file gets deleted before we had a chance to read its source text.
db.memory_file_system().remove_file(path)?;
file.sync(&mut db);
assert_eq!(source_text(&db, file).as_str(), "");
assert_eq!(
check_file_impl(&db, file)
.into_iter()
.map(|diagnostic| diagnostic.primary_message().to_string())
.collect::<Vec<_>>(),
vec!["Failed to read file: No such file or directory".to_string()]
);
let events = db.take_salsa_events();
assert_function_query_was_not_run(&db, check_types, file, &events);
// The user now creates a new file with an empty text. The source text
// content returned by `source_text` remains unchanged, but the diagnostics should get updated.
db.write_file(path, "").unwrap();
assert_eq!(source_text(&db, file).as_str(), "");
assert_eq!(
check_file_impl(&db, file)
.into_iter()
.map(|diagnostic| diagnostic.primary_message().to_string())
.collect::<Vec<_>>(),
vec![] as Vec<String>
);
Ok(())
}
}

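`Project::check` above fans out one task per file inside a `rayon::scope`, funnels per-file diagnostics through an `Arc<Mutex<Vec<_>>>`, and reclaims the vector with `Arc::into_inner` once the scope has joined every task. A reduced sketch of that pattern, assuming the `rayon` crate is available; `check_one` is a hypothetical stand-in for `check_file_impl`:

use std::sync::{Arc, Mutex};

// Hypothetical stand-in for `check_file_impl`.
fn check_one(file: u32) -> Vec<String> {
    vec![format!("diagnostic for file {file}")]
}

fn check_all(files: Vec<u32>) -> Vec<String> {
    let result = Arc::new(Mutex::new(Vec::new()));
    let inner = Arc::clone(&result);
    rayon::scope(move |scope| {
        for file in files {
            let result = inner.clone();
            scope.spawn(move |_| {
                // Each task appends its diagnostics under the lock.
                result.lock().unwrap().extend(check_one(file));
            });
        }
    });
    // The scope has joined all tasks, so `result` is the only Arc left.
    Arc::into_inner(result).unwrap().into_inner().unwrap()
}

fn main() {
    let diagnostics = check_all(vec![1, 2, 3]);
    assert_eq!(diagnostics.len(), 3);
}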
View File

@@ -1,945 +0,0 @@
use configuration_file::{ConfigurationFile, ConfigurationFileError};
use red_knot_python_semantic::ProgramSettings;
use ruff_db::system::{System, SystemPath, SystemPathBuf};
use ruff_python_ast::name::Name;
use std::sync::Arc;
use thiserror::Error;
use crate::combine::Combine;
use crate::metadata::pyproject::{Project, PyProject, PyProjectError, ResolveRequiresPythonError};
use crate::metadata::value::ValueSource;
use options::KnotTomlError;
use options::Options;
mod configuration_file;
pub mod options;
pub mod pyproject;
pub mod settings;
pub mod value;
#[derive(Debug, PartialEq, Eq)]
#[cfg_attr(test, derive(serde::Serialize))]
pub struct ProjectMetadata {
pub(super) name: Name,
pub(super) root: SystemPathBuf,
/// The raw options
pub(super) options: Options,
/// Paths of configurations other than the project's configuration that were combined into [`Self::options`].
///
/// This field stores the paths of the configuration files, mainly for
/// knowing which files to watch for changes.
///
/// The path ordering doesn't imply precedence.
#[cfg_attr(test, serde(skip_serializing_if = "Vec::is_empty"))]
pub(super) extra_configuration_paths: Vec<SystemPathBuf>,
}
impl ProjectMetadata {
/// Creates a project with the given name and root that uses the default options.
pub fn new(name: Name, root: SystemPathBuf) -> Self {
Self {
name,
root,
extra_configuration_paths: Vec::default(),
options: Options::default(),
}
}
/// Loads a project from a `pyproject.toml` file.
pub(crate) fn from_pyproject(
pyproject: PyProject,
root: SystemPathBuf,
) -> Result<Self, ResolveRequiresPythonError> {
Self::from_options(
pyproject
.tool
.and_then(|tool| tool.knot)
.unwrap_or_default(),
root,
pyproject.project.as_ref(),
)
}
/// Loads a project from a set of options with an optional pyproject-project table.
pub fn from_options(
mut options: Options,
root: SystemPathBuf,
project: Option<&Project>,
) -> Result<Self, ResolveRequiresPythonError> {
let name = project
.and_then(|project| project.name.as_deref())
.map(|name| Name::new(&**name))
.unwrap_or_else(|| Name::new(root.file_name().unwrap_or("root")));
// If the `options` don't specify a python version but the `project.requires-python` field is set,
// use that as a lower bound instead.
if let Some(project) = project {
if options
.environment
.as_ref()
.is_none_or(|env| env.python_version.is_none())
{
if let Some(requires_python) = project.resolve_requires_python_lower_bound()? {
let mut environment = options.environment.unwrap_or_default();
environment.python_version = Some(requires_python);
options.environment = Some(environment);
}
}
}
Ok(Self {
name,
root,
options,
extra_configuration_paths: Vec::new(),
})
}
/// Discovers the closest project at `path` and returns its metadata.
///
/// The algorithm traverses upwards in the `path`'s ancestor chain and uses the following precedence
/// to resolve the project's root.
///
/// 1. The closest `pyproject.toml` with a `tool.knot` section or `knot.toml`.
/// 1. The closest `pyproject.toml`.
/// 1. Fallback to use `path` as the root and use the default settings.
pub fn discover(
path: &SystemPath,
system: &dyn System,
) -> Result<ProjectMetadata, ProjectDiscoveryError> {
tracing::debug!("Searching for a project in '{path}'");
if !system.is_directory(path) {
return Err(ProjectDiscoveryError::NotADirectory(path.to_path_buf()));
}
let mut closest_project: Option<ProjectMetadata> = None;
for project_root in path.ancestors() {
let pyproject_path = project_root.join("pyproject.toml");
let pyproject = if let Ok(pyproject_str) = system.read_to_string(&pyproject_path) {
match PyProject::from_toml_str(
&pyproject_str,
ValueSource::File(Arc::new(pyproject_path.clone())),
) {
Ok(pyproject) => Some(pyproject),
Err(error) => {
return Err(ProjectDiscoveryError::InvalidPyProject {
path: pyproject_path,
source: Box::new(error),
})
}
}
} else {
None
};
// A `knot.toml` takes precedence over a `pyproject.toml`.
let knot_toml_path = project_root.join("knot.toml");
if let Ok(knot_str) = system.read_to_string(&knot_toml_path) {
let options = match Options::from_toml_str(
&knot_str,
ValueSource::File(Arc::new(knot_toml_path.clone())),
) {
Ok(options) => options,
Err(error) => {
return Err(ProjectDiscoveryError::InvalidKnotToml {
path: knot_toml_path,
source: Box::new(error),
})
}
};
if pyproject
.as_ref()
.is_some_and(|project| project.knot().is_some())
{
// TODO: Consider using a diagnostic here
tracing::warn!("Ignoring the `tool.knot` section in `{pyproject_path}` because `{knot_toml_path}` takes precedence.");
}
tracing::debug!("Found project at '{}'", project_root);
let metadata = ProjectMetadata::from_options(
options,
project_root.to_path_buf(),
pyproject
.as_ref()
.and_then(|pyproject| pyproject.project.as_ref()),
)
.map_err(|err| {
ProjectDiscoveryError::InvalidRequiresPythonConstraint {
source: err,
path: pyproject_path,
}
})?;
return Ok(metadata);
}
if let Some(pyproject) = pyproject {
let has_knot_section = pyproject.knot().is_some();
let metadata =
ProjectMetadata::from_pyproject(pyproject, project_root.to_path_buf())
.map_err(
|err| ProjectDiscoveryError::InvalidRequiresPythonConstraint {
source: err,
path: pyproject_path,
},
)?;
if has_knot_section {
tracing::debug!("Found project at '{}'", project_root);
return Ok(metadata);
}
// Not a project itself, keep looking for an enclosing project.
if closest_project.is_none() {
closest_project = Some(metadata);
}
}
}
// No project found, but maybe a pyproject.toml was found.
let metadata = if let Some(closest_project) = closest_project {
tracing::debug!(
"Project without `tool.knot` section: '{}'",
closest_project.root()
);
closest_project
} else {
tracing::debug!("The ancestor directories contain no `pyproject.toml`. Falling back to a virtual project.");
// Create a project with a default configuration
Self::new(
path.file_name().unwrap_or("root").into(),
path.to_path_buf(),
)
};
Ok(metadata)
}
pub fn root(&self) -> &SystemPath {
&self.root
}
pub fn name(&self) -> &str {
&self.name
}
pub fn options(&self) -> &Options {
&self.options
}
pub fn extra_configuration_paths(&self) -> &[SystemPathBuf] {
&self.extra_configuration_paths
}
pub fn to_program_settings(&self, system: &dyn System) -> ProgramSettings {
self.options.to_program_settings(self.root(), system)
}
/// Combine the project options with the CLI options where the CLI options take precedence.
pub fn apply_cli_options(&mut self, options: Options) {
self.options = options.combine(std::mem::take(&mut self.options));
}
/// Applies the options from the configuration files to the project's options.
///
/// This includes:
///
/// * The user-level configuration
pub fn apply_configuration_files(
&mut self,
system: &dyn System,
) -> Result<(), ConfigurationFileError> {
if let Some(user) = ConfigurationFile::user(system)? {
tracing::debug!(
"Applying user-level configuration loaded from `{path}`.",
path = user.path()
);
self.apply_configuration_file(user);
}
Ok(())
}
/// Applies a lower-precedence configuration file to the project's options.
fn apply_configuration_file(&mut self, options: ConfigurationFile) {
self.extra_configuration_paths
.push(options.path().to_owned());
self.options.combine_with(options.into_options());
}
}
#[derive(Debug, Error)]
pub enum ProjectDiscoveryError {
#[error("project path '{0}' is not a directory")]
NotADirectory(SystemPathBuf),
#[error("{path} is not a valid `pyproject.toml`: {source}")]
InvalidPyProject {
source: Box<PyProjectError>,
path: SystemPathBuf,
},
#[error("{path} is not a valid `knot.toml`: {source}")]
InvalidKnotToml {
source: Box<KnotTomlError>,
path: SystemPathBuf,
},
#[error("Invalid `requires-python` version specifier (`{path}`): {source}")]
InvalidRequiresPythonConstraint {
source: ResolveRequiresPythonError,
path: SystemPathBuf,
},
}
#[cfg(test)]
mod tests {
//! Integration tests for project discovery
use anyhow::{anyhow, Context};
use insta::assert_ron_snapshot;
use ruff_db::system::{SystemPathBuf, TestSystem};
use ruff_python_ast::PythonVersion;
use crate::{ProjectDiscoveryError, ProjectMetadata};
#[test]
fn project_without_pyproject() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_files_all([(root.join("foo.py"), ""), (root.join("bar.py"), "")])
.context("Failed to write files")?;
let project =
ProjectMetadata::discover(&root, &system).context("Failed to discover project")?;
assert_eq!(project.root(), &*root);
with_escaped_paths(|| {
assert_ron_snapshot!(&project, @r#"
ProjectMetadata(
name: Name("app"),
root: "/app",
options: Options(),
)
"#);
});
Ok(())
}
#[test]
fn project_with_pyproject() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_files_all([
(
root.join("pyproject.toml"),
r#"
[project]
name = "backend"
"#,
),
(root.join("db/__init__.py"), ""),
])
.context("Failed to write files")?;
let project =
ProjectMetadata::discover(&root, &system).context("Failed to discover project")?;
assert_eq!(project.root(), &*root);
with_escaped_paths(|| {
assert_ron_snapshot!(&project, @r#"
ProjectMetadata(
name: Name("backend"),
root: "/app",
options: Options(),
)
"#);
});
// Discovering the same package from a subdirectory should give the same result
let from_src = ProjectMetadata::discover(&root.join("db"), &system)
.context("Failed to discover project from src sub-directory")?;
assert_eq!(from_src, project);
Ok(())
}
#[test]
fn project_with_invalid_pyproject() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_files_all([
(
root.join("pyproject.toml"),
r#"
[project]
name = "backend"
[tool.knot
"#,
),
(root.join("db/__init__.py"), ""),
])
.context("Failed to write files")?;
let Err(error) = ProjectMetadata::discover(&root, &system) else {
return Err(anyhow!("Expected project discovery to fail because of invalid syntax in the pyproject.toml"));
};
assert_error_eq(
&error,
r#"/app/pyproject.toml is not a valid `pyproject.toml`: TOML parse error at line 5, column 31
|
5 | [tool.knot
| ^
invalid table header
expected `.`, `]`
"#,
);
Ok(())
}
#[test]
fn nested_projects_in_sub_project() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_files_all([
(
root.join("pyproject.toml"),
r#"
[project]
name = "project-root"
[tool.knot.src]
root = "src"
"#,
),
(
root.join("packages/a/pyproject.toml"),
r#"
[project]
name = "nested-project"
[tool.knot.src]
root = "src"
"#,
),
])
.context("Failed to write files")?;
let sub_project = ProjectMetadata::discover(&root.join("packages/a"), &system)?;
with_escaped_paths(|| {
assert_ron_snapshot!(sub_project, @r#"
ProjectMetadata(
name: Name("nested-project"),
root: "/app/packages/a",
options: Options(
src: Some(SrcOptions(
root: Some("src"),
)),
),
)
"#);
});
Ok(())
}
#[test]
fn nested_projects_in_root_project() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_files_all([
(
root.join("pyproject.toml"),
r#"
[project]
name = "project-root"
[tool.knot.src]
root = "src"
"#,
),
(
root.join("packages/a/pyproject.toml"),
r#"
[project]
name = "nested-project"
[tool.knot.src]
root = "src"
"#,
),
])
.context("Failed to write files")?;
let root = ProjectMetadata::discover(&root, &system)?;
with_escaped_paths(|| {
assert_ron_snapshot!(root, @r#"
ProjectMetadata(
name: Name("project-root"),
root: "/app",
options: Options(
src: Some(SrcOptions(
root: Some("src"),
)),
),
)
"#);
});
Ok(())
}
#[test]
fn nested_projects_without_knot_sections() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_files_all([
(
root.join("pyproject.toml"),
r#"
[project]
name = "project-root"
"#,
),
(
root.join("packages/a/pyproject.toml"),
r#"
[project]
name = "nested-project"
"#,
),
])
.context("Failed to write files")?;
let sub_project = ProjectMetadata::discover(&root.join("packages/a"), &system)?;
with_escaped_paths(|| {
assert_ron_snapshot!(sub_project, @r#"
ProjectMetadata(
name: Name("nested-project"),
root: "/app/packages/a",
options: Options(),
)
"#);
});
Ok(())
}
#[test]
fn nested_projects_with_outer_knot_section() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_files_all([
(
root.join("pyproject.toml"),
r#"
[project]
name = "project-root"
[tool.knot.environment]
python-version = "3.10"
"#,
),
(
root.join("packages/a/pyproject.toml"),
r#"
[project]
name = "nested-project"
"#,
),
])
.context("Failed to write files")?;
let root = ProjectMetadata::discover(&root.join("packages/a"), &system)?;
with_escaped_paths(|| {
assert_ron_snapshot!(root, @r#"
ProjectMetadata(
name: Name("project-root"),
root: "/app",
options: Options(
environment: Some(EnvironmentOptions(
r#python-version: Some("3.10"),
)),
),
)
"#);
});
Ok(())
}
/// A `knot.toml` takes precedence over any `pyproject.toml`.
///
/// However, the `pyproject.toml` is still loaded to get the project name and, in the future,
/// the requires-python constraint.
#[test]
fn project_with_knot_and_pyproject_toml() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_files_all([
(
root.join("pyproject.toml"),
r#"
[project]
name = "super-app"
requires-python = ">=3.12"
[tool.knot.src]
root = "this_option_is_ignored"
"#,
),
(
root.join("knot.toml"),
r#"
[src]
root = "src"
"#,
),
])
.context("Failed to write files")?;
let root = ProjectMetadata::discover(&root, &system)?;
with_escaped_paths(|| {
assert_ron_snapshot!(root, @r#"
ProjectMetadata(
name: Name("super-app"),
root: "/app",
options: Options(
environment: Some(EnvironmentOptions(
r#python-version: Some("3.12"),
)),
src: Some(SrcOptions(
root: Some("src"),
)),
),
)
"#);
});
Ok(())
}
#[test]
fn requires_python_major_minor() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_file_all(
root.join("pyproject.toml"),
r#"
[project]
requires-python = ">=3.12"
"#,
)
.context("Failed to write file")?;
let root = ProjectMetadata::discover(&root, &system)?;
assert_eq!(
root.options
.environment
.unwrap_or_default()
.python_version
.as_deref(),
Some(&PythonVersion::PY312)
);
Ok(())
}
#[test]
fn requires_python_major_only() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_file_all(
root.join("pyproject.toml"),
r#"
[project]
requires-python = ">=3"
"#,
)
.context("Failed to write file")?;
let root = ProjectMetadata::discover(&root, &system)?;
assert_eq!(
root.options
.environment
.unwrap_or_default()
.python_version
.as_deref(),
Some(&PythonVersion::from((3, 0)))
);
Ok(())
}
/// A `requires-python` constraint with major, minor and patch can be simplified
/// to major and minor (e.g. 3.12.1 -> 3.12).
#[test]
fn requires_python_major_minor_patch() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_file_all(
root.join("pyproject.toml"),
r#"
[project]
requires-python = ">=3.12.8"
"#,
)
.context("Failed to write file")?;
let root = ProjectMetadata::discover(&root, &system)?;
assert_eq!(
root.options
.environment
.unwrap_or_default()
.python_version
.as_deref(),
Some(&PythonVersion::PY312)
);
Ok(())
}
#[test]
fn requires_python_beta_version() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_file_all(
root.join("pyproject.toml"),
r#"
[project]
requires-python = ">= 3.13.0b0"
"#,
)
.context("Failed to write file")?;
let root = ProjectMetadata::discover(&root, &system)?;
assert_eq!(
root.options
.environment
.unwrap_or_default()
.python_version
.as_deref(),
Some(&PythonVersion::PY313)
);
Ok(())
}
#[test]
fn requires_python_greater_than_major_minor() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_file_all(
root.join("pyproject.toml"),
r#"
[project]
# This is somewhat nonsensical because 3.12.1 > 3.12 is true.
# That's why simplifying the constraint to >= 3.12 is correct
requires-python = ">3.12"
"#,
)
.context("Failed to write file")?;
let root = ProjectMetadata::discover(&root, &system)?;
assert_eq!(
root.options
.environment
.unwrap_or_default()
.python_version
.as_deref(),
Some(&PythonVersion::PY312)
);
Ok(())
}
/// `python-version` takes precedence if both `requires-python` and `python-version` are configured.
#[test]
fn requires_python_and_python_version() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_file_all(
root.join("pyproject.toml"),
r#"
[project]
requires-python = ">=3.12"
[tool.knot.environment]
python-version = "3.10"
"#,
)
.context("Failed to write file")?;
let root = ProjectMetadata::discover(&root, &system)?;
assert_eq!(
root.options
.environment
.unwrap_or_default()
.python_version
.as_deref(),
Some(&PythonVersion::PY310)
);
Ok(())
}
#[test]
fn requires_python_less_than() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_file_all(
root.join("pyproject.toml"),
r#"
[project]
requires-python = "<3.12"
"#,
)
.context("Failed to write file")?;
let Err(error) = ProjectMetadata::discover(&root, &system) else {
return Err(anyhow!("Expected project discovery to fail because the `requires-python` doesn't specify a lower bound (it only specifies an upper bound)."));
};
assert_error_eq(&error, "Invalid `requires-python` version specifier (`/app/pyproject.toml`): value `<3.12` does not contain a lower bound. Add a lower bound to indicate the minimum compatible Python version (e.g., `>=3.13`) or specify a version in `environment.python-version`.");
Ok(())
}
#[test]
fn requires_python_no_specifiers() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_file_all(
root.join("pyproject.toml"),
r#"
[project]
requires-python = ""
"#,
)
.context("Failed to write file")?;
let Err(error) = ProjectMetadata::discover(&root, &system) else {
return Err(anyhow!("Expected project discovery to fail because the `requires-python` specifiers are empty and don't define a lower bound."));
};
assert_error_eq(&error, "Invalid `requires-python` version specifier (`/app/pyproject.toml`): value `` does not contain a lower bound. Add a lower bound to indicate the minimum compatible Python version (e.g., `>=3.13`) or specify a version in `environment.python-version`.");
Ok(())
}
#[test]
fn requires_python_too_large_major_version() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_file_all(
root.join("pyproject.toml"),
r#"
[project]
requires-python = ">=999.0"
"#,
)
.context("Failed to write file")?;
let Err(error) = ProjectMetadata::discover(&root, &system) else {
return Err(anyhow!("Expected project discovery to fail because of the requires-python major version that is larger than 255."));
};
assert_error_eq(&error, "Invalid `requires-python` version specifier (`/app/pyproject.toml`): The major version `999` is larger than the maximum supported value 255");
Ok(())
}
#[track_caller]
fn assert_error_eq(error: &ProjectDiscoveryError, message: &str) {
assert_eq!(error.to_string().replace('\\', "/"), message);
}
fn with_escaped_paths<R>(f: impl FnOnce() -> R) -> R {
let mut settings = insta::Settings::clone_current();
settings.add_dynamic_redaction(".root", |content, _path| {
content.as_str().unwrap().replace('\\', "/")
});
settings.bind(f)
}
}

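The ancestor walk in `ProjectMetadata::discover` boils down to a three-step precedence check per directory. A simplified sketch of that walk; the probe functions are invented placeholders, and the real implementation parses the files and builds a `ProjectMetadata` rather than returning a path:

use std::path::{Path, PathBuf};

// Invented probes: the real implementation reads and parses these files.
fn has_knot_toml(dir: &Path) -> bool {
    dir.join("knot.toml").is_file()
}

fn has_pyproject(dir: &Path) -> bool {
    dir.join("pyproject.toml").is_file()
}

fn pyproject_has_knot_section(_dir: &Path) -> bool {
    false // stub: would parse `pyproject.toml` and check for `[tool.knot]`
}

fn discover_root(start: &Path) -> PathBuf {
    let mut closest_pyproject: Option<PathBuf> = None;
    for dir in start.ancestors() {
        // 1. A `knot.toml`, or a `pyproject.toml` with a `tool.knot` section,
        //    ends the walk immediately.
        if has_knot_toml(dir) || (has_pyproject(dir) && pyproject_has_knot_section(dir)) {
            return dir.to_path_buf();
        }
        // 2. Remember the closest plain `pyproject.toml` as a fallback root.
        if closest_pyproject.is_none() && has_pyproject(dir) {
            closest_pyproject = Some(dir.to_path_buf());
        }
    }
    // 3. Otherwise fall back to the start path with default settings.
    closest_pyproject.unwrap_or_else(|| start.to_path_buf())
}

fn main() {
    println!("{}", discover_root(Path::new(".")).display());
}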
View File

@@ -1,69 +0,0 @@
use std::sync::Arc;
use ruff_db::system::{System, SystemPath, SystemPathBuf};
use thiserror::Error;
use crate::metadata::value::ValueSource;
use super::options::{KnotTomlError, Options};
/// A `knot.toml` configuration file with the options it contains.
pub(crate) struct ConfigurationFile {
path: SystemPathBuf,
options: Options,
}
impl ConfigurationFile {
/// Loads the user-level configuration file if it exists.
///
/// Returns `None` if the file does not exist or if the concept of user-level configurations
/// doesn't exist on `system`.
pub(crate) fn user(system: &dyn System) -> Result<Option<Self>, ConfigurationFileError> {
let Some(configuration_directory) = system.user_config_directory() else {
return Ok(None);
};
let knot_toml_path = configuration_directory.join("knot").join("knot.toml");
tracing::debug!(
"Searching for a user-level configuration at `{path}`",
path = &knot_toml_path
);
let Ok(knot_toml_str) = system.read_to_string(&knot_toml_path) else {
return Ok(None);
};
match Options::from_toml_str(
&knot_toml_str,
ValueSource::File(Arc::new(knot_toml_path.clone())),
) {
Ok(options) => Ok(Some(Self {
path: knot_toml_path,
options,
})),
Err(error) => Err(ConfigurationFileError::InvalidKnotToml {
source: Box::new(error),
path: knot_toml_path,
}),
}
}
/// Returns the path to the configuration file.
pub(crate) fn path(&self) -> &SystemPath {
&self.path
}
pub(crate) fn into_options(self) -> Options {
self.options
}
}
#[derive(Debug, Error)]
pub enum ConfigurationFileError {
#[error("{path} is not a valid `knot.toml`: {source}")]
InvalidKnotToml {
source: Box<KnotTomlError>,
path: SystemPathBuf,
},
}

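`ConfigurationFile::user` above looks for `<user config directory>/knot/knot.toml`. A small sketch of computing that lookup path, using `std::env` as a stand-in for the `System::user_config_directory` abstraction; the XDG/`~/.config` fallback is an assumption about common Linux conventions, not necessarily the crate's exact resolution:

use std::path::PathBuf;

// Stand-in for `System::user_config_directory`: `$XDG_CONFIG_HOME` if set,
// otherwise `~/.config` (a common Linux convention).
fn user_config_directory() -> Option<PathBuf> {
    std::env::var_os("XDG_CONFIG_HOME")
        .map(PathBuf::from)
        .or_else(|| std::env::var_os("HOME").map(|home| PathBuf::from(home).join(".config")))
}

fn main() {
    if let Some(config_dir) = user_config_directory() {
        // The user-level configuration, if present, is expected here.
        println!("{}", config_dir.join("knot").join("knot.toml").display());
    }
}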
View File

@@ -1,419 +0,0 @@
use crate::metadata::value::{RangedValue, RelativePathBuf, ValueSource, ValueSourceGuard};
use crate::Db;
use red_knot_python_semantic::lint::{GetLintError, Level, LintSource, RuleSelection};
use red_knot_python_semantic::{ProgramSettings, PythonPath, PythonPlatform, SearchPathSettings};
use ruff_db::diagnostic::{Annotation, Diagnostic, DiagnosticFormat, DiagnosticId, Severity, Span};
use ruff_db::files::system_path_to_file;
use ruff_db::system::{System, SystemPath};
use ruff_macros::Combine;
use ruff_python_ast::PythonVersion;
use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};
use std::fmt::Debug;
use thiserror::Error;
use super::settings::{Settings, TerminalSettings};
/// The options for the project.
#[derive(Debug, Default, Clone, PartialEq, Eq, Combine, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Options {
/// Configures the type checking environment.
#[serde(skip_serializing_if = "Option::is_none")]
pub environment: Option<EnvironmentOptions>,
#[serde(skip_serializing_if = "Option::is_none")]
pub src: Option<SrcOptions>,
/// Configures the enabled lints and their severity.
#[serde(skip_serializing_if = "Option::is_none")]
pub rules: Option<Rules>,
#[serde(skip_serializing_if = "Option::is_none")]
pub terminal: Option<TerminalOptions>,
}
impl Options {
pub(crate) fn from_toml_str(content: &str, source: ValueSource) -> Result<Self, KnotTomlError> {
let _guard = ValueSourceGuard::new(source, true);
let options = toml::from_str(content)?;
Ok(options)
}
pub fn deserialize_with<'de, D>(source: ValueSource, deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let _guard = ValueSourceGuard::new(source, false);
Self::deserialize(deserializer)
}
pub(crate) fn to_program_settings(
&self,
project_root: &SystemPath,
system: &dyn System,
) -> ProgramSettings {
let python_version = self
.environment
.as_ref()
.and_then(|env| env.python_version.as_deref().copied())
.unwrap_or_default();
let python_platform = self
.environment
.as_ref()
.and_then(|env| env.python_platform.as_deref().cloned())
.unwrap_or_else(|| {
let default = PythonPlatform::default();
tracing::info!(
"Defaulting to default python version for this platform: '{default}'",
);
default
});
ProgramSettings {
python_version,
python_platform,
search_paths: self.to_search_path_settings(project_root, system),
}
}
fn to_search_path_settings(
&self,
project_root: &SystemPath,
system: &dyn System,
) -> SearchPathSettings {
let src_roots = if let Some(src_root) = self.src.as_ref().and_then(|src| src.root.as_ref())
{
vec![src_root.absolute(project_root, system)]
} else {
let src = project_root.join("src");
// Default to `src` and the project root if `src` exists and the root hasn't been specified.
if system.is_directory(&src) {
vec![project_root.to_path_buf(), src]
} else {
vec![project_root.to_path_buf()]
}
};
let (extra_paths, python, typeshed) = self
.environment
.as_ref()
.map(|env| {
(
env.extra_paths.clone(),
env.python.clone(),
env.typeshed.clone(),
)
})
.unwrap_or_default();
SearchPathSettings {
extra_paths: extra_paths
.unwrap_or_default()
.into_iter()
.map(|path| path.absolute(project_root, system))
.collect(),
src_roots,
custom_typeshed: typeshed.map(|path| path.absolute(project_root, system)),
python_path: python
.map(|python_path| {
PythonPath::from_cli_flag(python_path.absolute(project_root, system))
})
.or_else(|| {
std::env::var("VIRTUAL_ENV")
.ok()
.map(PythonPath::from_virtual_env_var)
})
.unwrap_or_else(|| PythonPath::Discover(project_root.to_path_buf())),
}
}
#[must_use]
pub(crate) fn to_settings(&self, db: &dyn Db) -> (Settings, Vec<OptionDiagnostic>) {
let (rules, diagnostics) = self.to_rule_selection(db);
let mut settings = Settings::new(rules);
if let Some(terminal) = self.terminal.as_ref() {
settings.set_terminal(TerminalSettings {
output_format: terminal
.output_format
.as_deref()
.copied()
.unwrap_or_default(),
error_on_warning: terminal.error_on_warning.unwrap_or_default(),
});
}
(settings, diagnostics)
}
#[must_use]
fn to_rule_selection(&self, db: &dyn Db) -> (RuleSelection, Vec<OptionDiagnostic>) {
let registry = db.lint_registry();
let mut diagnostics = Vec::new();
// Initialize the selection with the defaults
let mut selection = RuleSelection::from_registry(registry);
let rules = self
.rules
.as_ref()
.into_iter()
.flat_map(|rules| rules.inner.iter());
for (rule_name, level) in rules {
let source = rule_name.source();
match registry.get(rule_name) {
Ok(lint) => {
let lint_source = match source {
ValueSource::File(_) => LintSource::File,
ValueSource::Cli => LintSource::Cli,
};
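                    // Levels with no corresponding severity (such as `ignore`) fail the
                    // conversion, which disables the lint instead of assigning a severity.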
if let Ok(severity) = Severity::try_from(**level) {
selection.enable(lint, severity, lint_source);
} else {
selection.disable(lint);
}
}
Err(error) => {
                    // `system_path_to_file` can return `Err` if the file was deleted since the
                    // configuration was read. This should be rare, and it's acceptable not to
                    // show a configuration file in the diagnostic in that case.
let file = source
.file()
.and_then(|path| system_path_to_file(db.upcast(), path).ok());
// TODO: Add a note if the value was configured on the CLI
let diagnostic = match error {
GetLintError::Unknown(_) => OptionDiagnostic::new(
DiagnosticId::UnknownRule,
format!("Unknown lint rule `{rule_name}`"),
Severity::Warning,
),
GetLintError::PrefixedWithCategory { suggestion, .. } => {
OptionDiagnostic::new(
DiagnosticId::UnknownRule,
format!(
"Unknown lint rule `{rule_name}`. Did you mean `{suggestion}`?"
),
Severity::Warning,
)
}
                        GetLintError::Removed(_) => OptionDiagnostic::new(
                            DiagnosticId::UnknownRule,
                            format!("Lint rule `{rule_name}` has been removed"),
                            Severity::Warning,
                        ),
};
let span = file.map(Span::from).map(|span| {
if let Some(range) = rule_name.range() {
span.with_range(range)
} else {
span
}
});
diagnostics.push(diagnostic.with_span(span));
}
}
}
(selection, diagnostics)
}
}
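// A minimal round-trip sketch (not part of the original module): it exercises
// `Options::from_toml_str` with a configuration that touches each table. The
// concrete values, and `ValueSource::Cli` as the source, are illustrative
// assumptions; the field names follow the kebab-case serde renames above.
#[cfg(test)]
mod from_toml_str_sketch {
    use super::*;

    #[test]
    fn parses_a_minimal_knot_toml() {
        let content = r#"
            [environment]
            python-version = "3.12"

            [src]
            root = "./app"

            [terminal]
            error-on-warning = true
        "#;

        let options = Options::from_toml_str(content, ValueSource::Cli)
            .expect("the sketch configuration should deserialize");
        assert!(options.environment.is_some());
        assert!(options.src.is_some());
        assert!(options.terminal.is_some());
    }
}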
#[derive(Debug, Default, Clone, Eq, PartialEq, Combine, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct EnvironmentOptions {
    /// Specifies the version of Python that will be used to analyze the source code.
    /// The version should be specified as a string in the format `M.m` where `M` is the major version
    /// and `m` is the minor (e.g. "3.0" or "3.6").
    /// If a version is provided, Red Knot will generate errors if the source code makes use of language features
    /// that are not supported in that version.
    /// It will also tailor its use of type stub files, which conditionalize type definitions based on the version.
#[serde(skip_serializing_if = "Option::is_none")]
pub python_version: Option<RangedValue<PythonVersion>>,
/// Specifies the target platform that will be used to analyze the source code.
/// If specified, Red Knot will tailor its use of type stub files,
/// which conditionalize type definitions based on the platform.
///
    /// If no platform is specified, Red Knot will use the current platform:
/// - `win32` for Windows
/// - `darwin` for macOS
/// - `android` for Android
/// - `ios` for iOS
/// - `linux` for everything else
#[serde(skip_serializing_if = "Option::is_none")]
pub python_platform: Option<RangedValue<PythonPlatform>>,
    /// List of user-provided paths that should take first priority in module resolution.
    /// Examples in other type checkers are mypy's `MYPYPATH` environment variable
    /// and pyright's `stubPath` configuration setting.
#[serde(skip_serializing_if = "Option::is_none")]
pub extra_paths: Option<Vec<RelativePathBuf>>,
    /// Optional path to a "typeshed" directory on disk for us to use for standard-library types.
    /// If this is not provided, we will fall back to our vendored typeshed stubs for the stdlib,
    /// bundled as a zip file in the binary.
#[serde(skip_serializing_if = "Option::is_none")]
pub typeshed: Option<RelativePathBuf>,
/// Path to the Python installation from which Red Knot resolves type information and third-party dependencies.
///
/// Red Knot will search in the path's `site-packages` directories for type information and
/// third-party imports.
///
/// This option is commonly used to specify the path to a virtual environment.
#[serde(skip_serializing_if = "Option::is_none")]
pub python: Option<RelativePathBuf>,
}
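// For reference, a fully populated `[environment]` table corresponding to the
// fields above would look roughly like this (all values are illustrative):
//
//     [environment]
//     python-version = "3.12"
//     python-platform = "linux"
//     extra-paths = ["./typings"]
//     typeshed = "./vendor/typeshed"
//     python = "./.venv"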
#[derive(Debug, Default, Clone, Eq, PartialEq, Combine, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct SrcOptions {
/// The root of the project, used for finding first-party modules.
#[serde(skip_serializing_if = "Option::is_none")]
pub root: Option<RelativePathBuf>,
}
#[derive(Debug, Default, Clone, Eq, PartialEq, Combine, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", transparent)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Rules {
#[cfg_attr(feature = "schemars", schemars(with = "schema::Rules"))]
inner: FxHashMap<RangedValue<String>, RangedValue<Level>>,
}
impl FromIterator<(RangedValue<String>, RangedValue<Level>)> for Rules {
fn from_iter<T: IntoIterator<Item = (RangedValue<String>, RangedValue<Level>)>>(
iter: T,
) -> Self {
Self {
inner: iter.into_iter().collect(),
}
}
}
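// Because `Rules` is `#[serde(transparent)]`, it deserializes straight from the
// `[rules]` table as a map of rule names to levels. A sketch (the rule names,
// and the assumption that `Level` serializes as `ignore`/`warn`/`error`, are
// illustrative):
//
//     [rules]
//     division-by-zero = "warn"
//     unresolved-import = "error"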
#[derive(Debug, Default, Clone, Eq, PartialEq, Combine, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct TerminalOptions {
/// The format to use for printing diagnostic messages.
///
/// Defaults to `full`.
#[serde(skip_serializing_if = "Option::is_none")]
pub output_format: Option<RangedValue<DiagnosticFormat>>,
    /// Use exit code 1 if there are any warning-level diagnostics.
    ///
    /// Defaults to `false`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error_on_warning: Option<bool>,
}
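// The corresponding `[terminal]` table would look roughly like this (values are
// illustrative; `output-format` accepts the `DiagnosticFormat` variants):
//
//     [terminal]
//     output-format = "full"
//     error-on-warning = true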
#[cfg(feature = "schemars")]
mod schema {
use crate::DEFAULT_LINT_REGISTRY;
use red_knot_python_semantic::lint::Level;
use schemars::gen::SchemaGenerator;
use schemars::schema::{
InstanceType, Metadata, ObjectValidation, Schema, SchemaObject, SubschemaValidation,
};
use schemars::JsonSchema;
pub(super) struct Rules;
impl JsonSchema for Rules {
fn schema_name() -> String {
"Rules".to_string()
}
fn json_schema(gen: &mut SchemaGenerator) -> Schema {
let registry = &*DEFAULT_LINT_REGISTRY;
let level_schema = gen.subschema_for::<Level>();
let properties: schemars::Map<String, Schema> = registry
.lints()
.iter()
.map(|lint| {
(
lint.name().to_string(),
Schema::Object(SchemaObject {
metadata: Some(Box::new(Metadata {
title: Some(lint.summary().to_string()),
description: Some(lint.documentation()),
deprecated: lint.status.is_deprecated(),
default: Some(lint.default_level.to_string().into()),
..Metadata::default()
})),
subschemas: Some(Box::new(SubschemaValidation {
one_of: Some(vec![level_schema.clone()]),
..Default::default()
})),
..Default::default()
}),
)
})
.collect();
Schema::Object(SchemaObject {
instance_type: Some(InstanceType::Object.into()),
object: Some(Box::new(ObjectValidation {
properties,
                        // Allow unknown rules: Red Knot will warn about them.
                        // This gives a better experience when an older Red Knot version is used
                        // with a newer schema, because the schema won't reject rules that were
                        // removed in later versions but are still valid for the older binary.
additional_properties: Some(Box::new(level_schema)),
..ObjectValidation::default()
})),
..Default::default()
})
}
}
}
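// Abridged sketch of the JSON the generator above produces (the rule name is
// illustrative): each known lint becomes a titled property constrained to the
// `Level` schema, while `additionalProperties` keeps unknown rule names valid.
//
//     {
//       "type": "object",
//       "properties": {
//         "division-by-zero": { "title": "...", "oneOf": [{ "$ref": "#/definitions/Level" }] }
//       },
//       "additionalProperties": { "$ref": "#/definitions/Level" }
//     }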
#[derive(Error, Debug)]
pub enum KnotTomlError {
#[error(transparent)]
TomlSyntax(#[from] toml::de::Error),
}
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct OptionDiagnostic {
id: DiagnosticId,
message: String,
severity: Severity,
span: Option<Span>,
}
impl OptionDiagnostic {
pub fn new(id: DiagnosticId, message: String, severity: Severity) -> Self {
Self {
id,
message,
severity,
span: None,
}
}
#[must_use]
fn with_span(self, span: Option<Span>) -> Self {
OptionDiagnostic { span, ..self }
}
pub(crate) fn to_diagnostic(&self) -> Diagnostic {
if let Some(ref span) = self.span {
let mut diag = Diagnostic::new(self.id, self.severity, "");
diag.annotate(Annotation::primary(span.clone()).message(&self.message));
diag
} else {
Diagnostic::new(self.id, self.severity, &self.message)
}
}
}
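// A small construction sketch (the message text is illustrative): without a span
// the message lands on the diagnostic itself; with a span it becomes the primary
// annotation's message instead.
#[cfg(test)]
mod option_diagnostic_sketch {
    use super::*;

    #[test]
    fn renders_without_a_span() {
        let diagnostic = OptionDiagnostic::new(
            DiagnosticId::UnknownRule,
            "Unknown lint rule `does-not-exist`".to_string(),
            Severity::Warning,
        );
        // No span attached, so `to_diagnostic` takes the plain-message branch.
        let _rendered = diagnostic.to_diagnostic();
    }
}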
