Compare commits

..

1 commit

Author: Charlie Marsh
SHA1: e187c66573
Message: Try a bit set
Date: 2022-11-18 22:20:41 -05:00

9376 changed files with 52551 additions and 1097685 deletions


@@ -1,14 +1,2 @@
[alias]
dev = "run --package ruff_dev --bin ruff_dev"
benchmark = "bench -p ruff_benchmark --bench linter --bench formatter --"
# statically link the C runtime so the executable does not depend on
# that shared/dynamic library.
#
# See: https://github.com/astral-sh/ruff/issues/11503
[target.'cfg(all(target_env="msvc", target_os = "windows"))']
rustflags = ["-C", "target-feature=+crt-static"]
[target.'wasm32-unknown-unknown']
# See https://docs.rs/getrandom/latest/getrandom/#webassembly-support
rustflags = ["--cfg", 'getrandom_backend="wasm_js"']


@@ -1,15 +0,0 @@
[profile.ci]
# Print out output for failing tests as soon as they fail, and also at the end
# of the run (for easy scrollability).
failure-output = "immediate-final"
# Do not cancel the test run on the first failure.
fail-fast = false
status-level = "skip"
# Mark tests that take longer than 1s as slow.
# Terminate after 60s as a stop-gap measure to terminate on deadlock.
slow-timeout = { period = "1s", terminate-after = 60 }
# Show slow jobs in the final summary
final-status-level = "slow"
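
The profile above only takes effect when nextest is told to use it; the CI workflow further down in this diff selects it through the NEXTEST_PROFILE environment variable. A minimal local sketch (assuming cargo-nextest is installed):

# Run the suite with the [profile.ci] settings above instead of the defaults.
NEXTEST_PROFILE=ci cargo nextest run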

.devcontainer/devcontainer.json

@@ -1,46 +0,0 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/rust
{
"name": "Ruff",
"image": "mcr.microsoft.com/devcontainers/rust:0-1-bullseye",
"mounts": [
{
"source": "devcontainer-cargo-cache-${devcontainerId}",
"target": "/usr/local/cargo",
"type": "volume"
}
],
"customizations": {
"codespaces": {
"openFiles": [
"CONTRIBUTING.md"
]
},
"vscode": {
"extensions": [
"ms-python.python",
"rust-lang.rust-analyzer",
"fill-labs.dependi",
"tamasfe.even-better-toml",
"Swellaby.vscode-rust-test-adapter",
"charliermarsh.ruff"
],
"settings": {
"rust-analyzer.updates.askBeforeDownload": false
}
}
},
// Features to add to the dev container. More info: https://containers.dev/features.
"features": {
"ghcr.io/devcontainers/features/python": {
"installTools": false
}
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
"postCreateCommand": ".devcontainer/post-create.sh"
// Configure tool-specific properties.
// "customizations": {},
// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
// "remoteUser": "root"
}
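
Outside of Codespaces, the same container can be built and entered with the Dev Containers CLI (a sketch; VS Code's "Reopen in Container" command is the usual path). The named volume in mounts is what keeps the Cargo registry cache alive across container rebuilds:

# Assumes the @devcontainers/cli npm package is installed.
devcontainer up --workspace-folder .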

.devcontainer/post-create.sh

@@ -1,8 +0,0 @@
#!/usr/bin/env bash
rustup default < rust-toolchain
rustup component add clippy rustfmt
cargo install cargo-insta
cargo fetch
pip install maturin pre-commit


@@ -10,14 +10,5 @@ indent_style = space
insert_final_newline = true
indent_size = 2
[*.{rs,py,pyi}]
[*.{rs,py}]
indent_size = 4
[*.snap]
trim_trailing_whitespace = false
[*.md]
max_line_length = 100
[*.toml]
indent_size = 4

.gitattributes

@@ -1,20 +0,0 @@
* text=auto eol=lf
crates/ruff_linter/resources/test/fixtures/isort/line_ending_crlf.py text eol=crlf
crates/ruff_linter/resources/test/fixtures/pycodestyle/W605_1.py text eol=crlf
crates/ruff_linter/resources/test/fixtures/pycodestyle/W391_2.py text eol=crlf
crates/ruff_linter/resources/test/fixtures/pycodestyle/W391_3.py text eol=crlf
crates/ruff_python_formatter/resources/test/fixtures/ruff/docstring_code_examples_crlf.py text eol=crlf
crates/ruff_python_formatter/tests/snapshots/format@docstring_code_examples_crlf.py.snap text eol=crlf
crates/ruff_python_parser/resources/invalid/re_lexing/line_continuation_windows_eol.py text eol=crlf
crates/ruff_python_parser/resources/invalid/re_lex_logical_token_windows_eol.py text eol=crlf
crates/ruff_python_parser/resources/invalid/re_lex_logical_token_mac_eol.py text eol=cr
crates/ruff_python_parser/resources/inline linguist-generated=true
ruff.schema.json -diff linguist-generated=true text=auto eol=lf
crates/ruff_python_ast/src/generated.rs -diff linguist-generated=true text=auto eol=lf
crates/ruff_python_formatter/src/generated.rs -diff linguist-generated=true text=auto eol=lf
*.md.snap linguist-language=Markdown

.github/CODEOWNERS

@@ -1,24 +0,0 @@
# GitHub code owners file. For more info: https://help.github.com/articles/about-codeowners/
#
# - Comment lines begin with `#` character.
# - Each line is a file pattern followed by one or more owners.
# - The '*' pattern is global owners.
# - Order is important. The last matching pattern has the most precedence.
/crates/ruff_notebook/ @dhruvmanila
/crates/ruff_formatter/ @MichaReiser
/crates/ruff_python_formatter/ @MichaReiser
/crates/ruff_python_parser/ @MichaReiser @dhruvmanila
/crates/ruff_annotate_snippets/ @BurntSushi
# flake8-pyi
/crates/ruff_linter/src/rules/flake8_pyi/ @AlexWaygood
# Script for fuzzing the parser/red-knot etc.
/python/py-fuzzer/ @AlexWaygood
# red-knot
/crates/red_knot* @carljm @MichaReiser @AlexWaygood @sharkdp @dcreager
/crates/ruff_db/ @carljm @MichaReiser @AlexWaygood @sharkdp @dcreager
/scripts/knot_benchmark/ @carljm @MichaReiser @AlexWaygood @sharkdp @dcreager
/crates/red_knot_python_semantic @carljm @AlexWaygood @sharkdp @dcreager


@@ -1,31 +0,0 @@
name: Bug report
description: Report an error or unexpected behavior
body:
- type: markdown
attributes:
value: |
Thank you for taking the time to report an issue! We're glad to have you involved with Ruff.
**Before reporting, please make sure to search through [existing issues](https://github.com/astral-sh/ruff/issues?q=is:issue+is:open+label:bug) (including [closed](https://github.com/astral-sh/ruff/issues?q=is:issue%20state:closed%20label:bug)).**
- type: textarea
attributes:
label: Summary
description: |
A clear and concise description of the bug, including a minimal reproducible example.
Be sure to include the command you invoked (e.g., `ruff check /path/to/file.py --fix`), ideally including the `--isolated` flag and
the current Ruff settings (e.g., relevant sections from your `pyproject.toml`).
If possible, try to include the [playground](https://play.ruff.rs) link that reproduces this issue.
validations:
required: true
- type: input
attributes:
label: Version
description: What version of ruff are you using? (see `ruff version`)
placeholder: e.g., ruff 0.9.3 (90589372d 2025-01-23)
validations:
required: false


@@ -1,10 +0,0 @@
name: Rule request
description: Anything related to lint rules (proposing new rules, changes to existing rules, auto-fixes, etc.)
body:
- type: textarea
attributes:
label: Summary
description: |
A clear and concise description of the relevant request. If applicable, please describe the current behavior as well.
validations:
required: true


@@ -1,18 +0,0 @@
name: Question
description: Ask a question about Ruff
labels: ["question"]
body:
- type: textarea
attributes:
label: Question
description: Describe your question in detail.
validations:
required: true
- type: input
attributes:
label: Version
description: What version of ruff are you using? (see `ruff version`)
placeholder: e.g., ruff 0.9.3 (90589372d 2025-01-23)
validations:
required: false


@@ -1,8 +0,0 @@
blank_issues_enabled: true
contact_links:
- name: Documentation
url: https://docs.astral.sh/ruff
about: Please consult the documentation before creating an issue.
- name: Community
url: https://discord.com/invite/astral-sh
about: Join our Discord community to ask questions and collaborate.


@@ -1,15 +0,0 @@
<!--
Thank you for contributing to Ruff! To help us out with reviewing, please consider the following:
- Does this pull request include a summary of the change? (See below.)
- Does this pull request include a descriptive title?
- Does this pull request include references to any relevant issues?
-->
## Summary
<!-- What's the purpose of the change? What does it do, and why? -->
## Test Plan
<!-- How was it tested? -->


@@ -1,10 +0,0 @@
# Configuration for the actionlint tool, which we run via pre-commit
# to verify the correctness of the syntax in our GitHub Actions workflows.
self-hosted-runner:
# Various runners we use that aren't recognized out-of-the-box by actionlint:
labels:
- depot-ubuntu-latest-8
- depot-ubuntu-22.04-16
- github-windows-2025-x86_64-8
- github-windows-2025-x86_64-16

.github/renovate.json5

@@ -1,115 +0,0 @@
{
$schema: "https://docs.renovatebot.com/renovate-schema.json",
dependencyDashboard: true,
suppressNotifications: ["prEditedNotification"],
extends: ["config:recommended"],
labels: ["internal"],
schedule: ["before 4am on Monday"],
semanticCommits: "disabled",
separateMajorMinor: false,
prHourlyLimit: 10,
enabledManagers: ["github-actions", "pre-commit", "cargo", "pep621", "pip_requirements", "npm"],
cargo: {
// See https://docs.renovatebot.com/configuration-options/#rangestrategy
rangeStrategy: "update-lockfile",
},
pep621: {
// The default for this package manager is to only search for `pyproject.toml` files
// found at the repository root: https://docs.renovatebot.com/modules/manager/pep621/#file-matching
fileMatch: ["^(python|scripts)/.*pyproject\\.toml$"],
},
pip_requirements: {
// The default for this package manager is to run on all requirements.txt files:
// https://docs.renovatebot.com/modules/manager/pip_requirements/#file-matching
// `fileMatch` doesn't work for excluding files; to exclude `requirements.txt` files
// outside the `doc/` directory, we instead have to use `ignorePaths`. Unlike `fileMatch`,
// which takes a regex string, `ignorePaths` takes a glob string, so we have to use
// a "negative glob pattern".
// See:
// - https://docs.renovatebot.com/modules/manager/#ignoring-files-that-match-the-default-filematch
// - https://docs.renovatebot.com/configuration-options/#ignorepaths
// - https://docs.renovatebot.com/string-pattern-matching/#negative-matching
ignorePaths: ["!docs/requirements*.txt"]
},
npm: {
// The default for this package manager is to only search for `package.json` files
// found at the repository root: https://docs.renovatebot.com/modules/manager/npm/#file-matching
fileMatch: ["^playground/.*package\\.json$"],
},
"pre-commit": {
enabled: true,
},
packageRules: [
// Pin GitHub Actions to immutable SHAs.
{
matchDepTypes: ["action"],
pinDigests: true,
},
// Annotate GitHub Actions SHAs with a SemVer version.
{
extends: ["helpers:pinGitHubActionDigests"],
extractVersion: "^(?<version>v?\\d+\\.\\d+\\.\\d+)$",
versioning: "regex:^v?(?<major>\\d+)(\\.(?<minor>\\d+)\\.(?<patch>\\d+))?$",
},
{
// Group upload/download artifact updates, the versions are dependent
groupName: "Artifact GitHub Actions dependencies",
matchManagers: ["github-actions"],
matchDatasources: ["gitea-tags", "github-tags"],
matchPackageNames: ["actions/.*-artifact"],
description: "Weekly update of artifact-related GitHub Actions dependencies",
},
{
// This package rule disables updates for GitHub runners:
// we'd only pin them to a specific version
// if there was a deliberate reason to do so
groupName: "GitHub runners",
matchManagers: ["github-actions"],
matchDatasources: ["github-runners"],
description: "Disable PRs updating GitHub runners (e.g. 'runs-on: macos-14')",
enabled: false,
},
{
// Disable updates of `zip-rs`; intentionally pinned for now due to ownership change
// See: https://github.com/astral-sh/uv/issues/3642
matchPackageNames: ["zip"],
matchManagers: ["cargo"],
enabled: false,
},
{
// `mkdocs-material` requires a manual update to keep the version in sync
// with `mkdocs-material-insider`.
// See: https://squidfunk.github.io/mkdocs-material/insiders/upgrade/
matchManagers: ["pip_requirements"],
matchPackageNames: ["mkdocs-material"],
enabled: false,
},
{
groupName: "pre-commit dependencies",
matchManagers: ["pre-commit"],
description: "Weekly update of pre-commit dependencies",
},
{
groupName: "NPM Development dependencies",
matchManagers: ["npm"],
matchDepTypes: ["devDependencies"],
description: "Weekly update of NPM development dependencies",
},
{
groupName: "Monaco",
matchManagers: ["npm"],
matchPackageNames: ["monaco"],
description: "Weekly update of the Monaco editor",
},
{
groupName: "strum",
matchManagers: ["cargo"],
matchPackageNames: ["strum"],
description: "Weekly update of strum dependencies",
}
],
vulnerabilityAlerts: {
commitMessageSuffix: "",
labels: ["internal", "security"],
},
}
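
The ignorePaths entry in the pip_requirements block is the subtle part of this config: a single negative glob means "ignore every requirements file that does not match". A sketch of the effect (the non-docs paths are illustrative):

# With ignorePaths: ["!docs/requirements*.txt"]:
#   docs/requirements.txt        -> processed (matches the negative pattern)
#   scripts/requirements.txt     -> ignored
#   playground/requirements.txt  -> ignored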

.github/workflows/build-binaries.yml

@@ -1,480 +0,0 @@
# Build ruff on all platforms.
#
# Generates both wheels (for PyPI) and archived binaries (for GitHub releases).
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a local
# artifacts job within `cargo-dist`.
name: "Build binaries"
on:
workflow_call:
inputs:
plan:
required: true
type: string
pull_request:
paths:
# When we change pyproject.toml, we want to ensure that the maturin builds still work.
- pyproject.toml
# And when we change this workflow itself...
- .github/workflows/build-binaries.yml
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions: {}
env:
PACKAGE_NAME: ruff
MODULE_NAME: ruff
PYTHON_VERSION: "3.13"
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
jobs:
sdist:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build sdist"
uses: PyO3/maturin-action@aef21716ff3dcae8a1c301d23ec3e4446972a6e3 # v1.49.1
with:
command: sdist
args: --out dist
- name: "Test sdist"
run: |
pip install dist/"${PACKAGE_NAME}"-*.tar.gz --force-reinstall
"${MODULE_NAME}" --help
python -m "${MODULE_NAME}" --help
- name: "Upload sdist"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: wheels-sdist
path: dist
macos-x86_64:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: macos-14
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels - x86_64"
uses: PyO3/maturin-action@aef21716ff3dcae8a1c301d23ec3e4446972a6e3 # v1.49.1
with:
target: x86_64
args: --release --locked --out dist
- name: "Upload wheels"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: wheels-macos-x86_64
path: dist
- name: "Archive binary"
run: |
TARGET=x86_64-apple-darwin
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: artifacts-macos-x86_64
path: |
*.tar.gz
*.sha256
macos-aarch64:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: macos-14
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: arm64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels - aarch64"
uses: PyO3/maturin-action@aef21716ff3dcae8a1c301d23ec3e4446972a6e3 # v1.49.1
with:
target: aarch64
args: --release --locked --out dist
- name: "Test wheel - aarch64"
run: |
pip install dist/"${PACKAGE_NAME}"-*.whl --force-reinstall
ruff --help
python -m ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: wheels-aarch64-apple-darwin
path: dist
- name: "Archive binary"
run: |
TARGET=aarch64-apple-darwin
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: artifacts-aarch64-apple-darwin
path: |
*.tar.gz
*.sha256
windows:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: windows-latest
strategy:
matrix:
platform:
- target: x86_64-pc-windows-msvc
arch: x64
- target: i686-pc-windows-msvc
arch: x86
- target: aarch64-pc-windows-msvc
arch: x64
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: ${{ matrix.platform.arch }}
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@aef21716ff3dcae8a1c301d23ec3e4446972a6e3 # v1.49.1
with:
target: ${{ matrix.platform.target }}
args: --release --locked --out dist
env:
# aarch64 build fails, see https://github.com/PyO3/maturin/issues/2110
XWIN_VERSION: 16
- name: "Test wheel"
if: ${{ !startsWith(matrix.platform.target, 'aarch64') }}
shell: bash
run: |
python -m pip install dist/"${PACKAGE_NAME}"-*.whl --force-reinstall
"${MODULE_NAME}" --help
python -m "${MODULE_NAME}" --help
- name: "Upload wheels"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: wheels-${{ matrix.platform.target }}
path: dist
- name: "Archive binary"
shell: bash
run: |
ARCHIVE_FILE=ruff-${{ matrix.platform.target }}.zip
7z a $ARCHIVE_FILE ./target/${{ matrix.platform.target }}/release/ruff.exe
sha256sum $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: artifacts-${{ matrix.platform.target }}
path: |
*.zip
*.sha256
linux:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
strategy:
matrix:
target:
- x86_64-unknown-linux-gnu
- i686-unknown-linux-gnu
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@aef21716ff3dcae8a1c301d23ec3e4446972a6e3 # v1.49.1
with:
target: ${{ matrix.target }}
manylinux: auto
args: --release --locked --out dist
- name: "Test wheel"
if: ${{ startsWith(matrix.target, 'x86_64') }}
run: |
pip install dist/"${PACKAGE_NAME}"-*.whl --force-reinstall
"${MODULE_NAME}" --help
python -m "${MODULE_NAME}" --help
- name: "Upload wheels"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: wheels-${{ matrix.target }}
path: dist
- name: "Archive binary"
shell: bash
run: |
set -euo pipefail
TARGET=${{ matrix.target }}
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: artifacts-${{ matrix.target }}
path: |
*.tar.gz
*.sha256
linux-cross:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
strategy:
matrix:
platform:
- target: aarch64-unknown-linux-gnu
arch: aarch64
# see https://github.com/astral-sh/ruff/issues/3791
# and https://github.com/gnzlbg/jemallocator/issues/170#issuecomment-1503228963
maturin_docker_options: -e JEMALLOC_SYS_WITH_LG_PAGE=16
- target: armv7-unknown-linux-gnueabihf
arch: armv7
- target: s390x-unknown-linux-gnu
arch: s390x
- target: powerpc64le-unknown-linux-gnu
arch: ppc64le
# see https://github.com/astral-sh/ruff/issues/10073
maturin_docker_options: -e JEMALLOC_SYS_WITH_LG_PAGE=16
- target: powerpc64-unknown-linux-gnu
arch: ppc64
# see https://github.com/astral-sh/ruff/issues/10073
maturin_docker_options: -e JEMALLOC_SYS_WITH_LG_PAGE=16
- target: arm-unknown-linux-musleabihf
arch: arm
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@aef21716ff3dcae8a1c301d23ec3e4446972a6e3 # v1.49.1
with:
target: ${{ matrix.platform.target }}
manylinux: auto
docker-options: ${{ matrix.platform.maturin_docker_options }}
args: --release --locked --out dist
- uses: uraimo/run-on-arch-action@ac33288c3728ca72563c97b8b88dda5a65a84448 # v2
if: ${{ matrix.platform.arch != 'ppc64' && matrix.platform.arch != 'ppc64le'}}
name: Test wheel
with:
arch: ${{ matrix.platform.arch == 'arm' && 'armv6' || matrix.platform.arch }}
distro: ${{ matrix.platform.arch == 'arm' && 'bullseye' || 'ubuntu20.04' }}
githubToken: ${{ github.token }}
install: |
apt-get update
apt-get install -y --no-install-recommends python3 python3-pip
pip3 install -U pip
run: |
pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: wheels-${{ matrix.platform.target }}
path: dist
- name: "Archive binary"
shell: bash
run: |
set -euo pipefail
TARGET=${{ matrix.platform.target }}
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: artifacts-${{ matrix.platform.target }}
path: |
*.tar.gz
*.sha256
musllinux:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
strategy:
matrix:
target:
- x86_64-unknown-linux-musl
- i686-unknown-linux-musl
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@aef21716ff3dcae8a1c301d23ec3e4446972a6e3 # v1.49.1
with:
target: ${{ matrix.target }}
manylinux: musllinux_1_2
args: --release --locked --out dist
- name: "Test wheel"
if: matrix.target == 'x86_64-unknown-linux-musl'
uses: addnab/docker-run-action@4f65fabd2431ebc8d299f8e5a018d79a769ae185 # v3
with:
image: alpine:latest
options: -v ${{ github.workspace }}:/io -w /io
run: |
apk add python3
python -m venv .venv
.venv/bin/pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
.venv/bin/${{ env.MODULE_NAME }} --help
- name: "Upload wheels"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: wheels-${{ matrix.target }}
path: dist
- name: "Archive binary"
shell: bash
run: |
set -euo pipefail
TARGET=${{ matrix.target }}
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: artifacts-${{ matrix.target }}
path: |
*.tar.gz
*.sha256
musllinux-cross:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
strategy:
matrix:
platform:
- target: aarch64-unknown-linux-musl
arch: aarch64
maturin_docker_options: -e JEMALLOC_SYS_WITH_LG_PAGE=16
- target: armv7-unknown-linux-musleabihf
arch: armv7
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@aef21716ff3dcae8a1c301d23ec3e4446972a6e3 # v1.49.1
with:
target: ${{ matrix.platform.target }}
manylinux: musllinux_1_2
args: --release --locked --out dist
docker-options: ${{ matrix.platform.maturin_docker_options }}
- uses: uraimo/run-on-arch-action@ac33288c3728ca72563c97b8b88dda5a65a84448 # v2
name: Test wheel
with:
arch: ${{ matrix.platform.arch }}
distro: alpine_latest
githubToken: ${{ github.token }}
install: |
apk add python3
run: |
python -m venv .venv
.venv/bin/pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
.venv/bin/${{ env.MODULE_NAME }} --help
- name: "Upload wheels"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: wheels-${{ matrix.platform.target }}
path: dist
- name: "Archive binary"
shell: bash
run: |
set -euo pipefail
TARGET=${{ matrix.platform.target }}
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: artifacts-${{ matrix.platform.target }}
path: |
*.tar.gz
*.sha256

.github/workflows/build-docker.yml

@@ -1,299 +0,0 @@
# Build and publish a Docker image.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a local
# artifacts job within `cargo-dist`.
#
# TODO(charlie): Ideally, the publish step would happen as a publish job within `cargo-dist`, but
# sharing the built image as an artifact between jobs is challenging.
name: "[ruff] Build Docker image"
on:
workflow_call:
inputs:
plan:
required: true
type: string
pull_request:
paths:
- .github/workflows/build-docker.yml
env:
RUFF_BASE_IMG: ghcr.io/${{ github.repository_owner }}/ruff
jobs:
docker-build:
name: Build Docker image (ghcr.io/astral-sh/ruff) for ${{ matrix.platform }}
runs-on: ubuntu-latest
environment:
name: release
strategy:
fail-fast: false
matrix:
platform:
- linux/amd64
- linux/arm64
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
submodules: recursive
persist-credentials: false
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
- uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Check tag consistency
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
env:
TAG: ${{ inputs.plan != '' && fromJson(inputs.plan).announcement_tag || 'dry-run' }}
run: |
version=$(grep -m 1 "^version = " pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g')
if [ "${TAG}" != "${version}" ]; then
echo "The input tag does not match the version from pyproject.toml:" >&2
echo "${TAG}" >&2
echo "${version}" >&2
exit 1
else
echo "Releasing ${version}"
fi
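# Worked example (version number illustrative): with `version = "0.9.3"` in
# pyproject.toml, the grep/sed pipeline above extracts `0.9.3`, and the job
# fails unless that string exactly matches the incoming release tag.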
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
with:
images: ${{ env.RUFF_BASE_IMG }}
# Defining this makes sure the org.opencontainers.image.version OCI label becomes the actual release version and not the branch name
tags: |
type=raw,value=dry-run,enable=${{ inputs.plan == '' || fromJson(inputs.plan).announcement_tag_is_implicit }}
type=pep440,pattern={{ version }},value=${{ inputs.plan != '' && fromJson(inputs.plan).announcement_tag || 'dry-run' }},enable=${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
- name: Normalize Platform Pair (replace / with -)
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_TUPLE=${platform//\//-}" >> "$GITHUB_ENV"
# Adapted from https://docs.docker.com/build/ci/github-actions/multi-platform/
- name: Build and push by digest
id: build
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6
with:
context: .
platforms: ${{ matrix.platform }}
cache-from: type=gha,scope=ruff-${{ env.PLATFORM_TUPLE }}
cache-to: type=gha,mode=min,scope=ruff-${{ env.PLATFORM_TUPLE }}
labels: ${{ steps.meta.outputs.labels }}
outputs: type=image,name=${{ env.RUFF_BASE_IMG }},push-by-digest=true,name-canonical=true,push=${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
- name: Export digests
env:
digest: ${{ steps.build.outputs.digest }}
run: |
mkdir -p /tmp/digests
touch "/tmp/digests/${digest#sha256:}"
- name: Upload digests
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: digests-${{ env.PLATFORM_TUPLE }}
path: /tmp/digests/*
if-no-files-found: error
retention-days: 1
docker-publish:
name: Publish Docker image (ghcr.io/astral-sh/ruff)
runs-on: ubuntu-latest
environment:
name: release
needs:
- docker-build
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
steps:
- name: Download digests
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
with:
path: /tmp/digests
pattern: digests-*
merge-multiple: true
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
with:
images: ${{ env.RUFF_BASE_IMG }}
# Order is on purpose such that the label org.opencontainers.image.version has the first pattern with the full version
tags: |
type=pep440,pattern={{ version }},value=${{ fromJson(inputs.plan).announcement_tag }}
type=pep440,pattern={{ major }}.{{ minor }},value=${{ fromJson(inputs.plan).announcement_tag }}
- uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
# Adapted from https://docs.docker.com/build/ci/github-actions/multi-platform/
- name: Create manifest list and push
working-directory: /tmp/digests
# The jq command expands the docker/metadata json "tags" array entry to `-t tag1 -t tag2 ...` for each tag in the array
# The printf will expand the base image with the `<RUFF_BASE_IMG>@sha256:<sha256> ...` for each sha256 in the directory
# The final command becomes `docker buildx imagetools create -t tag1 -t tag2 ... <RUFF_BASE_IMG>@sha256:<sha256_1> <RUFF_BASE_IMG>@sha256:<sha256_2> ...`
run: |
# shellcheck disable=SC2046
docker buildx imagetools create \
$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf "${RUFF_BASE_IMG}@sha256:%s " *)
docker-publish-extra:
name: Publish additional Docker image based on ${{ matrix.image-mapping }}
runs-on: ubuntu-latest
environment:
name: release
needs:
- docker-publish
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
strategy:
fail-fast: false
matrix:
# Mapping of base image followed by a comma followed by one or more base tags (comma separated)
# Note, org.opencontainers.image.version label will use the first base tag (use the most specific tag first)
image-mapping:
- alpine:3.21,alpine3.21,alpine
- debian:bookworm-slim,bookworm-slim,debian-slim
- buildpack-deps:bookworm,bookworm,debian
steps:
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
- uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Generate Dynamic Dockerfile Tags
shell: bash
env:
TAG_VALUE: ${{ fromJson(inputs.plan).announcement_tag }}
run: |
set -euo pipefail
# Extract the image and tags from the matrix variable
IFS=',' read -r BASE_IMAGE BASE_TAGS <<< "${{ matrix.image-mapping }}"
# Generate Dockerfile content
cat <<EOF > Dockerfile
FROM ${BASE_IMAGE}
COPY --from=${RUFF_BASE_IMG}:latest /ruff /usr/local/bin/ruff
ENTRYPOINT []
CMD ["/usr/local/bin/ruff"]
EOF
# Initialize a variable to store all tag docker metadata patterns
TAG_PATTERNS=""
# Loop through all base tags and append its docker metadata pattern to the list
# Order is on purpose such that the label org.opencontainers.image.version has the first pattern with the full version
IFS=','; for TAG in ${BASE_TAGS}; do
TAG_PATTERNS="${TAG_PATTERNS}type=pep440,pattern={{ version }},suffix=-${TAG},value=${TAG_VALUE}\n"
TAG_PATTERNS="${TAG_PATTERNS}type=pep440,pattern={{ major }}.{{ minor }},suffix=-${TAG},value=${TAG_VALUE}\n"
TAG_PATTERNS="${TAG_PATTERNS}type=raw,value=${TAG}\n"
done
# Remove the trailing newline from the pattern list
TAG_PATTERNS="${TAG_PATTERNS%\\n}"
# Export image cache name
echo "IMAGE_REF=${BASE_IMAGE//:/-}" >> "$GITHUB_ENV"
# Export tag patterns using the multiline env var syntax
{
echo "TAG_PATTERNS<<EOF"
echo -e "${TAG_PATTERNS}"
echo EOF
} >> "$GITHUB_ENV"
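# Worked example (tag value illustrative): for the mapping
# "alpine:3.21,alpine3.21,alpine" with TAG_VALUE=0.9.3, the loop above emits:
#   type=pep440,pattern={{ version }},suffix=-alpine3.21,value=0.9.3
#   type=pep440,pattern={{ major }}.{{ minor }},suffix=-alpine3.21,value=0.9.3
#   type=raw,value=alpine3.21
#   type=pep440,pattern={{ version }},suffix=-alpine,value=0.9.3
#   type=pep440,pattern={{ major }}.{{ minor }},suffix=-alpine,value=0.9.3
#   type=raw,value=alpine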
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
# ghcr.io prefers index level annotations
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: index
with:
images: ${{ env.RUFF_BASE_IMG }}
flavor: |
latest=false
tags: |
${{ env.TAG_PATTERNS }}
- name: Build and push
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6
with:
context: .
platforms: linux/amd64,linux/arm64
# We do not really need to cache here as the Dockerfile is tiny
#cache-from: type=gha,scope=ruff-${{ env.IMAGE_REF }}
#cache-to: type=gha,mode=min,scope=ruff-${{ env.IMAGE_REF }}
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
annotations: ${{ steps.meta.outputs.annotations }}
# This is effectively a duplicate of `docker-publish` to make https://github.com/astral-sh/ruff/pkgs/container/ruff
# show the ruff base image first since GitHub always shows the last updated image digests
# This works by annotating the original digests (previously non-annotated) which triggers an update to ghcr.io
docker-republish:
name: Annotate Docker image (ghcr.io/astral-sh/ruff)
runs-on: ubuntu-latest
environment:
name: release
needs:
- docker-publish-extra
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
steps:
- name: Download digests
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
with:
path: /tmp/digests
pattern: digests-*
merge-multiple: true
- uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: index
with:
images: ${{ env.RUFF_BASE_IMG }}
# Order is on purpose such that the label org.opencontainers.image.version has the first pattern with the full version
tags: |
type=pep440,pattern={{ version }},value=${{ fromJson(inputs.plan).announcement_tag }}
type=pep440,pattern={{ major }}.{{ minor }},value=${{ fromJson(inputs.plan).announcement_tag }}
- uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
# Adapted from https://docs.docker.com/build/ci/github-actions/multi-platform/
- name: Create manifest list and push
working-directory: /tmp/digests
# The readarray part is used to make sure the quoting and special characters are preserved on expansion (e.g. spaces)
# The jq command expands the docker/metadata json "tags" array entry to `-t tag1 -t tag2 ...` for each tag in the array
# The printf will expand the base image with the `<RUFF_BASE_IMG>@sha256:<sha256> ...` for each sha256 in the directory
# The final command becomes `docker buildx imagetools create -t tag1 -t tag2 ... <RUFF_BASE_IMG>@sha256:<sha256_1> <RUFF_BASE_IMG>@sha256:<sha256_2> ...`
run: |
readarray -t lines <<< "$DOCKER_METADATA_OUTPUT_ANNOTATIONS"; annotations=(); for line in "${lines[@]}"; do annotations+=(--annotation "$line"); done
# shellcheck disable=SC2046
docker buildx imagetools create \
"${annotations[@]}" \
$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf "${RUFF_BASE_IMG}@sha256:%s " *)

.github/workflows/ci.yaml

@@ -1,871 +1,137 @@
name: CI
permissions: {}
on:
push:
branches: [main]
pull_request:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
branches: [main]
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
PACKAGE_NAME: ruff
PYTHON_VERSION: "3.13"
jobs:
determine_changes:
name: "Determine changes"
cargo_build:
name: "cargo build"
runs-on: ubuntu-latest
outputs:
# Flag that is raised when any code that affects parser is changed
parser: ${{ steps.check_parser.outputs.changed }}
# Flag that is raised when any code that affects linter is changed
linter: ${{ steps.check_linter.outputs.changed }}
# Flag that is raised when any code that affects formatter is changed
formatter: ${{ steps.check_formatter.outputs.changed }}
# Flag that is raised when any code is changed
# This is superset of the linter and formatter
code: ${{ steps.check_code.outputs.changed }}
# Flag that is raised when any code that affects the fuzzer is changed
fuzz: ${{ steps.check_fuzzer.outputs.changed }}
# Flag that is set to "true" when code related to red-knot changes.
red_knot: ${{ steps.check_red_knot.outputs.changed }}
# Flag that is set to "true" when code related to the playground changes.
playground: ${{ steps.check_playground.outputs.changed }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
fetch-depth: 0
persist-credentials: false
- name: Determine merge base
id: merge_base
profile: minimal
- uses: actions/cache@v3
env:
BASE_REF: ${{ github.event.pull_request.base.ref || 'main' }}
run: |
sha=$(git merge-base HEAD "origin/${BASE_REF}")
echo "sha=${sha}" >> "$GITHUB_OUTPUT"
cache-name: cache-cargo
with:
path: |
~/.cargo/registry
~/.cargo/git
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-build-${{ env.cache-name }}-
${{ runner.os }}-build-
${{ runner.os }}-
- run: cargo build --all --release
- name: Check if the parser code changed
id: check_parser
env:
MERGE_BASE: ${{ steps.merge_base.outputs.sha }}
run: |
if git diff --quiet "${MERGE_BASE}...HEAD" -- \
':Cargo.toml' \
':Cargo.lock' \
':crates/ruff_python_trivia/**' \
':crates/ruff_source_file/**' \
':crates/ruff_text_size/**' \
':crates/ruff_python_ast/**' \
':crates/ruff_python_parser/**' \
':python/py-fuzzer/**' \
':.github/workflows/ci.yaml' \
; then
echo "changed=false" >> "$GITHUB_OUTPUT"
else
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
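# Note: `git diff --quiet` exits 0 when nothing under the listed pathspecs
# differs between the merge base and HEAD, so a clean diff maps to
# changed=false and any difference to changed=true. The same pattern repeats
# for the linter, formatter, fuzzer, code, playground, and red-knot checks below.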
- name: Check if the linter code changed
id: check_linter
env:
MERGE_BASE: ${{ steps.merge_base.outputs.sha }}
run: |
if git diff --quiet "${MERGE_BASE}...HEAD" -- ':Cargo.toml' \
':Cargo.lock' \
':crates/**' \
':!crates/red_knot*/**' \
':!crates/ruff_python_formatter/**' \
':!crates/ruff_formatter/**' \
':!crates/ruff_dev/**' \
':!crates/ruff_db/**' \
':scripts/*' \
':python/**' \
':.github/workflows/ci.yaml' \
; then
echo "changed=false" >> "$GITHUB_OUTPUT"
else
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
- name: Check if the formatter code changed
id: check_formatter
env:
MERGE_BASE: ${{ steps.merge_base.outputs.sha }}
run: |
if git diff --quiet "${MERGE_BASE}...HEAD" -- ':Cargo.toml' \
':Cargo.lock' \
':crates/ruff_python_formatter/**' \
':crates/ruff_formatter/**' \
':crates/ruff_python_trivia/**' \
':crates/ruff_python_ast/**' \
':crates/ruff_source_file/**' \
':crates/ruff_python_index/**' \
':crates/ruff_text_size/**' \
':crates/ruff_python_parser/**' \
':scripts/*' \
':python/**' \
':.github/workflows/ci.yaml' \
; then
echo "changed=false" >> "$GITHUB_OUTPUT"
else
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
- name: Check if the fuzzer code changed
id: check_fuzzer
env:
MERGE_BASE: ${{ steps.merge_base.outputs.sha }}
run: |
if git diff --quiet "${MERGE_BASE}...HEAD" -- ':Cargo.toml' \
':Cargo.lock' \
':fuzz/fuzz_targets/**' \
':.github/workflows/ci.yaml' \
; then
echo "changed=false" >> "$GITHUB_OUTPUT"
else
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
- name: Check if there was any code related change
id: check_code
env:
MERGE_BASE: ${{ steps.merge_base.outputs.sha }}
run: |
if git diff --quiet "${MERGE_BASE}...HEAD" -- ':**' \
':!**/*.md' \
':crates/red_knot_python_semantic/resources/mdtest/**/*.md' \
':!docs/**' \
':!assets/**' \
':.github/workflows/ci.yaml' \
; then
echo "changed=false" >> "$GITHUB_OUTPUT"
else
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
- name: Check if there was any playground related change
id: check_playground
env:
MERGE_BASE: ${{ steps.merge_base.outputs.sha }}
run: |
if git diff --quiet "${MERGE_BASE}...HEAD" -- \
':playground/**' \
; then
echo "changed=false" >> "$GITHUB_OUTPUT"
else
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
- name: Check if the red-knot code changed
id: check_red_knot
env:
MERGE_BASE: ${{ steps.merge_base.outputs.sha }}
run: |
if git diff --quiet "${MERGE_BASE}...HEAD" -- \
':Cargo.toml' \
':Cargo.lock' \
':crates/red_knot*/**' \
':crates/ruff_db/**' \
':crates/ruff_annotate_snippets/**' \
':crates/ruff_python_ast/**' \
':crates/ruff_python_parser/**' \
':crates/ruff_python_trivia/**' \
':crates/ruff_source_file/**' \
':crates/ruff_text_size/**' \
':.github/workflows/ci.yaml' \
; then
echo "changed=false" >> "$GITHUB_OUTPUT"
else
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
cargo-fmt:
cargo_fmt:
name: "cargo fmt"
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
persist-credentials: false
- name: "Install Rust toolchain"
run: rustup component add rustfmt
profile: minimal
toolchain: nightly-2022-11-01
override: true
components: rustfmt
- uses: actions/cache@v3
env:
cache-name: cache-cargo
with:
path: |
~/.cargo/registry
~/.cargo/git
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-build-${{ env.cache-name }}-
${{ runner.os }}-build-
${{ runner.os }}-
- run: cargo fmt --all --check
cargo-clippy:
cargo_clippy:
name: "cargo clippy"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: "Install Rust toolchain"
run: |
rustup component add clippy
rustup target add wasm32-unknown-unknown
- name: "Clippy"
run: cargo clippy --workspace --all-targets --all-features --locked -- -D warnings
- name: "Clippy (wasm)"
run: cargo clippy -p ruff_wasm -p red_knot_wasm --target wasm32-unknown-unknown --all-features --locked -- -D warnings
profile: minimal
toolchain: nightly-2022-11-01
override: true
components: clippy
- uses: actions/cache@v3
env:
cache-name: cache-cargo
with:
path: |
~/.cargo/registry
~/.cargo/git
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-build-${{ env.cache-name }}-
${{ runner.os }}-build-
${{ runner.os }}-
- run: cargo clippy --workspace --all-targets --all-features -- -D warnings
cargo-test-linux:
name: "cargo test (linux)"
runs-on: depot-ubuntu-22.04-16
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
timeout-minutes: 20
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@e16410e7f8d9e167b74ad5697a9089a35126eb50 # v1
- name: "Install cargo nextest"
uses: taiki-e/install-action@09dc018eee06ae1c9e0409786563f534210ceb83 # v2
with:
tool: cargo-nextest
- name: "Install cargo insta"
uses: taiki-e/install-action@09dc018eee06ae1c9e0409786563f534210ceb83 # v2
with:
tool: cargo-insta
- name: Red-knot mdtests (GitHub annotations)
if: ${{ needs.determine_changes.outputs.red_knot == 'true' }}
env:
NO_COLOR: 1
MDTEST_GITHUB_ANNOTATIONS_FORMAT: 1
# Ignore errors if this step fails; we want to continue to later steps in the workflow anyway.
# This step is just to get nice GitHub annotations on the PR diff in the files-changed tab.
run: cargo test -p red_knot_python_semantic --test mdtest || true
- name: "Run tests"
shell: bash
env:
NEXTEST_PROFILE: "ci"
run: cargo insta test --all-features --unreferenced reject --test-runner nextest
# Check for broken links in the documentation.
- run: cargo doc --all --no-deps
env:
RUSTDOCFLAGS: "-D warnings"
# Use --document-private-items so that all our doc comments are kept in
# sync, not just public items. Eventually we should do this for all
# crates; for now add crates here as they are warning-clean to prevent
# regression.
- run: cargo doc --no-deps -p red_knot_python_semantic -p red_knot -p red_knot_test -p ruff_db --document-private-items
env:
# Setting RUSTDOCFLAGS because `cargo doc --check` isn't yet implemented (https://github.com/rust-lang/cargo/issues/10025).
RUSTDOCFLAGS: "-D warnings"
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: ruff
path: target/debug/ruff
cargo-test-linux-release:
name: "cargo test (linux, release)"
runs-on: depot-ubuntu-22.04-16
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
timeout-minutes: 20
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@e16410e7f8d9e167b74ad5697a9089a35126eb50 # v1
- name: "Install cargo nextest"
uses: taiki-e/install-action@09dc018eee06ae1c9e0409786563f534210ceb83 # v2
with:
tool: cargo-nextest
- name: "Install cargo insta"
uses: taiki-e/install-action@09dc018eee06ae1c9e0409786563f534210ceb83 # v2
with:
tool: cargo-insta
- name: "Run tests"
shell: bash
env:
NEXTEST_PROFILE: "ci"
run: cargo insta test --release --all-features --unreferenced reject --test-runner nextest
cargo-test-windows:
name: "cargo test (windows)"
runs-on: github-windows-2025-x86_64-16
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
timeout-minutes: 20
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: "Install Rust toolchain"
run: rustup show
- name: "Install cargo nextest"
uses: taiki-e/install-action@09dc018eee06ae1c9e0409786563f534210ceb83 # v2
with:
tool: cargo-nextest
- name: "Run tests"
shell: bash
env:
NEXTEST_PROFILE: "ci"
# Workaround for <https://github.com/nextest-rs/nextest/issues/1493>.
RUSTUP_WINDOWS_PATH_ADD_BIN: 1
run: |
cargo nextest run --all-features --profile ci
cargo test --all-features --doc
cargo-test-wasm:
name: "cargo test (wasm)"
cargo_test:
name: "cargo test"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
timeout-minutes: 10
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown
- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4.3.0
with:
node-version: 20
cache: "npm"
cache-dependency-path: playground/package-lock.json
- uses: jetli/wasm-pack-action@0d096b08b4e5a7de8c28de67e11e945404e9eefa # v0.4.0
with:
version: v0.13.1
- name: "Test ruff_wasm"
run: |
cd crates/ruff_wasm
wasm-pack test --node
- name: "Test red_knot_wasm"
run: |
cd crates/red_knot_wasm
wasm-pack test --node
cargo-build-release:
name: "cargo build (release)"
runs-on: macos-latest
if: ${{ github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@e16410e7f8d9e167b74ad5697a9089a35126eb50 # v1
- name: "Build"
run: cargo build --release --locked
cargo-build-msrv:
name: "cargo build (msrv)"
runs-on: depot-ubuntu-latest-8
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
timeout-minutes: 20
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: SebRollen/toml-action@b1b3628f55fc3a28208d4203ada8b737e9687876 # v1.2.0
id: msrv
with:
file: "Cargo.toml"
field: "workspace.package.rust-version"
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: "Install Rust toolchain"
profile: minimal
toolchain: nightly-2022-11-01
override: true
- uses: actions/cache@v3
env:
MSRV: ${{ steps.msrv.outputs.value }}
run: rustup default "${MSRV}"
- name: "Install mold"
uses: rui314/setup-mold@e16410e7f8d9e167b74ad5697a9089a35126eb50 # v1
- name: "Install cargo nextest"
uses: taiki-e/install-action@09dc018eee06ae1c9e0409786563f534210ceb83 # v2
cache-name: cache-cargo
with:
tool: cargo-nextest
- name: "Install cargo insta"
uses: taiki-e/install-action@09dc018eee06ae1c9e0409786563f534210ceb83 # v2
path: |
~/.cargo/registry
~/.cargo/git
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-build-${{ env.cache-name }}-
${{ runner.os }}-build-
${{ runner.os }}-
- run: cargo test --all
maturin_build:
name: "maturin build"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
tool: cargo-insta
- name: "Run tests"
shell: bash
profile: minimal
toolchain: nightly-2022-11-01
override: true
- uses: actions/setup-python@v4
with:
python-version: "3.10"
- run: pip install maturin
- uses: actions/cache@v3
env:
NEXTEST_PROFILE: "ci"
MSRV: ${{ steps.msrv.outputs.value }}
run: cargo "+${MSRV}" insta test --all-features --unreferenced reject --test-runner nextest
cargo-fuzz-build:
name: "cargo fuzz build"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ github.ref == 'refs/heads/main' || needs.determine_changes.outputs.fuzz == 'true' || needs.determine_changes.outputs.code == 'true' }}
timeout-minutes: 10
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
cache-name: cache-cargo
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
with:
workspaces: "fuzz -> target"
- name: "Install Rust toolchain"
run: rustup show
- name: "Install cargo-binstall"
uses: cargo-bins/cargo-binstall@63aaa5c1932cebabc34eceda9d92a70215dcead6 # v1.12.3
with:
tool: cargo-fuzz@0.11.2
- name: "Install cargo-fuzz"
# Download the latest version from quick install and not the GitHub releases, because the GitHub releases only have MUSL targets.
run: cargo binstall cargo-fuzz --force --disable-strategies crate-meta-data --no-confirm
- run: cargo fuzz build -s none
fuzz-parser:
name: "fuzz parser"
runs-on: ubuntu-latest
needs:
- cargo-test-linux
- determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && needs.determine_changes.outputs.parser == 'true' }}
timeout-minutes: 20
env:
FORCE_COLOR: 1
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
- uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
name: Download Ruff binary to test
id: download-cached-binary
with:
name: ruff
path: ruff-to-test
- name: Fuzz
env:
DOWNLOAD_PATH: ${{ steps.download-cached-binary.outputs.download-path }}
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x "${DOWNLOAD_PATH}/ruff"
(
uvx \
--python="${PYTHON_VERSION}" \
--from=./python/py-fuzzer \
fuzz \
--test-executable="${DOWNLOAD_PATH}/ruff" \
--bin=ruff \
0-500
)
scripts:
name: "test scripts"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
timeout-minutes: 5
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: "Install Rust toolchain"
run: rustup component add rustfmt
# Run all code generation scripts, and verify that the current output is
# already checked into git.
- run: python crates/ruff_python_ast/generate.py
- run: python crates/ruff_python_formatter/generate.py
- run: test -z "$(git status --porcelain)"
# Verify that adding a plugin or rule produces clean code.
- run: ./scripts/add_rule.py --name DoTheThing --prefix F --code 999 --linter pyflakes
- run: cargo check
- run: cargo fmt --all --check
- run: |
./scripts/add_plugin.py test --url https://pypi.org/project/-test/0.1.0/ --prefix TST
./scripts/add_rule.py --name FirstRule --prefix TST --code 001 --linter test
- run: cargo check
- run: cargo fmt --all --check
ecosystem:
name: "ecosystem"
runs-on: depot-ubuntu-latest-8
needs:
- cargo-test-linux
- determine_changes
# Only runs on pull requests, since that is the only way we can find the base version for comparison.
# Ecosystem check needs linter and/or formatter changes.
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && github.event_name == 'pull_request' && needs.determine_changes.outputs.code == 'true' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
name: Download comparison Ruff binary
id: ruff-target
with:
name: ruff
path: target/debug
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: Download baseline Ruff binary
with:
name: ruff
branch: ${{ github.event.pull_request.base.ref }}
workflow: "ci.yaml"
check_artifacts: true
- name: Install ruff-ecosystem
run: |
pip install ./python/ruff-ecosystem
- name: Run `ruff check` stable ecosystem check
if: ${{ needs.determine_changes.outputs.linter == 'true' }}
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
# Set pipefail to avoid hiding errors with tee
set -eo pipefail
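# (without pipefail the pipeline would exit with tee's status, which is
# always 0, even when ruff-ecosystem itself fails)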
ruff-ecosystem check ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown | tee ecosystem-result-check-stable
cat ecosystem-result-check-stable > "$GITHUB_STEP_SUMMARY"
echo "### Linter (stable)" > ecosystem-result
cat ecosystem-result-check-stable >> ecosystem-result
echo "" >> ecosystem-result
- name: Run `ruff check` preview ecosystem check
if: ${{ needs.determine_changes.outputs.linter == 'true' }}
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
# Set pipefail to avoid hiding errors with tee
set -eo pipefail
ruff-ecosystem check ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-check-preview
cat ecosystem-result-check-preview > "$GITHUB_STEP_SUMMARY"
echo "### Linter (preview)" >> ecosystem-result
cat ecosystem-result-check-preview >> ecosystem-result
echo "" >> ecosystem-result
- name: Run `ruff format` stable ecosystem check
if: ${{ needs.determine_changes.outputs.formatter == 'true' }}
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
# Set pipefail to avoid hiding errors with tee
set -eo pipefail
ruff-ecosystem format ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown | tee ecosystem-result-format-stable
cat ecosystem-result-format-stable > "$GITHUB_STEP_SUMMARY"
echo "### Formatter (stable)" >> ecosystem-result
cat ecosystem-result-format-stable >> ecosystem-result
echo "" >> ecosystem-result
- name: Run `ruff format` preview ecosystem check
if: ${{ needs.determine_changes.outputs.formatter == 'true' }}
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
# Set pipefail to avoid hiding errors with tee
set -eo pipefail
ruff-ecosystem format ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-format-preview
cat ecosystem-result-format-preview > "$GITHUB_STEP_SUMMARY"
echo "### Formatter (preview)" >> ecosystem-result
cat ecosystem-result-format-preview >> ecosystem-result
echo "" >> ecosystem-result
- name: Export pull request number
run: |
echo ${{ github.event.number }} > pr-number
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
name: Upload PR Number
with:
name: pr-number
path: pr-number
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
name: Upload Results
with:
name: ecosystem-result
path: ecosystem-result
cargo-shear:
name: "cargo shear"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: cargo-bins/cargo-binstall@63aaa5c1932cebabc34eceda9d92a70215dcead6 # v1.12.3
- run: cargo binstall --no-confirm cargo-shear
- run: cargo shear
python-package:
name: "python package"
runs-on: ubuntu-latest
timeout-minutes: 20
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@aef21716ff3dcae8a1c301d23ec3e4446972a6e3 # v1.49.1
with:
args: --out dist
- name: "Test wheel"
run: |
pip install --force-reinstall --find-links dist "${PACKAGE_NAME}"
ruff --help
python -m ruff --help
- name: "Remove wheels from cache"
run: rm -rf target/wheels
pre-commit:
name: "pre-commit"
runs-on: depot-ubuntu-22.04-16
timeout-minutes: 10
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
- name: "Cache pre-commit"
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
with:
path: ~/.cache/pre-commit
key: pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
- name: "Run pre-commit"
run: |
echo '```console' > "$GITHUB_STEP_SUMMARY"
# Enable color output for pre-commit and remove it for the summary
# Use --hook-stage=manual to enable slower pre-commit hooks that are skipped by default
SKIP=cargo-fmt,clippy,dev-generate-all uvx --python="${PYTHON_VERSION}" pre-commit run --all-files --show-diff-on-failure --color=always --hook-stage=manual | \
tee >(sed -E 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})*)?[mGK]//g' >> "$GITHUB_STEP_SUMMARY") >&1
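# tee is the last command in the pipeline, so $? would be tee's (successful)
# exit code; PIPESTATUS[0] recovers pre-commit's own exit code instead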
exit_code="${PIPESTATUS[0]}"
echo '```' >> "$GITHUB_STEP_SUMMARY"
exit "$exit_code"
docs:
name: "mkdocs"
runs-on: ubuntu-latest
timeout-minutes: 10
env:
MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: "3.13"
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: "Add SSH key"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
uses: webfactory/ssh-agent@a6f90b1f127823b31d4d4a8d96047790581349bd # v0.9.1
with:
ssh-private-key: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY }}
- name: "Install Rust toolchain"
run: rustup show
- name: Install uv
uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
- name: "Install Insiders dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: uv pip install -r docs/requirements-insiders.txt --system
- name: "Install dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: uv pip install -r docs/requirements.txt --system
- name: "Update README File"
run: python scripts/transform_readme.py --target mkdocs
- name: "Generate docs"
run: python scripts/generate_mkdocs.py
- name: "Check docs formatting"
run: python scripts/check_docs_formatted.py
- name: "Build Insiders docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: mkdocs build --strict -f mkdocs.insiders.yml
- name: "Build docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: mkdocs build --strict -f mkdocs.public.yml
check-formatter-instability-and-black-similarity:
name: "formatter instabilities and black similarity"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.formatter == 'true' || github.ref == 'refs/heads/main') }}
timeout-minutes: 10
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: "Install Rust toolchain"
run: rustup show
- name: "Run checks"
run: scripts/formatter_ecosystem_checks.sh
- name: "Github step summary"
run: cat target/formatter-ecosystem/stats.txt > "$GITHUB_STEP_SUMMARY"
- name: "Remove checkouts from cache"
run: rm -r target/formatter-ecosystem
check-ruff-lsp:
name: "test ruff-lsp"
runs-on: ubuntu-latest
timeout-minutes: 5
needs:
- cargo-test-linux
- determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
steps:
- uses: extractions/setup-just@dd310ad5a97d8e7b41793f8ef055398d51ad4de6 # v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
name: "Download ruff-lsp source"
with:
persist-credentials: false
repository: "astral-sh/ruff-lsp"
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
# installation fails on 3.13 and newer
python-version: "3.12"
- uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
name: Download development ruff binary
id: ruff-target
with:
name: ruff
path: target/debug
- name: Install ruff-lsp dependencies
run: |
just install
- name: Run ruff-lsp tests
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: |
# Setup development binary
pip uninstall --yes ruff
chmod +x "${DOWNLOAD_PATH}/ruff"
export PATH="${DOWNLOAD_PATH}:${PATH}"
ruff version
just test
check-playground:
name: "check playground"
runs-on: ubuntu-latest
timeout-minutes: 5
needs:
- determine_changes
if: ${{ (needs.determine_changes.outputs.playground == 'true') }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4.3.0
with:
node-version: 22
cache: "npm"
cache-dependency-path: playground/package-lock.json
- uses: jetli/wasm-bindgen-action@20b33e20595891ab1a0ed73145d8a21fc96e7c29 # v0.2.0
- name: "Install Node dependencies"
run: npm ci
working-directory: playground
- name: "Build playgrounds"
run: npm run dev:wasm
working-directory: playground
- name: "Run TypeScript checks"
run: npm run check
working-directory: playground
- name: "Check formatting"
run: npm run fmt:check
working-directory: playground
benchmarks:
runs-on: ubuntu-24.04
needs: determine_changes
if: ${{ github.repository == 'astral-sh/ruff' && !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
timeout-minutes: 20
steps:
- name: "Checkout Branch"
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: "Install Rust toolchain"
run: rustup show
- name: "Install codspeed"
uses: taiki-e/install-action@09dc018eee06ae1c9e0409786563f534210ceb83 # v2
with:
tool: cargo-codspeed
- name: "Build benchmarks"
run: cargo codspeed build --features codspeed -p ruff_benchmark
- name: "Run benchmarks"
uses: CodSpeedHQ/action@0010eb0ca6e89b80c88e8edaaa07cfe5f3e6664d # v3.5.0
with:
run: cargo codspeed run
token: ${{ secrets.CODSPEED_TOKEN }}

View File

@@ -1,78 +0,0 @@
name: Daily parser fuzz
on:
workflow_dispatch:
schedule:
- cron: "0 0 * * *"
pull_request:
paths:
- ".github/workflows/daily_fuzz.yaml"
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
PACKAGE_NAME: ruff
FORCE_COLOR: 1
jobs:
fuzz:
name: Fuzz
runs-on: ubuntu-latest
timeout-minutes: 20
# Don't run the cron job on forks:
if: ${{ github.repository == 'astral-sh/ruff' || github.event_name != 'schedule' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@e16410e7f8d9e167b74ad5697a9089a35126eb50 # v1
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: Build ruff
# A debug build means the script runs slower once it gets started,
# but this is outweighed by the fact that a release build takes *much* longer to compile in CI
run: cargo build --locked
- name: Fuzz
run: |
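# shuf draws 1000 random seeds; the command substitution below is left
# unquoted on purpose (SC2046) so each seed is passed as a separate argument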
# shellcheck disable=SC2046
(
uvx \
--python=3.12 \
--from=./python/py-fuzzer \
fuzz \
--test-executable=target/debug/ruff \
--bin=ruff \
$(shuf -i 0-9999999999999999999 -n 1000)
)
create-issue-on-failure:
name: Create an issue if the daily fuzz surfaced any bugs
runs-on: ubuntu-latest
needs: fuzz
if: ${{ github.repository == 'astral-sh/ruff' && always() && github.event_name == 'schedule' && needs.fuzz.result == 'failure' }}
permissions:
issues: write
steps:
- uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
await github.rest.issues.create({
owner: "astral-sh",
repo: "ruff",
title: `Daily parser fuzz failed on ${new Date().toDateString()}`,
body: "Run listed here: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
labels: ["bug", "parser", "fuzzer"],
})

View File

@@ -1,72 +0,0 @@
name: Daily property test run
on:
workflow_dispatch:
schedule:
- cron: "0 12 * * *"
pull_request:
paths:
- ".github/workflows/daily_property_tests.yaml"
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
FORCE_COLOR: 1
jobs:
property_tests:
name: Property tests
runs-on: ubuntu-latest
timeout-minutes: 20
# Don't run the cron job on forks:
if: ${{ github.repository == 'astral-sh/ruff' || github.event_name != 'schedule' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@e16410e7f8d9e167b74ad5697a9089a35126eb50 # v1
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: Build Red Knot
# A release build takes longer (2 min vs 1 min), but the property tests run much faster in release
# mode (1.5 min vs 14 min), so the overall time is shorter with a release build.
run: cargo build --locked --release --package red_knot_python_semantic --tests
- name: Run property tests
shell: bash
run: |
export QUICKCHECK_TESTS=100000
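# 5 passes of 100,000 generated inputs per property test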
for _ in {1..5}; do
cargo test --locked --release --package red_knot_python_semantic -- --ignored list::property_tests
cargo test --locked --release --package red_knot_python_semantic -- --ignored types::property_tests::stable
done
create-issue-on-failure:
name: Create an issue if the daily property test run surfaced any bugs
runs-on: ubuntu-latest
needs: property_tests
if: ${{ github.repository == 'astral-sh/ruff' && always() && github.event_name == 'schedule' && needs.property_tests.result == 'failure' }}
permissions:
issues: write
steps:
- uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
await github.rest.issues.create({
owner: "astral-sh",
repo: "ruff",
title: `Daily property test run failed on ${new Date().toDateString()}`,
body: "Run listed here: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
labels: ["bug", "red-knot", "testing"],
})

301
.github/workflows/flake8-to-ruff.yaml vendored Normal file
View File

@@ -0,0 +1,301 @@
name: "[flake8-to-ruff] Release"
on: workflow_dispatch
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
PACKAGE_NAME: flake8-to-ruff
CRATE_NAME: flake8_to_ruff
PYTHON_VERSION: "3.7" # to build abi3 wheels
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
RUSTUP_MAX_RETRIES: 10
jobs:
macos-x86_64:
runs-on: macos-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: Install Rust toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
default: true
- name: Build wheels - x86_64
uses: messense/maturin-action@v1
with:
target: x86_64
args: --release --out dist --sdist -m ./${{ env.CRATE_NAME }}/Cargo.toml
maturin-version: "v0.13.0"
- name: Install built wheel - x86_64
run: |
pip install dist/${{ env.CRATE_NAME }}-*.whl --force-reinstall
- name: Upload wheels
uses: actions/upload-artifact@v2
with:
name: wheels
path: dist
macos-universal:
runs-on: macos-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: Install Rust toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
default: true
- name: Build wheels - universal2
uses: messense/maturin-action@v1
with:
args: --release --universal2 --out dist -m ./${{ env.CRATE_NAME }}/Cargo.toml
maturin-version: "v0.13.0"
- name: Install built wheel - universal2
run: |
pip install dist/${{ env.CRATE_NAME }}-*universal2.whl --force-reinstall
- name: Upload wheels
uses: actions/upload-artifact@v2
with:
name: wheels
path: dist
windows:
runs-on: windows-latest
strategy:
matrix:
target: [x64, x86]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: ${{ matrix.target }}
- name: Install Rust toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
default: true
- name: Build wheels
uses: messense/maturin-action@v1
with:
target: ${{ matrix.target }}
args: --release --out dist -m ./${{ env.CRATE_NAME }}/Cargo.toml
maturin-version: "v0.13.0"
- name: Install built wheel
shell: bash
run: |
python -m pip install dist/${{ env.CRATE_NAME }}-*.whl --force-reinstall
- name: Upload wheels
uses: actions/upload-artifact@v2
with:
name: wheels
path: dist
linux:
runs-on: ubuntu-latest
strategy:
matrix:
target: [x86_64, i686]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: Build wheels
uses: messense/maturin-action@v1
with:
target: ${{ matrix.target }}
manylinux: auto
args: --release --out dist -m ./${{ env.CRATE_NAME }}/Cargo.toml
maturin-version: "v0.13.0"
- name: Install built wheel
if: matrix.target == 'x86_64'
run: |
pip install dist/${{ env.CRATE_NAME }}-*.whl --force-reinstall
- name: Upload wheels
uses: actions/upload-artifact@v2
with:
name: wheels
path: dist
linux-cross:
runs-on: ubuntu-latest
strategy:
matrix:
target: [aarch64, armv7, s390x, ppc64le, ppc64]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Build wheels
uses: messense/maturin-action@v1
with:
target: ${{ matrix.target }}
manylinux: auto
args: --no-default-features --release --out dist -m ./${{ env.CRATE_NAME }}/Cargo.toml
maturin-version: "v0.13.0"
- uses: uraimo/run-on-arch-action@v2.0.5
if: matrix.target != 'ppc64'
name: Install built wheel
with:
arch: ${{ matrix.target }}
distro: ubuntu20.04
githubToken: ${{ github.token }}
install: |
apt-get update
apt-get install -y --no-install-recommends python3 python3-pip
pip3 install -U pip
run: |
pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
- name: Upload wheels
uses: actions/upload-artifact@v2
with:
name: wheels
path: dist
musllinux:
runs-on: ubuntu-latest
strategy:
matrix:
target:
- x86_64-unknown-linux-musl
- i686-unknown-linux-musl
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: Build wheels
uses: messense/maturin-action@v1
with:
target: ${{ matrix.target }}
manylinux: musllinux_1_2
args: --release --out dist -m ./${{ env.CRATE_NAME }}/Cargo.toml
maturin-version: "v0.13.0"
- name: Install built wheel
if: matrix.target == 'x86_64-unknown-linux-musl'
uses: addnab/docker-run-action@v3
with:
image: alpine:latest
options: -v ${{ github.workspace }}:/io -w /io
run: |
apk add py3-pip
pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links /io/dist/ --force-reinstall
- name: Upload wheels
uses: actions/upload-artifact@v2
with:
name: wheels
path: dist
musllinux-cross:
runs-on: ubuntu-latest
strategy:
matrix:
platform:
- target: aarch64-unknown-linux-musl
arch: aarch64
- target: armv7-unknown-linux-musleabihf
arch: armv7
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Build wheels
uses: messense/maturin-action@v1
with:
target: ${{ matrix.platform.target }}
manylinux: musllinux_1_2
args: --release --out dist -m ./${{ env.CRATE_NAME }}/Cargo.toml
maturin-version: "v0.13.0"
- uses: uraimo/run-on-arch-action@master
name: Install built wheel
with:
arch: ${{ matrix.platform.arch }}
distro: alpine_latest
githubToken: ${{ github.token }}
install: |
apk add py3-pip
run: |
pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
- name: Upload wheels
uses: actions/upload-artifact@v2
with:
name: wheels
path: dist
pypy:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest]
target: [x86_64, aarch64]
python-version:
- "3.7"
- "3.8"
- "3.9"
exclude:
- os: macos-latest
target: aarch64
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: pypy${{ matrix.python-version }}
- name: Build wheels
uses: messense/maturin-action@v1
with:
maturin-version: "v0.13.0"
target: ${{ matrix.target }}
manylinux: auto
args: --release --out dist -i pypy${{ matrix.python-version }} -m ./${{ env.CRATE_NAME }}/Cargo.toml
- name: Install built wheel
if: matrix.target == 'x86_64'
run: |
pip install dist/${{ env.CRATE_NAME }}-*.whl --force-reinstall
- name: Upload wheels
uses: actions/upload-artifact@v2
with:
name: wheels
path: dist
release:
name: Release
runs-on: ubuntu-latest
needs:
- macos-universal
- macos-x86_64
- windows
- linux
- linux-cross
- musllinux
- musllinux-cross
- pypy
steps:
- uses: actions/download-artifact@v2
with:
name: wheels
- uses: actions/setup-python@v4
- name: Publish to PyPi
env:
TWINE_USERNAME: __token__
TWINE_PASSWORD: ${{ secrets.FLAKE8_TO_RUFF_TOKEN }}
run: |
pip install --upgrade twine
twine upload --skip-existing *

View File

@@ -1,96 +0,0 @@
name: Run mypy_primer
permissions: {}
on:
pull_request:
paths:
- "crates/red_knot*/**"
- "crates/ruff_db"
- "crates/ruff_python_ast"
- "crates/ruff_python_parser"
- ".github/workflows/mypy_primer.yaml"
- ".github/workflows/mypy_primer_comment.yaml"
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.event.pull_request.number || github.sha }}
cancel-in-progress: true
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
jobs:
mypy_primer:
name: Run mypy_primer
runs-on: depot-ubuntu-22.04-16
timeout-minutes: 20
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: ruff
fetch-depth: 0
persist-credentials: false
- name: Install the latest version of uv
uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
with:
workspaces: "ruff"
- name: Install Rust toolchain
run: rustup show
- name: Install mypy_primer
run: |
uv tool install "git+https://github.com/astral-sh/mypy_primer.git@add-red-knot-support-v5"
- name: Run mypy_primer
shell: bash
run: |
cd ruff
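# Join the one-project-per-line selector file into a single regex
# alternation: proj1|proj2|...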
PRIMER_SELECTOR="$(paste -s -d'|' crates/red_knot_python_semantic/resources/primer/good.txt)"
echo "new commit"
git rev-list --format=%s --max-count=1 "$GITHUB_SHA"
MERGE_BASE="$(git merge-base "$GITHUB_SHA" "origin/$GITHUB_BASE_REF")"
git checkout -b base_commit "$MERGE_BASE"
echo "base commit"
git rev-list --format=%s --max-count=1 base_commit
cd ..
echo "Project selector: $PRIMER_SELECTOR"
# Allow the exit code to be 0 or 1, only fail for actual mypy_primer crashes/bugs
uvx mypy_primer \
--repo ruff \
--type-checker knot \
--old base_commit \
--new "$GITHUB_SHA" \
--project-selector "/($PRIMER_SELECTOR)\$" \
--output concise \
--debug > mypy_primer.diff || [ $? -eq 1 ]
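# `|| [ $? -eq 1 ]` keeps exit code 1 (diffs were found) from failing the
# step, while any other non-zero code (a crash) still fails it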
# Output diff with ANSI color codes
cat mypy_primer.diff
# Remove ANSI color codes before uploading
sed -i -e 's/\x1b\[[0-9;]*m//g' mypy_primer.diff
echo ${{ github.event.number }} > pr-number
- name: Upload diff
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: mypy_primer_diff
path: mypy_primer.diff
- name: Upload pr-number
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: pr-number
path: pr-number

View File

@@ -1,97 +0,0 @@
name: PR comment (mypy_primer)
on: # zizmor: ignore[dangerous-triggers]
workflow_run:
workflows: [Run mypy_primer]
types: [completed]
workflow_dispatch:
inputs:
workflow_run_id:
description: The mypy_primer workflow that triggers the workflow run
required: true
jobs:
comment:
runs-on: ubuntu-24.04
permissions:
pull-requests: write
steps:
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: Download PR number
with:
name: pr-number
run_id: ${{ github.event.workflow_run.id || github.event.inputs.workflow_run_id }}
if_no_artifact_found: ignore
allow_forks: true
- name: Parse pull request number
id: pr-number
run: |
if [[ -f pr-number ]]
then
echo "pr-number=$(<pr-number)" >> "$GITHUB_OUTPUT"
fi
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: "Download mypy_primer results"
id: download-mypy_primer_diff
if: steps.pr-number.outputs.pr-number
with:
name: mypy_primer_diff
workflow: mypy_primer.yaml
pr: ${{ steps.pr-number.outputs.pr-number }}
path: pr/mypy_primer_diff
workflow_conclusion: completed
if_no_artifact_found: ignore
allow_forks: true
- name: Generate comment content
id: generate-comment
if: steps.download-mypy_primer_diff.outputs.found_artifact == 'true'
run: |
# Guard against malicious mypy_primer results that symlink to a secret
# file on this runner
if [[ -L pr/mypy_primer_diff/mypy_primer.diff ]]
then
echo "Error: mypy_primer.diff cannot be a symlink"
exit 1
fi
# Note this identifier is used to find the comment to update on
# subsequent runs
echo '<!-- generated-comment mypy_primer -->' >> comment.txt
echo '## `mypy_primer` results' >> comment.txt
if [ -s "pr/mypy_primer_diff/mypy_primer.diff" ]; then
echo '<details>' >> comment.txt
echo '<summary>Changes were detected when running on open source projects</summary>' >> comment.txt
echo '' >> comment.txt
echo '```diff' >> comment.txt
cat pr/mypy_primer_diff/mypy_primer.diff >> comment.txt
echo '```' >> comment.txt
echo '</details>' >> comment.txt
else
echo 'No ecosystem changes detected ✅' >> comment.txt
fi
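# Multi-line step outputs must use the heredoc-style
# `name<<DELIMITER ... DELIMITER` form in $GITHUB_OUTPUT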
echo 'comment<<EOF' >> "$GITHUB_OUTPUT"
cat comment.txt >> "$GITHUB_OUTPUT"
echo 'EOF' >> "$GITHUB_OUTPUT"
- name: Find existing comment
uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3
if: steps.generate-comment.outcome == 'success'
id: find-comment
with:
issue-number: ${{ steps.pr-number.outputs.pr-number }}
comment-author: "github-actions[bot]"
body-includes: "<!-- generated-comment mypy_primer -->"
- name: Create or update comment
if: steps.find-comment.outcome == 'success'
uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4
with:
comment-id: ${{ steps.find-comment.outputs.comment-id }}
issue-number: ${{ steps.pr-number.outputs.pr-number }}
body-path: comment.txt
edit-mode: replace

View File

@@ -1,29 +0,0 @@
# Notify downstream repositories of a new release.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a post-announce
# job within `cargo-dist`.
name: "[ruff] Notify dependents"
on:
workflow_call:
inputs:
plan:
required: true
type: string
jobs:
update-dependents:
name: Notify dependents
runs-on: ubuntu-latest
steps:
- name: "Update pre-commit mirror"
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
with:
github-token: ${{ secrets.RUFF_PRE_COMMIT_PAT }}
script: |
github.rest.actions.createWorkflowDispatch({
owner: 'astral-sh',
repo: 'ruff-pre-commit',
workflow_id: 'main.yml',
ref: 'main',
})

View File

@@ -1,88 +0,0 @@
name: Ecosystem check comment
on:
workflow_run:
workflows: [CI]
types: [completed]
workflow_dispatch:
inputs:
workflow_run_id:
description: The ecosystem workflow that triggers the workflow run
required: true
jobs:
comment:
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: Download pull request number
with:
name: pr-number
run_id: ${{ github.event.workflow_run.id || github.event.inputs.workflow_run_id }}
if_no_artifact_found: ignore
allow_forks: true
- name: Parse pull request number
id: pr-number
run: |
if [[ -f pr-number ]]
then
echo "pr-number=$(<pr-number)" >> "$GITHUB_OUTPUT"
fi
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: "Download ecosystem results"
id: download-ecosystem-result
if: steps.pr-number.outputs.pr-number
with:
name: ecosystem-result
workflow: ci.yaml
pr: ${{ steps.pr-number.outputs.pr-number }}
path: pr/ecosystem
workflow_conclusion: completed
if_no_artifact_found: ignore
allow_forks: true
- name: Generate comment content
id: generate-comment
if: steps.download-ecosystem-result.outputs.found_artifact == 'true'
run: |
# Guard against malicious ecosystem results that symlink to a secret
# file on this runner
if [[ -L pr/ecosystem/ecosystem-result ]]
then
echo "Error: ecosystem-result cannot be a symlink"
exit 1
fi
# Note this identifier is used to find the comment to update on
# subsequent runs
echo '<!-- generated-comment ecosystem -->' >> comment.txt
echo '## `ruff-ecosystem` results' >> comment.txt
cat pr/ecosystem/ecosystem-result >> comment.txt
echo "" >> comment.txt
echo 'comment<<EOF' >> "$GITHUB_OUTPUT"
cat comment.txt >> "$GITHUB_OUTPUT"
echo 'EOF' >> "$GITHUB_OUTPUT"
- name: Find existing comment
uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3
if: steps.generate-comment.outcome == 'success'
id: find-comment
with:
issue-number: ${{ steps.pr-number.outputs.pr-number }}
comment-author: "github-actions[bot]"
body-includes: "<!-- generated-comment ecosystem -->"
- name: Create or update comment
if: steps.find-comment.outcome == 'success'
uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4
with:
comment-id: ${{ steps.find-comment.outputs.comment-id }}
issue-number: ${{ steps.pr-number.outputs.pr-number }}
body-path: comment.txt
edit-mode: replace

View File

@@ -1,144 +0,0 @@
# Publish the Ruff documentation.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a post-announce
# job within `cargo-dist`.
name: mkdocs
on:
workflow_dispatch:
inputs:
ref:
description: "The commit SHA, tag, or branch to publish. Uses the default branch if not specified."
default: ""
type: string
workflow_call:
inputs:
plan:
required: true
type: string
jobs:
mkdocs:
runs-on: ubuntu-latest
env:
MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.ref }}
persist-credentials: true
- uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
with:
python-version: 3.12
- name: "Set docs version"
env:
version: ${{ (inputs.plan != '' && fromJson(inputs.plan).announcement_tag) || inputs.ref }}
run: |
# if version is missing, use 'latest'
if [ -z "$version" ]; then
echo "Using 'latest' as version"
version="latest"
fi
# Use version as display name for now
display_name="$version"
echo "version=$version" >> "$GITHUB_ENV"
echo "display_name=$display_name" >> "$GITHUB_ENV"
- name: "Set branch name"
run: |
timestamp="$(date +%s)"
# create branch_display_name from display_name by replacing all
# characters disallowed in git branch names with hyphens
branch_display_name="$(echo "${display_name}" | tr -c '[:alnum:]._' '-' | tr -s '-')"
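# (tr -c also replaces the newline emitted by echo, so the result carries a
# trailing hyphen; harmless in a branch name)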
echo "branch_name=update-docs-$branch_display_name-$timestamp" >> "$GITHUB_ENV"
echo "timestamp=$timestamp" >> "$GITHUB_ENV"
- name: "Add SSH key"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
uses: webfactory/ssh-agent@a6f90b1f127823b31d4d4a8d96047790581349bd # v0.9.1
with:
ssh-private-key: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY }}
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
- name: "Install Insiders dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: pip install -r docs/requirements-insiders.txt
- name: "Install dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: pip install -r docs/requirements.txt
- name: "Copy README File"
run: |
python scripts/transform_readme.py --target mkdocs
python scripts/generate_mkdocs.py
- name: "Build Insiders docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: mkdocs build --strict -f mkdocs.insiders.yml
- name: "Build docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: mkdocs build --strict -f mkdocs.public.yml
- name: "Clone docs repo"
run: git clone https://${{ secrets.ASTRAL_DOCS_PAT }}@github.com/astral-sh/docs.git astral-docs
- name: "Copy docs"
run: rm -rf astral-docs/site/ruff && mkdir -p astral-docs/site && cp -r site/ruff astral-docs/site/
- name: "Commit docs"
working-directory: astral-docs
run: |
git config user.name "astral-docs-bot"
git config user.email "176161322+astral-docs-bot@users.noreply.github.com"
git checkout -b "${branch_name}"
git add site/ruff
git commit -m "Update ruff documentation for $version"
- name: "Create Pull Request"
working-directory: astral-docs
env:
GITHUB_TOKEN: ${{ secrets.ASTRAL_DOCS_PAT }}
run: |
# set the PR title
pull_request_title="Update ruff documentation for ${display_name}"
# Delete any existing pull requests that are open for this version
# by checking against pull_request_title because the new PR will
# supersede the old one.
gh pr list --state open --json title --jq ".[] | select(.title == \"$pull_request_title\") | .number" | \
xargs -I {} gh pr close {}
# push the branch to GitHub
git push origin "${branch_name}"
# create the PR
gh pr create \
--base=main \
--head="${branch_name}" \
--title="${pull_request_title}" \
--body="Automated documentation update for ${display_name}" \
--label="documentation"
- name: "Merge Pull Request"
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
working-directory: astral-docs
env:
GITHUB_TOKEN: ${{ secrets.ASTRAL_DOCS_PAT }}
run: |
# auto-merge the PR if the build was triggered by a release. Manual builds should be reviewed by a human.
# give the PR a few seconds to be created before trying to auto-merge it
sleep 10
gh pr merge --squash "${branch_name}"

View File

@@ -1,58 +0,0 @@
# Publish the Red Knot playground.
name: "[Knot Playground] Release"
permissions: {}
on:
push:
branches: [main]
paths:
- "crates/red_knot*/**"
- "crates/ruff_db/**"
- "crates/ruff_python_ast/**"
- "crates/ruff_python_parser/**"
- "playground/**"
- ".github/workflows/publish-knot-playground.yml"
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: true
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
jobs:
publish:
runs-on: ubuntu-latest
env:
CF_API_TOKEN_EXISTS: ${{ secrets.CF_API_TOKEN != '' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown
- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4.3.0
with:
node-version: 22
- uses: jetli/wasm-bindgen-action@20b33e20595891ab1a0ed73145d8a21fc96e7c29 # v0.2.0
- name: "Install Node dependencies"
run: npm ci
working-directory: playground
- name: "Run TypeScript checks"
run: npm run check
working-directory: playground
- name: "Build Knot playground"
run: npm run build --workspace knot-playground
working-directory: playground
- name: "Deploy to Cloudflare Pages"
if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
uses: cloudflare/wrangler-action@da0e0dfe58b7a431659754fdf3f186c529afbe65 # v3.14.1
with:
apiToken: ${{ secrets.CF_API_TOKEN }}
accountId: ${{ secrets.CF_ACCOUNT_ID }}
# `github.head_ref` is only set during pull requests; for manual runs or tags we use `main` to deploy to production
command: pages deploy playground/knot/dist --project-name=knot-playground --branch ${{ github.head_ref || 'main' }} --commit-hash ${GITHUB_SHA}

View File

@@ -1,54 +0,0 @@
# Publish the Ruff playground.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a post-announce
# job within `cargo-dist`.
name: "[Playground] Release"
on:
workflow_dispatch:
workflow_call:
inputs:
plan:
required: true
type: string
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
jobs:
publish:
runs-on: ubuntu-latest
env:
CF_API_TOKEN_EXISTS: ${{ secrets.CF_API_TOKEN != '' }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown
- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4.3.0
with:
node-version: 22
cache: "npm"
cache-dependency-path: playground/package-lock.json
- uses: jetli/wasm-bindgen-action@20b33e20595891ab1a0ed73145d8a21fc96e7c29 # v0.2.0
- name: "Install Node dependencies"
run: npm ci
working-directory: playground
- name: "Run TypeScript checks"
run: npm run check
working-directory: playground
- name: "Build Ruff playground"
run: npm run build --workspace ruff-playground
working-directory: playground
- name: "Deploy to Cloudflare Pages"
if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
uses: cloudflare/wrangler-action@da0e0dfe58b7a431659754fdf3f186c529afbe65 # v3.14.1
with:
apiToken: ${{ secrets.CF_API_TOKEN }}
accountId: ${{ secrets.CF_ACCOUNT_ID }}
# `github.head_ref` is only set during pull requests; for manual runs or tags we use `main` to deploy to production
command: pages deploy playground/ruff/dist --project-name=ruff-playground --branch ${{ github.head_ref || 'main' }} --commit-hash ${GITHUB_SHA}

View File

@@ -1,32 +0,0 @@
# Publish a release to PyPI.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a publish job
# within `cargo-dist`.
name: "[ruff] Publish to PyPI"
on:
workflow_call:
inputs:
plan:
required: true
type: string
jobs:
pypi-publish:
name: Upload to PyPI
runs-on: ubuntu-latest
environment:
name: release
permissions:
# For PyPI's trusted publishing.
id-token: write
steps:
- name: "Install uv"
uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
- uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
with:
pattern: wheels-*
path: wheels
merge-multiple: true
- name: Publish to PyPi
run: uv publish -v wheels/*

View File

@@ -1,59 +0,0 @@
# Build and publish ruff-api for wasm.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a publish
# job within `cargo-dist`.
name: "Build and publish wasm"
on:
workflow_dispatch:
workflow_call:
inputs:
plan:
required: true
type: string
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
jobs:
ruff_wasm:
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write
strategy:
matrix:
target: [web, bundler, nodejs]
fail-fast: false
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown
- uses: jetli/wasm-pack-action@0d096b08b4e5a7de8c28de67e11e945404e9eefa # v0.4.0
with:
version: v0.13.1
- uses: jetli/wasm-bindgen-action@20b33e20595891ab1a0ed73145d8a21fc96e7c29 # v0.2.0
- name: "Run wasm-pack build"
run: wasm-pack build --target ${{ matrix.target }} crates/ruff_wasm
- name: "Rename generated package"
run: | # Replace the package name w/ jq
jq '.name="@astral-sh/ruff-wasm-${{ matrix.target }}"' crates/ruff_wasm/pkg/package.json > /tmp/package.json
mv /tmp/package.json crates/ruff_wasm/pkg
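# e.g. the `web` target is published as @astral-sh/ruff-wasm-web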
- run: cp LICENSE crates/ruff_wasm/pkg # wasm-pack does not put the LICENSE file in the pkg
- uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4.3.0
with:
node-version: 20
registry-url: "https://registry.npmjs.org"
- name: "Publish (dry-run)"
if: ${{ inputs.plan == '' || fromJson(inputs.plan).announcement_tag_is_implicit }}
run: npm publish --dry-run crates/ruff_wasm/pkg
- name: "Publish"
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
run: npm publish --provenance --access public crates/ruff_wasm/pkg
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}

View File

@@ -1,305 +0,0 @@
# This file was autogenerated by dist: https://github.com/astral-sh/cargo-dist
#
# Copyright 2022-2024, axodotdev
# Copyright 2025 Astral Software Inc.
# SPDX-License-Identifier: MIT or Apache-2.0
#
# CI that:
#
# * checks for a Git Tag that looks like a release
# * builds artifacts with dist (archives, installers, hashes)
# * uploads those artifacts to temporary workflow zip
# * on success, uploads the artifacts to a GitHub Release
#
# Note that the GitHub Release will be created with a generated
# title/body based on your changelogs.
name: Release
permissions:
"contents": "write"
# This task will run whenever you workflow_dispatch with a tag that looks like a version
# like "1.0.0", "v0.1.0-prerelease.1", "my-app/0.1.0", "releases/v1.0.0", etc.
# Various formats will be parsed into a VERSION and an optional PACKAGE_NAME, where
# PACKAGE_NAME must be the name of a Cargo package in your workspace, and VERSION
# must be a Cargo-style SemVer Version (must have at least major.minor.patch).
#
# If PACKAGE_NAME is specified, then the announcement will be for that
# package (erroring out if it doesn't have the given version or isn't dist-able).
#
# If PACKAGE_NAME isn't specified, then the announcement will be for all
# (dist-able) packages in the workspace with that version (this mode is
# intended for workspaces with only one dist-able package, or with all dist-able
# packages versioned/released in lockstep).
#
# If you push multiple tags at once, separate instances of this workflow will
# spin up, creating an independent announcement for each one. However, GitHub
# will hard limit this to 3 tags per commit, as it will assume more tags is a
# mistake.
#
# If there's a prerelease-style suffix to the version, then the release(s)
# will be marked as a prerelease.
on:
workflow_dispatch:
inputs:
tag:
description: Release Tag
required: true
default: dry-run
type: string
jobs:
# Run 'dist plan' (or host) to determine what tasks we need to do
plan:
runs-on: "depot-ubuntu-latest-4"
outputs:
val: ${{ steps.plan.outputs.manifest }}
tag: ${{ (inputs.tag != 'dry-run' && inputs.tag) || '' }}
tag-flag: ${{ inputs.tag && inputs.tag != 'dry-run' && format('--tag={0}', inputs.tag) || '' }}
publishing: ${{ inputs.tag && inputs.tag != 'dry-run' }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
persist-credentials: false
submodules: recursive
- name: Install dist
# we specify bash to get pipefail; it guards against the `curl` command
# failing. otherwise `sh` won't catch that `curl` returned non-0
shell: bash
run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/astral-sh/cargo-dist/releases/download/v0.28.4-prerelease.1/cargo-dist-installer.sh | sh"
- name: Cache dist
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: cargo-dist-cache
path: ~/.cargo/bin/dist
# sure would be cool if github gave us proper conditionals...
# so here's a doubly-nested ternary-via-truthiness to try to provide the best possible
# functionality based on whether this is a pull_request, and whether it's from a fork.
# (PRs run on the *source* but secrets are usually on the *target* -- that's *good*
# but also really annoying to build CI around when it needs secrets to work right.)
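# (`cond && a || b` emulates a ternary in GitHub expressions: it yields `a`
# when cond is truthy, else `b`; safe here because `a` is never falsy)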
- id: plan
run: |
dist ${{ (inputs.tag && inputs.tag != 'dry-run' && format('host --steps=create --tag={0}', inputs.tag)) || 'plan' }} --output-format=json > plan-dist-manifest.json
echo "dist ran successfully"
cat plan-dist-manifest.json
echo "manifest=$(jq -c "." plan-dist-manifest.json)" >> "$GITHUB_OUTPUT"
- name: "Upload dist-manifest.json"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: artifacts-plan-dist-manifest
path: plan-dist-manifest.json
custom-build-binaries:
needs:
- plan
if: ${{ needs.plan.outputs.publishing == 'true' || fromJson(needs.plan.outputs.val).ci.github.pr_run_mode == 'upload' || inputs.tag == 'dry-run' }}
uses: ./.github/workflows/build-binaries.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
custom-build-docker:
needs:
- plan
if: ${{ needs.plan.outputs.publishing == 'true' || fromJson(needs.plan.outputs.val).ci.github.pr_run_mode == 'upload' || inputs.tag == 'dry-run' }}
uses: ./.github/workflows/build-docker.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
permissions:
"contents": "read"
"packages": "write"
# Build and package all the platform-agnostic(ish) things
build-global-artifacts:
needs:
- plan
- custom-build-binaries
- custom-build-docker
runs-on: "depot-ubuntu-latest-4"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BUILD_MANIFEST_NAME: target/distrib/global-dist-manifest.json
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
persist-credentials: false
submodules: recursive
- name: Install cached dist
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e
with:
name: cargo-dist-cache
path: ~/.cargo/bin/
- run: chmod +x ~/.cargo/bin/dist
# Get all the local artifacts for the global tasks to use (for e.g. checksums)
- name: Fetch local artifacts
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e
with:
pattern: artifacts-*
path: target/distrib/
merge-multiple: true
- id: cargo-dist
shell: bash
run: |
dist build ${{ needs.plan.outputs.tag-flag }} --output-format=json "--artifacts=global" > dist-manifest.json
echo "dist ran successfully"
# Parse out what we just built and upload it to scratch storage
echo "paths<<EOF" >> "$GITHUB_OUTPUT"
jq --raw-output ".upload_files[]" dist-manifest.json >> "$GITHUB_OUTPUT"
echo "EOF" >> "$GITHUB_OUTPUT"
cp dist-manifest.json "$BUILD_MANIFEST_NAME"
- name: "Upload artifacts"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: artifacts-build-global
path: |
${{ steps.cargo-dist.outputs.paths }}
${{ env.BUILD_MANIFEST_NAME }}
# Determines if we should publish/announce
host:
needs:
- plan
- custom-build-binaries
- custom-build-docker
- build-global-artifacts
# Only run if we're "publishing", and only if local and global didn't fail (skipped is fine)
if: ${{ always() && needs.plan.outputs.publishing == 'true' && (needs.build-global-artifacts.result == 'skipped' || needs.build-global-artifacts.result == 'success') && (needs.custom-build-binaries.result == 'skipped' || needs.custom-build-binaries.result == 'success') && (needs.custom-build-docker.result == 'skipped' || needs.custom-build-docker.result == 'success') }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
runs-on: "depot-ubuntu-latest-4"
outputs:
val: ${{ steps.host.outputs.manifest }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
persist-credentials: false
submodules: recursive
- name: Install cached dist
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e
with:
name: cargo-dist-cache
path: ~/.cargo/bin/
- run: chmod +x ~/.cargo/bin/dist
# Fetch artifacts from scratch-storage
- name: Fetch artifacts
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e
with:
pattern: artifacts-*
path: target/distrib/
merge-multiple: true
# This is a harmless no-op for GitHub Releases, hosting for that happens in "announce"
- id: host
shell: bash
run: |
dist host ${{ needs.plan.outputs.tag-flag }} --steps=upload --steps=release --output-format=json > dist-manifest.json
echo "artifacts uploaded and released successfully"
cat dist-manifest.json
echo "manifest=$(jq -c "." dist-manifest.json)" >> "$GITHUB_OUTPUT"
- name: "Upload dist-manifest.json"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
# Overwrite the previous copy
name: artifacts-dist-manifest
path: dist-manifest.json
custom-publish-pypi:
needs:
- plan
- host
if: ${{ !fromJson(needs.plan.outputs.val).announcement_is_prerelease || fromJson(needs.plan.outputs.val).publish_prereleases }}
uses: ./.github/workflows/publish-pypi.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
# publish jobs get escalated permissions
permissions:
"id-token": "write"
"packages": "write"
custom-publish-wasm:
needs:
- plan
- host
if: ${{ !fromJson(needs.plan.outputs.val).announcement_is_prerelease || fromJson(needs.plan.outputs.val).publish_prereleases }}
uses: ./.github/workflows/publish-wasm.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
# publish jobs get escalated permissions
permissions:
"contents": "read"
"id-token": "write"
"packages": "write"
# Create a GitHub Release while uploading all files to it
announce:
needs:
- plan
- host
- custom-publish-pypi
- custom-publish-wasm
# use "always() && ..." to allow us to wait for all publish jobs while
# still allowing individual publish jobs to skip themselves (for prereleases).
# "host" however must run to completion, no skipping allowed!
if: ${{ always() && needs.host.result == 'success' && (needs.custom-publish-pypi.result == 'skipped' || needs.custom-publish-pypi.result == 'success') && (needs.custom-publish-wasm.result == 'skipped' || needs.custom-publish-wasm.result == 'success') }}
runs-on: "depot-ubuntu-latest-4"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
persist-credentials: false
submodules: recursive
# Create a GitHub Release while uploading all files to it
- name: "Download GitHub Artifacts"
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e
with:
pattern: artifacts-*
path: artifacts
merge-multiple: true
- name: Cleanup
run: |
# Remove the granular manifests
rm -f artifacts/*-dist-manifest.json
- name: Create GitHub Release
env:
PRERELEASE_FLAG: "${{ fromJson(needs.host.outputs.val).announcement_is_prerelease && '--prerelease' || '' }}"
ANNOUNCEMENT_TITLE: "${{ fromJson(needs.host.outputs.val).announcement_title }}"
ANNOUNCEMENT_BODY: "${{ fromJson(needs.host.outputs.val).announcement_github_body }}"
RELEASE_COMMIT: "${{ github.sha }}"
run: |
# Write and read notes from a file to avoid quoting breaking things
echo "$ANNOUNCEMENT_BODY" > $RUNNER_TEMP/notes.txt
gh release create "${{ needs.plan.outputs.tag }}" --target "$RELEASE_COMMIT" $PRERELEASE_FLAG --title "$ANNOUNCEMENT_TITLE" --notes-file "$RUNNER_TEMP/notes.txt" artifacts/*
custom-notify-dependents:
needs:
- plan
- announce
uses: ./.github/workflows/notify-dependents.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
custom-publish-docs:
needs:
- plan
- announce
uses: ./.github/workflows/publish-docs.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
custom-publish-playground:
needs:
- plan
- announce
uses: ./.github/workflows/publish-playground.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit

304
.github/workflows/ruff.yaml vendored Normal file
View File

@@ -0,0 +1,304 @@
name: "[ruff] Release"
on:
create:
tags:
- v*
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
PACKAGE_NAME: ruff
PYTHON_VERSION: "3.7" # to build abi3 wheels
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
RUSTUP_MAX_RETRIES: 10
jobs:
macos-x86_64:
runs-on: macos-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: Install Rust toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
default: true
- name: Build wheels - x86_64
uses: messense/maturin-action@v1
with:
target: x86_64
args: --release --out dist --sdist
maturin-version: "v0.13.0"
- name: Install built wheel - x86_64
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
- name: Upload wheels
uses: actions/upload-artifact@v2
with:
name: wheels
path: dist
macos-universal:
runs-on: macos-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: Install Rust toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
default: true
- name: Build wheels - universal2
uses: messense/maturin-action@v1
with:
args: --release --universal2 --out dist
maturin-version: "v0.13.0"
- name: Install built wheel - universal2
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*universal2.whl --force-reinstall
- name: Upload wheels
uses: actions/upload-artifact@v2
with:
name: wheels
path: dist
windows:
runs-on: windows-latest
strategy:
matrix:
target: [x64, x86]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: ${{ matrix.target }}
- name: Install Rust toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
default: true
- name: Build wheels
uses: messense/maturin-action@v1
with:
target: ${{ matrix.target }}
args: --release --out dist
maturin-version: "v0.13.0"
- name: Install built wheel
shell: bash
run: |
python -m pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
- name: Upload wheels
uses: actions/upload-artifact@v2
with:
name: wheels
path: dist
linux:
runs-on: ubuntu-latest
strategy:
matrix:
target: [x86_64, i686]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: Build wheels
uses: messense/maturin-action@v1
with:
target: ${{ matrix.target }}
manylinux: auto
args: --release --out dist
maturin-version: "v0.13.0"
- name: Install built wheel
if: matrix.target == 'x86_64'
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
- name: Upload wheels
uses: actions/upload-artifact@v2
with:
name: wheels
path: dist
linux-cross:
runs-on: ubuntu-latest
strategy:
matrix:
target: [aarch64, armv7, s390x, ppc64le, ppc64]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Build wheels
uses: messense/maturin-action@v1
with:
target: ${{ matrix.target }}
manylinux: auto
args: --no-default-features --release --out dist
maturin-version: "v0.13.0"
- uses: uraimo/run-on-arch-action@v2.0.5
if: matrix.target != 'ppc64'
name: Install built wheel
with:
arch: ${{ matrix.target }}
distro: ubuntu20.04
githubToken: ${{ github.token }}
install: |
apt-get update
apt-get install -y --no-install-recommends python3 python3-pip
pip3 install -U pip
run: |
pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
- name: Upload wheels
uses: actions/upload-artifact@v2
with:
name: wheels
path: dist
musllinux:
runs-on: ubuntu-latest
strategy:
matrix:
target:
- x86_64-unknown-linux-musl
- i686-unknown-linux-musl
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: Build wheels
uses: messense/maturin-action@v1
with:
target: ${{ matrix.target }}
manylinux: musllinux_1_2
args: --release --out dist
maturin-version: "v0.13.0"
- name: Install built wheel
if: matrix.target == 'x86_64-unknown-linux-musl'
uses: addnab/docker-run-action@v3
with:
image: alpine:latest
options: -v ${{ github.workspace }}:/io -w /io
run: |
apk add py3-pip
pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links /io/dist/ --force-reinstall
- name: Upload wheels
uses: actions/upload-artifact@v2
with:
name: wheels
path: dist
musllinux-cross:
runs-on: ubuntu-latest
strategy:
matrix:
platform:
- target: aarch64-unknown-linux-musl
arch: aarch64
- target: armv7-unknown-linux-musleabihf
arch: armv7
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Build wheels
uses: messense/maturin-action@v1
with:
target: ${{ matrix.platform.target }}
manylinux: musllinux_1_2
args: --release --out dist
maturin-version: "v0.13.0"
- uses: uraimo/run-on-arch-action@master
name: Install built wheel
with:
arch: ${{ matrix.platform.arch }}
distro: alpine_latest
githubToken: ${{ github.token }}
install: |
apk add py3-pip
run: |
pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
- name: Upload wheels
uses: actions/upload-artifact@v2
with:
name: wheels
path: dist
pypy:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest]
target: [x86_64, aarch64]
python-version:
- "3.7"
- "3.8"
- "3.9"
exclude:
- os: macos-latest
target: aarch64
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: pypy${{ matrix.python-version }}
- name: Build wheels
uses: messense/maturin-action@v1
with:
maturin-version: "v0.13.0"
target: ${{ matrix.target }}
manylinux: auto
args: --release --out dist -i pypy${{ matrix.python-version }}
- name: Install built wheel
if: matrix.target == 'x86_64'
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
- name: Upload wheels
uses: actions/upload-artifact@v2
with:
name: wheels
path: dist
release:
name: Release
runs-on: ubuntu-latest
needs:
- macos-universal
- macos-x86_64
- windows
- linux
- linux-cross
- musllinux
- musllinux-cross
- pypy
if: "startsWith(github.ref, 'refs/tags/')"
steps:
- uses: actions/download-artifact@v2
with:
name: wheels
- uses: actions/setup-python@v4
- name: Publish to PyPI
env:
TWINE_USERNAME: __token__
TWINE_PASSWORD: ${{ secrets.RUFF_TOKEN }}
run: |
pip install --upgrade twine
twine upload --skip-existing *


@@ -1,83 +0,0 @@
name: Sync typeshed
on:
workflow_dispatch:
schedule:
# Run on the 1st and the 15th of every month:
- cron: "0 0 1,15 * *"
env:
FORCE_COLOR: 1
GH_TOKEN: ${{ github.token }}
jobs:
sync:
name: Sync typeshed
runs-on: ubuntu-latest
timeout-minutes: 20
# Don't run the cron job on forks:
if: ${{ github.repository == 'astral-sh/ruff' || github.event_name != 'schedule' }}
permissions:
contents: write
pull-requests: write
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
name: Checkout Ruff
with:
path: ruff
persist-credentials: true
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
name: Checkout typeshed
with:
repository: python/typeshed
path: typeshed
persist-credentials: false
- name: Setup git
run: |
git config --global user.name typeshedbot
git config --global user.email '<>'
- name: Sync typeshed
id: sync
run: |
rm -rf ruff/crates/red_knot_vendored/vendor/typeshed
mkdir ruff/crates/red_knot_vendored/vendor/typeshed
cp typeshed/README.md ruff/crates/red_knot_vendored/vendor/typeshed
cp typeshed/LICENSE ruff/crates/red_knot_vendored/vendor/typeshed
cp -r typeshed/stdlib ruff/crates/red_knot_vendored/vendor/typeshed/stdlib
rm -rf ruff/crates/red_knot_vendored/vendor/typeshed/stdlib/@tests
git -C typeshed rev-parse HEAD > ruff/crates/red_knot_vendored/vendor/typeshed/source_commit.txt
- name: Commit the changes
id: commit
if: ${{ steps.sync.outcome == 'success' }}
run: |
cd ruff
git checkout -b typeshedbot/sync-typeshed
git add .
git diff --staged --quiet || git commit -m "Sync typeshed. Source commit: https://github.com/python/typeshed/commit/$(git -C ../typeshed rev-parse HEAD)"
- name: Create a PR
if: ${{ steps.sync.outcome == 'success' && steps.commit.outcome == 'success' }}
run: |
cd ruff
git push --force origin typeshedbot/sync-typeshed
gh pr list --repo "$GITHUB_REPOSITORY" --head typeshedbot/sync-typeshed --json id --jq length | grep 1 && exit 0 # exit if there is an existing PR
gh pr create --title "Sync vendored typeshed stubs" --body "Close and reopen this PR to trigger CI" --label "internal"
create-issue-on-failure:
name: Create an issue if the typeshed sync failed
runs-on: ubuntu-latest
needs: [sync]
if: ${{ github.repository == 'astral-sh/ruff' && always() && github.event_name == 'schedule' && needs.sync.result == 'failure' }}
permissions:
issues: write
steps:
- uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
await github.rest.issues.create({
owner: "astral-sh",
repo: "ruff",
title: `Automated typeshed sync failed on ${new Date().toDateString()}`,
body: "Run listed here: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
labels: ["bug", "red-knot"],
})

.github/zizmor.yml

@@ -1,19 +0,0 @@
# Configuration for the zizmor static analysis tool, run via pre-commit in CI
# https://woodruffw.github.io/zizmor/configuration/
#
# TODO: can we remove the ignores here so that our workflows are more secure?
rules:
dangerous-triggers:
ignore:
- pr-comment.yaml
cache-poisoning:
ignore:
- build-docker.yml
- publish-playground.yml
excessive-permissions:
# it's hard to test what the impact of removing these ignores would be
# without actually running the release workflow...
ignore:
- build-docker.yml
- publish-playground.yml
- publish-docs.yml

.gitignore

@@ -1,37 +1,6 @@
# Benchmarking cpython (CONTRIBUTING.md)
crates/ruff_linter/resources/test/cpython
# generate_mkdocs.py
mkdocs.generated.yml
# check_ecosystem.py
ruff-old
github_search*.jsonl
# update_schemastore.py
schemastore
# `maturin develop` and ecosystem_all_check.sh
.venv*
# Formatter debugging (crates/ruff_python_formatter/README.md)
scratch.*
# Created by `perf` (CONTRIBUTING.md)
perf.data
perf.data.old
# Created by `flamegraph` (CONTRIBUTING.md)
flamegraph.svg
# Additional target directories that don't invalidate the main compile cache when changing linker settings,
# e.g. `CARGO_TARGET_DIR=target-maturin maturin build --release --strip` or
# `CARGO_TARGET_DIR=target-llvm-lines RUSTFLAGS="-Csymbol-mangling-version=v0" cargo llvm-lines -p ruff --lib`
/target*
# samply profiles
profile.json
# tracing-flame traces
tracing.folded
tracing-flamechart.svg
tracing-flamegraph.svg
# insta
*.rs.pending-snap
# Local cache
.ruff_cache
resources/test/cpython
###
# Rust.gitignore
@@ -104,7 +73,6 @@ coverage.xml
.hypothesis/
.pytest_cache/
cover/
repos/
# Translations
*.mo
@@ -213,17 +181,3 @@ cython_debug/
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/
.vimspector.json
# Visual Studio Code
.vscode/
# VIM
.*.sw?
.sw?
# Custom re-inclusions for the resolver test cases
!crates/ruff_python_resolver/resources/test/airflow/venv/
!crates/ruff_python_resolver/resources/test/airflow/venv/lib
!crates/ruff_python_resolver/resources/test/airflow/venv/lib/python3.11/site-packages/_watchdog_fsevents.cpython-311-darwin.so
!crates/ruff_python_resolver/resources/test/airflow/venv/lib/python3.11/site-packages/orjson/orjson.cpython-311-darwin.so


@@ -1 +0,0 @@
!/.github/


@@ -1,31 +0,0 @@
# default to true for all rules
default: true
# MD007/unordered-list-indent
MD007:
indent: 4
# MD033/no-inline-html
MD033: false
# MD041/first-line-h1
MD041: false
# MD013/line-length
MD013: false
# MD014/commands-show-output
MD014: false
# MD024/no-duplicate-heading
MD024:
# Allow when nested under different parents e.g. CHANGELOG.md
siblings_only: true
# MD046/code-block-style
#
# Ignore this because it conflicts with the code block style used in content
# tabs of mkdocs-material, which adds a blank line after the content title.
#
# Ref: https://github.com/astral-sh/ruff/pull/15011#issuecomment-2544790854
MD046: false


@@ -1,130 +1,10 @@
fail_fast: false
exclude: |
(?x)^(
.github/workflows/release.yml|
crates/red_knot_vendored/vendor/.*|
crates/red_knot_project/resources/.*|
crates/ruff_benchmark/resources/.*|
crates/ruff_linter/resources/.*|
crates/ruff_linter/src/rules/.*/snapshots/.*|
crates/ruff_notebook/resources/.*|
crates/ruff_server/resources/.*|
crates/ruff/resources/.*|
crates/ruff_python_formatter/resources/.*|
crates/ruff_python_formatter/tests/snapshots/.*|
crates/ruff_python_resolver/resources/.*|
crates/ruff_python_resolver/tests/snapshots/.*
)$
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
- repo: https://github.com/charliermarsh/ruff-pre-commit
rev: v0.0.128
hooks:
- id: check-merge-conflict
- id: ruff
- repo: https://github.com/abravalheri/validate-pyproject
rev: v0.24.1
rev: v0.10.1
hooks:
- id: validate-pyproject
- repo: https://github.com/executablebooks/mdformat
rev: 0.7.22
hooks:
- id: mdformat
additional_dependencies:
- mdformat-mkdocs==4.0.0
- mdformat-footnote==0.1.1
exclude: |
(?x)^(
docs/formatter/black\.md
| docs/\w+\.md
)$
- repo: https://github.com/igorshubovych/markdownlint-cli
rev: v0.44.0
hooks:
- id: markdownlint-fix
exclude: |
(?x)^(
docs/formatter/black\.md
| docs/\w+\.md
)$
- repo: https://github.com/adamchainz/blacken-docs
rev: 1.19.1
hooks:
- id: blacken-docs
args: ["--pyi", "--line-length", "130"]
files: '^crates/.*/resources/mdtest/.*\.md'
exclude: |
(?x)^(
.*?invalid(_.+)*_syntax\.md
)$
additional_dependencies:
- black==25.1.0
- repo: https://github.com/crate-ci/typos
rev: v1.31.1
hooks:
- id: typos
- repo: local
hooks:
- id: cargo-fmt
name: cargo fmt
entry: cargo fmt --
language: system
types: [rust]
pass_filenames: false # This makes it a lot faster
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.11.6
hooks:
- id: ruff-format
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
types_or: [python, pyi]
require_serial: true
# Prettier
- repo: https://github.com/rbubley/mirrors-prettier
rev: v3.5.3
hooks:
- id: prettier
types: [yaml]
# zizmor detects security vulnerabilities in GitHub Actions workflows.
# Additional configuration for the tool is found in `.github/zizmor.yml`
- repo: https://github.com/woodruffw/zizmor-pre-commit
rev: v1.6.0
hooks:
- id: zizmor
- repo: https://github.com/python-jsonschema/check-jsonschema
rev: 0.33.0
hooks:
- id: check-github-workflows
# `actionlint` hook, for verifying correct syntax in GitHub Actions workflows.
# Some additional configuration for `actionlint` can be found in `.github/actionlint.yaml`.
- repo: https://github.com/rhysd/actionlint
rev: v1.7.7
hooks:
- id: actionlint
stages:
# This hook is disabled by default, since it's quite slow.
# To run all hooks *including* this hook, use `uvx pre-commit run -a --hook-stage=manual`.
# To run *just* this hook, use `uvx pre-commit run -a actionlint --hook-stage=manual`.
- manual
args:
- "-ignore=SC2129" # ignorable stylistic lint from shellcheck
- "-ignore=SC2016" # another shellcheck lint: seems to have false positives?
additional_dependencies:
# actionlint has a shellcheck integration which extracts shell scripts in `run:` steps from GitHub Actions
# and checks these with shellcheck. This is arguably its most useful feature,
# but the integration only works if shellcheck is installed
- "github.com/wasilibs/go-shellcheck/cmd/shellcheck@v0.10.0"
ci:
skip: [cargo-fmt, dev-generate-all]


@@ -1,2 +0,0 @@
# Auto-generated by `cargo-dist`.
.github/workflows/release.yml


@@ -1,5 +0,0 @@
{
"recommendations": [
"rust-lang.rust-analyzer"
]
}


@@ -1,6 +0,0 @@
{
"rust-analyzer.check.extraArgs": [
"--all-features"
],
"rust-analyzer.check.command": "clippy",
}


@@ -1,557 +0,0 @@
# Breaking Changes
## 0.11.0
This is a follow-up to release 0.10.0. Because of a mistake in the release process, the `requires-python` inference changes were not included in that release. Ruff 0.11.0 now includes those changes, as well as the stabilization of the preview behavior for `PGH004`.
- **Changes to how the Python version is inferred when a `target-version` is not specified** ([#16319](https://github.com/astral-sh/ruff/pull/16319))
In previous versions of Ruff, you could specify your Python version with:
- The `target-version` option in a `ruff.toml` file or the `[tool.ruff]` section of a pyproject.toml file.
- The `project.requires-python` field in a `pyproject.toml` file with a `[tool.ruff]` section.
These options worked well in most cases, and are still recommended for fine control of the Python version. However, because of the way Ruff discovers config files, `pyproject.toml` files without a `[tool.ruff]` section would be ignored, including the `requires-python` setting. Ruff would then use the default Python version (3.9 as of this writing) instead, which is surprising when you've attempted to request another version.
In v0.10, config discovery has been updated to address this issue:
- If Ruff finds a `ruff.toml` file without a `target-version`, it will check
for a `pyproject.toml` file in the same directory and respect its
`requires-python` version, even if it does not contain a `[tool.ruff]`
section.
- If Ruff finds a user-level configuration, the `requires-python` field of the closest `pyproject.toml` in a parent directory will take precedence.
- If there is no config file (`ruff.toml` or `pyproject.toml` with a
`[tool.ruff]` section) in the directory of the file being checked, Ruff will
search for the closest `pyproject.toml` in the parent directories and use its
`requires-python` setting.
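For example, under the new discovery behavior, a `pyproject.toml` like the following is now enough for Ruff to pick up the Python version, even though it has no `[tool.ruff]` section (a minimal sketch; the project name and version bound are illustrative):
```toml
[project]
name = "example"
requires-python = ">=3.11"
```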
## 0.10.0
- **Changes to how the Python version is inferred when a `target-version` is not specified** ([#16319](https://github.com/astral-sh/ruff/pull/16319))
Because of a mistake in the release process, the `requires-python` inference changes are not included in this release and instead shipped as part of 0.11.0.
You can find a description of this change in the 0.11.0 section.
- **Updated `TYPE_CHECKING` behavior** ([#16669](https://github.com/astral-sh/ruff/pull/16669))
Previously, Ruff only recognized typechecking blocks that tested the `typing.TYPE_CHECKING` symbol. Now, Ruff recognizes any local variable named `TYPE_CHECKING`. This release also removes support for the legacy `if 0:` and `if False:` typechecking checks. Use a local `TYPE_CHECKING` variable instead.
- **More robust noqa parsing** ([#16483](https://github.com/astral-sh/ruff/pull/16483))
The syntax for both file-level and in-line suppression comments has been unified and made more robust to certain errors. In most cases, this will result in more suppression comments being read by Ruff, but there are a few instances where previously read comments will now log an error to the user instead. Please refer to the documentation on [_Error suppression_](https://docs.astral.sh/ruff/linter/#error-suppression) for the full specification.
- **Avoid unnecessary parentheses around with statements with a single context manager and a trailing comment** ([#14005](https://github.com/astral-sh/ruff/pull/14005))
This change fixes a bug in the formatter where it introduced unnecessary parentheses around with statements with a single context manager and a trailing comment. This change may result in a change in formatting for some users.
- **Bump alpine default tag to 3.21 for derived Docker images** ([#16456](https://github.com/astral-sh/ruff/pull/16456))
Alpine 3.21 was released in Dec 2024 and is used in the official Alpine-based Python images. Now the ruff:alpine image will use 3.21 instead of 3.20 and ruff:alpine3.20 will no longer be updated.
- **\[`unsafe-markup-use`\]: `RUF035` has been recoded to `S704`** ([#15957](https://github.com/astral-sh/ruff/pull/15957))
## 0.9.0
Ruff now formats your code according to the 2025 style guide. As a result, your code might now get formatted differently. See the [changelog](./CHANGELOG.md#090) for a detailed list of changes.
## 0.8.0
- **Default to Python 3.9**
Ruff now defaults to Python 3.9 instead of 3.8 if no explicit Python version is configured using [`ruff.target-version`](https://docs.astral.sh/ruff/settings/#target-version) or [`project.requires-python`](https://packaging.python.org/en/latest/guides/writing-pyproject-toml/#python-requires) ([#13896](https://github.com/astral-sh/ruff/pull/13896)). A minimal `target-version` pin is sketched after this list.
- **Changed location of `pydoclint` diagnostics**
[`pydoclint`](https://docs.astral.sh/ruff/rules/#pydoclint-doc) diagnostics now point to the first-line of the problematic docstring. Previously, this was not the case.
If you've opted into these preview rules but have them suppressed using
[`noqa`](https://docs.astral.sh/ruff/linter/#error-suppression) comments in
some places, this change may mean that you need to move the `noqa` suppression
comments. Most users should be unaffected by this change.
- **Use XDG (i.e. `~/.local/bin`) instead of the Cargo home directory in the standalone installer**
Previously, Ruff's installer used `$CARGO_HOME` or `~/.cargo/bin` for its target install directory. Now, Ruff will be installed into `$XDG_BIN_HOME`, `$XDG_DATA_HOME/../bin`, or `~/.local/bin` (in that order).
This change is only relevant to users of the standalone Ruff installer (using the shell or PowerShell script). If you installed Ruff using uv or pip, you should be unaffected.
- **Changes to the line width calculation**
Ruff now uses a new version of the [unicode-width](https://github.com/unicode-rs/unicode-width) Rust crate to calculate the line width. In very rare cases, this may lead to lines containing Unicode characters being reformatted, or being considered too long when they were not before ([`E501`](https://docs.astral.sh/ruff/rules/line-too-long/)).
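Returning to the Python-version default above: if you want to keep targeting Python 3.8 explicitly rather than rely on the new 3.9 default, a minimal sketch is:
```toml
[tool.ruff]
target-version = "py38"
```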
## 0.7.0
- The pytest rules `PT001` and `PT023` now default to omitting the decorator parentheses when there are no arguments
([#12838](https://github.com/astral-sh/ruff/pull/12838), [#13292](https://github.com/astral-sh/ruff/pull/13292)).
This was a change that we attempted to make in Ruff v0.6.0, but only partially made due to an error on our part.
See the [blog post](https://astral.sh/blog/ruff-v0.7.0) for more details.
- The `useless-try-except` rule (in our `tryceratops` category) has been recoded from `TRY302` to
`TRY203` ([#13502](https://github.com/astral-sh/ruff/pull/13502)). This ensures Ruff's code is consistent with
the same rule in the [`tryceratops`](https://github.com/guilatrova/tryceratops) linter.
- The `lint.allow-unused-imports` setting has been removed ([#13677](https://github.com/astral-sh/ruff/pull/13677)). Use
[`lint.pyflakes.allow-unused-imports`](https://docs.astral.sh/ruff/settings/#lint_pyflakes_allowed-unused-imports)
instead.
## 0.6.0
- Detect imports in `src` layouts by default for `isort` rules ([#12848](https://github.com/astral-sh/ruff/pull/12848))
- The pytest rules `PT001` and `PT023` now default to omitting the decorator parentheses when there are no arguments ([#12838](https://github.com/astral-sh/ruff/pull/12838)).
- Lint and format Jupyter Notebook by default ([#12878](https://github.com/astral-sh/ruff/pull/12878)).
You can disable specific rules for notebooks using [`per-file-ignores`](https://docs.astral.sh/ruff/settings/#lint_per-file-ignores):
```toml
[tool.ruff.lint.per-file-ignores]
"*.ipynb" = ["E501"] # disable line-too-long in notebooks
```
If you'd prefer to either only lint or only format Jupyter Notebook files, you can use the
section-specific `exclude` option to do so. For example, the following would only lint Jupyter
Notebook files and not format them:
```toml
[tool.ruff.format]
exclude = ["*.ipynb"]
```
And, conversely, the following would only format Jupyter Notebook files and not lint them:
```toml
[tool.ruff.lint]
exclude = ["*.ipynb"]
```
You can completely disable Jupyter Notebook support by updating the [`extend-exclude`](https://docs.astral.sh/ruff/settings/#extend-exclude) setting:
```toml
[tool.ruff]
extend-exclude = ["*.ipynb"]
```
## 0.5.0
- Follow the XDG specification to discover user-level configurations on macOS (same as on other Unix platforms)
- Selecting `ALL` now excludes deprecated rules
- The released archives now include an extra level of nesting, which can be removed with `--strip-components=1` when untarring.
- The release artifact's file name no longer includes the version tag. This enables users to install via `/latest` URLs on GitHub.
## 0.3.0
### Ruff 2024.2 style
The formatter now formats code according to the Ruff 2024.2 style guide. Read the [changelog](./CHANGELOG.md#030) for a detailed list of stabilized style changes.
### `isort`: Use one blank line after imports in typing stub files ([#9971](https://github.com/astral-sh/ruff/pull/9971))
Previously, Ruff used one or two blank lines (or the number configured by `isort.lines-after-imports`) after imports in typing stub files (`.pyi` files).
The [typing style guide for stubs](https://typing.readthedocs.io/en/latest/source/stubs.html#style-guide) recommends using at most 1 blank line for grouping.
As of this release, `isort` now always uses one blank line after imports in stub files, the same as the formatter.
### `build` is no longer excluded by default ([#10093](https://github.com/astral-sh/ruff/pull/10093))
Ruff maintains a list of directories and files that are excluded by default. This list now consists of the following patterns:
- `.bzr`
- `.direnv`
- `.eggs`
- `.git`
- `.git-rewrite`
- `.hg`
- `.ipynb_checkpoints`
- `.mypy_cache`
- `.nox`
- `.pants.d`
- `.pyenv`
- `.pytest_cache`
- `.pytype`
- `.ruff_cache`
- `.svn`
- `.tox`
- `.venv`
- `.vscode`
- `__pypackages__`
- `_build`
- `buck-out`
- `dist`
- `node_modules`
- `site-packages`
- `venv`
Previously, the `build` directory was included in this list. However, `build` is a fairly common directory
name, and excluding it by default caused confusion. Ruff no longer excludes `build` unless it is excluded by a `.gitignore` file
or listed in `extend-exclude`.
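If you depended on the old behavior, one way to restore it is to opt `build` back into the exclusions yourself (a sketch, assuming a top-level `build` directory):
```toml
[tool.ruff]
extend-exclude = ["build"]
```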
### `--format` is no longer a valid `rule` or `linter` command option
Previously, `ruff rule` and `ruff linter` accepted the `--format <FORMAT>` option as an alias for `--output-format`. Ruff no longer
supports this alias. Please use `ruff rule --output-format <FORMAT>` and `ruff linter --output-format <FORMAT>` instead.
## 0.1.9
### `site-packages` is now excluded by default ([#5513](https://github.com/astral-sh/ruff/pull/5513))
Ruff maintains a list of default exclusions, which now consists of the following patterns:
- `.bzr`
- `.direnv`
- `.eggs`
- `.git-rewrite`
- `.git`
- `.hg`
- `.ipynb_checkpoints`
- `.mypy_cache`
- `.nox`
- `.pants.d`
- `.pyenv`
- `.pytest_cache`
- `.pytype`
- `.ruff_cache`
- `.svn`
- `.tox`
- `.venv`
- `.vscode`
- `__pypackages__`
- `_build`
- `buck-out`
- `build`
- `dist`
- `node_modules`
- `site-packages`
- `venv`
Previously, the `site-packages` directory was not excluded by default. While `site-packages` tends
to be excluded anyway by virtue of the `.venv` exclusion, this may not be the case when using Ruff
from VS Code outside a virtual environment.
## 0.1.0
### The deprecated `format` setting has been removed
Ruff previously used the `format` setting, `--format` CLI option, and `RUFF_FORMAT` environment variable to
configure the output format of the CLI. This usage was deprecated in `v0.0.291` — the `format` setting is now used
to control Ruff's code formatting. As of this release:
- The `format` setting cannot be used to configure the output format, use `output-format` instead
- The `RUFF_FORMAT` environment variable is ignored, use `RUFF_OUTPUT_FORMAT` instead
- The `--format` option has been removed from `ruff check`, use `--output-format` instead
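As a rough sketch of the configuration-file migration (assuming you previously used `format` to control output):
```toml
[tool.ruff]
# Previously: format = "json"
# Now:
output-format = "json"
```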
### Unsafe fixes are not applied by default ([#7769](https://github.com/astral-sh/ruff/pull/7769))
Ruff labels fixes as "safe" and "unsafe". The meaning and intent of your code will be retained when applying safe
fixes, but the meaning could be changed when applying unsafe fixes. Previously, unsafe fixes were always displayed
and applied when fixing was enabled. Now, unsafe fixes are hidden by default and not applied. The `--unsafe-fixes`
flag or `unsafe-fixes` configuration option can be used to enable unsafe fixes.
See the [docs](https://docs.astral.sh/ruff/configuration/#fix-safety) for details.
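For instance, to opt back into applying unsafe fixes by default, a minimal configuration sketch is:
```toml
[tool.ruff]
unsafe-fixes = true
```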
### Remove formatter-conflicting rules from the default rule set ([#7900](https://github.com/astral-sh/ruff/pull/7900))
Previously, Ruff enabled all implemented rules in Pycodestyle (`E`) by default. Ruff now only includes the
Pycodestyle prefixes `E4`, `E7`, and `E9` to exclude rules that conflict with automatic formatters. Consequently,
the stable rule set no longer includes `line-too-long` (`E501`) and `mixed-spaces-and-tabs` (`E101`). Other
excluded Pycodestyle rules include whitespace enforcement in `E1` and `E2`; these rules are currently in preview, and are already omitted by default.
This change only affects those using Ruff under its default rule set. Users that include `E` in their `select` will experience no change in behavior.
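For example, opting back into the full pycodestyle set while keeping the defaults is a one-line change (sketch):
```toml
[tool.ruff]
extend-select = ["E"]
```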
## 0.0.288
### Remove support for emoji identifiers ([#7212](https://github.com/astral-sh/ruff/pull/7212))
Previously, Ruff supported non-standards-compliant emoji identifiers such as `📦 = 1`.
We decided to remove this non-standard language extension. Ruff now reports syntax errors for invalid emoji identifiers in your code, the same as CPython.
### Improved GitLab fingerprints ([#7203](https://github.com/astral-sh/ruff/pull/7203))
GitLab uses fingerprints to identify new, existing, or fixed violations. Previously, Ruff included the violation's position in the fingerprint. Using the location has the downside that changing any code before the violation causes the fingerprint to change, resulting in GitLab reporting one fixed and one new violation even though it is a pre-existing violation.
Ruff now uses a more stable, location-agnostic fingerprint to minimize the chance that existing violations are incorrectly marked as fixed and re-reported as new violations.
As a one-time effect, expect GitLab to report each pre-existing violation in your project as both fixed and newly introduced in your Ruff upgrade PR.
## 0.0.283 / 0.0.284
### The target Python version now defaults to 3.8 instead of 3.10 ([#6397](https://github.com/astral-sh/ruff/pull/6397))
Previously, when a target Python version was not specified, Ruff would use a default of Python 3.10. However, it is safer to default to an _older_ Python version to avoid assuming the availability of new features. We now default to the oldest supported Python version which is currently Python 3.8.
(We still support Python 3.7 but since [it has reached EOL](https://devguide.python.org/versions/#unsupported-versions) we've decided not to make it the default here.)
Note this change was announced in 0.0.283 but not active until 0.0.284.
## 0.0.277
### `.ipynb_checkpoints`, `.pyenv`, `.pytest_cache`, and `.vscode` are now excluded by default ([#5513](https://github.com/astral-sh/ruff/pull/5513))
Ruff maintains a list of default exclusions, which now consists of the following patterns:
- `.bzr`
- `.direnv`
- `.eggs`
- `.git`
- `.git-rewrite`
- `.hg`
- `.ipynb_checkpoints`
- `.mypy_cache`
- `.nox`
- `.pants.d`
- `.pyenv`
- `.pytest_cache`
- `.pytype`
- `.ruff_cache`
- `.svn`
- `.tox`
- `.venv`
- `.vscode`
- `__pypackages__`
- `_build`
- `buck-out`
- `build`
- `dist`
- `node_modules`
- `venv`
Previously, the `.ipynb_checkpoints`, `.pyenv`, `.pytest_cache`, and `.vscode` directories were not
excluded by default. This change brings Ruff's default exclusions in line with other tools like
Black.
## 0.0.276
### The `keep-runtime-typing` setting has been reinstated ([#5470](https://github.com/astral-sh/ruff/pull/5470))
The `keep-runtime-typing` setting has been reinstated with revised semantics. This setting was
removed in [#4427](https://github.com/astral-sh/ruff/pull/4427), as it was equivalent to ignoring
the `UP006` and `UP007` rules via Ruff's standard `ignore` mechanism.
Taking `UP006` (rewrite `List[int]` to `list[int]`) as an example, the setting now behaves as
follows:
- On Python 3.7 and Python 3.8, setting `keep-runtime-typing = true` will cause Ruff to ignore
`UP006` violations, even if `from __future__ import annotations` is present in the file.
While such annotations are valid in Python 3.7 and Python 3.8 when combined with
`from __future__ import annotations`, they aren't supported by libraries like Pydantic and
FastAPI, which rely on runtime type checking.
- On Python 3.9 and above, the setting has no effect, as `list[int]` is a valid type annotation,
and libraries like Pydantic and FastAPI support it without issue.
In short: `keep-runtime-typing` can be used to ensure that Ruff doesn't introduce type annotations
that are unsupported at runtime by the current Python version, or by libraries like Pydantic and
FastAPI.
Note that this is not a breaking change, but is included here to complement the previous removal
of `keep-runtime-typing`.
## 0.0.268
### The `keep-runtime-typing` setting has been removed ([#4427](https://github.com/astral-sh/ruff/pull/4427))
Enabling the `keep-runtime-typing` option, located under the `pyupgrade` section, is equivalent
to ignoring the `UP006` and `UP007` rules via Ruff's standard `ignore` mechanism. As there's no
need for a dedicated setting to disable these rules, the `keep-runtime-typing` option has been
removed.
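The equivalent configuration via the standard `ignore` mechanism looks like this (a sketch):
```toml
[tool.ruff]
ignore = ["UP006", "UP007"]
```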
## 0.0.267
### `update-check` is no longer a valid configuration option ([#4313](https://github.com/astral-sh/ruff/pull/4313))
The `update-check` functionality was deprecated in [#2530](https://github.com/astral-sh/ruff/pull/2530),
in that the behavior itself was removed, and Ruff was changed to warn when that option was enabled.
Now, Ruff will throw an error when `update-check` is provided via a configuration file (e.g.,
`update-check = false`) or through the command-line, since it has no effect. Users should remove
this option from their configuration.
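Concretely, a configuration line like the following now produces an error and should simply be deleted (sketch):
```toml
[tool.ruff]
update-check = false  # no longer valid; remove this line
```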
## 0.0.265
### `--fix-only` now exits with a zero exit code, unless `--exit-non-zero-on-fix` is specified ([#4146](https://github.com/astral-sh/ruff/pull/4146))
Previously, `--fix-only` would exit with a non-zero exit code if any fixes were applied. This
behavior was inconsistent with `--fix`, and further, meant that `--exit-non-zero-on-fix` was
effectively ignored when `--fix-only` was specified.
Now, `--fix-only` will exit with a zero exit code, unless `--exit-non-zero-on-fix` is specified,
in which case it will exit with a non-zero exit code if any fixes were applied.
## 0.0.260
### Fixes are now represented as a list of edits ([#3709](https://github.com/astral-sh/ruff/pull/3709))
Previously, Ruff represented each fix as a single edit, which prohibited Ruff from automatically
fixing violations that required multiple edits across a file. As such, Ruff now represents each
fix as a list of edits.
This primarily affects the JSON API. Ruff's JSON representation used to represent the `fix` field as
a single edit, like so:
```json
{
"message": "Remove unused import: `sys`",
"content": "",
"location": {"row": 1, "column": 0},
"end_location": {"row": 2, "column": 0}
}
```
The updated representation instead includes a list of edits:
```json
{
"message": "Remove unused import: `sys`",
"edits": [
{
"content": "",
"location": {"row": 1, "column": 0},
"end_location": {"row": 2, "column": 0},
}
]
}
```
## 0.0.246
### `multiple-statements-on-one-line-def` (`E704`) was removed ([#2773](https://github.com/astral-sh/ruff/pull/2773))
This rule was introduced in v0.0.245. However, it turns out that pycodestyle and Flake8 ignore this
rule by default, as it is not part of PEP 8. As such, we've removed it from Ruff.
## 0.0.245
### Ruff's public `check` method was removed ([#2709](https://github.com/astral-sh/ruff/pull/2709))
Previously, Ruff exposed a `check` method as a public Rust API. This method was used by few,
if any clients, and was not well documented or supported. As such, it has been removed, with
the intention of adding a stable public API in the future.
## 0.0.238
### `select`, `extend-select`, `ignore`, and `extend-ignore` have new semantics ([#2312](https://github.com/astral-sh/ruff/pull/2312))
Previously, the interplay between `select` and its related options could lead to unexpected
behavior. For example, `ruff --select E501 --ignore ALL` and `ruff --select E501 --extend-ignore ALL`
behaved differently. (See [#2312](https://github.com/astral-sh/ruff/pull/2312) for more
examples.)
When Ruff determines the enabled rule set, it has to reconcile `select` and `ignore` from a variety
of sources, including the current `pyproject.toml`, any inherited `pyproject.toml` files, and the
CLI.
The new semantics are such that Ruff uses the "highest-priority" `select` as the basis for the rule
set, and then applies any `extend-select`, `ignore`, and `extend-ignore` adjustments. CLI options
are given higher priority than `pyproject.toml` options, and the current `pyproject.toml` file is
given higher priority than any inherited `pyproject.toml` files.
`extend-select` and `extend-ignore` are no longer given "top priority"; instead, they merely append
to the `select` and `ignore` lists, as in Flake8.
This change is largely backwards compatible -- most users should experience no change in behavior.
However, as an example of a breaking change, consider the following:
```toml
[tool.ruff]
ignore = ["F401"]
```
Running `ruff --select F` would previously have enabled all `F` rules, apart from `F401`. Now, it
will enable all `F` rules, including `F401`, as the command line's `--select` resets the resolution.
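Under the new semantics, `extend-select` merely appends to `select`, so (as a sketch):
```toml
[tool.ruff]
select = ["E"]
extend-select = ["B"]
# Equivalent to: select = ["E", "B"]
```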
### `remove-six-compat` (`UP016`) has been removed ([#2332](https://github.com/astral-sh/ruff/pull/2332))
The `remove-six-compat` rule has been removed. This rule was only useful for one-time Python 2-to-3
upgrades.
## 0.0.237
### `--explain`, `--clean`, and `--generate-shell-completion` are now subcommands ([#2190](https://github.com/astral-sh/ruff/pull/2190))
`--explain`, `--clean`, and `--generate-shell-completion` are now implemented as subcommands:
```console
ruff . # Still works! And will always work.
ruff check . # New! Also works.
ruff --explain E402 # Still works.
ruff rule E402 # New! Also works. (And preferred.)
# Oops! The command has to come first.
ruff --format json --explain E402 # No longer works.
ruff --explain E402 --format json # Still works!
ruff rule E402 --format json # Works! (And preferred.)
```
This change is largely backwards compatible -- most users should experience
no change in behavior. However, please note the following exceptions:
- Subcommands will now fail when invoked with unsupported arguments, instead
of silently ignoring them. For example, the following will now fail:
```console
ruff --clean --respect-gitignore
```
(The `clean` command doesn't support `--respect-gitignore`.)
- The semantics of `ruff <arg>` have changed slightly when `<arg>` is a valid subcommand.
For example, prior to this release, running `ruff rule` would run `ruff` over a file or
directory called `rule`. Now, `ruff rule` would invoke the `rule` subcommand. This should
only impact projects with files or directories named `rule`, `check`, `explain`, `clean`,
or `generate-shell-completion`.
- Scripts that invoke ruff should supply `--` before any positional arguments.
(The semantics of `ruff -- <arg>` have not changed.)
- `--explain` previously treated `--format grouped` as a synonym for `--format text`.
This is no longer supported; instead, use `--format text`.
## 0.0.226
### `misplaced-comparison-constant` (`PLC2201`) was deprecated in favor of `SIM300` ([#1980](https://github.com/astral-sh/ruff/pull/1980))
These two rules contain (nearly) identical logic. To deduplicate the rule set, we've upgraded
`SIM300` to handle a few more cases, and deprecated `PLC2201` in favor of `SIM300`.
## 0.0.225
### `@functools.cache` rewrites have been moved to a standalone rule (`UP033`) ([#1938](https://github.com/astral-sh/ruff/pull/1938))
Previously, `UP011` handled both `@functools.lru_cache()`-to-`@functools.lru_cache` conversions,
_and_ `@functools.lru_cache(maxsize=None)`-to-`@functools.cache` conversions. The latter has been
moved out to its own rule (`UP033`). As such, some `# noqa: UP011` comments may need to be updated
to reflect the change in rule code.
## 0.0.222
### `--max-complexity` has been removed from the CLI ([#1877](https://github.com/astral-sh/ruff/pull/1877))
The McCabe plugin's `--max-complexity` setting has been removed from the CLI, for consistency with
the treatment of other, similar settings.
To set the maximum complexity, use the `max-complexity` property in your `pyproject.toml` file,
like so:
```toml
[tool.ruff.mccabe]
max-complexity = 10
```
## 0.0.181
### Files excluded by `.gitignore` are now ignored ([#1234](https://github.com/astral-sh/ruff/pull/1234))
Ruff will now avoid checking files that are excluded by `.ignore`, `.gitignore`,
`.git/info/exclude`, and global `gitignore` files. This behavior is powered by the [`ignore`](https://docs.rs/ignore/latest/ignore/struct.WalkBuilder.html#ignore-rules)
crate, and is applied in addition to Ruff's built-in `exclude` system.
To disable this behavior, set `respect-gitignore = false` in your `pyproject.toml` file.
Note that hidden files (i.e., files and directories prefixed with a `.`) are _not_ ignored by
default.
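To opt out of gitignore-based exclusion entirely (sketch):
```toml
[tool.ruff]
respect-gitignore = false
```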
## 0.0.178
### Configuration files are now resolved hierarchically ([#1190](https://github.com/astral-sh/ruff/pull/1190))
`pyproject.toml` files are now resolved hierarchically, such that for each Python file, we find
the first `pyproject.toml` file in its path, and use that to determine its lint settings.
See the [documentation](https://docs.astral.sh/ruff/configuration/#python-file-discovery) for more.

File diff suppressed because it is too large


@@ -1,17 +1,5 @@
# Contributor Covenant Code of Conduct
- [Our Pledge](#our-pledge)
- [Our Standards](#our-standards)
- [Enforcement Responsibilities](#enforcement-responsibilities)
- [Scope](#scope)
- [Enforcement](#enforcement)
- [Enforcement Guidelines](#enforcement-guidelines)
- [1. Correction](#1-correction)
- [2. Warning](#2-warning)
- [3. Temporary Ban](#3-temporary-ban)
- [4. Permanent Ban](#4-permanent-ban)
- [Attribution](#attribution)
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
@@ -29,24 +17,24 @@ diverse, inclusive, and healthy community.
Examples of behavior that contributes to a positive environment for our
community include:
- Demonstrating empathy and kindness toward other people
- Being respectful of differing opinions, viewpoints, and experiences
- Giving and gracefully accepting constructive feedback
- Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
- Focusing on what is best not just for us as individuals, but for the
overall community
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
- The use of sexualized language or imagery, and sexual attention or
advances of any kind
- Trolling, insulting or derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or email
address, without their explicit permission
- Other conduct which could reasonably be considered inappropriate in a
professional setting
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
@@ -72,7 +60,7 @@ representative at an online or offline event.
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
<charlie.r.marsh@gmail.com>.
charlie.r.marsh@gmail.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
@@ -118,7 +106,7 @@ Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
@@ -127,12 +115,14 @@ the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available [here](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html).
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
For answers to common questions about this code of conduct, see the [FAQ](https://www.contributor-covenant.org/faq).
Translations are available [here](https://www.contributor-covenant.org/translations).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.


@@ -2,21 +2,12 @@
Welcome! We're happy to have you here. Thank you in advance for your contribution to Ruff.
## The Basics
## The basics
Ruff welcomes contributions in the form of pull requests.
For small changes (e.g., bug fixes), feel free to submit a PR.
For larger changes (e.g., new lint rules, new functionality, new configuration options), consider
creating an [**issue**](https://github.com/astral-sh/ruff/issues) outlining your proposed change.
You can also join us on [Discord](https://discord.com/invite/astral-sh) to discuss your idea with the
community. We've labeled [beginner-friendly tasks](https://github.com/astral-sh/ruff/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
in the issue tracker, along with [bugs](https://github.com/astral-sh/ruff/issues?q=is%3Aissue+is%3Aopen+label%3Abug)
and [improvements](https://github.com/astral-sh/ruff/issues?q=is%3Aissue+is%3Aopen+label%3Aaccepted)
that are ready for contributions.
If you have suggestions on how we might improve the contributing documentation, [let us know](https://github.com/astral-sh/ruff/discussions/5693)!
Ruff welcomes contributions in the form of Pull Requests. For small changes (e.g., bug fixes), feel
free to submit a PR. For larger changes (e.g., new lint rules, new functionality, new configuration
options), consider submitting an [Issue](https://github.com/charliermarsh/ruff/issues) outlining
your proposed change.
### Prerequisites
@@ -29,262 +20,92 @@ You'll also need [Insta](https://insta.rs/docs/) to update snapshot tests:
cargo install cargo-insta
```
You'll need [uv](https://docs.astral.sh/uv/getting-started/installation/) (or `pipx` and `pip`) to
run Python utility commands.
You can optionally install pre-commit hooks to automatically run the validation checks
when making a commit:
```shell
uv tool install pre-commit
pre-commit install
```
We recommend [nextest](https://nexte.st/) to run Ruff's test suite (via `cargo nextest run`),
though it's not strictly necessary:
```shell
cargo install cargo-nextest --locked
```
Throughout this guide, any usages of `cargo test` can be replaced with `cargo nextest run`,
if you choose to install `nextest`.
### Development
After cloning the repository, run Ruff locally from the repository root with:
After cloning the repository, run Ruff locally with:
```shell
cargo run -p ruff -- check /path/to/file.py --no-cache
cargo run resources/test/fixtures --no-cache
```
Prior to opening a pull request, ensure that your code has been auto-formatted,
and that it passes both the lint and test validation checks:
Prior to opening a pull request, ensure that your code has been auto-formatted, and that it passes
both the lint and test validation checks:
```shell
cargo clippy --workspace --all-targets --all-features -- -D warnings # Rust linting
RUFF_UPDATE_SCHEMA=1 cargo test # Rust testing and updating ruff.schema.json
uvx pre-commit run --all-files --show-diff-on-failure # Rust and Python formatting, Markdown and Python linting, etc.
cargo +nightly fmt --all # Auto-formatting...
cargo +nightly clippy --all # Linting...
cargo +nightly test --all # Testing...
```
These checks will run on GitHub Actions when you open your pull request, but running them locally
These checks will run on GitHub Actions when you open your Pull Request, but running them locally
will save you time and expedite the merge process.
If you're using VS Code, you can also install the recommended [rust-analyzer](https://marketplace.visualstudio.com/items?itemName=rust-lang.rust-analyzer) extension to get these checks while editing.
Note that many code changes also require updating the snapshot tests, which is done interactively
after running `cargo test` like so:
```shell
cargo insta review
```
If your pull request relates to a specific lint rule, include the category and rule code in the
title, as in the following examples:
- \[`flake8-bugbear`\] Avoid false positive for usage after `continue` (`B031`)
- \[`flake8-simplify`\] Detect implicit `else` cases in `needless-bool` (`SIM103`)
- \[`pycodestyle`\] Implement `redundant-backslash` (`E502`)
Your pull request will be reviewed by a maintainer, which may involve a few rounds of iteration
Your Pull Request will be reviewed by a maintainer, which may involve a few rounds of iteration
prior to merging.
### Project Structure
Ruff is structured as a monorepo with a [flat crate structure](https://matklad.github.io/2021/08/22/large-rust-workspaces.html),
such that all crates are contained in a flat `crates` directory.
The vast majority of the code, including all lint rules, lives in the `ruff_linter` crate (located
at `crates/ruff_linter`). As a contributor, that's the crate that'll be most relevant to you.
At the time of writing, the repository includes the following crates:
- `crates/ruff_linter`: library crate containing all lint rules and the core logic for running them.
If you're working on a rule, this is the crate for you.
- `crates/ruff_benchmark`: binary crate for running micro-benchmarks.
- `crates/ruff_cache`: library crate for caching lint results.
- `crates/ruff`: binary crate containing Ruff's command-line interface.
- `crates/ruff_dev`: binary crate containing utilities used in the development of Ruff itself (e.g.,
`cargo dev generate-all`), see the [`cargo dev`](#cargo-dev) section below.
- `crates/ruff_diagnostics`: library crate for the rule-independent abstractions in the lint
diagnostics APIs.
- `crates/ruff_formatter`: library crate for language agnostic code formatting logic based on an
intermediate representation. The backend for `ruff_python_formatter`.
- `crates/ruff_index`: library crate inspired by `rustc_index`.
- `crates/ruff_macros`: proc macro crate containing macros used by Ruff.
- `crates/ruff_notebook`: library crate for parsing and manipulating Jupyter notebooks.
- `crates/ruff_python_ast`: library crate containing Python-specific AST types and utilities.
- `crates/ruff_python_codegen`: library crate containing utilities for generating Python source code.
- `crates/ruff_python_formatter`: library crate implementing the Python formatter. Emits an
intermediate representation for each node, which `ruff_formatter` prints based on the configured
line length.
- `crates/ruff_python_semantic`: library crate containing Python-specific semantic analysis logic,
including Ruff's semantic model. Used to resolve queries like "What import does this variable
refer to?"
- `crates/ruff_python_stdlib`: library crate containing Python-specific standard library data, e.g.
the names of all built-in exceptions and which standard library types are immutable.
- `crates/ruff_python_trivia`: library crate containing Python-specific trivia utilities (e.g.,
for analyzing indentation, newlines, etc.).
- `crates/ruff_python_parser`: library crate containing the Python parser.
- `crates/ruff_wasm`: library crate for exposing Ruff as a WebAssembly module. Powers the
[Ruff Playground](https://play.ruff.rs/).
### Example: Adding a new lint rule
At a high level, the steps involved in adding a new lint rule are as follows:
There are four phases to adding a new lint rule:
1. Determine a name for the new rule as per our [rule naming convention](#rule-naming-convention)
(e.g., `AssertFalse`, as in, "allow `assert False`").
1. Define the rule in `src/checks.rs`.
2. Define the _logic_ for triggering the rule in `src/check_ast.rs` (for AST-based checks),
`src/check_tokens.rs` (for token-based checks), or `src/check_lines.rs` (for text-based checks).
3. Add a test fixture.
4. Update the generated files (documentation and generated code).
1. Create a file for your rule (e.g., `crates/ruff_linter/src/rules/flake8_bugbear/rules/assert_false.rs`).
To define the rule, open up `src/checks.rs`. You'll need to define both a `CheckCode` and
`CheckKind`. As an example, you can grep for `E402` and `ModuleImportNotAtTopOfFile`, and follow the
pattern implemented therein.
1. In that file, define a violation struct (e.g., `pub struct AssertFalse`). You can grep for
`#[derive(ViolationMetadata)]` to see examples.
To trigger the rule, you'll likely want to augment the logic in `src/check_ast.rs`, which defines
the Python AST visitor, responsible for iterating over the abstract syntax tree and collecting
lint-rule violations as it goes. If you need to inspect the AST, you can run `cargo dev print-ast`
with a Python file. Grep for the `Check::new` invocations to understand how other, similar rules
are implemented.
1. In that file, define a function that adds the violation to the diagnostic list as appropriate
(e.g., `pub(crate) fn assert_false`) based on whatever inputs are required for the rule (e.g.,
an `ast::StmtAssert` node).
To add a test fixture, create a file under `resources/test/fixtures`, named to match the `CheckCode`
you defined earlier (e.g., `E402.py`). This file should contain a variety of violations and
non-violations designed to evaluate and demonstrate the behavior of your lint rule. Run Ruff locally
with (e.g.) `cargo run resources/test/fixtures/E402.py --no-cache`. Once you're satisfied with the
output, codify the behavior as a snapshot test by adding a new `testcase` macro to the `mod tests`
section of `src/linter.rs`, like so:
1. Define the logic for invoking the diagnostic in `crates/ruff_linter/src/checkers/ast/analyze` (for
AST-based rules), `crates/ruff_linter/src/checkers/tokens.rs` (for token-based rules),
`crates/ruff_linter/src/checkers/physical_lines.rs` (for text-based rules),
`crates/ruff_linter/src/checkers/filesystem.rs` (for filesystem-based rules), etc. For AST-based rules,
you'll likely want to modify `analyze/statement.rs` (if your rule is based on analyzing
statements, like imports) or `analyze/expression.rs` (if your rule is based on analyzing
expressions, like function calls).
```rust
#[test_case(CheckCode::A001, Path::new("A001.py"); "A001")]
...
```
1. Map the violation struct to a rule code in `crates/ruff_linter/src/codes.rs` (e.g., `B011`). New rules
should be added in `RuleGroup::Preview`.
Then, run `cargo test`. Your test will fail, but you'll be prompted to follow-up with
`cargo insta review`. Accept the generated snapshot, then commit the snapshot file alongside the
rest of your changes.
1. Add proper [testing](#rule-testing-fixtures-and-snapshots) for your rule.
1. Update the generated files (documentation and generated code).
To trigger the violation, you'll likely want to augment the logic in `crates/ruff_linter/src/checkers/ast.rs`
to call your new function at the appropriate time and with the appropriate inputs. The `Checker`
defined therein is a Python AST visitor, which iterates over the AST, building up a semantic model,
and calling out to lint rule analyzer functions as it goes.
If you need to inspect the AST, you can run `cargo dev print-ast` with a Python file. Grep
for the `Diagnostic::new` invocations to understand how other, similar rules are implemented.
Once you're satisfied with your code, add tests for your rule
(see: [rule testing](#rule-testing-fixtures-and-snapshots)), and regenerate the documentation and
associated assets (like our JSON Schema) with `cargo dev generate-all`.
Finally, submit a pull request, and include the category, rule name, and rule code in the title, as
in:
> \[`pycodestyle`\] Implement `redundant-backslash` (`E502`)
#### Rule naming convention
Like Clippy, Ruff's rule names should make grammatical and logical sense when read as "allow
${rule}" or "allow ${rule} items", as in the context of suppression comments.
For example, `AssertFalse` fits this convention: it flags `assert False` statements, and so a
suppression comment would be framed as "allow `assert False`".
As such, rule names should...
- Highlight the pattern that is being linted against, rather than the preferred alternative.
For example, `AssertFalse` guards against `assert False` statements.
- _Not_ contain instructions on how to fix the violation, which instead belong in the rule
documentation and the `fix_title`.
- _Not_ contain a redundant prefix, like `Disallow` or `Banned`, which are already implied by the
convention.
When re-implementing rules from other linters, we prioritize adhering to this convention over
preserving the original rule name.
#### Rule testing: fixtures and snapshots
To test rules, Ruff uses snapshots of Ruff's output for a given file (fixture). Generally, there
will be one file per rule (e.g., `E402.py`), and each file will contain all necessary examples of
both violations and non-violations. `cargo insta review` will generate a snapshot file containing
Ruff's output for each fixture, which you can then commit alongside your changes.
Once you've completed the code for the rule itself, you can define tests with the following steps:
1. Add a Python file to `crates/ruff_linter/resources/test/fixtures/[linter]` that contains the code you
want to test. The file name should match the rule name (e.g., `E402.py`), and it should include
examples of both violations and non-violations.
1. Run Ruff locally against your file and verify the output is as expected. Once you're satisfied
with the output (you see the violations you expect, and no others), proceed to the next step.
For example, if you're adding a new rule named `E402`, you would run:
```shell
cargo run -p ruff -- check crates/ruff_linter/resources/test/fixtures/pycodestyle/E402.py --no-cache --preview --select E402
```
**Note:** Only a subset of rules are enabled by default. When testing a new rule, ensure that
you activate it by adding `--select ${rule_code}` to the command.
1. Add the test to the relevant `crates/ruff_linter/src/rules/[linter]/mod.rs` file. If you're contributing
a rule to a pre-existing set, you should be able to find a similar example to pattern-match
against. If you're adding a new linter, you'll need to create a new `mod.rs` file (see,
e.g., `crates/ruff_linter/src/rules/flake8_bugbear/mod.rs`)
1. Run `cargo test`. Your test will fail, but you'll be prompted to follow-up
with `cargo insta review`. Run `cargo insta review`, review and accept the generated snapshot,
then commit the snapshot file alongside the rest of your changes.
1. Run `cargo test` again to ensure that your test passes.
Finally, to update the documentation, run `cargo dev generate-rules-table` from the repo root. To
update the generated prefix map, run `cargo dev generate-check-code-prefix`. Both of these commands
should be run whenever a new check is added to the codebase.
### Example: Adding a new configuration option
Ruff's user-facing settings live in a few different places.

First, the command-line options are defined via the `Args` struct in `crates/ruff/src/args.rs`.

Second, the `pyproject.toml` options are defined in `crates/ruff_workspace/src/options.rs` (via the
`Options` struct), `crates/ruff_workspace/src/configuration.rs` (via the `Configuration` struct),
and `crates/ruff_workspace/src/settings.rs` (via the `Settings` struct), which then includes
the `LinterSettings` struct as a field. These represent, respectively: the schema used to parse the
`pyproject.toml` file; an internal, intermediate representation; and the final, internal
representation used to power Ruff.

To add a new configuration option, you'll likely want to modify these latter few files (along with
`args.rs`, if appropriate). If you want to pattern-match against an existing example, grep for
`dummy_variable_rgx`, which defines a regular expression to match against acceptable unused
variables (e.g., `_`).

Note that plugin-specific configuration options are defined in their own modules (e.g.,
`Settings` in `crates/ruff_linter/src/flake8_unused_arguments/settings.rs` coupled with
`Flake8UnusedArgumentsOptions` in `crates/ruff_workspace/src/options.rs`).
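For illustration, a new `pyproject.toml` setting typically starts life as an optional field on the
`Options` struct, annotated with metadata that drives the generated documentation and JSON schema.
A sketch with a hypothetical `my-new-option` (the attribute syntax here is illustrative; check the
neighboring fields in `options.rs` for the exact form):

```rust
// Sketch of the shape used in `crates/ruff_workspace/src/options.rs`;
// `my_new_option` is hypothetical, and `#[option]` is Ruff's own attribute
// (provided by a derive on `Options`), shown here for illustration only.
pub struct Options {
    // ... existing options ...
    #[option(
        default = "false",
        value_type = "bool",
        example = r#"
            my-new-option = true
        "#
    )]
    pub my_new_option: Option<bool>,
}
```

The `Configuration` struct then resolves the optional value (merging file-based and CLI-based
sources and applying the default), and the final `Settings` struct stores the resolved value that
the rest of the codebase consumes.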
Finally, regenerate the documentation and generated code with `cargo dev generate-all`.
## MkDocs
To preview any changes to the documentation locally:
1. Install the [Rust toolchain](https://www.rust-lang.org/tools/install).
1. Generate the MkDocs site with:
```shell
uv run --no-project --isolated --with-requirements docs/requirements.txt scripts/generate_mkdocs.py
```
1. Run the development server with:
```shell
# For contributors.
uvx --with-requirements docs/requirements.txt -- mkdocs serve -f mkdocs.public.yml
# For members of the Astral org, which has access to MkDocs Insiders via sponsorship.
uvx --with-requirements docs/requirements-insiders.txt -- mkdocs serve -f mkdocs.insiders.yml
```
The documentation should then be available locally at
[http://127.0.0.1:8000/ruff/](http://127.0.0.1:8000/ruff/).
## Release process
As of now, Ruff has an ad hoc release process: releases are cut with high frequency via GitHub
Actions, which automatically generates the appropriate wheels across architectures and publishes
them to [PyPI](https://pypi.org/project/ruff/).
Ruff follows the [semver](https://semver.org/) versioning standard. However, as pre-1.0 software,
even patch releases may contain [non-backwards-compatible changes](https://semver.org/#spec-item-4).
### Creating a new release
1. Install `uv`: `curl -LsSf https://astral.sh/uv/install.sh | sh`
1. Run `./scripts/release.sh`; this command will:
- Generate a temporary virtual environment with `rooster`
- Generate a changelog entry in `CHANGELOG.md`
- Update versions in `pyproject.toml` and `Cargo.toml`
- Update references to versions in the `README.md` and documentation
- Display contributors for the release
1. The changelog should then be editorialized for consistency
- Often, labels will be missing from pull requests; these will need to be manually organized into the proper section
- Changes should be edited to be user-facing descriptions, avoiding internal details
1. Highlight any breaking changes in `BREAKING_CHANGES.md`
1. Run `cargo check`. This should update the lock file with new versions.
1. Create a pull request with the changelog and version updates
1. Merge the PR
1. Run the [release workflow](https://github.com/astral-sh/ruff/actions/workflows/release.yml) with:
- The new version number (without starting `v`)
1. The release workflow will do the following:
1. Build all the assets. If this fails (even though we tested in step 4), we haven't tagged or
uploaded anything, so you can restart after pushing a fix. If you just need to rerun the build,
make sure you're [re-running all the failed
jobs](https://docs.github.com/en/actions/managing-workflow-runs/re-running-workflows-and-jobs#re-running-failed-jobs-in-a-workflow) and not just a single failed job.
1. Upload to PyPI.
1. Create and push the Git tag (as extracted from `pyproject.toml`). We create the Git tag only
after building the wheels and uploading to PyPI, since we can't delete or modify the tag ([#4468](https://github.com/astral-sh/ruff/issues/4468)).
1. Attach artifacts to draft GitHub release
1. Trigger downstream repositories. This can fail non-catastrophically, as we can run any
downstream jobs manually if needed.
1. Verify the GitHub release:
1. The Changelog should match the content of `CHANGELOG.md`
1. Append the contributors from the `scripts/release.sh` script
1. If needed, [update the schemastore](https://github.com/astral-sh/ruff/blob/main/scripts/update_schemastore.py).
1. An update is needed if `git diff old-version-tag new-version-tag -- ruff.schema.json`
returns a non-empty diff.
1. Once the script has run successfully, follow the link in its output to create a PR.
1. If needed, update the [`ruff-lsp`](https://github.com/astral-sh/ruff-lsp) and
[`ruff-vscode`](https://github.com/astral-sh/ruff-vscode) repositories and follow
the release instructions in those repositories. `ruff-lsp` should always be updated
before `ruff-vscode`.
This step is generally not required for a patch release, but should always be done
for a minor release.
## Ecosystem CI
GitHub Actions will run your changes against a number of real-world projects from GitHub and
report on any linter or formatter differences. You can also run those checks locally via:
```shell
uvx --from ./python/ruff-ecosystem ruff-ecosystem check ruff "./target/debug/ruff"
uvx --from ./python/ruff-ecosystem ruff-ecosystem format ruff "./target/debug/ruff"
```
See the [ruff-ecosystem package](https://github.com/astral-sh/ruff/tree/main/python/ruff-ecosystem) for more details.
## Benchmarking and Profiling
We have several ways of benchmarking and profiling Ruff:
- Our main performance benchmark comparing Ruff with other tools on the CPython codebase
- Microbenchmarks which run the linter or the formatter on individual files. These run on pull requests.
- Profiling the linter on either the microbenchmarks or entire projects
> **Note**
> When running benchmarks, ensure that your CPU is otherwise idle (e.g., close any background
> applications, like web browsers). You may also want to switch your CPU to a "performance"
> mode, if it exists, especially when benchmarking short-lived processes.
### CPython Benchmark
First, clone [CPython](https://github.com/python/cpython). It's a large and diverse Python codebase,
which makes it a good target for benchmarking.
```shell
git clone --branch 3.10 https://github.com/python/cpython.git crates/ruff_linter/resources/test/cpython
```
Install `hyperfine`:
```shell
cargo install hyperfine
```
To benchmark the release build:
```shell
cargo build --release && hyperfine --warmup 10 \
"./target/release/ruff check ./crates/ruff_linter/resources/test/cpython/ --no-cache -e" \
"./target/release/ruff check ./crates/ruff_linter/resources/test/cpython/ -e"
Benchmark 1: ./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache
Time (mean ± σ): 293.8 ms ± 3.2 ms [User: 2384.6 ms, System: 90.3 ms]
Range (min … max): 289.9 ms … 301.6 ms 10 runs
Benchmark 2: ./target/release/ruff ./crates/ruff_linter/resources/test/cpython/
Time (mean ± σ): 48.0 ms ± 3.1 ms [User: 65.2 ms, System: 124.7 ms]
Range (min … max): 45.0 ms … 66.7 ms 62 runs
Summary
'./target/release/ruff ./crates/ruff_linter/resources/test/cpython/' ran
6.12 ± 0.41 times faster than './target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache'
```
To benchmark against the ecosystem's existing tools:
```shell
hyperfine --ignore-failure --warmup 5 \
"./target/release/ruff check ./crates/ruff_linter/resources/test/cpython/ --no-cache" \
"pyflakes crates/ruff_linter/resources/test/cpython" \
"autoflake --recursive --expand-star-imports --remove-all-unused-imports --remove-unused-variables --remove-duplicate-keys resources/test/cpython" \
"pycodestyle crates/ruff_linter/resources/test/cpython" \
"flake8 crates/ruff_linter/resources/test/cpython"
Benchmark 1: ./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache
Time (mean ± σ): 294.3 ms ± 3.3 ms [User: 2467.5 ms, System: 89.6 ms]
Range (min … max): 291.1 ms … 302.8 ms 10 runs
Warning: Ignoring non-zero exit code.
Benchmark 2: pyflakes crates/ruff_linter/resources/test/cpython
Time (mean ± σ): 15.786 s ± 0.143 s [User: 15.560 s, System: 0.214 s]
Range (min … max): 15.640 s … 16.157 s 10 runs
Warning: Ignoring non-zero exit code.
Benchmark 3: autoflake --recursive --expand-star-imports --remove-all-unused-imports --remove-unused-variables --remove-duplicate-keys resources/test/cpython
Time (mean ± σ): 6.175 s ± 0.169 s [User: 54.102 s, System: 1.057 s]
Range (min … max): 5.950 s … 6.391 s 10 runs
Benchmark 4: pycodestyle crates/ruff_linter/resources/test/cpython
Time (mean ± σ): 46.921 s ± 0.508 s [User: 46.699 s, System: 0.202 s]
Range (min … max): 46.171 s … 47.863 s 10 runs
Warning: Ignoring non-zero exit code.
Benchmark 5: flake8 crates/ruff_linter/resources/test/cpython
Time (mean ± σ): 12.260 s ± 0.321 s [User: 102.934 s, System: 1.230 s]
Range (min … max): 11.848 s … 12.933 s 10 runs
Warning: Ignoring non-zero exit code.
Summary
'./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache' ran
20.98 ± 0.62 times faster than 'autoflake --recursive --expand-star-imports --remove-all-unused-imports --remove-unused-variables --remove-duplicate-keys resources/test/cpython'
41.66 ± 1.18 times faster than 'flake8 crates/ruff_linter/resources/test/cpython'
53.64 ± 0.77 times faster than 'pyflakes crates/ruff_linter/resources/test/cpython'
159.43 ± 2.48 times faster than 'pycodestyle crates/ruff_linter/resources/test/cpython'
```
To benchmark a subset of rules, e.g. `LineTooLong` and `DocLineTooLong`:
```shell
cargo build --release && hyperfine --warmup 10 \
"./target/release/ruff check ./crates/ruff_linter/resources/test/cpython/ --no-cache -e --select W505,E501"
```
To create a working environment for the above, run `uv venv --project ./scripts/benchmarks`,
activate the venv, and then run `uv sync --project ./scripts/benchmarks`. All reported benchmarks
were computed using the versions specified by `./scripts/benchmarks/pyproject.toml` on Python 3.11.
To benchmark Pylint, remove the following files from the CPython repository:
```shell
rm Lib/test/bad_coding.py \
Lib/test/bad_coding2.py \
Lib/test/bad_getattr.py \
Lib/test/bad_getattr2.py \
Lib/test/bad_getattr3.py \
Lib/test/badcert.pem \
Lib/test/badkey.pem \
Lib/test/badsyntax_3131.py \
Lib/test/badsyntax_future10.py \
Lib/test/badsyntax_future3.py \
Lib/test/badsyntax_future4.py \
Lib/test/badsyntax_future5.py \
Lib/test/badsyntax_future6.py \
Lib/test/badsyntax_future7.py \
Lib/test/badsyntax_future8.py \
Lib/test/badsyntax_future9.py \
Lib/test/badsyntax_pep3120.py \
Lib/test/test_asyncio/test_runners.py \
Lib/test/test_copy.py \
Lib/test/test_inspect.py \
Lib/test/test_typing.py
```
Then, from `crates/ruff_linter/resources/test/cpython`, run: `time pylint -j 0 -E $(git ls-files '*.py')`. This
will execute Pylint with maximum parallelism and only report errors.
To benchmark Pyupgrade, run the following from `crates/ruff_linter/resources/test/cpython`:
```shell
hyperfine --ignore-failure --warmup 5 --prepare "git reset --hard HEAD" \
"find . -type f -name \"*.py\" | xargs -P 0 pyupgrade --py311-plus"
Benchmark 1: find . -type f -name "*.py" | xargs -P 0 pyupgrade --py311-plus
Time (mean ± σ): 30.119 s ± 0.195 s [User: 28.638 s, System: 0.390 s]
Range (min … max): 29.813 s … 30.356 s 10 runs
```
### Microbenchmarks
The `ruff_benchmark` crate benchmarks the linter and the formatter on individual files.
You can run the benchmarks with
```shell
cargo benchmark
```
`cargo benchmark` is an alias for `cargo bench -p ruff_benchmark --bench linter --bench formatter --`.
#### Benchmark-driven Development
Ruff uses [Criterion.rs](https://bheisler.github.io/criterion.rs/book/) for benchmarks. You can use
`--save-baseline=<name>` to store an initial baseline benchmark (e.g., on `main`) and then use
`--baseline=<name>` to compare against that baseline. Criterion will print a message telling you
if the benchmark improved or regressed compared to that baseline.
```shell
# Run once on your "baseline" code
cargo bench -p ruff_benchmark -- --save-baseline=main
# Then iterate with
cargo bench -p ruff_benchmark -- --baseline=main
```
#### PR Summary
You can use `--save-baseline` and `critcmp` to get a pretty comparison between two recordings.
This is useful to illustrate the improvements of a PR.
```shell
# On main
cargo bench -p ruff_benchmark -- --save-baseline=main
# After applying your changes
cargo bench -p ruff_benchmark -- --save-baseline=pr
critcmp main pr
```
You must install [`critcmp`](https://github.com/BurntSushi/critcmp) for the comparison.
```bash
cargo install critcmp
```
#### Tips
- Use `cargo bench -p ruff_benchmark <filter>` to only run specific benchmarks. For example: `cargo bench -p ruff_benchmark lexer`
to only run the lexer benchmarks.
- Use `cargo bench -p ruff_benchmark -- --quiet` for cleaner output (omitting the statistical details)
- Use `cargo bench -p ruff_benchmark -- --quick` to get faster results (more prone to noise)
### Profiling Projects
You can either use the microbenchmarks from above or a project directory for benchmarking. There
are a lot of profiling tools out there,
[The Rust Performance Book](https://nnethercote.github.io/perf-book/profiling.html) lists some
examples.
#### Linux
Install `perf` and build `ruff_benchmark` with the `profiling` profile and then run it with perf
```shell
cargo bench -p ruff_benchmark --no-run --profile=profiling && perf record --call-graph dwarf -F 9999 cargo bench -p ruff_benchmark --profile=profiling -- --profile-time=1
```
You can also use the `ruff_dev` launcher to run `ruff check` multiple times on a repository to
gather enough samples for a good flamegraph (adjust the sample rate of 999 and the repeat count
of 30 below to your liking)
```shell
cargo build --bin ruff_dev --profile=profiling
perf record -g -F 999 target/profiling/ruff_dev repeat --repeat 30 --exit-zero --no-cache path/to/cpython > /dev/null
```
Then convert the recorded profile
```shell
perf script -F +pid > /tmp/test.perf
```
You can now view the converted file with [firefox profiler](https://profiler.firefox.com/), with a
more in-depth guide [here](https://profiler.firefox.com/docs/#/./guide-perf-profiling)
An alternative is to convert the perf data to `flamegraph.svg` using
[flamegraph](https://github.com/flamegraph-rs/flamegraph) (`cargo install flamegraph`):
```shell
flamegraph --perfdata perf.data --no-inline
```
#### Mac
Install [`cargo-instruments`](https://crates.io/crates/cargo-instruments):
```shell
cargo install cargo-instruments
```
Then run the profiler with
```shell
cargo instruments -t time --bench linter --profile profiling -p ruff_benchmark -- --profile-time=1
```
- `-t`: Specifies what to profile. Useful options are `time` to profile the wall time and `alloc`
for profiling the allocations.
- You may want to pass an additional filter to run a single test file
Otherwise, follow the instructions from the Linux section.
## `cargo dev`
`cargo dev` is a shortcut for `cargo run --package ruff_dev --bin ruff_dev`. You can run some useful
utils with it:
- `cargo dev print-ast <file>`: Print the AST of a python file using Ruff's
[Python parser](https://github.com/astral-sh/ruff/tree/main/crates/ruff_python_parser).
For `if True: pass # comment`, you can see the syntax tree, the byte offsets for start and
stop of each node and also how the `:` token, the comment and whitespace are not represented
anymore:
```text
[
If(
StmtIf {
range: 0..13,
test: Constant(
ExprConstant {
range: 3..7,
value: Bool(
true,
),
kind: None,
},
),
body: [
Pass(
StmtPass {
range: 9..13,
},
),
],
orelse: [],
},
),
]
```
- `cargo dev print-tokens <file>`: Print the tokens that the AST is built upon. Again for
`if True: pass # comment`:
```text
0 If 2
3 True 7
7 Colon 8
9 Pass 13
14 Comment(
"# comment",
) 23
23 Newline 24
```
- `cargo dev print-cst <file>`: Print the CST of a Python file using
[LibCST](https://github.com/Instagram/LibCST), which is used in addition to the RustPython parser
in Ruff. For example, for `if True: pass # comment`, everything, including the whitespace, is represented:
```text
Module {
body: [
Compound(
If(
If {
test: Name(
Name {
value: "True",
lpar: [],
rpar: [],
},
),
body: SimpleStatementSuite(
SimpleStatementSuite {
body: [
Pass(
Pass {
semicolon: None,
},
),
],
leading_whitespace: SimpleWhitespace(
" ",
),
trailing_whitespace: TrailingWhitespace {
whitespace: SimpleWhitespace(
" ",
),
comment: Some(
Comment(
"# comment",
),
),
newline: Newline(
None,
Real,
),
},
},
),
orelse: None,
leading_lines: [],
whitespace_before_test: SimpleWhitespace(
" ",
),
whitespace_after_test: SimpleWhitespace(
"",
),
is_elif: false,
},
),
),
],
header: [],
footer: [],
default_indent: " ",
default_newline: "\n",
has_trailing_newline: true,
encoding: "utf-8",
}
```
- `cargo dev generate-all`: Update `ruff.schema.json`, `docs/configuration.md` and `docs/rules`.
You can also set `RUFF_UPDATE_SCHEMA=1` to update `ruff.schema.json` during `cargo test`.
- `cargo dev generate-cli-help`, `cargo dev generate-docs` and `cargo dev generate-json-schema`:
Update just `docs/configuration.md`, `docs/rules` and `ruff.schema.json` respectively.
- `cargo dev generate-options`: Generate a markdown-compatible table of all `pyproject.toml`
options. Used for <https://docs.astral.sh/ruff/settings/>.
- `cargo dev generate-rules-table`: Generate a markdown-compatible table of all rules. Used for <https://docs.astral.sh/ruff/rules/>.
- `cargo dev round-trip <python file or jupyter notebook>`: Read a Python file or Jupyter Notebook,
parse it, serialize the parsed representation and write it back. Used to check how good our
representation is so that fixes don't rewrite irrelevant parts of a file.
- `cargo dev format_dev`: See the `ruff_python_formatter` README.md.
## Subsystems
### Compilation Pipeline
If we view Ruff as a compiler, in which the inputs are paths to Python files and the outputs are
diagnostics, then our current compilation pipeline proceeds as follows:
1. **File discovery**: Given paths like `foo/`, locate all Python files in any specified subdirectories, taking into account our hierarchical settings system and any `exclude` options.
1. **Package resolution**: Determine the "package root" for every file by traversing over its parent directories and looking for `__init__.py` files.
1. **Cache initialization**: For every "package root", initialize an empty cache.
1. **Analysis**: For every file, in parallel:
1. **Cache read**: If the file is cached (i.e., its modification timestamp hasn't changed since it was last analyzed), short-circuit, and return the cached diagnostics.
1. **Tokenization**: Run the lexer over the file to generate a token stream.
1. **Indexing**: Extract metadata from the token stream, such as: comment ranges, `# noqa` locations, `# isort: off` locations, "doc lines", etc.
1. **Token-based rule evaluation**: Run any lint rules that are based on the contents of the token stream (e.g., commented-out code).
1. **Filesystem-based rule evaluation**: Run any lint rules that are based on the contents of the filesystem (e.g., lack of `__init__.py` file in a package).
1. **Logical line-based rule evaluation**: Run any lint rules that are based on logical lines (e.g., stylistic rules).
1. **Parsing**: Run the parser over the token stream to produce an AST. (This consumes the token stream, so anything that relies on the token stream needs to happen before parsing.)
1. **AST-based rule evaluation**: Run any lint rules that are based on the AST. This includes the vast majority of lint rules. As part of this step, we also build the semantic model for the current file as we traverse over the AST. Some lint rules are evaluated eagerly, as we iterate over the AST, while others are evaluated in a deferred manner (e.g., unused imports, since we can't determine whether an import is unused until we've finished analyzing the entire file), after we've finished the initial traversal.
1. **Import-based rule evaluation**: Run any lint rules that are based on the module's imports (e.g., import sorting). These could, in theory, be included in the AST-based rule evaluation phase — they're just separated for simplicity.
1. **Physical line-based rule evaluation**: Run any lint rules that are based on physical lines (e.g., line-length).
1. **Suppression enforcement**: Remove any violations that are suppressed via `# noqa` directives or `per-file-ignores`.
1. **Cache write**: Write the generated diagnostics to the package cache using the file as a key.
1. **Reporting**: Print diagnostics in the specified format (text, JSON, etc.), to the specified output channel (stdout, a file, etc.).
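The following is a schematic, self-contained sketch of the per-file analysis stage. Every type and
stage here is a simplified stand-in rather than Ruff's real API, but it captures the key ordering
constraint described above: anything that relies on the token stream must run before parsing, since
parsing consumes the tokens.

```rust
use std::collections::HashMap;
use std::path::{Path, PathBuf};

type Diagnostic = String;

fn check_file(path: &Path, cache: &mut HashMap<PathBuf, Vec<Diagnostic>>) -> Vec<Diagnostic> {
    // Cache read: if the file is unchanged since the last run, short-circuit.
    if let Some(hit) = cache.get(path) {
        return hit.clone();
    }
    let source = std::fs::read_to_string(path).unwrap_or_default();
    // Tokenization (stand-in: whitespace-separated "tokens").
    let tokens: Vec<&str> = source.split_whitespace().collect();
    let mut diagnostics = Vec::new();
    // Token-based rules run *before* parsing, since parsing consumes the tokens.
    diagnostics.extend(check_tokens(&tokens));
    // Parsing consumes the token stream and produces the "AST"...
    let ast = tokens;
    // ...on which AST-, import-, and physical-line-based rules then run.
    diagnostics.extend(check_ast(&ast, &source));
    // Suppression enforcement (a crude stand-in for `# noqa` handling), then cache write.
    if source.contains("# noqa") {
        diagnostics.clear();
    }
    cache.insert(path.to_path_buf(), diagnostics.clone());
    diagnostics
}

fn check_tokens(tokens: &[&str]) -> Vec<Diagnostic> {
    tokens
        .iter()
        .filter(|token| **token == "TODO")
        .map(|_| "found a TODO".to_string())
        .collect()
}

fn check_ast(_ast: &[&str], source: &str) -> Vec<Diagnostic> {
    if source.lines().any(|line| line.len() > 88) {
        vec!["line too long".to_string()]
    } else {
        Vec::new()
    }
}

fn main() {
    let mut cache = HashMap::new();
    let diagnostics = check_file(Path::new("example.py"), &mut cache);
    println!("{diagnostics:?}");
}
```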
### Import Categorization
To understand Ruff's import categorization system, we first need to define two concepts:
- "Project root": The directory containing the `pyproject.toml`, `ruff.toml`, or `.ruff.toml` file,
discovered by identifying the "closest" such directory for each Python file. (If you're running
via `ruff --config /path/to/pyproject.toml`, then the current working directory is used as the
"project root".)
- "Package root": The top-most directory defining the Python package that includes a given Python
file. To find the package root for a given Python file, traverse up its parent directories until
you reach a parent directory that doesn't contain an `__init__.py` file (and isn't in a subtree
marked as a [namespace package](https://docs.astral.sh/ruff/settings/#namespace-packages)); take the directory
just before that, i.e., the first directory in the package.
For example, given:
```text
my_project
├── pyproject.toml
└── src
└── foo
├── __init__.py
└── bar
├── __init__.py
└── baz.py
```
Then when analyzing `baz.py`, the project root would be the top-level directory (`./my_project`),
and the package root would be `./my_project/src/foo`.
#### Project root
The project root does not have a significant impact beyond that all relative paths within the loaded
configuration file are resolved relative to the project root.
For example, to indicate that `bar` above is a namespace package (it isn't, but let's run with it),
the `pyproject.toml` would list `namespace-packages = ["./src/bar"]`, which would resolve
to `my_project/src/bar`.
The same logic applies when providing a configuration file via `--config`. In that case, the
_current working directory_ is used as the project root, and so all paths in that configuration file
are resolved relative to the current working directory. (As a general rule, we want to avoid relying
on the current working directory as much as possible, to ensure that Ruff exhibits the same behavior
regardless of where and how you invoke it — but that's hard to avoid in this case.)
Additionally, if a `pyproject.toml` file _extends_ another configuration file, Ruff will still use
the directory containing that `pyproject.toml` file as the project root. For example, if
`./my_project/pyproject.toml` contains:
```toml
[tool.ruff]
extend = "/path/to/pyproject.toml"
```
Then Ruff will use `./my_project` as the project root, even though the configuration file extends
`/path/to/pyproject.toml`. As such, if the configuration file at `/path/to/pyproject.toml` contains
any relative paths, they will be resolved relative to `./my_project`.
If a project uses nested configuration files, then Ruff would detect multiple project roots, one for
each configuration file.
#### Package root
The package root is used to determine a file's "module path". Consider, again, `baz.py`. In that
case, `./my_project/src/foo` was identified as the package root, so the module path for `baz.py`
would resolve to `foo.bar.baz` — as computed by taking the relative path from the package root
(inclusive of the root itself). The module path can be thought of as "the path you would use to
import the module" (e.g., `import foo.bar.baz`).
The package root and module path are used to, e.g., convert relative to absolute imports, and for
import categorization, as described below.
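As a concrete illustration, the module-path computation can be sketched as: take the file's path
relative to the *parent* of the package root, drop the `.py` extension, and join the remaining
components with dots. (This is a hypothetical helper, not Ruff's actual API.)

```rust
use std::path::Path;

/// Sketch of the module-path computation (hypothetical helper, not Ruff's API).
fn module_path(file: &Path, package_root: &Path) -> Option<String> {
    let relative = file.strip_prefix(package_root.parent()?).ok()?;
    let parts: Vec<String> = relative
        .with_extension("") // drop the `.py` suffix
        .components()
        .map(|component| component.as_os_str().to_string_lossy().into_owned())
        .collect();
    Some(parts.join("."))
}

fn main() {
    let file = Path::new("my_project/src/foo/bar/baz.py");
    let package_root = Path::new("my_project/src/foo");
    // The package root itself is included in the module path: `foo.bar.baz`.
    assert_eq!(module_path(file, package_root).as_deref(), Some("foo.bar.baz"));
}
```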
#### Import categorization
When sorting and formatting import blocks, Ruff categorizes every import into one of five
categories:
1. **"Future"**: the import is a `__future__` import. That's easy: just look at the name of the
imported module!
1. **"Standard library"**: the import comes from the Python standard library (e.g., `import os`).
This is easy too: we include a list of all known standard library modules in Ruff itself, so it's
a simple lookup.
1. **"Local folder"**: the import is a relative import (e.g., `from .foo import bar`). This is easy
too: just check if the import includes a `level` (i.e., a dot-prefix).
1. **"First party"**: the import is part of the current project. (More on this below.)
1. **"Third party"**: everything else.
The real challenge lies in determining whether an import is first-party — everything else is either
trivial, or (as in the case of third-party) merely defined as "not first-party".
There are three ways in which an import can be categorized as "first-party":
1. **Explicit settings**: the import is marked as such via the `known-first-party` setting. (This
should generally be seen as an escape hatch.)
1. **Same-package**: the imported module is in the same package as the current file. This gets back
to the importance of the "package root" and the file's "module path". Imagine that we're
analyzing `baz.py` above. If `baz.py` contains any imports that appear to come from the `foo`
package (e.g., `from foo import bar` or `import foo.bar`), they'll be classified as first-party
automatically. This check is as simple as comparing the first segment of the current file's
module path to the first segment of the import.
1. **Source roots**: Ruff supports a [`src`](https://docs.astral.sh/ruff/settings/#src) setting, which
sets the directories to scan when identifying first-party imports. The algorithm is
straightforward: given an import, like `import foo`, iterate over the directories enumerated in
the `src` setting and, for each directory, check for the existence of a subdirectory `foo` or a
file `foo.py`.
By default, `src` is set to the project root, along with the `"src"` subdirectory in the project
root. This ensures that Ruff supports both flat and "src" layouts out of the box.
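Putting the pieces together, here is a self-contained sketch of the categorization order described
above. This is not Ruff's actual implementation (which also handles settings like
`known-third-party`, namespace packages, caching, and more), and all of the function and parameter
names are invented for illustration:

```rust
use std::path::Path;

#[derive(Debug, PartialEq)]
enum ImportSection {
    Future,
    StandardLibrary,
    LocalFolder,
    FirstParty,
    ThirdParty,
}

/// The `src`-based check: does any source root contain a `foo/` directory or a
/// `foo.py` file matching the import's first segment?
fn in_src_roots(head: &str, src_roots: &[&Path]) -> bool {
    src_roots
        .iter()
        .any(|root| root.join(head).is_dir() || root.join(format!("{head}.py")).is_file())
}

fn categorize(
    module: &str,         // e.g., "foo.bar" for `import foo.bar`
    level: u32,           // number of leading dots for relative imports
    module_path: &[&str], // the current file's module path, e.g., ["foo", "bar", "baz"]
    known_first_party: &[&str],
    stdlib: &[&str],
    src_roots: &[&Path],
) -> ImportSection {
    let head = module.split('.').next().unwrap_or(module);
    if level > 0 {
        ImportSection::LocalFolder
    } else if head == "__future__" {
        ImportSection::Future
    } else if stdlib.contains(&head) {
        ImportSection::StandardLibrary
    } else if known_first_party.contains(&head)
        || module_path.first() == Some(&head)
        || in_src_roots(head, src_roots)
    {
        ImportSection::FirstParty
    } else {
        ImportSection::ThirdParty
    }
}

fn main() {
    let module_path = ["foo", "bar", "baz"];
    let stdlib = ["os", "sys"];
    // Same-package: `from foo import bar` inside `foo.bar.baz` is first-party.
    assert_eq!(
        categorize("foo.bar", 0, &module_path, &[], &stdlib, &[]),
        ImportSection::FirstParty
    );
    // Anything unrecognized falls through to third-party.
    assert_eq!(
        categorize("requests", 0, &module_path, &[], &stdlib, &[]),
        ImportSection::ThirdParty
    );
}
```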

Cargo.lock (generated; diff of 5216 lines suppressed because it is too large).

Cargo.toml:
@@ -1,252 +1,80 @@
[workspace]
members = ["crates/*"]
resolver = "2"
members = [
"flake8_to_ruff",
"ruff_dev",
]
[workspace.package]
[package]
name = "ruff"
version = "0.0.128"
edition = "2021"
rust-version = "1.84"
homepage = "https://docs.astral.sh/ruff"
documentation = "https://docs.astral.sh/ruff"
repository = "https://github.com/astral-sh/ruff"
authors = ["Charlie Marsh <charlie.r.marsh@gmail.com>"]
license = "MIT"
[workspace.dependencies]
ruff = { path = "crates/ruff" }
ruff_annotate_snippets = { path = "crates/ruff_annotate_snippets" }
ruff_cache = { path = "crates/ruff_cache" }
ruff_db = { path = "crates/ruff_db", default-features = false }
ruff_diagnostics = { path = "crates/ruff_diagnostics" }
ruff_formatter = { path = "crates/ruff_formatter" }
ruff_graph = { path = "crates/ruff_graph" }
ruff_index = { path = "crates/ruff_index" }
ruff_linter = { path = "crates/ruff_linter" }
ruff_macros = { path = "crates/ruff_macros" }
ruff_notebook = { path = "crates/ruff_notebook" }
ruff_python_ast = { path = "crates/ruff_python_ast" }
ruff_python_codegen = { path = "crates/ruff_python_codegen" }
ruff_python_formatter = { path = "crates/ruff_python_formatter" }
ruff_python_index = { path = "crates/ruff_python_index" }
ruff_python_literal = { path = "crates/ruff_python_literal" }
ruff_python_parser = { path = "crates/ruff_python_parser" }
ruff_python_semantic = { path = "crates/ruff_python_semantic" }
ruff_python_stdlib = { path = "crates/ruff_python_stdlib" }
ruff_python_trivia = { path = "crates/ruff_python_trivia" }
ruff_server = { path = "crates/ruff_server" }
ruff_source_file = { path = "crates/ruff_source_file" }
ruff_text_size = { path = "crates/ruff_text_size" }
red_knot_vendored = { path = "crates/red_knot_vendored" }
ruff_workspace = { path = "crates/ruff_workspace" }
[lib]
name = "ruff"
red_knot_ide = { path = "crates/red_knot_ide" }
red_knot_project = { path = "crates/red_knot_project", default-features = false }
red_knot_python_semantic = { path = "crates/red_knot_python_semantic" }
red_knot_server = { path = "crates/red_knot_server" }
red_knot_test = { path = "crates/red_knot_test" }
aho-corasick = { version = "1.1.3" }
anstream = { version = "0.6.18" }
anstyle = { version = "1.0.10" }
anyhow = { version = "1.0.80" }
assert_fs = { version = "1.1.0" }
argfile = { version = "0.2.0" }
[dependencies]
annotate-snippets = { version = "0.9.1", features = ["color"] }
anyhow = { version = "1.0.66" }
atty = { version = "0.2.14" }
bincode = { version = "1.3.3" }
bitflags = { version = "2.5.0" }
bstr = { version = "1.9.1" }
cachedir = { version = "0.3.1" }
camino = { version = "1.1.7" }
clap = { version = "4.5.3", features = ["derive"] }
clap_complete_command = { version = "0.6.0" }
clearscreen = { version = "4.0.0" }
codspeed-criterion-compat = { version = "2.6.0", default-features = false }
colored = { version = "3.0.0" }
console_error_panic_hook = { version = "0.1.7" }
console_log = { version = "1.0.0" }
countme = { version = "3.0.1" }
compact_str = "0.9.0"
criterion = { version = "0.5.1", default-features = false }
crossbeam = { version = "0.8.4" }
dashmap = { version = "6.0.1" }
dir-test = { version = "0.4.0" }
dunce = { version = "1.0.5" }
drop_bomb = { version = "0.1.5" }
env_logger = { version = "0.11.0" }
etcetera = { version = "0.10.0" }
fern = { version = "0.7.0" }
filetime = { version = "0.2.23" }
getrandom = { version = "0.3.1" }
glob = { version = "0.3.1" }
globset = { version = "0.4.14" }
globwalk = { version = "0.9.1" }
hashbrown = { version = "0.15.0", default-features = false, features = [
"raw-entry",
"equivalent",
"inline-more",
] }
ignore = { version = "0.4.22" }
imara-diff = { version = "0.1.5" }
imperative = { version = "1.0.4" }
indexmap = { version = "2.6.0" }
indicatif = { version = "0.17.8" }
indoc = { version = "2.0.4" }
insta = { version = "1.35.1" }
insta-cmd = { version = "0.6.0" }
is-macro = { version = "0.3.5" }
is-wsl = { version = "0.4.0" }
itertools = { version = "0.14.0" }
jiff = { version = "0.2.0" }
js-sys = { version = "0.3.69" }
jod-thread = { version = "0.1.2" }
libc = { version = "0.2.153" }
libcst = { version = "1.1.0", default-features = false }
bitflags = { version = "1.3.2" }
chrono = { version = "0.4.21", default-features = false, features = ["clock"] }
clap = { version = "4.0.1", features = ["derive"] }
colored = { version = "2.0.0" }
common-path = { version = "1.0.0" }
dirs = { version = "4.0.0" }
fern = { version = "0.6.1" }
filetime = { version = "0.2.17" }
fnv = { version = "1.0.7" }
glob = { version = "0.3.0" }
itertools = { version = "0.10.5" }
libcst = { git = "https://github.com/charliermarsh/LibCST", rev = "a13ec97dd4eb925bde4d426c6e422582793b260c" }
log = { version = "0.4.17" }
lsp-server = { version = "0.7.6" }
lsp-types = { git = "https://github.com/astral-sh/lsp-types.git", rev = "3512a9f", features = [
"proposed",
] }
matchit = { version = "0.8.1" }
memchr = { version = "2.7.1" }
mimalloc = { version = "0.1.39" }
natord = { version = "1.0.9" }
notify = { version = "8.0.0" }
ordermap = { version = "0.5.0" }
path-absolutize = { version = "3.1.1" }
path-slash = { version = "0.2.1" }
pathdiff = { version = "0.2.1" }
pep440_rs = { version = "0.7.1" }
pretty_assertions = "1.3.0"
proc-macro2 = { version = "1.0.79" }
pyproject-toml = { version = "0.13.4" }
quick-junit = { version = "0.5.0" }
quote = { version = "1.0.23" }
rand = { version = "0.9.0" }
rayon = { version = "1.10.0" }
regex = { version = "1.10.2" }
rustc-hash = { version = "2.0.0" }
# When updating salsa, make sure to also update the revision in `fuzz/Cargo.toml`
salsa = { git = "https://github.com/salsa-rs/salsa.git", rev = "87bf6b6c2d5f6479741271da73bd9d30c2580c26" }
schemars = { version = "0.8.16" }
seahash = { version = "4.1.0" }
serde = { version = "1.0.197", features = ["derive"] }
serde-wasm-bindgen = { version = "0.6.4" }
serde_json = { version = "1.0.113" }
serde_test = { version = "1.0.152" }
serde_with = { version = "3.6.0", default-features = false, features = [
"macros",
] }
shellexpand = { version = "3.0.0" }
similar = { version = "2.4.0", features = ["inline"] }
smallvec = { version = "1.13.2" }
snapbox = { version = "0.6.0", features = [
"diff",
"term-svg",
"cmd",
"examples",
] }
static_assertions = "1.1.0"
strum = { version = "0.27.0", features = ["strum_macros"] }
strum_macros = { version = "0.27.0" }
syn = { version = "2.0.55" }
tempfile = { version = "3.9.0" }
test-case = { version = "3.3.1" }
thiserror = { version = "2.0.0" }
tikv-jemallocator = { version = "0.6.0" }
toml = { version = "0.8.11" }
tracing = { version = "0.1.40" }
tracing-flame = { version = "0.2.0" }
tracing-indicatif = { version = "0.3.6" }
tracing-log = { version = "0.2.0" }
tracing-subscriber = { version = "0.3.18", default-features = false, features = [
"env-filter",
"fmt",
] }
tracing-tree = { version = "0.4.0" }
tryfn = { version = "0.2.1" }
typed-arena = { version = "2.0.2" }
unic-ucd-category = { version = "0.9" }
unicode-ident = { version = "1.0.12" }
unicode-width = { version = "0.2.0" }
unicode_names2 = { version = "1.2.2" }
unicode-normalization = { version = "0.1.23" }
url = { version = "2.5.0" }
uuid = { version = "1.6.1", features = [
"v4",
"fast-rng",
"macro-diagnostics",
"js",
] }
nohash-hasher = { version = "0.2.0" }
notify = { version = "4.0.17" }
num-bigint = { version = "0.4.3" }
once_cell = { version = "1.16.0" }
path-absolutize = { version = "3.0.14", features = ["once_cell_cache", "use_unix_paths_on_wasm"] }
rayon = { version = "1.5.3" }
regex = { version = "1.6.0" }
ropey = { version = "1.5.0", features = ["cr_lines", "simd"], default-features = false }
rustpython-ast = { features = ["unparse"], git = "https://github.com/RustPython/RustPython.git", rev = "27bf82a2251d7e6ac6cd75e6ad51be12a53d84bb" }
rustpython-common = { git = "https://github.com/RustPython/RustPython.git", rev = "27bf82a2251d7e6ac6cd75e6ad51be12a53d84bb" }
rustpython-parser = { features = ["lalrpop"], git = "https://github.com/RustPython/RustPython.git", rev = "27bf82a2251d7e6ac6cd75e6ad51be12a53d84bb" }
serde = { version = "1.0.147", features = ["derive"] }
serde_json = { version = "1.0.87" }
strum = { version = "0.24.1", features = ["strum_macros"] }
strum_macros = { version = "0.24.3" }
textwrap = { version = "0.16.0" }
titlecase = { version = "2.2.1" }
toml = { version = "0.5.9" }
update-informer = { version = "0.5.0", default-features = false, features = ["pypi"], optional = true }
walkdir = { version = "2.3.2" }
wasm-bindgen = { version = "0.2.92" }
wasm-bindgen-test = { version = "0.3.42" }
wild = { version = "2" }
zip = { version = "0.6.6", default-features = false }
[workspace.metadata.cargo-shear]
ignored = ["getrandom"]
[target.'cfg(not(target_family = "wasm"))'.dependencies]
cacache = { version = "10.0.1" } # uses async-std
clearscreen = { version = "1.0.10" } # uses which
# https://docs.rs/getrandom/0.2.7/getrandom/#webassembly-support
# For (future) wasm-pack support
[target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dependencies]
getrandom = { version = "0.2.7", features = ["js"] }
[workspace.lints.rust]
unsafe_code = "warn"
unreachable_pub = "warn"
unexpected_cfgs = { level = "warn", check-cfg = [
"cfg(fuzzing)",
"cfg(codspeed)",
] }
[dev-dependencies]
assert_cmd = { version = "2.0.4" }
criterion = { version = "0.4.0" }
insta = { version = "1.19.1", features = ["yaml"] }
test-case = { version = "2.2.2" }
[workspace.lints.clippy]
pedantic = { level = "warn", priority = -2 }
# Allowed pedantic lints
char_lit_as_u8 = "allow"
collapsible_else_if = "allow"
collapsible_if = "allow"
implicit_hasher = "allow"
map_unwrap_or = "allow"
match_same_arms = "allow"
missing_errors_doc = "allow"
missing_panics_doc = "allow"
module_name_repetitions = "allow"
must_use_candidate = "allow"
similar_names = "allow"
single_match_else = "allow"
too_many_lines = "allow"
needless_continue = "allow" # An explicit continue can be more readable, especially if the alternative is an empty block.
# Without the hashes we run into a `rustfmt` bug in some snapshot tests, see #13250
needless_raw_string_hashes = "allow"
# Disallowed restriction lints
print_stdout = "warn"
print_stderr = "warn"
dbg_macro = "warn"
empty_drop = "warn"
empty_structs_with_brackets = "warn"
exit = "warn"
get_unwrap = "warn"
rc_buffer = "warn"
rc_mutex = "warn"
rest_pat_in_fully_bound_structs = "warn"
# nursery rules
redundant_clone = "warn"
debug_assert_with_mut_call = "warn"
unused_peekable = "warn"
# Diagnostics are not actionable: Enable once https://github.com/rust-lang/rust-clippy/issues/13774 is resolved.
large_stack_arrays = "allow"
[features]
default = ["update-informer"]
update-informer = ["dep:update-informer"]
[profile.release]
# Note that we set these explicitly, and these values
# were chosen based on a trade-off between compile times
# and runtime performance[1].
#
# [1]: https://github.com/astral-sh/ruff/pull/9031
panic = "abort"
lto = "thin"
codegen-units = 16
# Some crates don't change as much but benefit more from
# more expensive optimization passes, so we selectively
# decrease codegen-units in some cases.
[profile.release.package.ruff_python_parser]
codegen-units = 1
[profile.release.package.ruff_python_ast]
codegen-units = 1
opt-level = 3
[profile.dev.package.insta]
opt-level = 3
@@ -254,87 +82,6 @@ opt-level = 3
[profile.dev.package.similar]
opt-level = 3
# Reduce complexity of a parser function that would trigger a locals limit in a wasm tool.
# https://github.com/bytecodealliance/wasm-tools/blob/b5c3d98e40590512a3b12470ef358d5c7b983b15/crates/wasmparser/src/limits.rs#L29
[profile.dev.package.ruff_python_parser]
opt-level = 1
# Use the `--profile profiling` flag to show symbols in release mode.
# e.g. `cargo build --profile profiling`
[profile.profiling]
inherits = "release"
debug = 1
# The profile that 'cargo dist' will build with.
[profile.dist]
inherits = "release"
# Config for 'dist'
[workspace.metadata.dist]
# The preferred dist version to use in CI (Cargo.toml SemVer syntax)
cargo-dist-version = "0.28.4-prerelease.1"
# CI backends to support
ci = "github"
# The installers to generate for each app
installers = ["shell", "powershell"]
# The archive format to use for windows builds (defaults .zip)
windows-archive = ".zip"
# The archive format to use for non-windows builds (defaults .tar.xz)
unix-archive = ".tar.gz"
# Target platforms to build apps for (Rust target-triple syntax)
targets = [
"aarch64-apple-darwin",
"aarch64-pc-windows-msvc",
"aarch64-unknown-linux-gnu",
"aarch64-unknown-linux-musl",
"arm-unknown-linux-musleabihf",
"armv7-unknown-linux-gnueabihf",
"armv7-unknown-linux-musleabihf",
"i686-pc-windows-msvc",
"i686-unknown-linux-gnu",
"i686-unknown-linux-musl",
"powerpc64-unknown-linux-gnu",
"powerpc64le-unknown-linux-gnu",
"s390x-unknown-linux-gnu",
"x86_64-apple-darwin",
"x86_64-pc-windows-msvc",
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
]
# Whether to auto-include files like READMEs, LICENSEs, and CHANGELOGs (default true)
auto-includes = false
# Whether dist should create a Github Release or use an existing draft
create-release = true
# Which actions to run on pull requests
pr-run-mode = "skip"
# Whether CI should trigger releases with dispatches instead of tag pushes
dispatch-releases = true
# Which phase dist should use to create the GitHub release
github-release = "announce"
# Whether CI should include auto-generated code to build local artifacts
build-local-artifacts = false
# Local artifacts jobs to run in CI
local-artifacts-jobs = ["./build-binaries", "./build-docker"]
# Publish jobs to run in CI
publish-jobs = ["./publish-pypi", "./publish-wasm"]
# Post-announce jobs to run in CI
post-announce-jobs = [
"./notify-dependents",
"./publish-docs",
"./publish-playground",
]
# Custom permissions for GitHub Jobs
github-custom-job-permissions = { "build-docker" = { packages = "write", contents = "read" }, "publish-wasm" = { contents = "read", id-token = "write", packages = "write" } }
# Whether to install an updater program
install-updater = false
# Path that installers should place binaries in
install-path = ["$XDG_BIN_HOME/", "$XDG_DATA_HOME/../bin", "~/.local/bin"]
[workspace.metadata.dist.github-custom-runners]
global = "depot-ubuntu-latest-4"
[workspace.metadata.dist.github-action-commits]
"actions/checkout" = "11bd71901bbe5b1630ceea73d27597364c9af683" # v4
"actions/upload-artifact" = "ea165f8d65b6e75b540449e92b4886f43607fa02" # v4.6.2
"actions/download-artifact" = "95815c38cf2ff2164869cbab79da8d1f422bc89e" # v4.2.1
"actions/attest-build-provenance" = "c074443f1aee8d4aeeae555aebba3282517141b2" #v2.2.3
[[bench]]
name = "source_code_locator"
harness = false

Dockerfile:
@@ -1,38 +0,0 @@
FROM --platform=$BUILDPLATFORM ubuntu AS build
ENV HOME="/root"
WORKDIR $HOME
RUN apt update && apt install -y build-essential curl python3-venv
# Setup zig as cross compiling linker
RUN python3 -m venv $HOME/.venv
RUN .venv/bin/pip install cargo-zigbuild
ENV PATH="$HOME/.venv/bin:$PATH"
# Install rust
ARG TARGETPLATFORM
RUN case "$TARGETPLATFORM" in \
"linux/arm64") echo "aarch64-unknown-linux-musl" > rust_target.txt ;; \
"linux/amd64") echo "x86_64-unknown-linux-musl" > rust_target.txt ;; \
*) exit 1 ;; \
esac
# Update rustup whenever we bump the rust version
COPY rust-toolchain.toml rust-toolchain.toml
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --target $(cat rust_target.txt) --profile minimal --default-toolchain none
ENV PATH="$HOME/.cargo/bin:$PATH"
# Installs the correct toolchain version from rust-toolchain.toml and then the musl target
RUN rustup target add $(cat rust_target.txt)
# Build
COPY crates crates
COPY Cargo.toml Cargo.toml
COPY Cargo.lock Cargo.lock
RUN cargo zigbuild --bin ruff --target $(cat rust_target.txt) --release
RUN cp target/$(cat rust_target.txt)/release/ruff /ruff
# TODO: Optimize binary size, with a version that also works when cross compiling
# RUN strip --strip-all /ruff
FROM scratch
COPY --from=build /ruff /ruff
WORKDIR /io
ENTRYPOINT ["/ruff"]

LICENSE (diff of 1305 lines suppressed because it is too large).
README.md (diff of 1480 lines suppressed because it is too large).

SECURITY.md:
@@ -1,15 +0,0 @@
# Security policy
## Reporting a vulnerability
If you have found a possible vulnerability, please email `security at astral dot sh`.
## Bug bounties
While we sincerely appreciate and encourage reports of suspected security problems, please note that
Astral does not currently run any bug bounty programs.
## Vulnerability disclosures
Critical vulnerabilities will be disclosed via GitHub's
[security advisory](https://github.com/astral-sh/ruff/security) system.

typos configuration:
@@ -1,33 +0,0 @@
[files]
# https://github.com/crate-ci/typos/issues/868
extend-exclude = [
"crates/red_knot_vendored/vendor/**/*",
"**/resources/**/*",
"**/snapshots/**/*",
]
[default.extend-words]
"arange" = "arange" # e.g. `numpy.arange`
hel = "hel"
whos = "whos"
spawnve = "spawnve"
ned = "ned"
pn = "pn" # `import panel as pn` is a thing
poit = "poit"
BA = "BA" # acronym for "Bad Allowed", used in testing.
jod = "jod" # e.g., `jod-thread`
Numer = "Numer" # Library name 'NumerBlox' in "Who's Using Ruff?"
[default]
extend-ignore-re = [
# Line ignore with trailing "spellchecker:disable-line"
"(?Rm)^.*#\\s*spellchecker:disable-line$",
"LICENSEs",
# Various third party dependencies uses `typ` as struct field names (e.g., lsp_types::LogMessageParams)
"typ",
# TODO: Remove this once the `TYP` redirects are removed from `rule_redirects.rs`
"TYP",
]
[default.extend-identifiers]
"FrIeNdLy" = "FrIeNdLy"

badge JSON asset:
@@ -1,8 +0,0 @@
{
"label": "code style",
"message": "Ruff",
"logoSvg": "<svg width=\"510\" height=\"622\" viewBox=\"0 0 510 622\" fill=\"none\" xmlns=\"http://www.w3.org/2000/svg\"><path fill-rule=\"evenodd\" clip-rule=\"evenodd\" d=\"M206.701 0C200.964 0 196.314 4.64131 196.314 10.3667V41.4667C196.314 47.192 191.663 51.8333 185.927 51.8333H156.843C151.107 51.8333 146.456 56.4746 146.456 62.2V145.133C146.456 150.859 141.806 155.5 136.069 155.5H106.986C101.249 155.5 96.5988 160.141 96.5988 165.867V222.883C96.5988 228.609 91.9484 233.25 86.2118 233.25H57.1283C51.3917 233.25 46.7413 237.891 46.7413 243.617V300.633C46.7413 306.359 42.0909 311 36.3544 311H10.387C4.6504 311 0 315.641 0 321.367V352.467C0 358.192 4.6504 362.833 10.387 362.833H145.418C151.154 362.833 155.804 367.475 155.804 373.2V430.217C155.804 435.942 151.154 440.583 145.418 440.583H116.334C110.597 440.583 105.947 445.225 105.947 450.95V507.967C105.947 513.692 101.297 518.333 95.5601 518.333H66.4766C60.74 518.333 56.0896 522.975 56.0896 528.7V611.633C56.0896 617.359 60.74 622 66.4766 622H149.572C155.309 622 159.959 617.359 159.959 611.633V570.167H201.507C207.244 570.167 211.894 565.525 211.894 559.8V528.7C211.894 522.975 216.544 518.333 222.281 518.333H251.365C257.101 518.333 261.752 513.692 261.752 507.967V476.867C261.752 471.141 266.402 466.5 272.138 466.5H301.222C306.959 466.5 311.609 461.859 311.609 456.133V425.033C311.609 419.308 316.259 414.667 321.996 414.667H351.079C356.816 414.667 361.466 410.025 361.466 404.3V373.2C361.466 367.475 366.117 362.833 371.853 362.833H400.937C406.673 362.833 411.324 358.192 411.324 352.467V321.367C411.324 315.641 415.974 311 421.711 311H450.794C456.531 311 461.181 306.359 461.181 300.633V217.7C461.181 211.975 456.531 207.333 450.794 207.333H420.672C414.936 207.333 410.285 202.692 410.285 196.967V165.867C410.285 160.141 414.936 155.5 420.672 155.5H449.756C455.492 155.5 460.143 150.859 460.143 145.133V114.033C460.143 108.308 464.793 103.667 470.53 103.667H499.613C505.35 103.667 510 99.0253 510 93.3V10.3667C510 4.64132 505.35 0 499.613 0H206.701ZM168.269 440.583C162.532 440.583 157.882 445.225 157.882 450.95V507.967C157.882 513.692 153.231 518.333 147.495 518.333H118.411C112.675 518.333 108.024 522.975 108.024 528.7V559.8C108.024 565.525 112.675 570.167 118.411 570.167H159.959V528.7C159.959 522.975 164.61 518.333 170.346 518.333H199.43C205.166 518.333 209.817 513.692 209.817 507.967V476.867C209.817 471.141 214.467 466.5 220.204 466.5H249.287C255.024 466.5 259.674 461.859 259.674 456.133V425.033C259.674 419.308 264.325 414.667 270.061 414.667H299.145C304.881 414.667 309.532 410.025 309.532 404.3V373.2C309.532 367.475 314.182 362.833 319.919 362.833H349.002C354.739 362.833 359.389 358.192 359.389 352.467V321.367C359.389 315.641 364.039 311 369.776 311H398.859C404.596 311 409.246 306.359 409.246 300.633V269.533C409.246 263.808 404.596 259.167 398.859 259.167H318.88C313.143 259.167 308.493 254.525 308.493 248.8V217.7C308.493 211.975 313.143 207.333 318.88 207.333H347.963C353.7 207.333 358.35 202.692 358.35 196.967V165.867C358.35 160.141 363.001 155.5 368.737 155.5H397.821C403.557 155.5 408.208 150.859 408.208 145.133V114.033C408.208 108.308 412.858 103.667 418.595 103.667H447.678C453.415 103.667 458.065 99.0253 458.065 93.3V62.2C458.065 56.4746 453.415 51.8333 447.678 51.8333H208.778C203.041 51.8333 198.391 56.4746 198.391 62.2V145.133C198.391 150.859 193.741 155.5 188.004 155.5H158.921C153.184 155.5 148.534 160.141 148.534 165.867V222.883C148.534 228.609 143.883 233.25 138.147 233.25H109.063C103.327 233.25 98.6762 237.891 98.6762 243.617V300.633C98.6762 
306.359 103.327 311 109.063 311H197.352C203.089 311 207.739 315.641 207.739 321.367V430.217C207.739 435.942 203.089 440.583 197.352 440.583H168.269Z\" fill=\"#D7FF64\"/></svg>",
"logoWidth": 10,
"labelColor": "grey",
"color": "#261230"
}

badge JSON asset:
@@ -1,8 +0,0 @@
{
"label": "",
"message": "Ruff",
"logoSvg": "<svg xmlns=\"http://www.w3.org/2000/svg\" width=\"128\" height=\"128\"><path d=\"M115.36 61.84 70.22 50.49 114.45 2.4a1.222 1.222 0 0 0-1.54-1.87L12.3 61.98c-.41.25-.64.72-.57 1.2.06.48.4.87.87 1.01l45.07 13.25-44.29 48.16c-.42.46-.44 1.15-.04 1.61.24.29.58.44.94.44.22 0 .45-.06.65-.19l100.78-63.41c.42-.26.64-.75.56-1.22-.08-.49-.43-.88-.91-.99z\" style=\"fill:#fcc21b\"/></svg>",
"logoWidth": 10,
"labelColor": "grey",
"color": "#E15759"
}

badge JSON asset:
@@ -1,8 +0,0 @@
{
"label": "",
"message": "Ruff",
"logoSvg": "<svg xmlns=\"http://www.w3.org/2000/svg\" width=\"109\" height=\"132\" fill=\"none\"><g filter=\"url(#a)\"><path fill=\"#FCC21B\" d=\"m103.642 61.492-45.14-11.35 44.23-48.09a1.222 1.222 0 0 0-1.54-1.87L.582 61.632c-.41.25-.64.72-.57 1.2.06.48.4.87.87 1.01l45.07 13.25-44.29 48.16c-.42.46-.44 1.15-.04 1.61.24.29.58.44.94.44.22 0 .45-.06.65-.19l100.78-63.41c.42-.26.64-.75.56-1.22-.08-.49-.43-.88-.91-.99Z\"/></g><defs><filter id=\"a\" width=\"108.569\" height=\"131.302\" x=\"0\" y=\"0\" color-interpolation-filters=\"sRGB\" filterUnits=\"userSpaceOnUse\"><feFlood flood-opacity=\"0\" result=\"BackgroundImageFix\"/><feColorMatrix in=\"SourceAlpha\" result=\"hardAlpha\" values=\"0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0\"/><feOffset dx=\"4\" dy=\"4\"/><feComposite in2=\"hardAlpha\" operator=\"out\"/><feColorMatrix values=\"0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0\"/><feBlend in2=\"BackgroundImageFix\" result=\"effect1_dropShadow_7_4\"/><feBlend in=\"SourceGraphic\" in2=\"effect1_dropShadow_7_4\" result=\"shape\"/></filter></defs></svg>",
"logoWidth": 10,
"labelColor": "grey",
"color": "#E15759"
}

badge JSON asset:
@@ -1,8 +0,0 @@
{
"label": "",
"message": "Ruff",
"logoSvg": "<svg width=\"510\" height=\"622\" viewBox=\"0 0 510 622\" fill=\"none\" xmlns=\"http://www.w3.org/2000/svg\"><path fill-rule=\"evenodd\" clip-rule=\"evenodd\" d=\"M206.701 0C200.964 0 196.314 4.64131 196.314 10.3667V41.4667C196.314 47.192 191.663 51.8333 185.927 51.8333H156.843C151.107 51.8333 146.456 56.4746 146.456 62.2V145.133C146.456 150.859 141.806 155.5 136.069 155.5H106.986C101.249 155.5 96.5988 160.141 96.5988 165.867V222.883C96.5988 228.609 91.9484 233.25 86.2118 233.25H57.1283C51.3917 233.25 46.7413 237.891 46.7413 243.617V300.633C46.7413 306.359 42.0909 311 36.3544 311H10.387C4.6504 311 0 315.641 0 321.367V352.467C0 358.192 4.6504 362.833 10.387 362.833H145.418C151.154 362.833 155.804 367.475 155.804 373.2V430.217C155.804 435.942 151.154 440.583 145.418 440.583H116.334C110.597 440.583 105.947 445.225 105.947 450.95V507.967C105.947 513.692 101.297 518.333 95.5601 518.333H66.4766C60.74 518.333 56.0896 522.975 56.0896 528.7V611.633C56.0896 617.359 60.74 622 66.4766 622H149.572C155.309 622 159.959 617.359 159.959 611.633V570.167H201.507C207.244 570.167 211.894 565.525 211.894 559.8V528.7C211.894 522.975 216.544 518.333 222.281 518.333H251.365C257.101 518.333 261.752 513.692 261.752 507.967V476.867C261.752 471.141 266.402 466.5 272.138 466.5H301.222C306.959 466.5 311.609 461.859 311.609 456.133V425.033C311.609 419.308 316.259 414.667 321.996 414.667H351.079C356.816 414.667 361.466 410.025 361.466 404.3V373.2C361.466 367.475 366.117 362.833 371.853 362.833H400.937C406.673 362.833 411.324 358.192 411.324 352.467V321.367C411.324 315.641 415.974 311 421.711 311H450.794C456.531 311 461.181 306.359 461.181 300.633V217.7C461.181 211.975 456.531 207.333 450.794 207.333H420.672C414.936 207.333 410.285 202.692 410.285 196.967V165.867C410.285 160.141 414.936 155.5 420.672 155.5H449.756C455.492 155.5 460.143 150.859 460.143 145.133V114.033C460.143 108.308 464.793 103.667 470.53 103.667H499.613C505.35 103.667 510 99.0253 510 93.3V10.3667C510 4.64132 505.35 0 499.613 0H206.701ZM168.269 440.583C162.532 440.583 157.882 445.225 157.882 450.95V507.967C157.882 513.692 153.231 518.333 147.495 518.333H118.411C112.675 518.333 108.024 522.975 108.024 528.7V559.8C108.024 565.525 112.675 570.167 118.411 570.167H159.959V528.7C159.959 522.975 164.61 518.333 170.346 518.333H199.43C205.166 518.333 209.817 513.692 209.817 507.967V476.867C209.817 471.141 214.467 466.5 220.204 466.5H249.287C255.024 466.5 259.674 461.859 259.674 456.133V425.033C259.674 419.308 264.325 414.667 270.061 414.667H299.145C304.881 414.667 309.532 410.025 309.532 404.3V373.2C309.532 367.475 314.182 362.833 319.919 362.833H349.002C354.739 362.833 359.389 358.192 359.389 352.467V321.367C359.389 315.641 364.039 311 369.776 311H398.859C404.596 311 409.246 306.359 409.246 300.633V269.533C409.246 263.808 404.596 259.167 398.859 259.167H318.88C313.143 259.167 308.493 254.525 308.493 248.8V217.7C308.493 211.975 313.143 207.333 318.88 207.333H347.963C353.7 207.333 358.35 202.692 358.35 196.967V165.867C358.35 160.141 363.001 155.5 368.737 155.5H397.821C403.557 155.5 408.208 150.859 408.208 145.133V114.033C408.208 108.308 412.858 103.667 418.595 103.667H447.678C453.415 103.667 458.065 99.0253 458.065 93.3V62.2C458.065 56.4746 453.415 51.8333 447.678 51.8333H208.778C203.041 51.8333 198.391 56.4746 198.391 62.2V145.133C198.391 150.859 193.741 155.5 188.004 155.5H158.921C153.184 155.5 148.534 160.141 148.534 165.867V222.883C148.534 228.609 143.883 233.25 138.147 233.25H109.063C103.327 233.25 98.6762 237.891 98.6762 243.617V300.633C98.6762 
306.359 103.327 311 109.063 311H197.352C203.089 311 207.739 315.641 207.739 321.367V430.217C207.739 435.942 203.089 440.583 197.352 440.583H168.269Z\" fill=\"#D7FF64\"/></svg>",
"logoWidth": 10,
"labelColor": "grey",
"color": "#261230"
}

(binary image not shown; 3.8 KiB)

SVG asset:
@@ -1,24 +0,0 @@
<svg width="139" height="24" viewBox="0 0 139 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<rect width="138.764" height="24" rx="2.18182" fill="#261230"/>
<path
d="M8.72798 15.2726H9.91316V11.8697L9.6887 10.4062L9.8952 10.3343L12.1309 15.1649L14.3486 10.3343L14.5461 10.4062L14.3486 11.8607V15.2726H15.5248V8.72714H13.9535L12.2117 12.7137H12.0142L10.2723 8.72714H8.72798V15.2726Z"
fill="#D7FF64"/>
<path
d="M22.3432 15.2726H23.6631L21.3017 8.72714H19.7574L17.4589 15.2726H18.7069L19.1558 13.9797H21.9033L22.3432 15.2726ZM19.497 13.0279L19.901 11.8607L20.4308 10.0021H20.6463L21.176 11.8607L21.5711 13.0279H19.497Z"
fill="#D7FF64"/>
<path
d="M25.4209 15.2726H28.1234C30.1077 15.2726 30.9876 14.1413 30.9876 12.0044C30.9876 9.92131 30.1706 8.72714 28.1234 8.72714H25.4209V15.2726ZM26.624 14.2131V9.77765H28.0965C29.147 9.77765 29.7306 10.1907 29.7306 11.4477V12.5521C29.7306 13.6923 29.2817 14.2131 28.0965 14.2131H26.624Z"
fill="#D7FF64"/>
<path
d="M33.079 15.2726H37.6491V14.2131H34.2822V12.3815H37.2002V11.3938H34.2822V9.77765H37.6491V8.72714H33.079V15.2726Z"
fill="#D7FF64"/>
<path
d="M42.923 15.2726H46.2451C47.4572 15.2726 48.2025 14.5812 48.2025 13.5487C48.2025 12.7675 47.8343 12.175 47.0532 11.9954V11.7799C47.6637 11.5734 48.0319 11.0436 48.0319 10.3433C48.0319 9.38259 47.4572 8.72714 46.281 8.72714H42.923V15.2726ZM44.0992 11.4746V9.65195H45.9578C46.4875 9.65195 46.7928 9.92131 46.7928 10.3523V10.7653C46.7928 11.1873 46.4965 11.4746 45.9758 11.4746H44.0992ZM44.0992 14.3388V12.3904H46.0296C46.5863 12.3904 46.9365 12.6418 46.9365 13.1806V13.5666C46.9365 14.0425 46.5684 14.3388 45.9309 14.3388H44.0992Z"
fill="#D7FF64"/>
<path
d="M49.6959 8.72714L52.174 12.579V14.1952H50.1898V15.2726H53.3772V12.579L55.8553 8.72714H54.4456L53.5119 10.2535L52.8744 11.3759H52.6679L52.0483 10.2715L51.1056 8.72714H49.6959Z"
fill="#D7FF64"/>
<path fill-rule="evenodd" clip-rule="evenodd"
d="M74.1824 7.63626C74.1824 7.03377 74.6708 6.54535 75.2733 6.54535H84.0006C84.6031 6.54535 85.0915 7.03377 85.0915 7.63626V9.81808H80.0733V8.94535H79.2006V10.6908H84.0006C84.6031 10.6908 85.0915 11.1792 85.0915 11.7817V16.3635C85.0915 16.966 84.6031 17.4544 84.0006 17.4544H75.2733C74.6708 17.4544 74.1824 16.966 74.1824 16.3635V14.1817L79.2006 14.1817V15.0544H80.0733V13.309L75.2733 13.309C74.6708 13.309 74.1824 12.8206 74.1824 12.2181V7.63626ZM63.4912 6.54545C62.8887 6.54545 62.4003 7.03387 62.4003 7.63636V17.4545H67.4185V14.1818H68.2912V17.4545H73.3094V7.63636C73.3094 7.03387 72.821 6.54545 72.2185 6.54545H63.4912ZM69.164 10.6909V11.5636H66.5458V10.6909H69.164ZM110.619 6.54545C110.016 6.54545 109.528 7.03387 109.528 7.63636V17.4545H114.546V14.1818H115.419V17.4545H120.437V7.63636C120.437 7.03387 119.948 6.54545 119.346 6.54545H110.619ZM116.291 10.6909V11.5636H113.673V10.6909H116.291ZM91.8549 8.29091H96.8731V11.3455C96.8731 11.9479 96.3847 12.4364 95.7822 12.4364H91.8549V13.3091H96.8731V17.4545H87.9276C87.3251 17.4545 86.8367 16.9661 86.8367 16.3636V12.4364H85.964V8.29091H86.8367V6.54545H91.8549V8.29091ZM108.655 7.63636C108.655 7.03387 108.166 6.54545 107.564 6.54545H97.7458V17.4545H102.764V14.1818H103.637V17.4545H108.655V13.3091H106.473V12.4364H107.564C108.166 12.4364 108.655 11.9479 108.655 11.3455V7.63636ZM104.509 10.6909V11.5636H101.891V10.6909H104.509ZM132.218 13.3091L126.327 13.3091V6.54547L121.309 6.54547V17.4546H132.218V13.3091Z"
fill="#D7FF64"/>
</svg>

Before: 3.4 KiB

View File

@@ -0,0 +1,18 @@
use std::fs;
use std::path::Path;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use ropey::Rope;
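// Benchmarks constructing a rope from the fixture file and converting a line index to a character offset.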
fn criterion_benchmark(c: &mut Criterion) {
let contents = fs::read_to_string(Path::new("resources/test/fixtures/D.py")).unwrap();
c.bench_function("rope", |b| {
b.iter(|| {
let rope = Rope::from_str(black_box(&contents));
rope.line_to_char(black_box(4));
})
});
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);

View File

@@ -1,25 +0,0 @@
doc-valid-idents = [
"..",
"CodeQL",
"FastAPI",
"IPython",
"LangChain",
"LibCST",
"McCabe",
"NumPy",
"SCREAMING_SNAKE_CASE",
"SQLAlchemy",
"StackOverflow",
"PyCharm",
"SNMPv1",
"SNMPv2",
"SNMPv3",
"PyFlakes"
]
ignore-interior-mutability = [
# Interned is read-only. The wrapped `Rc` never gets updated.
"ruff_formatter::format_element::Interned",
# The expression is read-only.
"ruff_python_ast::hashable::HashableExpr",
]

View File

@@ -1,49 +0,0 @@
[package]
name = "red_knot"
version = "0.0.0"
edition.workspace = true
rust-version.workspace = true
homepage.workspace = true
documentation.workspace = true
repository.workspace = true
authors.workspace = true
license.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
red_knot_python_semantic = { workspace = true }
red_knot_project = { workspace = true, features = ["zstd"] }
red_knot_server = { workspace = true }
ruff_db = { workspace = true, features = ["os", "cache"] }
ruff_python_ast = { workspace = true }
anyhow = { workspace = true }
argfile = { workspace = true }
clap = { workspace = true, features = ["wrap_help"] }
colored = { workspace = true }
countme = { workspace = true, features = ["enable"] }
crossbeam = { workspace = true }
ctrlc = { version = "3.4.4" }
jiff = { workspace = true }
rayon = { workspace = true }
salsa = { workspace = true }
tracing = { workspace = true, features = ["release_max_level_debug"] }
tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] }
tracing-flame = { workspace = true }
tracing-tree = { workspace = true }
wild = { workspace = true }
[dev-dependencies]
ruff_db = { workspace = true, features = ["testing"] }
ruff_python_trivia = { workspace = true }
insta = { workspace = true, features = ["filters"] }
insta-cmd = { workspace = true }
filetime = { workspace = true }
regex = { workspace = true }
tempfile = { workspace = true }
toml = { workspace = true }
[lints]
workspace = true

View File

@@ -1,25 +0,0 @@
# Red Knot
Red Knot is an extremely fast type checker.
Currently, it is a work-in-progress and not ready for user testing.
Red Knot is designed to prioritize good type inference, even in unannotated code,
and aims to avoid false positives.
While Red Knot will produce similar results to mypy and pyright on many codebases,
100% compatibility with these tools is a non-goal.
On some codebases, Red Knot's design decisions lead to different outcomes
than you would get from running one of these more established tools.
## Contributing
Core type checking tests are written as Markdown code blocks.
They can be found in [`red_knot_python_semantic/resources/mdtest`][resources-mdtest].
See [`red_knot_test/README.md`][mdtest-readme] for more information
on the test framework itself.
The list of open issues can be found [here][open-issues].
[mdtest-readme]: ../red_knot_test/README.md
[open-issues]: https://github.com/astral-sh/ruff/issues?q=sort%3Aupdated-desc%20is%3Aissue%20is%3Aopen%20label%3Ared-knot
[resources-mdtest]: ../red_knot_python_semantic/resources/mdtest

View File

@@ -1,104 +0,0 @@
use std::{
fs,
path::{Path, PathBuf},
process::Command,
};
fn main() {
// The workspace root directory is not available without walking up the tree
// https://github.com/rust-lang/cargo/issues/3946
let workspace_root = Path::new(&std::env::var("CARGO_MANIFEST_DIR").unwrap())
.join("..")
.join("..");
commit_info(&workspace_root);
#[allow(clippy::disallowed_methods)]
let target = std::env::var("TARGET").unwrap();
println!("cargo::rustc-env=RUST_HOST_TARGET={target}");
}
fn commit_info(workspace_root: &Path) {
// If not in a git repository, do not attempt to retrieve commit information
let git_dir = workspace_root.join(".git");
if !git_dir.exists() {
return;
}
if let Some(git_head_path) = git_head(&git_dir) {
println!("cargo:rerun-if-changed={}", git_head_path.display());
let git_head_contents = fs::read_to_string(git_head_path);
if let Ok(git_head_contents) = git_head_contents {
// The contents are either a commit or a reference in the following formats
// - "<commit>" when the head is detached
// - "ref <ref>" when working on a branch
// If a commit, checking if the HEAD file has changed is sufficient
// If a ref, we need to add the head file for that ref to rebuild on commit
let mut git_ref_parts = git_head_contents.split_whitespace();
git_ref_parts.next();
if let Some(git_ref) = git_ref_parts.next() {
let git_ref_path = git_dir.join(git_ref);
println!("cargo:rerun-if-changed={}", git_ref_path.display());
}
}
}
let output = match Command::new("git")
.arg("log")
.arg("-1")
.arg("--date=short")
.arg("--abbrev=9")
.arg("--format=%H %h %cd %(describe)")
.output()
{
Ok(output) if output.status.success() => output,
_ => return,
};
let stdout = String::from_utf8(output.stdout).unwrap();
let mut parts = stdout.split_whitespace();
let mut next = || parts.next().unwrap();
let _commit_hash = next();
println!("cargo::rustc-env=RED_KNOT_COMMIT_SHORT_HASH={}", next());
println!("cargo::rustc-env=RED_KNOT_COMMIT_DATE={}", next());
// Describe can fail for some commits
// https://git-scm.com/docs/pretty-formats#Documentation/pretty-formats.txt-emdescribeoptionsem
if let Some(describe) = parts.next() {
let mut describe_parts = describe.split('-');
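// `%(describe)` output typically looks like "v0.1.0-24-g53b0f5d": the last tag, the number of commits since it, and the short hash.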
let _last_tag = describe_parts.next().unwrap();
// If this is the tagged commit, this component will be missing
println!(
"cargo::rustc-env=RED_KNOT_LAST_TAG_DISTANCE={}",
describe_parts.next().unwrap_or("0")
);
}
}
fn git_head(git_dir: &Path) -> Option<PathBuf> {
// The typical case is a standard git repository.
let git_head_path = git_dir.join("HEAD");
if git_head_path.exists() {
return Some(git_head_path);
}
if !git_dir.is_file() {
return None;
}
// If `.git/HEAD` doesn't exist and `.git` is actually a file,
// then let's try to attempt to read it as a worktree. If it's
// a worktree, then its contents will look like this, e.g.:
//
// gitdir: /home/andrew/astral/uv/main/.git/worktrees/pr2
//
// And the HEAD file we want to watch will be at:
//
// /home/andrew/astral/uv/main/.git/worktrees/pr2/HEAD
let contents = fs::read_to_string(git_dir).ok()?;
let (label, worktree_path) = contents.split_once(':')?;
if label != "gitdir" {
return None;
}
let worktree_path = worktree_path.trim();
Some(PathBuf::from(worktree_path))
}

View File

@@ -1,61 +0,0 @@
# Running `mypy_primer`
## Basics
For now, we use our own [fork of mypy primer]. It can be run using `uvx --from "…" mypy_primer`. For example, to see the help message, run:
```sh
uvx --from "git+https://github.com/astral-sh/mypy_primer.git@add-red-knot-support" mypy_primer -h
```
Alternatively, you can install the forked version of `mypy_primer` using:
```sh
uv tool install "git+https://github.com/astral-sh/mypy_primer.git@add-red-knot-support"
```
and then run it using `uvx mypy_primer` or just `mypy_primer`, if your `PATH` is set up accordingly (see: [Tool executables]).
## Showing the diagnostics diff between two Git revisions
To show the diagnostics diff between two Git revisions (e.g. your feature branch and `main`), run:
```sh
mypy_primer \
--type-checker knot \
--old origin/main \
--new my/feature \
--debug \
--output concise \
--project-selector '/black$'
```
This will show the diagnostics diff for the `black` project between the `main` branch and your `my/feature` branch. To run the
diff for all projects we currently enable in CI, use `--project-selector "/($(paste -s -d'|' crates/red_knot_python_semantic/resources/primer/good.txt))\$"`.
You can also take a look at the [full list of ecosystem projects]. Note that some of them might still need a `knot_paths` configuration
option to work correctly.
## Avoiding recompilation
If you want to run `mypy_primer` repeatedly, e.g. for different projects, but for the same combination of `--old` and `--new`, you
can set the `MYPY_PRIMER_NO_REBUILD` environment variable to avoid recompilation of Red Knot:
```sh
MYPY_PRIMER_NO_REBUILD=1 mypy_primer …
```
## Running from a local copy of the repository
If you are working on a local branch, you can use `mypy_primer`'s `--repo` option to specify the path to your local copy of the `ruff` repository.
This allows `mypy_primer` to check out local branches:
```sh
mypy_primer --repo /path/to/ruff --old origin/main --new my/local-branch …
```
Note that you might need to clean up `/tmp/mypy_primer` in order for this to work correctly.
[fork of mypy primer]: https://github.com/astral-sh/mypy_primer/tree/add-red-knot-support
[full list of ecosystem projects]: https://github.com/astral-sh/mypy_primer/blob/add-red-knot-support/mypy_primer/projects.py
[tool executables]: https://docs.astral.sh/uv/concepts/tools/#tool-executables

Binary file not shown.

Before: 40 KiB

View File

@@ -1,128 +0,0 @@
# Tracing
Traces are a useful tool to narrow down the location of a bug or, at least, to understand why the compiler is doing a particular thing.
Note that tracing messages with severity `debug` or greater are user-facing; they should be phrased accordingly.
Tracing spans are only shown when using `-vvv`.
## Verbosity levels
The CLI supports different verbosity levels.
- default: Only show errors and warnings.
- `-v` activates `info!`: Shows generally useful information such as paths of configuration files, the detected platform, etc. It's not a lot of messages; it's something you'll activate in CI by default (`cargo build`, for example, shows which packages are fresh at this level).
- `-vv` activates `debug!` and timestamps: This should be enough information to get to the bottom of bug reports. When you're processing many packages or files, you'll get pages and pages of output, but each line is linked to a specific action or state change.
- `-vvv` activates `trace!` (only in debug builds) and shows tracing spans: At this level, you're logging everything. Most of this is wasted and it's really slow; we dump e.g. the entire resolution graph. It's only useful to developers, and you almost certainly want to use `RED_KNOT_LOG` to filter it down to the area you're investigating.
## Better logging with `RED_KNOT_LOG` and `RAYON_NUM_THREADS`
By default, the CLI shows messages from the `ruff` and `red_knot` crates. Tracing messages from other crates are not shown.
The `RED_KNOT_LOG` environment variable allows you to customize which messages are shown by specifying one
or more [filter directives](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives).
The `RAYON_NUM_THREADS` environment variable, meanwhile, can be used to control the level of concurrency Red Knot uses.
By default, Red Knot will attempt to parallelize its work so that multiple files are checked simultaneously,
but this can result in confusing log output where messages from different threads are intertwined.
To switch off concurrency entirely and have more readable logs, use `RAYON_NUM_THREADS=1`.
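For example, a hypothetical single-threaded run with debug logging (the exact binary name and flags are illustrative):

```bash
RAYON_NUM_THREADS=1 RED_KNOT_LOG=debug red_knot check -vv
```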
### Examples
#### Show all debug messages
Shows debug messages from all crates.
```bash
RED_KNOT_LOG=debug
```
#### Show salsa query execution messages
Show the salsa `execute: my_query` messages in addition to all red knot messages.
```bash
RED_KNOT_LOG=ruff=trace,red_knot=trace,salsa=info
```
#### Show typing traces
Only show traces for the `red_knot_python_semantic::types` module.
```bash
RED_KNOT_LOG="red_knot_python_semantic::types"
```
Note: Ensure that you use `-vvv` to see tracing spans.
#### Show messages for a single file
Shows all messages that are inside of a span for a specific file.
```bash
RED_KNOT_LOG=red_knot[{file=/home/micha/astral/test/x.py}]=trace
```
**Note**: Tracing still shows all spans because tracing can't know, at the time of entering a span,
whether one of its children has the file `x.py`.
**Note**: Salsa currently logs entire memoized values (in our case, the source text and parsed AST).
This very quickly leads to extremely long output.
## Tracing and Salsa
Be mindful about using `tracing` in Salsa queries, especially when using `warn` or `error`, because it isn't guaranteed
that the query will execute after restoring from a persistent cache, in which case the user won't see the message.
For example, don't use `tracing` to show the user a message when generating a lint violation failed
because the message would only be shown when linting the file the first time, but not on subsequent analysis
runs or when restoring from a persistent cache. This can be confusing for users because they
don't understand why a specific lint violation isn't raised. Instead, change your
query to return the failure as part of the query's result or use a Salsa accumulator.
## Tracing in tests
You can use `ruff_db::testing::setup_logging` or `ruff_db::testing::setup_logging_with_filter` to set up logging in tests.
```rust
use ruff_db::testing::setup_logging;
#[test]
fn test() {
let _logging = setup_logging();
tracing::info!("This message will be printed to stderr");
}
```
Note: Most test runners capture stderr and only show its output when a test fails.
Note also that `setup_logging` only sets up logging for the current thread because [`set_global_default`](https://docs.rs/tracing/latest/tracing/subscriber/fn.set_global_default.html) can only be
called **once**.
## Release builds
`trace!` events are removed in release builds.
## Profiling
Red Knot writes a folded stack trace named `tracing.folded` to the current directory when the environment variable `RED_KNOT_LOG_PROFILE` is set to `1` or `true`.
```bash
RED_KNOT_LOG_PROFILE=1 red_knot -- --current-directory=../test -vvv
```
You can convert the textual representation into a visual one using `inferno`.
```shell
cargo install inferno
```
```shell
# flamegraph
cat tracing.folded | inferno-flamegraph > tracing-flamegraph.svg
# flamechart
cat tracing.folded | inferno-flamegraph --flamechart > tracing-flamechart.svg
```
![Example flamegraph](./tracing-flamegraph.png)
See [`tracing-flame`](https://crates.io/crates/tracing-flame) for more details.

View File

@@ -1,280 +0,0 @@
use crate::logging::Verbosity;
use crate::python_version::PythonVersion;
use clap::{ArgAction, ArgMatches, Error, Parser};
use red_knot_project::metadata::options::{EnvironmentOptions, Options, TerminalOptions};
use red_knot_project::metadata::value::{RangedValue, RelativePathBuf};
use red_knot_python_semantic::lint;
use ruff_db::system::SystemPathBuf;
#[derive(Debug, Parser)]
#[command(
author,
name = "red-knot",
about = "An extremely fast Python type checker."
)]
#[command(version)]
pub(crate) struct Args {
#[command(subcommand)]
pub(crate) command: Command,
}
#[derive(Debug, clap::Subcommand)]
pub(crate) enum Command {
/// Check a project for type errors.
Check(CheckCommand),
/// Start the language server
Server,
/// Display Red Knot's version
Version,
}
#[derive(Debug, Parser)]
pub(crate) struct CheckCommand {
/// List of files or directories to check.
#[clap(
help = "List of files or directories to check [default: the project root]",
value_name = "PATH"
)]
pub paths: Vec<SystemPathBuf>,
/// Run the command within the given project directory.
///
/// All `pyproject.toml` files will be discovered by walking up the directory tree from the given project directory,
/// as will the project's virtual environment (`.venv`) unless the `venv-path` option is set.
///
/// Other command-line arguments (such as relative paths) will be resolved relative to the current working directory.
#[arg(long, value_name = "PROJECT")]
pub(crate) project: Option<SystemPathBuf>,
/// Path to the Python installation from which Red Knot resolves type information and third-party dependencies.
///
/// If not specified, Red Knot will look at the `VIRTUAL_ENV` environment variable.
///
/// Red Knot will search in the path's `site-packages` directories for type information and
/// third-party imports.
///
/// This option is commonly used to specify the path to a virtual environment.
#[arg(long, value_name = "PATH")]
pub(crate) python: Option<SystemPathBuf>,
/// Custom directory to use for stdlib typeshed stubs.
#[arg(long, value_name = "PATH", alias = "custom-typeshed-dir")]
pub(crate) typeshed: Option<SystemPathBuf>,
/// Additional path to use as a module-resolution source (can be passed multiple times).
#[arg(long, value_name = "PATH")]
pub(crate) extra_search_path: Option<Vec<SystemPathBuf>>,
/// Python version to assume when resolving types.
#[arg(long, value_name = "VERSION", alias = "target-version")]
pub(crate) python_version: Option<PythonVersion>,
/// Target platform to assume when resolving types.
///
/// This is used to specialize the type of `sys.platform` and will affect the visibility
/// of platform-specific functions and attributes. If the value is set to `all`, no
/// assumptions are made about the target platform. If unspecified, the current system's
/// platform will be used.
#[arg(long, value_name = "PLATFORM", alias = "platform")]
pub(crate) python_platform: Option<String>,
#[clap(flatten)]
pub(crate) verbosity: Verbosity,
#[clap(flatten)]
pub(crate) rules: RulesArg,
/// The format to use for printing diagnostic messages.
#[arg(long)]
pub(crate) output_format: Option<OutputFormat>,
/// Control when colored output is used.
#[arg(long, value_name = "WHEN")]
pub(crate) color: Option<TerminalColor>,
/// Use exit code 1 if there are any warning-level diagnostics.
#[arg(long, conflicts_with = "exit_zero", default_missing_value = "true", num_args=0..1)]
pub(crate) error_on_warning: Option<bool>,
/// Always use exit code 0, even when there are error-level diagnostics.
#[arg(long)]
pub(crate) exit_zero: bool,
/// Watch files for changes and recheck files related to the changed files.
#[arg(long, short = 'W')]
pub(crate) watch: bool,
}
impl CheckCommand {
pub(crate) fn into_options(self) -> Options {
let rules = if self.rules.is_empty() {
None
} else {
Some(
self.rules
.into_iter()
.map(|(rule, level)| (RangedValue::cli(rule), RangedValue::cli(level)))
.collect(),
)
};
Options {
environment: Some(EnvironmentOptions {
python_version: self
.python_version
.map(|version| RangedValue::cli(version.into())),
python_platform: self
.python_platform
.map(|platform| RangedValue::cli(platform.into())),
python: self.python.map(RelativePathBuf::cli),
typeshed: self.typeshed.map(RelativePathBuf::cli),
extra_paths: self.extra_search_path.map(|extra_search_paths| {
extra_search_paths
.into_iter()
.map(RelativePathBuf::cli)
.collect()
}),
}),
terminal: Some(TerminalOptions {
output_format: self
.output_format
.map(|output_format| RangedValue::cli(output_format.into())),
error_on_warning: self.error_on_warning,
}),
rules,
..Default::default()
}
}
}
/// A list of rules to enable or disable with a given severity.
///
/// This type is used to parse the `--error`, `--warn`, and `--ignore` arguments
/// while preserving the order in which they were specified (later arguments override earlier severities).
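///
/// For example (with a hypothetical rule name), `--warn my-rule --error my-rule`
/// treats `my-rule` as an error, because the later argument wins.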
#[derive(Debug)]
pub(crate) struct RulesArg(Vec<(String, lint::Level)>);
impl RulesArg {
fn is_empty(&self) -> bool {
self.0.is_empty()
}
fn into_iter(self) -> impl Iterator<Item = (String, lint::Level)> {
self.0.into_iter()
}
}
impl clap::FromArgMatches for RulesArg {
fn from_arg_matches(matches: &ArgMatches) -> Result<Self, Error> {
let mut rules = Vec::new();
for (level, arg_id) in [
(lint::Level::Ignore, "ignore"),
(lint::Level::Warn, "warn"),
(lint::Level::Error, "error"),
] {
let indices = matches.indices_of(arg_id).into_iter().flatten();
let levels = matches.get_many::<String>(arg_id).into_iter().flatten();
rules.extend(
indices
.zip(levels)
.map(|(index, rule)| (index, rule, level)),
);
}
// Sort by their index so that values specified later override earlier ones.
rules.sort_by_key(|(index, _, _)| *index);
Ok(Self(
rules
.into_iter()
.map(|(_, rule, level)| (rule.to_owned(), level))
.collect(),
))
}
fn update_from_arg_matches(&mut self, matches: &ArgMatches) -> Result<(), Error> {
self.0 = Self::from_arg_matches(matches)?.0;
Ok(())
}
}
impl clap::Args for RulesArg {
fn augment_args(cmd: clap::Command) -> clap::Command {
const HELP_HEADING: &str = "Enabling / disabling rules";
cmd.arg(
clap::Arg::new("error")
.long("error")
.action(ArgAction::Append)
.help("Treat the given rule as having severity 'error'. Can be specified multiple times.")
.value_name("RULE")
.help_heading(HELP_HEADING),
)
.arg(
clap::Arg::new("warn")
.long("warn")
.action(ArgAction::Append)
.help("Treat the given rule as having severity 'warn'. Can be specified multiple times.")
.value_name("RULE")
.help_heading(HELP_HEADING),
)
.arg(
clap::Arg::new("ignore")
.long("ignore")
.action(ArgAction::Append)
.help("Disables the rule. Can be specified multiple times.")
.value_name("RULE")
.help_heading(HELP_HEADING),
)
}
fn augment_args_for_update(cmd: clap::Command) -> clap::Command {
Self::augment_args(cmd)
}
}
/// The diagnostic output format.
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq, PartialOrd, Ord, Default, clap::ValueEnum)]
pub enum OutputFormat {
/// Print diagnostics verbosely, with context and helpful hints.
///
/// Diagnostic messages may include additional context and
/// annotations on the input to help understand the message.
#[default]
#[value(name = "full")]
Full,
/// Print diagnostics concisely, one per line.
///
/// This will guarantee that each diagnostic is printed on
/// a single line. Only the most important or primary aspects
/// of the diagnostic are included. Contextual information is
/// dropped.
#[value(name = "concise")]
Concise,
}
impl From<OutputFormat> for ruff_db::diagnostic::DiagnosticFormat {
fn from(format: OutputFormat) -> ruff_db::diagnostic::DiagnosticFormat {
match format {
OutputFormat::Full => Self::Full,
OutputFormat::Concise => Self::Concise,
}
}
}
/// Control when colored output is used.
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq, PartialOrd, Ord, Default, clap::ValueEnum)]
pub(crate) enum TerminalColor {
/// Display colors if the output goes to an interactive terminal.
#[default]
Auto,
/// Always display colors.
Always,
/// Never display colors.
Never,
}

View File

@@ -1,254 +0,0 @@
//! Sets up logging for Red Knot
use anyhow::Context;
use colored::Colorize;
use std::fmt;
use std::fs::File;
use std::io::BufWriter;
use tracing::{Event, Subscriber};
use tracing_subscriber::filter::LevelFilter;
use tracing_subscriber::fmt::format::Writer;
use tracing_subscriber::fmt::{FmtContext, FormatEvent, FormatFields};
use tracing_subscriber::registry::LookupSpan;
use tracing_subscriber::EnvFilter;
/// Logging flags to `#[command(flatten)]` into your CLI
#[derive(clap::Args, Debug, Clone, Default)]
#[command(about = None, long_about = None)]
pub(crate) struct Verbosity {
#[arg(
long,
short = 'v',
help = "Use verbose output (or `-vv` and `-vvv` for more verbose output)",
action = clap::ArgAction::Count,
global = true,
)]
verbose: u8,
}
impl Verbosity {
/// Returns the verbosity level based on the number of `-v` flags.
///
/// Returns `None` if the user did not specify any verbosity flags.
pub(crate) fn level(&self) -> VerbosityLevel {
match self.verbose {
0 => VerbosityLevel::Default,
1 => VerbosityLevel::Verbose,
2 => VerbosityLevel::ExtraVerbose,
_ => VerbosityLevel::Trace,
}
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub(crate) enum VerbosityLevel {
/// Default output level. Only shows Ruff and Red Knot events up to the [`WARN`](tracing::Level::WARN) level.
Default,
/// Enables verbose output. Emits Ruff and Red Knot events up to the [`INFO`](tracing::Level::INFO) level.
/// Corresponds to `-v`.
Verbose,
/// Enables a more verbose tracing format and emits Ruff and Red Knot events up to the [`DEBUG`](tracing::Level::DEBUG) level.
/// Corresponds to `-vv`.
ExtraVerbose,
/// Enables all tracing events and uses a tree-like output format. Corresponds to `-vvv`.
Trace,
}
impl VerbosityLevel {
const fn level_filter(self) -> LevelFilter {
match self {
VerbosityLevel::Default => LevelFilter::WARN,
VerbosityLevel::Verbose => LevelFilter::INFO,
VerbosityLevel::ExtraVerbose => LevelFilter::DEBUG,
VerbosityLevel::Trace => LevelFilter::TRACE,
}
}
pub(crate) const fn is_trace(self) -> bool {
matches!(self, VerbosityLevel::Trace)
}
pub(crate) const fn is_extra_verbose(self) -> bool {
matches!(self, VerbosityLevel::ExtraVerbose)
}
}
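/// Sets up the tracing subscriber for the given verbosity level.
///
/// The `RED_KNOT_LOG` environment variable, if set, takes precedence over the level.
/// The returned guard flushes any profiling output when dropped.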
pub(crate) fn setup_tracing(level: VerbosityLevel) -> anyhow::Result<TracingGuard> {
use tracing_subscriber::prelude::*;
// The `RED_KNOT_LOG` environment variable overrides the default log level.
let filter = if let Ok(log_env_variable) = std::env::var("RED_KNOT_LOG") {
EnvFilter::builder()
.parse(log_env_variable)
.context("Failed to parse directives specified in RED_KNOT_LOG environment variable.")?
} else {
match level {
VerbosityLevel::Default => {
// Show warning traces
EnvFilter::default().add_directive(LevelFilter::WARN.into())
}
level => {
let level_filter = level.level_filter();
// Show info|debug|trace events, but allow `RED_KNOT_LOG` to override
let filter = EnvFilter::default().add_directive(
format!("red_knot={level_filter}")
.parse()
.expect("Hardcoded directive to be valid"),
);
filter.add_directive(
format!("ruff={level_filter}")
.parse()
.expect("Hardcoded directive to be valid"),
)
}
}
};
let (profiling_layer, guard) = setup_profile();
let registry = tracing_subscriber::registry()
.with(filter)
.with(profiling_layer);
if level.is_trace() {
let subscriber = registry.with(
tracing_tree::HierarchicalLayer::default()
.with_indent_lines(true)
.with_indent_amount(2)
.with_bracketed_fields(true)
.with_thread_ids(true)
.with_targets(true)
.with_writer(std::io::stderr)
.with_timer(tracing_tree::time::Uptime::default()),
);
subscriber.init();
} else {
let subscriber = registry.with(
tracing_subscriber::fmt::layer()
.event_format(RedKnotFormat {
display_level: true,
display_timestamp: level.is_extra_verbose(),
show_spans: false,
})
.with_writer(std::io::stderr),
);
subscriber.init();
}
Ok(TracingGuard {
_flame_guard: guard,
})
}
#[allow(clippy::type_complexity)]
fn setup_profile<S>() -> (
Option<tracing_flame::FlameLayer<S, BufWriter<File>>>,
Option<tracing_flame::FlushGuard<BufWriter<File>>>,
)
where
S: Subscriber + for<'span> LookupSpan<'span>,
{
if let Ok("1" | "true") = std::env::var("RED_KNOT_LOG_PROFILE").as_deref() {
let (layer, guard) = tracing_flame::FlameLayer::with_file("tracing.folded")
.expect("Flame layer to be created");
(Some(layer), Some(guard))
} else {
(None, None)
}
}
pub(crate) struct TracingGuard {
_flame_guard: Option<tracing_flame::FlushGuard<BufWriter<File>>>,
}
struct RedKnotFormat {
display_timestamp: bool,
display_level: bool,
show_spans: bool,
}
/// See <https://docs.rs/tracing-subscriber/0.3.18/src/tracing_subscriber/fmt/format/mod.rs.html#1026-1156>
impl<S, N> FormatEvent<S, N> for RedKnotFormat
where
S: Subscriber + for<'a> LookupSpan<'a>,
N: for<'a> FormatFields<'a> + 'static,
{
fn format_event(
&self,
ctx: &FmtContext<'_, S, N>,
mut writer: Writer<'_>,
event: &Event<'_>,
) -> fmt::Result {
let meta = event.metadata();
let ansi = writer.has_ansi_escapes();
if self.display_timestamp {
let timestamp = jiff::Zoned::now()
.strftime("%Y-%m-%d %H:%M:%S.%f")
.to_string();
if ansi {
write!(writer, "{} ", timestamp.dimmed())?;
} else {
write!(
writer,
"{} ",
jiff::Zoned::now().strftime("%Y-%m-%d %H:%M:%S.%f")
)?;
}
}
if self.display_level {
let level = meta.level();
// Same colors as tracing
if ansi {
let formatted_level = level.to_string();
match *level {
tracing::Level::TRACE => {
write!(writer, "{} ", formatted_level.purple().bold())?;
}
tracing::Level::DEBUG => write!(writer, "{} ", formatted_level.blue().bold())?,
tracing::Level::INFO => write!(writer, "{} ", formatted_level.green().bold())?,
tracing::Level::WARN => write!(writer, "{} ", formatted_level.yellow().bold())?,
tracing::Level::ERROR => write!(writer, "{} ", level.to_string().red().bold())?,
}
} else {
write!(writer, "{level} ")?;
}
}
if self.show_spans {
let span = event.parent();
let mut seen = false;
let span = span
.and_then(|id| ctx.span(id))
.or_else(|| ctx.lookup_current());
let scope = span.into_iter().flat_map(|span| span.scope().from_root());
for span in scope {
seen = true;
if ansi {
write!(writer, "{}:", span.metadata().name().bold())?;
} else {
write!(writer, "{}:", span.metadata().name())?;
}
}
if seen {
writer.write_char(' ')?;
}
}
ctx.field_format().format_fields(writer.by_ref(), event)?;
writeln!(writer)
}
}

View File

@@ -1,385 +0,0 @@
use std::io::{self, stdout, BufWriter, Write};
use std::process::{ExitCode, Termination};
use anyhow::Result;
use std::sync::Mutex;
use crate::args::{Args, CheckCommand, Command, TerminalColor};
use crate::logging::setup_tracing;
use anyhow::{anyhow, Context};
use clap::Parser;
use colored::Colorize;
use crossbeam::channel as crossbeam_channel;
use red_knot_project::metadata::options::Options;
use red_knot_project::watch::ProjectWatcher;
use red_knot_project::{watch, Db};
use red_knot_project::{ProjectDatabase, ProjectMetadata};
use red_knot_server::run_server;
use ruff_db::diagnostic::{Diagnostic, DisplayDiagnosticConfig, Severity};
use ruff_db::system::{OsSystem, SystemPath, SystemPathBuf};
use salsa::plumbing::ZalsaDatabase;
mod args;
mod logging;
mod python_version;
mod version;
#[allow(clippy::print_stdout, clippy::unnecessary_wraps, clippy::print_stderr)]
pub fn main() -> ExitStatus {
run().unwrap_or_else(|error| {
use std::io::Write;
// Use `writeln` instead of `eprintln` to avoid panicking when the stderr pipe is broken.
let mut stderr = std::io::stderr().lock();
// This communicates that this isn't a linter error but Red Knot itself hard-errored for
// some reason (e.g. failed to resolve the configuration)
writeln!(stderr, "{}", "Red Knot failed".red().bold()).ok();
// Currently we generally only see one error, but e.g. with io errors when resolving
// the configuration it is helpful to chain errors ("resolving configuration failed" ->
// "failed to read file: subdir/pyproject.toml")
for cause in error.chain() {
// Exit "gracefully" on broken pipe errors.
//
// See: https://github.com/BurntSushi/ripgrep/blob/bf63fe8f258afc09bae6caa48f0ae35eaf115005/crates/core/main.rs#L47C1-L61C14
if let Some(ioerr) = cause.downcast_ref::<io::Error>() {
if ioerr.kind() == io::ErrorKind::BrokenPipe {
return ExitStatus::Success;
}
}
writeln!(stderr, " {} {cause}", "Cause:".bold()).ok();
}
ExitStatus::Error
})
}
fn run() -> anyhow::Result<ExitStatus> {
let args = wild::args_os();
let args = argfile::expand_args_from(args, argfile::parse_fromfile, argfile::PREFIX)
.context("Failed to read CLI arguments from file")?;
let args = Args::parse_from(args);
match args.command {
Command::Server => run_server().map(|()| ExitStatus::Success),
Command::Check(check_args) => run_check(check_args),
Command::Version => version().map(|()| ExitStatus::Success),
}
}
pub(crate) fn version() -> Result<()> {
let mut stdout = BufWriter::new(io::stdout().lock());
let version_info = crate::version::version();
writeln!(stdout, "red knot {}", &version_info)?;
Ok(())
}
fn run_check(args: CheckCommand) -> anyhow::Result<ExitStatus> {
set_colored_override(args.color);
let verbosity = args.verbosity.level();
countme::enable(verbosity.is_trace());
let _guard = setup_tracing(verbosity)?;
tracing::debug!("Version: {}", version::version());
// The base path that all CLI arguments are resolved relative to.
let cwd = {
let cwd = std::env::current_dir().context("Failed to get the current working directory")?;
SystemPathBuf::from_path_buf(cwd)
.map_err(|path| {
anyhow!(
"The current working directory `{}` contains non-Unicode characters. Red Knot only supports Unicode paths.",
path.display()
)
})?
};
let project_path = args
.project
.as_ref()
.map(|project| {
if project.as_std_path().is_dir() {
Ok(SystemPath::absolute(project, &cwd))
} else {
Err(anyhow!(
"Provided project path `{project}` is not a directory"
))
}
})
.transpose()?
.unwrap_or_else(|| cwd.clone());
let check_paths: Vec<_> = args
.paths
.iter()
.map(|path| SystemPath::absolute(path, &cwd))
.collect();
let system = OsSystem::new(cwd);
let watch = args.watch;
let exit_zero = args.exit_zero;
let cli_options = args.into_options();
let mut project_metadata = ProjectMetadata::discover(&project_path, &system)?;
project_metadata.apply_cli_options(cli_options.clone());
project_metadata.apply_configuration_files(&system)?;
let mut db = ProjectDatabase::new(project_metadata, system)?;
if !check_paths.is_empty() {
db.project().set_included_paths(&mut db, check_paths);
}
let (main_loop, main_loop_cancellation_token) = MainLoop::new(cli_options);
// Listen to Ctrl+C and abort the watch mode.
let main_loop_cancellation_token = Mutex::new(Some(main_loop_cancellation_token));
ctrlc::set_handler(move || {
let mut lock = main_loop_cancellation_token.lock().unwrap();
if let Some(token) = lock.take() {
token.stop();
}
})?;
let exit_status = if watch {
main_loop.watch(&mut db)?
} else {
main_loop.run(&mut db)?
};
tracing::trace!("Counts for entire CLI run:\n{}", countme::get_all());
std::mem::forget(db);
if exit_zero {
Ok(ExitStatus::Success)
} else {
Ok(exit_status)
}
}
#[derive(Copy, Clone)]
pub enum ExitStatus {
/// Checking was successful and there were no errors.
Success = 0,
/// Checking was successful but there were errors.
Failure = 1,
/// Checking failed.
Error = 2,
}
impl Termination for ExitStatus {
fn report(self) -> ExitCode {
ExitCode::from(self as u8)
}
}
struct MainLoop {
/// Sender that can be used to send messages to the main loop.
sender: crossbeam_channel::Sender<MainLoopMessage>,
/// Receiver for the messages sent **to** the main loop.
receiver: crossbeam_channel::Receiver<MainLoopMessage>,
/// The file system watcher, if running in watch mode.
watcher: Option<ProjectWatcher>,
cli_options: Options,
}
impl MainLoop {
fn new(cli_options: Options) -> (Self, MainLoopCancellationToken) {
let (sender, receiver) = crossbeam_channel::bounded(10);
(
Self {
sender: sender.clone(),
receiver,
watcher: None,
cli_options,
},
MainLoopCancellationToken { sender },
)
}
fn watch(mut self, db: &mut ProjectDatabase) -> Result<ExitStatus> {
tracing::debug!("Starting watch mode");
let sender = self.sender.clone();
let watcher = watch::directory_watcher(move |event| {
sender.send(MainLoopMessage::ApplyChanges(event)).unwrap();
})?;
self.watcher = Some(ProjectWatcher::new(watcher, db));
self.run(db)?;
Ok(ExitStatus::Success)
}
fn run(mut self, db: &mut ProjectDatabase) -> Result<ExitStatus> {
self.sender.send(MainLoopMessage::CheckWorkspace).unwrap();
let result = self.main_loop(db);
tracing::debug!("Exiting main loop");
result
}
fn main_loop(&mut self, db: &mut ProjectDatabase) -> Result<ExitStatus> {
// Schedule the first check.
tracing::debug!("Starting main loop");
let mut revision = 0u64;
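// Bumped for every batch of applied file changes so that check results computed against an older project state can be discarded.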
while let Ok(message) = self.receiver.recv() {
match message {
MainLoopMessage::CheckWorkspace => {
let db = db.clone();
let sender = self.sender.clone();
// Spawn a new task that checks the project. This needs to be done in a separate thread
// to prevent blocking the main loop here.
rayon::spawn(move || {
if let Ok(result) = db.check() {
// Send the result back to the main loop for printing.
sender
.send(MainLoopMessage::CheckCompleted { result, revision })
.unwrap();
}
});
}
MainLoopMessage::CheckCompleted {
result,
revision: check_revision,
} => {
let terminal_settings = db.project().settings(db).terminal();
let display_config = DisplayDiagnosticConfig::default()
.format(terminal_settings.output_format)
.color(colored::control::SHOULD_COLORIZE.should_colorize());
let min_error_severity = if terminal_settings.error_on_warning {
Severity::Warning
} else {
Severity::Error
};
if check_revision == revision {
if db.project().files(db).is_empty() {
tracing::warn!("No python files found under the given path(s)");
}
let mut stdout = stdout().lock();
if result.is_empty() {
writeln!(stdout, "All checks passed!")?;
if self.watcher.is_none() {
return Ok(ExitStatus::Success);
}
} else {
let mut failed = false;
let diagnostics_count = result.len();
for diagnostic in result {
write!(stdout, "{}", diagnostic.display(db, &display_config))?;
failed |= diagnostic.severity() >= min_error_severity;
}
writeln!(
stdout,
"Found {} diagnostic{}",
diagnostics_count,
if diagnostics_count > 1 { "s" } else { "" }
)?;
if self.watcher.is_none() {
return Ok(if failed {
ExitStatus::Failure
} else {
ExitStatus::Success
});
}
}
} else {
tracing::debug!(
"Discarding check result for outdated revision: current: {revision}, result revision: {check_revision}"
);
}
tracing::trace!("Counts after last check:\n{}", countme::get_all());
}
MainLoopMessage::ApplyChanges(changes) => {
revision += 1;
// Automatically cancels any pending queries and waits for them to complete.
db.apply_changes(changes, Some(&self.cli_options));
if let Some(watcher) = self.watcher.as_mut() {
watcher.update(db);
}
self.sender.send(MainLoopMessage::CheckWorkspace).unwrap();
}
MainLoopMessage::Exit => {
// Cancel any pending queries and wait for them to complete.
// TODO: Don't use Salsa internal APIs
// [Zulip-Thread](https://salsa.zulipchat.com/#narrow/stream/333573-salsa-3.2E0/topic/Expose.20an.20API.20to.20cancel.20other.20queries)
let _ = db.zalsa_mut();
return Ok(ExitStatus::Success);
}
}
tracing::debug!("Waiting for next main loop message.");
}
Ok(ExitStatus::Success)
}
}
#[derive(Debug)]
struct MainLoopCancellationToken {
sender: crossbeam_channel::Sender<MainLoopMessage>,
}
impl MainLoopCancellationToken {
fn stop(self) {
self.sender.send(MainLoopMessage::Exit).unwrap();
}
}
/// Message sent from the orchestrator to the main loop.
#[derive(Debug)]
enum MainLoopMessage {
CheckWorkspace,
CheckCompleted {
/// The diagnostics that were found during the check.
result: Vec<Diagnostic>,
revision: u64,
},
ApplyChanges(Vec<watch::ChangeEvent>),
Exit,
}
fn set_colored_override(color: Option<TerminalColor>) {
let Some(color) = color else {
return;
};
match color {
TerminalColor::Auto => {
colored::control::unset_override();
}
TerminalColor::Always => {
colored::control::set_override(true);
}
TerminalColor::Never => {
colored::control::set_override(false);
}
}
}

View File

@@ -1,68 +0,0 @@
/// Enumeration of all supported Python versions
///
/// TODO: unify with the `PythonVersion` enum in the linter/formatter crates?
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq, PartialOrd, Ord, Default, clap::ValueEnum)]
pub enum PythonVersion {
#[value(name = "3.7")]
Py37,
#[value(name = "3.8")]
Py38,
#[default]
#[value(name = "3.9")]
Py39,
#[value(name = "3.10")]
Py310,
#[value(name = "3.11")]
Py311,
#[value(name = "3.12")]
Py312,
#[value(name = "3.13")]
Py313,
}
impl PythonVersion {
const fn as_str(self) -> &'static str {
match self {
Self::Py37 => "3.7",
Self::Py38 => "3.8",
Self::Py39 => "3.9",
Self::Py310 => "3.10",
Self::Py311 => "3.11",
Self::Py312 => "3.12",
Self::Py313 => "3.13",
}
}
}
impl std::fmt::Display for PythonVersion {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(self.as_str())
}
}
impl From<PythonVersion> for ruff_python_ast::PythonVersion {
fn from(value: PythonVersion) -> Self {
match value {
PythonVersion::Py37 => Self::PY37,
PythonVersion::Py38 => Self::PY38,
PythonVersion::Py39 => Self::PY39,
PythonVersion::Py310 => Self::PY310,
PythonVersion::Py311 => Self::PY311,
PythonVersion::Py312 => Self::PY312,
PythonVersion::Py313 => Self::PY313,
}
}
}
#[cfg(test)]
mod tests {
use crate::python_version::PythonVersion;
#[test]
fn same_default_as_python_version() {
assert_eq!(
ruff_python_ast::PythonVersion::from(PythonVersion::default()),
ruff_python_ast::PythonVersion::default()
);
}
}

View File

@@ -1,105 +0,0 @@
//! Code for representing Red Knot's release version number.
use std::fmt;
/// Information about the git repository where Red Knot was built from.
pub(crate) struct CommitInfo {
short_commit_hash: String,
commit_date: String,
commits_since_last_tag: u32,
}
/// Red Knot's version.
pub(crate) struct VersionInfo {
/// Red Knot's version, such as "0.5.1"
version: String,
/// Information about the git commit we may have been built from.
///
/// `None` if not built from a git repo or if retrieval failed.
commit_info: Option<CommitInfo>,
}
impl fmt::Display for VersionInfo {
/// Formatted version information: `<version>[+<commits>] (<commit> <date>)`
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.version)?;
if let Some(ref ci) = self.commit_info {
if ci.commits_since_last_tag > 0 {
write!(f, "+{}", ci.commits_since_last_tag)?;
}
write!(f, " ({} {})", ci.short_commit_hash, ci.commit_date)?;
}
Ok(())
}
}
/// Returns information about Red Knot's version.
pub(crate) fn version() -> VersionInfo {
// Environment variables are only read at compile-time
macro_rules! option_env_str {
($name:expr) => {
option_env!($name).map(|s| s.to_string())
};
}
// This version is pulled from Cargo.toml and set by Cargo
let version = option_env_str!("CARGO_PKG_VERSION").unwrap();
// Commit info is pulled from git and set by `build.rs`
let commit_info =
option_env_str!("RED_KNOT_COMMIT_SHORT_HASH").map(|short_commit_hash| CommitInfo {
short_commit_hash,
commit_date: option_env_str!("RED_KNOT_COMMIT_DATE").unwrap(),
commits_since_last_tag: option_env_str!("RED_KNOT_LAST_TAG_DISTANCE")
.as_deref()
.map_or(0, |value| value.parse::<u32>().unwrap_or(0)),
});
VersionInfo {
version,
commit_info,
}
}
#[cfg(test)]
mod tests {
use insta::assert_snapshot;
use super::{CommitInfo, VersionInfo};
#[test]
fn version_formatting() {
let version = VersionInfo {
version: "0.0.0".to_string(),
commit_info: None,
};
assert_snapshot!(version, @"0.0.0");
}
#[test]
fn version_formatting_with_commit_info() {
let version = VersionInfo {
version: "0.0.0".to_string(),
commit_info: Some(CommitInfo {
short_commit_hash: "53b0f5d92".to_string(),
commit_date: "2023-10-19".to_string(),
commits_since_last_tag: 0,
}),
};
assert_snapshot!(version, @"0.0.0 (53b0f5d92 2023-10-19)");
}
#[test]
fn version_formatting_with_commits_since_last_tag() {
let version = VersionInfo {
version: "0.0.0".to_string(),
commit_info: Some(CommitInfo {
short_commit_hash: "53b0f5d92".to_string(),
commit_date: "2023-10-19".to_string(),
commits_since_last_tag: 24,
}),
};
assert_snapshot!(version, @"0.0.0+24 (53b0f5d92 2023-10-19)");
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,31 +0,0 @@
[package]
name = "red_knot_ide"
version = "0.0.0"
publish = false
authors = { workspace = true }
edition = { workspace = true }
rust-version = { workspace = true }
homepage = { workspace = true }
documentation = { workspace = true }
repository = { workspace = true }
license = { workspace = true }
[dependencies]
ruff_db = { workspace = true }
ruff_python_ast = { workspace = true }
ruff_python_parser = { workspace = true }
ruff_text_size = { workspace = true }
red_knot_python_semantic = { workspace = true }
rustc-hash = { workspace = true }
salsa = { workspace = true }
smallvec = { workspace = true }
tracing = { workspace = true }
[dev-dependencies]
red_knot_vendored = { workspace = true }
insta = { workspace = true, features = ["filters"] }
[lints]
workspace = true

View File

@@ -1,138 +0,0 @@
use red_knot_python_semantic::Db as SemanticDb;
use ruff_db::{Db as SourceDb, Upcast};
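/// Database trait for IDE features: a semantic database that can also be upcast to the plain source database.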
#[salsa::db]
pub trait Db: SemanticDb + Upcast<dyn SemanticDb> + Upcast<dyn SourceDb> {}
#[cfg(test)]
pub(crate) mod tests {
use std::sync::Arc;
use super::Db;
use red_knot_python_semantic::lint::{LintRegistry, RuleSelection};
use red_knot_python_semantic::{default_lint_registry, Db as SemanticDb, Program};
use ruff_db::files::{File, Files};
use ruff_db::system::{DbWithTestSystem, System, TestSystem};
use ruff_db::vendored::VendoredFileSystem;
use ruff_db::{Db as SourceDb, Upcast};
#[salsa::db]
#[derive(Clone)]
pub(crate) struct TestDb {
storage: salsa::Storage<Self>,
files: Files,
system: TestSystem,
vendored: VendoredFileSystem,
events: Arc<std::sync::Mutex<Vec<salsa::Event>>>,
rule_selection: Arc<RuleSelection>,
}
#[allow(dead_code)]
impl TestDb {
pub(crate) fn new() -> Self {
Self {
storage: salsa::Storage::default(),
system: TestSystem::default(),
vendored: red_knot_vendored::file_system().clone(),
events: Arc::default(),
files: Files::default(),
rule_selection: Arc::new(RuleSelection::from_registry(default_lint_registry())),
}
}
/// Takes the salsa events.
///
/// ## Panics
/// If there are any pending salsa snapshots.
pub(crate) fn take_salsa_events(&mut self) -> Vec<salsa::Event> {
let inner = Arc::get_mut(&mut self.events).expect("no pending salsa snapshots");
let events = inner.get_mut().unwrap();
std::mem::take(&mut *events)
}
/// Clears the salsa events.
///
/// ## Panics
/// If there are any pending salsa snapshots.
pub(crate) fn clear_salsa_events(&mut self) {
self.take_salsa_events();
}
}
impl DbWithTestSystem for TestDb {
fn test_system(&self) -> &TestSystem {
&self.system
}
fn test_system_mut(&mut self) -> &mut TestSystem {
&mut self.system
}
}
#[salsa::db]
impl SourceDb for TestDb {
fn vendored(&self) -> &VendoredFileSystem {
&self.vendored
}
fn system(&self) -> &dyn System {
&self.system
}
fn files(&self) -> &Files {
&self.files
}
fn python_version(&self) -> ruff_python_ast::PythonVersion {
Program::get(self).python_version(self)
}
}
impl Upcast<dyn SourceDb> for TestDb {
fn upcast(&self) -> &(dyn SourceDb + 'static) {
self
}
fn upcast_mut(&mut self) -> &mut (dyn SourceDb + 'static) {
self
}
}
impl Upcast<dyn SemanticDb> for TestDb {
fn upcast(&self) -> &(dyn SemanticDb + 'static) {
self
}
fn upcast_mut(&mut self) -> &mut dyn SemanticDb {
self
}
}
#[salsa::db]
impl SemanticDb for TestDb {
fn is_file_open(&self, file: File) -> bool {
!file.path(self).is_vendored_path()
}
fn rule_selection(&self) -> Arc<RuleSelection> {
self.rule_selection.clone()
}
fn lint_registry(&self) -> &LintRegistry {
default_lint_registry()
}
}
#[salsa::db]
impl Db for TestDb {}
#[salsa::db]
impl salsa::Database for TestDb {
fn salsa_event(&self, event: &dyn Fn() -> salsa::Event) {
let event = event();
tracing::trace!("event: {event:?}");
let mut events = self.events.lock().unwrap();
events.push(event);
}
}
}

View File

@@ -1,106 +0,0 @@
use ruff_python_ast::visitor::source_order::{SourceOrderVisitor, TraversalSignal};
use ruff_python_ast::AnyNodeRef;
use ruff_text_size::{Ranged, TextRange};
use std::fmt;
use std::fmt::Formatter;
/// Returns the node with a minimal range that fully contains `range`.
///
/// If `range` is empty and falls within a parser *synthesized* node generated during error recovery,
/// then the first node with the given range is returned.
///
/// ## Panics
/// Panics if `range` is not contained within `root`.
pub(crate) fn covering_node(root: AnyNodeRef, range: TextRange) -> CoveringNode {
struct Visitor<'a> {
range: TextRange,
found: bool,
ancestors: Vec<AnyNodeRef<'a>>,
}
impl<'a> SourceOrderVisitor<'a> for Visitor<'a> {
fn enter_node(&mut self, node: AnyNodeRef<'a>) -> TraversalSignal {
// If the node fully contains the range, then it is a possible match, but traverse into its children
// to see if there's a node with a narrower range.
if !self.found && node.range().contains_range(self.range) {
self.ancestors.push(node);
TraversalSignal::Traverse
} else {
TraversalSignal::Skip
}
}
fn leave_node(&mut self, node: AnyNodeRef<'a>) {
if !self.found && self.ancestors.last() == Some(&node) {
self.found = true;
}
}
}
assert!(
root.range().contains_range(range),
"Range is not contained within root"
);
let mut visitor = Visitor {
range,
found: false,
ancestors: Vec::new(),
};
root.visit_source_order(&mut visitor);
let minimal = visitor.ancestors.pop().unwrap_or(root);
CoveringNode {
node: minimal,
ancestors: visitor.ancestors,
}
}
/// The node with a minimal range that fully contains the search range.
pub(crate) struct CoveringNode<'a> {
/// The node with a minimal range that fully contains the search range.
node: AnyNodeRef<'a>,
/// The node's ancestors (the spine up to the root).
ancestors: Vec<AnyNodeRef<'a>>,
}
impl<'a> CoveringNode<'a> {
pub(crate) fn node(&self) -> AnyNodeRef<'a> {
self.node
}
/// Returns the node's parent.
pub(crate) fn parent(&self) -> Option<AnyNodeRef<'a>> {
self.ancestors.last().copied()
}
/// Finds the minimal node that fully covers the range and fulfills the given predicate.
pub(crate) fn find(mut self, f: impl Fn(AnyNodeRef<'a>) -> bool) -> Result<Self, Self> {
if f(self.node) {
return Ok(self);
}
match self.ancestors.iter().rposition(|node| f(*node)) {
Some(index) => {
let node = self.ancestors[index];
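// Drop the matched node and everything below it so the vector again contains only its ancestors.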
self.ancestors.truncate(index);
Ok(Self {
node,
ancestors: self.ancestors,
})
}
None => Err(self),
}
}
}
impl fmt::Debug for CoveringNode<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_tuple("NodeWithAncestors")
.field(&self.node)
.finish()
}
}

View File

@@ -1,851 +0,0 @@
use crate::find_node::covering_node;
use crate::{Db, HasNavigationTargets, NavigationTargets, RangedValue};
use red_knot_python_semantic::types::Type;
use red_knot_python_semantic::{HasType, SemanticModel};
use ruff_db::files::{File, FileRange};
use ruff_db::parsed::{parsed_module, ParsedModule};
use ruff_python_ast::{self as ast, AnyNodeRef};
use ruff_python_parser::TokenKind;
use ruff_text_size::{Ranged, TextRange, TextSize};
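/// Resolves a "go to type definition" request: finds the goto target at `offset`
/// and returns the navigation targets for its inferred type.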
pub fn goto_type_definition(
db: &dyn Db,
file: File,
offset: TextSize,
) -> Option<RangedValue<NavigationTargets>> {
let parsed = parsed_module(db.upcast(), file);
let goto_target = find_goto_target(parsed, offset)?;
let model = SemanticModel::new(db.upcast(), file);
let ty = goto_target.inferred_type(&model)?;
tracing::debug!(
"Inferred type of covering node is {}",
ty.display(db.upcast())
);
let navigation_targets = ty.navigation_targets(db);
Some(RangedValue {
range: FileRange::new(file, goto_target.range()),
value: navigation_targets,
})
}
#[derive(Clone, Copy, Debug)]
pub(crate) enum GotoTarget<'a> {
Expression(ast::ExprRef<'a>),
FunctionDef(&'a ast::StmtFunctionDef),
ClassDef(&'a ast::StmtClassDef),
Parameter(&'a ast::Parameter),
Alias(&'a ast::Alias),
/// Go to on the module name of an import from
/// ```py
/// from foo import bar
/// ^^^
/// ```
ImportedModule(&'a ast::StmtImportFrom),
/// Go to on the exception handler variable
/// ```py
/// try: ...
/// except Exception as e: ...
/// ^
/// ```
ExceptVariable(&'a ast::ExceptHandlerExceptHandler),
/// Go to on a keyword argument
/// ```py
/// test(a = 1)
/// ^
/// ```
KeywordArgument(&'a ast::Keyword),
/// Go to on the rest parameter of a pattern match
///
/// ```py
/// match x:
/// case {"a": a, "b": b, **rest}: ...
/// ^^^^
/// ```
PatternMatchRest(&'a ast::PatternMatchMapping),
/// Go to on a keyword argument of a class pattern
///
/// ```py
/// match Point3D(0, 0, 0):
/// case Point3D(x=0, y=0, z=0): ...
/// ^ ^ ^
/// ```
PatternKeywordArgument(&'a ast::PatternKeyword),
/// Go to on a pattern star argument
///
/// ```py
/// match array:
/// case [*args]: ...
/// ^^^^
/// ```
PatternMatchStarName(&'a ast::PatternMatchStar),
/// Go to on the name of a pattern match as pattern
///
/// ```py
/// match x:
/// case [x] as y: ...
/// ^
/// ```
PatternMatchAsName(&'a ast::PatternMatchAs),
/// Go to on the name of a type variable
///
/// ```py
/// type Alias[T: int = bool] = list[T]
/// ^
/// ```
TypeParamTypeVarName(&'a ast::TypeParamTypeVar),
/// Go to on the name of a type param spec
///
/// ```py
/// type Alias[**P = [int, str]] = Callable[P, int]
/// ^
/// ```
TypeParamParamSpecName(&'a ast::TypeParamParamSpec),
/// Go to on the name of a type var tuple
///
/// ```py
/// type Alias[*Ts = ()] = tuple[*Ts]
/// ^^
/// ```
TypeParamTypeVarTupleName(&'a ast::TypeParamTypeVarTuple),
NonLocal {
identifier: &'a ast::Identifier,
},
Globals {
identifier: &'a ast::Identifier,
},
}
impl<'db> GotoTarget<'db> {
pub(crate) fn inferred_type(self, model: &SemanticModel<'db>) -> Option<Type<'db>> {
let ty = match self {
GotoTarget::Expression(expression) => expression.inferred_type(model),
GotoTarget::FunctionDef(function) => function.inferred_type(model),
GotoTarget::ClassDef(class) => class.inferred_type(model),
GotoTarget::Parameter(parameter) => parameter.inferred_type(model),
GotoTarget::Alias(alias) => alias.inferred_type(model),
GotoTarget::ExceptVariable(except) => except.inferred_type(model),
GotoTarget::KeywordArgument(argument) => {
// TODO: Pyright resolves the declared type of the matching parameter. This seems more accurate
// than using the inferred value.
argument.value.inferred_type(model)
}
// TODO: Support identifier targets
GotoTarget::PatternMatchRest(_)
| GotoTarget::PatternKeywordArgument(_)
| GotoTarget::PatternMatchStarName(_)
| GotoTarget::PatternMatchAsName(_)
| GotoTarget::ImportedModule(_)
| GotoTarget::TypeParamTypeVarName(_)
| GotoTarget::TypeParamParamSpecName(_)
| GotoTarget::TypeParamTypeVarTupleName(_)
| GotoTarget::NonLocal { .. }
| GotoTarget::Globals { .. } => return None,
};
Some(ty)
}
}
impl Ranged for GotoTarget<'_> {
fn range(&self) -> TextRange {
match self {
GotoTarget::Expression(expression) => expression.range(),
GotoTarget::FunctionDef(function) => function.name.range,
GotoTarget::ClassDef(class) => class.name.range,
GotoTarget::Parameter(parameter) => parameter.name.range,
GotoTarget::Alias(alias) => alias.name.range,
GotoTarget::ImportedModule(module) => module.module.as_ref().unwrap().range,
GotoTarget::ExceptVariable(except) => except.name.as_ref().unwrap().range,
GotoTarget::KeywordArgument(keyword) => keyword.arg.as_ref().unwrap().range,
GotoTarget::PatternMatchRest(rest) => rest.rest.as_ref().unwrap().range,
GotoTarget::PatternKeywordArgument(keyword) => keyword.attr.range,
GotoTarget::PatternMatchStarName(star) => star.name.as_ref().unwrap().range,
GotoTarget::PatternMatchAsName(as_name) => as_name.name.as_ref().unwrap().range,
GotoTarget::TypeParamTypeVarName(type_var) => type_var.name.range,
GotoTarget::TypeParamParamSpecName(spec) => spec.name.range,
GotoTarget::TypeParamTypeVarTupleName(tuple) => tuple.name.range,
GotoTarget::NonLocal { identifier, .. } => identifier.range,
GotoTarget::Globals { identifier, .. } => identifier.range,
}
}
}
pub(crate) fn find_goto_target(parsed: &ParsedModule, offset: TextSize) -> Option<GotoTarget> {
let token = parsed
.tokens()
.at_offset(offset)
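// `at_offset` can return more than one token when the offset falls on a token boundary;
// prefer names and literals over punctuation as goto targets.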
.max_by_key(|token| match token.kind() {
TokenKind::Name
| TokenKind::String
| TokenKind::Complex
| TokenKind::Float
| TokenKind::Int => 1,
_ => 0,
})?;
let covering_node = covering_node(parsed.syntax().into(), token.range())
.find(|node| node.is_identifier() || node.is_expression())
.ok()?;
tracing::trace!("Covering node is of kind {:?}", covering_node.node().kind());
match covering_node.node() {
AnyNodeRef::Identifier(identifier) => match covering_node.parent() {
Some(AnyNodeRef::StmtFunctionDef(function)) => Some(GotoTarget::FunctionDef(function)),
Some(AnyNodeRef::StmtClassDef(class)) => Some(GotoTarget::ClassDef(class)),
Some(AnyNodeRef::Parameter(parameter)) => Some(GotoTarget::Parameter(parameter)),
Some(AnyNodeRef::Alias(alias)) => Some(GotoTarget::Alias(alias)),
Some(AnyNodeRef::StmtImportFrom(from)) => Some(GotoTarget::ImportedModule(from)),
Some(AnyNodeRef::ExceptHandlerExceptHandler(handler)) => {
Some(GotoTarget::ExceptVariable(handler))
}
Some(AnyNodeRef::Keyword(keyword)) => Some(GotoTarget::KeywordArgument(keyword)),
Some(AnyNodeRef::PatternMatchMapping(mapping)) => {
Some(GotoTarget::PatternMatchRest(mapping))
}
Some(AnyNodeRef::PatternKeyword(keyword)) => {
Some(GotoTarget::PatternKeywordArgument(keyword))
}
Some(AnyNodeRef::PatternMatchStar(star)) => {
Some(GotoTarget::PatternMatchStarName(star))
}
Some(AnyNodeRef::PatternMatchAs(as_pattern)) => {
Some(GotoTarget::PatternMatchAsName(as_pattern))
}
Some(AnyNodeRef::TypeParamTypeVar(var)) => Some(GotoTarget::TypeParamTypeVarName(var)),
Some(AnyNodeRef::TypeParamParamSpec(bound)) => {
Some(GotoTarget::TypeParamParamSpecName(bound))
}
Some(AnyNodeRef::TypeParamTypeVarTuple(var_tuple)) => {
Some(GotoTarget::TypeParamTypeVarTupleName(var_tuple))
}
Some(AnyNodeRef::ExprAttribute(attribute)) => {
Some(GotoTarget::Expression(attribute.into()))
}
Some(AnyNodeRef::StmtNonlocal(_)) => Some(GotoTarget::NonLocal { identifier }),
Some(AnyNodeRef::StmtGlobal(_)) => Some(GotoTarget::Globals { identifier }),
None => None,
Some(parent) => {
tracing::debug!(
"Missing `GoToTarget` for identifier with parent {:?}",
parent.kind()
);
None
}
},
node => node.as_expr_ref().map(GotoTarget::Expression),
}
}
#[cfg(test)]
mod tests {
use crate::tests::{cursor_test, CursorTest, IntoDiagnostic};
use crate::{goto_type_definition, NavigationTarget};
use insta::assert_snapshot;
use ruff_db::diagnostic::{
Annotation, Diagnostic, DiagnosticId, LintName, Severity, Span, SubDiagnostic,
};
use ruff_db::files::FileRange;
use ruff_text_size::Ranged;
#[test]
fn goto_type_of_expression_with_class_type() {
let test = cursor_test(
r#"
class Test: ...
a<CURSOR>b = Test()
"#,
);
assert_snapshot!(test.goto_type_definition(), @r###"
info: lint:goto-type-definition: Type definition
--> /main.py:2:19
|
2 | class Test: ...
| ^^^^
3 |
4 | ab = Test()
|
info: Source
--> /main.py:4:13
|
2 | class Test: ...
3 |
4 | ab = Test()
| ^^
|
"###);
}
#[test]
fn goto_type_of_expression_with_function_type() {
let test = cursor_test(
r#"
def foo(a, b): ...
ab = foo
a<CURSOR>b
"#,
);
assert_snapshot!(test.goto_type_definition(), @r###"
info: lint:goto-type-definition: Type definition
--> /main.py:2:17
|
2 | def foo(a, b): ...
| ^^^
3 |
4 | ab = foo
|
info: Source
--> /main.py:6:13
|
4 | ab = foo
5 |
6 | ab
| ^^
|
"###);
}
#[test]
fn goto_type_of_expression_with_union_type() {
let test = cursor_test(
r#"
def foo(a, b): ...
def bar(a, b): ...
if random.choice():
a = foo
else:
a = bar
a<CURSOR>
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
info: lint:goto-type-definition: Type definition
--> /main.py:3:17
|
3 | def foo(a, b): ...
| ^^^
4 |
5 | def bar(a, b): ...
|
info: Source
--> /main.py:12:13
|
10 | a = bar
11 |
12 | a
| ^
|
info: lint:goto-type-definition: Type definition
--> /main.py:5:17
|
3 | def foo(a, b): ...
4 |
5 | def bar(a, b): ...
| ^^^
6 |
7 | if random.choice():
|
info: Source
--> /main.py:12:13
|
10 | a = bar
11 |
12 | a
| ^
|
");
}
#[test]
fn goto_type_of_expression_with_module() {
let mut test = cursor_test(
r#"
import lib
lib<CURSOR>
"#,
);
test.write_file("lib.py", "a = 10").unwrap();
assert_snapshot!(test.goto_type_definition(), @r"
info: lint:goto-type-definition: Type definition
--> /lib.py:1:1
|
1 | a = 10
| ^^^^^^
|
info: Source
--> /main.py:4:13
|
2 | import lib
3 |
4 | lib
| ^^^
|
");
}
#[test]
fn goto_type_of_expression_with_literal_type() {
let test = cursor_test(
r#"
a: str = "test"
a<CURSOR>
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info: lint:goto-type-definition: Type definition
--> stdlib/builtins.pyi:438:7
|
436 | def __getitem__(self, key: int, /) -> str | int | None: ...
437 |
438 | class str(Sequence[str]):
| ^^^
439 | @overload
440 | def __new__(cls, object: object = ...) -> Self: ...
|
info: Source
--> /main.py:4:13
|
2 | a: str = "test"
3 |
4 | a
| ^
|
"#);
}
#[test]
fn goto_type_of_expression_with_literal_node() {
let test = cursor_test(
r#"
a: str = "te<CURSOR>st"
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info: lint:goto-type-definition: Type definition
--> stdlib/builtins.pyi:438:7
|
436 | def __getitem__(self, key: int, /) -> str | int | None: ...
437 |
438 | class str(Sequence[str]):
| ^^^
439 | @overload
440 | def __new__(cls, object: object = ...) -> Self: ...
|
info: Source
--> /main.py:2:22
|
2 | a: str = "test"
| ^^^^^^
|
"#);
}
#[test]
fn goto_type_of_expression_with_type_var_type() {
let test = cursor_test(
r#"
type Alias[T: int = bool] = list[T<CURSOR>]
"#,
);
assert_snapshot!(test.goto_type_definition(), @r###"
info: lint:goto-type-definition: Type definition
--> /main.py:2:24
|
2 | type Alias[T: int = bool] = list[T]
| ^
|
info: Source
--> /main.py:2:46
|
2 | type Alias[T: int = bool] = list[T]
| ^
|
"###);
}
#[test]
fn goto_type_of_expression_with_type_param_spec() {
let test = cursor_test(
r#"
type Alias[**P = [int, str]] = Callable[P<CURSOR>, int]
"#,
);
// TODO: Goto type definition currently doesn't work for type param specs
// because the inference doesn't support them yet.
// This snapshot should show a single target pointing to `P`
assert_snapshot!(test.goto_type_definition(), @"No type definitions found");
}
#[test]
fn goto_type_of_expression_with_type_var_tuple() {
let test = cursor_test(
r#"
type Alias[*Ts = ()] = tuple[*Ts<CURSOR>]
"#,
);
// TODO: Goto type definition currently doesn't work for type var tuples
// because the inference doesn't support them yet.
// This snapshot should show a single target pointing to `Ts`
assert_snapshot!(test.goto_type_definition(), @"No type definitions found");
}
#[test]
fn goto_type_on_keyword_argument() {
let test = cursor_test(
r#"
def test(a: str): ...
test(a<CURSOR>= "123")
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info: lint:goto-type-definition: Type definition
--> stdlib/builtins.pyi:438:7
|
436 | def __getitem__(self, key: int, /) -> str | int | None: ...
437 |
438 | class str(Sequence[str]):
| ^^^
439 | @overload
440 | def __new__(cls, object: object = ...) -> Self: ...
|
info: Source
--> /main.py:4:18
|
2 | def test(a: str): ...
3 |
4 | test(a= "123")
| ^
|
"#);
}
#[test]
fn goto_type_on_incorrectly_typed_keyword_argument() {
let test = cursor_test(
r#"
def test(a: str): ...
test(a<CURSOR>= 123)
"#,
);
// TODO: This should jump to `str` and not `int` because
// the keyword is typed as a string. It's only the passed argument that
// is an int. Navigating to `str` would match pyright's behavior.
assert_snapshot!(test.goto_type_definition(), @r"
info: lint:goto-type-definition: Type definition
--> stdlib/builtins.pyi:231:7
|
229 | _LiteralInteger = _PositiveInteger | _NegativeInteger | Literal[0] # noqa: Y026 # TODO: Use TypeAlias once mypy bugs are fixed
230 |
231 | class int:
| ^^^
232 | @overload
233 | def __new__(cls, x: ConvertibleToInt = ..., /) -> Self: ...
|
info: Source
--> /main.py:4:18
|
2 | def test(a: str): ...
3 |
4 | test(a= 123)
| ^
|
");
}
#[test]
fn goto_type_on_kwargs() {
let test = cursor_test(
r#"
def f(name: str): ...
kwargs = { "name": "test"}
f(**kwargs<CURSOR>)
"#,
);
assert_snapshot!(test.goto_type_definition(), @r#"
info: lint:goto-type-definition: Type definition
--> stdlib/builtins.pyi:1086:7
|
1084 | def __class_getitem__(cls, item: Any, /) -> GenericAlias: ...
1085 |
1086 | class dict(MutableMapping[_KT, _VT]):
| ^^^^
1087 | # __init__ should be kept roughly in line with `collections.UserDict.__init__`, which has similar semantics
1088 | # Also multiprocessing.managers.SyncManager.dict()
|
info: Source
--> /main.py:6:5
|
4 | kwargs = { "name": "test"}
5 |
6 | f(**kwargs)
| ^^^^^^
|
"#);
}
#[test]
fn goto_type_of_expression_with_builtin() {
let test = cursor_test(
r#"
def foo(a: str):
a<CURSOR>
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
info: lint:goto-type-definition: Type definition
--> stdlib/builtins.pyi:438:7
|
436 | def __getitem__(self, key: int, /) -> str | int | None: ...
437 |
438 | class str(Sequence[str]):
| ^^^
439 | @overload
440 | def __new__(cls, object: object = ...) -> Self: ...
|
info: Source
--> /main.py:3:17
|
2 | def foo(a: str):
3 | a
| ^
|
");
}
#[test]
fn goto_type_definition_cursor_between_object_and_attribute() {
let test = cursor_test(
r#"
class X:
def foo(a, b): ...
x = X()
x<CURSOR>.foo()
"#,
);
assert_snapshot!(test.goto_type_definition(), @r###"
info: lint:goto-type-definition: Type definition
--> /main.py:2:19
|
2 | class X:
| ^
3 | def foo(a, b): ...
|
info: Source
--> /main.py:7:13
|
5 | x = X()
6 |
7 | x.foo()
| ^
|
"###);
}
#[test]
fn goto_between_call_arguments() {
let test = cursor_test(
r#"
def foo(a, b): ...
foo<CURSOR>()
"#,
);
assert_snapshot!(test.goto_type_definition(), @r###"
info: lint:goto-type-definition: Type definition
--> /main.py:2:17
|
2 | def foo(a, b): ...
| ^^^
3 |
4 | foo()
|
info: Source
--> /main.py:4:13
|
2 | def foo(a, b): ...
3 |
4 | foo()
| ^^^
|
"###);
}
#[test]
fn goto_type_narrowing() {
let test = cursor_test(
r#"
def foo(a: str | None, b):
if a is not None:
print(a<CURSOR>)
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
info: lint:goto-type-definition: Type definition
--> stdlib/builtins.pyi:438:7
|
436 | def __getitem__(self, key: int, /) -> str | int | None: ...
437 |
438 | class str(Sequence[str]):
| ^^^
439 | @overload
440 | def __new__(cls, object: object = ...) -> Self: ...
|
info: Source
--> /main.py:4:27
|
2 | def foo(a: str | None, b):
3 | if a is not None:
4 | print(a)
| ^
|
");
}
#[test]
fn goto_type_none() {
let test = cursor_test(
r#"
def foo(a: str | None, b):
a<CURSOR>
"#,
);
assert_snapshot!(test.goto_type_definition(), @r"
info: lint:goto-type-definition: Type definition
--> stdlib/types.pyi:671:11
|
669 | if sys.version_info >= (3, 10):
670 | @final
671 | class NoneType:
| ^^^^^^^^
672 | def __bool__(self) -> Literal[False]: ...
|
info: Source
--> /main.py:3:17
|
2 | def foo(a: str | None, b):
3 | a
| ^
|
info: lint:goto-type-definition: Type definition
--> stdlib/builtins.pyi:438:7
|
436 | def __getitem__(self, key: int, /) -> str | int | None: ...
437 |
438 | class str(Sequence[str]):
| ^^^
439 | @overload
440 | def __new__(cls, object: object = ...) -> Self: ...
|
info: Source
--> /main.py:3:17
|
2 | def foo(a: str | None, b):
3 | a
| ^
|
");
}
impl CursorTest {
fn goto_type_definition(&self) -> String {
let Some(targets) = goto_type_definition(&self.db, self.file, self.cursor_offset)
else {
return "No goto target found".to_string();
};
if targets.is_empty() {
return "No type definitions found".to_string();
}
let source = targets.range;
self.render_diagnostics(
targets
.into_iter()
.map(|target| GotoTypeDefinitionDiagnostic::new(source, &target)),
)
}
}
struct GotoTypeDefinitionDiagnostic {
source: FileRange,
target: FileRange,
}
impl GotoTypeDefinitionDiagnostic {
fn new(source: FileRange, target: &NavigationTarget) -> Self {
Self {
source,
target: FileRange::new(target.file(), target.focus_range()),
}
}
}
impl IntoDiagnostic for GotoTypeDefinitionDiagnostic {
fn into_diagnostic(self) -> Diagnostic {
let mut source = SubDiagnostic::new(Severity::Info, "Source");
source.annotate(Annotation::primary(
Span::from(self.source.file()).with_range(self.source.range()),
));
let mut main = Diagnostic::new(
DiagnosticId::Lint(LintName::of("goto-type-definition")),
Severity::Info,
"Type definition".to_string(),
);
main.annotate(Annotation::primary(
Span::from(self.target.file()).with_range(self.target.range()),
));
main.sub(source);
main
}
}
}

View File

@@ -1,602 +0,0 @@
use crate::goto::{find_goto_target, GotoTarget};
use crate::{Db, MarkupKind, RangedValue};
use red_knot_python_semantic::types::Type;
use red_knot_python_semantic::SemanticModel;
use ruff_db::files::{File, FileRange};
use ruff_db::parsed::parsed_module;
use ruff_text_size::{Ranged, TextSize};
use std::fmt;
use std::fmt::Formatter;
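/// Returns the hover content for the symbol at `offset`, if any.
///
/// Literal expressions produce no hover; for everything else the inferred type
/// of the goto target is rendered.
///
/// A minimal usage sketch (assumes a `Db` implementation, an open `File`, and an
/// in-bounds `offset`):
///
/// ```ignore
/// if let Some(hover) = hover(db, file, offset) {
///     println!("{}", hover.display(db, MarkupKind::Markdown));
/// }
/// ```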
pub fn hover(db: &dyn Db, file: File, offset: TextSize) -> Option<RangedValue<Hover>> {
let parsed = parsed_module(db.upcast(), file);
let goto_target = find_goto_target(parsed, offset)?;
if let GotoTarget::Expression(expr) = goto_target {
if expr.is_literal_expr() {
return None;
}
}
let model = SemanticModel::new(db.upcast(), file);
let ty = goto_target.inferred_type(&model)?;
tracing::debug!(
"Inferred type of covering node is {}",
ty.display(db.upcast())
);
// TODO: Add documentation of the symbol (not the type's definition).
// TODO: Render the symbol's signature instead of just its type.
let contents = vec![HoverContent::Type(ty)];
Some(RangedValue {
range: FileRange::new(file, goto_target.range()),
value: Hover { contents },
})
}
pub struct Hover<'db> {
contents: Vec<HoverContent<'db>>,
}
impl<'db> Hover<'db> {
/// Renders the hover to a string using the specified markup kind.
pub const fn display<'a>(&'a self, db: &'a dyn Db, kind: MarkupKind) -> DisplayHover<'a> {
DisplayHover {
db,
hover: self,
kind,
}
}
fn iter(&self) -> std::slice::Iter<'_, HoverContent<'db>> {
self.contents.iter()
}
}
impl<'db> IntoIterator for Hover<'db> {
type Item = HoverContent<'db>;
type IntoIter = std::vec::IntoIter<Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.contents.into_iter()
}
}
impl<'a, 'db> IntoIterator for &'a Hover<'db> {
type Item = &'a HoverContent<'db>;
type IntoIter = std::slice::Iter<'a, HoverContent<'db>>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
pub struct DisplayHover<'a> {
db: &'a dyn Db,
hover: &'a Hover<'a>,
kind: MarkupKind,
}
impl fmt::Display for DisplayHover<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
let mut first = true;
for content in &self.hover.contents {
if !first {
self.kind.horizontal_line().fmt(f)?;
}
content.display(self.db, self.kind).fmt(f)?;
first = false;
}
Ok(())
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum HoverContent<'db> {
Type(Type<'db>),
}
impl<'db> HoverContent<'db> {
fn display(&self, db: &'db dyn Db, kind: MarkupKind) -> DisplayHoverContent<'_, 'db> {
DisplayHoverContent {
db,
content: self,
kind,
}
}
}
pub(crate) struct DisplayHoverContent<'a, 'db> {
db: &'db dyn Db,
content: &'a HoverContent<'db>,
kind: MarkupKind,
}
impl fmt::Display for DisplayHoverContent<'_, '_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.content {
HoverContent::Type(ty) => self
.kind
.fenced_code_block(ty.display(self.db.upcast()), "text")
.fmt(f),
}
}
}
#[cfg(test)]
mod tests {
use crate::tests::{cursor_test, CursorTest};
use crate::{hover, MarkupKind};
use insta::assert_snapshot;
use ruff_db::diagnostic::{
Annotation, Diagnostic, DiagnosticFormat, DiagnosticId, DisplayDiagnosticConfig, LintName,
Severity, Span,
};
use ruff_text_size::{Ranged, TextRange};
#[test]
fn hover_basic() {
let test = cursor_test(
r#"
a = 10
a<CURSOR>
"#,
);
assert_snapshot!(test.hover(), @r"
Literal[10]
---------------------------------------------
```text
Literal[10]
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:4:9
|
2 | a = 10
3 |
4 | a
| ^- Cursor offset
| |
| source
|
");
}
#[test]
fn hover_member() {
let test = cursor_test(
r#"
class Foo:
a: int = 10
def __init__(a: int, b: str):
self.a = a
self.b: str = b
foo = Foo()
foo.<CURSOR>a
"#,
);
assert_snapshot!(test.hover(), @r"
int
---------------------------------------------
```text
int
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:10:9
|
9 | foo = Foo()
10 | foo.a
| ^^^^-
| | |
| | Cursor offset
| source
|
");
}
#[test]
fn hover_function_typed_variable() {
let test = cursor_test(
r#"
def foo(a, b): ...
foo<CURSOR>
"#,
);
assert_snapshot!(test.hover(), @r###"
def foo(a, b) -> Unknown
---------------------------------------------
```text
def foo(a, b) -> Unknown
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:4:13
|
2 | def foo(a, b): ...
3 |
4 | foo
| ^^^- Cursor offset
| |
| source
|
"###);
}
#[test]
fn hover_binary_expression() {
let test = cursor_test(
r#"
def foo(a: int, b: int, c: int):
a + b ==<CURSOR> c
"#,
);
assert_snapshot!(test.hover(), @r"
bool
---------------------------------------------
```text
bool
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:3:17
|
2 | def foo(a: int, b: int, c: int):
3 | a + b == c
| ^^^^^^^^-^
| | |
| | Cursor offset
| source
|
");
}
#[test]
fn hover_keyword_parameter() {
let test = cursor_test(
r#"
def test(a: int): ...
test(a<CURSOR>= 123)
"#,
);
// TODO: This should reveal `int` because the user hovers over the parameter and not the value.
assert_snapshot!(test.hover(), @r"
Literal[123]
---------------------------------------------
```text
Literal[123]
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:4:18
|
2 | def test(a: int): ...
3 |
4 | test(a= 123)
| ^- Cursor offset
| |
| source
|
");
}
#[test]
fn hover_union() {
let test = cursor_test(
r#"
def foo(a, b): ...
def bar(a, b): ...
if random.choice([True, False]):
a = foo
else:
a = bar
a<CURSOR>
"#,
);
assert_snapshot!(test.hover(), @r###"
(def foo(a, b) -> Unknown) | (def bar(a, b) -> Unknown)
---------------------------------------------
```text
(def foo(a, b) -> Unknown) | (def bar(a, b) -> Unknown)
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:12:13
|
10 | a = bar
11 |
12 | a
| ^- Cursor offset
| |
| source
|
"###);
}
#[test]
fn hover_module() {
let mut test = cursor_test(
r#"
import lib
li<CURSOR>b
"#,
);
test.write_file("lib.py", "a = 10").unwrap();
assert_snapshot!(test.hover(), @r"
<module 'lib'>
---------------------------------------------
```text
<module 'lib'>
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:4:13
|
2 | import lib
3 |
4 | lib
| ^^-
| | |
| | Cursor offset
| source
|
");
}
#[test]
fn hover_type_of_expression_with_type_var_type() {
let test = cursor_test(
r#"
type Alias[T: int = bool] = list[T<CURSOR>]
"#,
);
assert_snapshot!(test.hover(), @r"
T
---------------------------------------------
```text
T
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:2:46
|
2 | type Alias[T: int = bool] = list[T]
| ^- Cursor offset
| |
| source
|
");
}
#[test]
fn hover_type_of_expression_with_type_param_spec() {
let test = cursor_test(
r#"
type Alias[**P = [int, str]] = Callable[P<CURSOR>, int]
"#,
);
assert_snapshot!(test.hover(), @r"
@Todo
---------------------------------------------
```text
@Todo
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:2:53
|
2 | type Alias[**P = [int, str]] = Callable[P, int]
| ^- Cursor offset
| |
| source
|
");
}
#[test]
fn hover_type_of_expression_with_type_var_tuple() {
let test = cursor_test(
r#"
type Alias[*Ts = ()] = tuple[*Ts<CURSOR>]
"#,
);
assert_snapshot!(test.hover(), @r"
@Todo
---------------------------------------------
```text
@Todo
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:2:43
|
2 | type Alias[*Ts = ()] = tuple[*Ts]
| ^^- Cursor offset
| |
| source
|
");
}
#[test]
fn hover_class_member_declaration() {
let test = cursor_test(
r#"
class Foo:
a<CURSOR>: int
"#,
);
// TODO: This should be int and not `Never`, https://github.com/astral-sh/ruff/issues/17122
assert_snapshot!(test.hover(), @r"
Never
---------------------------------------------
```text
Never
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:3:13
|
2 | class Foo:
3 | a: int
| ^- Cursor offset
| |
| source
|
");
}
#[test]
fn hover_type_narrowing() {
let test = cursor_test(
r#"
def foo(a: str | None, b):
if a is not None:
print(a<CURSOR>)
"#,
);
assert_snapshot!(test.hover(), @r"
str
---------------------------------------------
```text
str
```
---------------------------------------------
info: lint:hover: Hovered content is
--> /main.py:4:27
|
2 | def foo(a: str | None, b):
3 | if a is not None:
4 | print(a)
| ^- Cursor offset
| |
| source
|
");
}
#[test]
fn hover_whitespace() {
let test = cursor_test(
r#"
class C:
<CURSOR>
foo: str = 'bar'
"#,
);
assert_snapshot!(test.hover(), @"Hover provided no content");
}
#[test]
fn hover_literal_int() {
let test = cursor_test(
r#"
print(
0 + 1<CURSOR>
)
"#,
);
assert_snapshot!(test.hover(), @"Hover provided no content");
}
#[test]
fn hover_literal_ellipsis() {
let test = cursor_test(
r#"
print(
.<CURSOR>..
)
"#,
);
assert_snapshot!(test.hover(), @"Hover provided no content");
}
#[test]
fn hover_docstring() {
let test = cursor_test(
r#"
def f():
"""Lorem ipsum dolor sit amet.<CURSOR>"""
"#,
);
assert_snapshot!(test.hover(), @"Hover provided no content");
}
impl CursorTest {
fn hover(&self) -> String {
use std::fmt::Write;
let Some(hover) = hover(&self.db, self.file, self.cursor_offset) else {
return "Hover provided no content".to_string();
};
let source = hover.range;
let mut buf = String::new();
write!(
&mut buf,
"{plaintext}{line}{markdown}{line}",
plaintext = hover.display(&self.db, MarkupKind::PlainText),
line = MarkupKind::PlainText.horizontal_line(),
markdown = hover.display(&self.db, MarkupKind::Markdown),
)
.unwrap();
let config = DisplayDiagnosticConfig::default()
.color(false)
.format(DiagnosticFormat::Full);
let mut diagnostic = Diagnostic::new(
DiagnosticId::Lint(LintName::of("hover")),
Severity::Info,
"Hovered content is",
);
diagnostic.annotate(
Annotation::primary(Span::from(source.file()).with_range(source.range()))
.message("source"),
);
diagnostic.annotate(
Annotation::secondary(
Span::from(source.file()).with_range(TextRange::empty(self.cursor_offset)),
)
.message("Cursor offset"),
);
write!(buf, "{}", diagnostic.display(&self.db, &config)).unwrap();
buf
}
}
}

View File

@@ -1,279 +0,0 @@
use crate::Db;
use red_knot_python_semantic::types::Type;
use red_knot_python_semantic::{HasType, SemanticModel};
use ruff_db::files::File;
use ruff_db::parsed::parsed_module;
use ruff_python_ast::visitor::source_order::{self, SourceOrderVisitor, TraversalSignal};
use ruff_python_ast::{AnyNodeRef, Expr, Stmt};
use ruff_text_size::{Ranged, TextRange, TextSize};
use std::fmt;
use std::fmt::Formatter;
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct InlayHint<'db> {
pub position: TextSize,
pub content: InlayHintContent<'db>,
}
impl<'db> InlayHint<'db> {
pub const fn display(&self, db: &'db dyn Db) -> DisplayInlayHint<'_, 'db> {
self.content.display(db)
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum InlayHintContent<'db> {
Type(Type<'db>),
ReturnType(Type<'db>),
}
impl<'db> InlayHintContent<'db> {
pub const fn display(&self, db: &'db dyn Db) -> DisplayInlayHint<'_, 'db> {
DisplayInlayHint { db, hint: self }
}
}
pub struct DisplayInlayHint<'a, 'db> {
db: &'db dyn Db,
hint: &'a InlayHintContent<'db>,
}
impl fmt::Display for DisplayInlayHint<'_, '_> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self.hint {
InlayHintContent::Type(ty) => {
write!(f, ": {}", ty.display(self.db.upcast()))
}
InlayHintContent::ReturnType(ty) => {
write!(f, " -> {}", ty.display(self.db.upcast()))
}
}
}
}
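/// Collects inlay hints for all assignment targets that intersect `range`.
///
/// A minimal usage sketch (assumes a `Db` implementation and an open `File`; the
/// range bounds are illustrative):
///
/// ```ignore
/// for hint in inlay_hints(db, file, TextRange::new(0.into(), 100.into())) {
///     println!("{:?}: {}", hint.position, hint.display(db));
/// }
/// ```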
pub fn inlay_hints(db: &dyn Db, file: File, range: TextRange) -> Vec<InlayHint<'_>> {
let mut visitor = InlayHintVisitor::new(db, file, range);
let ast = parsed_module(db.upcast(), file);
visitor.visit_body(ast.suite());
visitor.hints
}
struct InlayHintVisitor<'db> {
model: SemanticModel<'db>,
hints: Vec<InlayHint<'db>>,
in_assignment: bool,
range: TextRange,
}
impl<'db> InlayHintVisitor<'db> {
fn new(db: &'db dyn Db, file: File, range: TextRange) -> Self {
Self {
model: SemanticModel::new(db.upcast(), file),
hints: Vec::new(),
in_assignment: false,
range,
}
}
fn add_type_hint(&mut self, position: TextSize, ty: Type<'db>) {
self.hints.push(InlayHint {
position,
content: InlayHintContent::Type(ty),
});
}
}
impl SourceOrderVisitor<'_> for InlayHintVisitor<'_> {
fn enter_node(&mut self, node: AnyNodeRef<'_>) -> TraversalSignal {
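// Only traverse into subtrees that intersect the requested range.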
if self.range.intersect(node.range()).is_some() {
TraversalSignal::Traverse
} else {
TraversalSignal::Skip
}
}
fn visit_stmt(&mut self, stmt: &Stmt) {
let node = AnyNodeRef::from(stmt);
if !self.enter_node(node).is_traverse() {
return;
}
match stmt {
Stmt::Assign(assign) => {
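// Only assignment targets receive hints; the assigned value itself is
// intentionally not visited.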
self.in_assignment = true;
for target in &assign.targets {
self.visit_expr(target);
}
self.in_assignment = false;
return;
}
// TODO
Stmt::FunctionDef(_) => {}
Stmt::For(_) => {}
Stmt::Expr(_) => {
// Don't traverse into expression statements because we don't show any hints.
return;
}
_ => {}
}
source_order::walk_stmt(self, stmt);
}
fn visit_expr(&mut self, expr: &'_ Expr) {
if !self.in_assignment {
return;
}
match expr {
Expr::Name(name) => {
if name.ctx.is_store() {
let ty = expr.inferred_type(&self.model);
self.add_type_hint(expr.range().end(), ty);
}
}
_ => {
source_order::walk_expr(self, expr);
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use insta::assert_snapshot;
use ruff_db::{
files::{system_path_to_file, File},
source::source_text,
};
use ruff_text_size::TextSize;
use crate::db::tests::TestDb;
use red_knot_python_semantic::{
Program, ProgramSettings, PythonPath, PythonPlatform, SearchPathSettings,
};
use ruff_db::system::{DbWithWritableSystem, SystemPathBuf};
use ruff_python_ast::PythonVersion;
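/// Creates a test fixture from `source`.
///
/// Optional `<START>` and `<END>` markers delimit the range for which inlay
/// hints are requested; without them, the range covers the entire file.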
pub(super) fn inlay_hint_test(source: &str) -> InlayHintTest {
const START: &str = "<START>";
const END: &str = "<END>";
let mut db = TestDb::new();
let start = source.find(START);
let end = source
.find(END)
.map(|x| if start.is_some() { x - START.len() } else { x })
.unwrap_or(source.len());
let range = TextRange::new(
TextSize::try_from(start.unwrap_or_default()).unwrap(),
TextSize::try_from(end).unwrap(),
);
let source = source.replace(START, "");
let source = source.replace(END, "");
db.write_file("main.py", source)
.expect("write to memory file system to be successful");
let file = system_path_to_file(&db, "main.py").expect("newly written file to exist");
Program::from_settings(
&db,
ProgramSettings {
python_version: PythonVersion::latest(),
python_platform: PythonPlatform::default(),
search_paths: SearchPathSettings {
extra_paths: vec![],
src_roots: vec![SystemPathBuf::from("/")],
custom_typeshed: None,
python_path: PythonPath::KnownSitePackages(vec![]),
},
},
)
.expect("Default settings to be valid");
InlayHintTest { db, file, range }
}
pub(super) struct InlayHintTest {
pub(super) db: TestDb,
pub(super) file: File,
pub(super) range: TextRange,
}
impl InlayHintTest {
fn inlay_hints(&self) -> String {
let hints = inlay_hints(&self.db, self.file, self.range);
let mut buf = source_text(&self.db, self.file).as_str().to_string();
let mut offset = 0;
for hint in hints {
let end_position = (hint.position.to_u32() as usize) + offset;
let hint_str = format!("[{}]", hint.display(&self.db));
buf.insert_str(end_position, &hint_str);
offset += hint_str.len();
}
buf
}
}
#[test]
fn test_assign_statement() {
let test = inlay_hint_test("x = 1");
assert_snapshot!(test.inlay_hints(), @r"
x[: Literal[1]] = 1
");
}
#[test]
fn test_tuple_assignment() {
let test = inlay_hint_test("x, y = (1, 'abc')");
assert_snapshot!(test.inlay_hints(), @r#"
x[: Literal[1]], y[: Literal["abc"]] = (1, 'abc')
"#);
}
#[test]
fn test_nested_tuple_assignment() {
let test = inlay_hint_test("x, (y, z) = (1, ('abc', 2))");
assert_snapshot!(test.inlay_hints(), @r#"
x[: Literal[1]], (y[: Literal["abc"]], z[: Literal[2]]) = (1, ('abc', 2))
"#);
}
#[test]
fn test_assign_statement_with_type_annotation() {
let test = inlay_hint_test("x: int = 1");
assert_snapshot!(test.inlay_hints(), @r"
x: int = 1
");
}
#[test]
fn test_assign_statement_out_of_range() {
let test = inlay_hint_test("<START>x = 1<END>\ny = 2");
assert_snapshot!(test.inlay_hints(), @r"
x[: Literal[1]] = 1
y = 2
");
}
}

View File

@@ -1,296 +0,0 @@
mod db;
mod find_node;
mod goto;
mod hover;
mod inlay_hints;
mod markup;
pub use db::Db;
pub use goto::goto_type_definition;
pub use hover::hover;
pub use inlay_hints::inlay_hints;
pub use markup::MarkupKind;
use rustc_hash::FxHashSet;
use std::ops::{Deref, DerefMut};
use red_knot_python_semantic::types::{Type, TypeDefinition};
use ruff_db::files::{File, FileRange};
use ruff_text_size::{Ranged, TextRange};
/// Information associated with a text range.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct RangedValue<T> {
pub range: FileRange,
pub value: T,
}
impl<T> RangedValue<T> {
pub fn file_range(&self) -> FileRange {
self.range
}
}
impl<T> Deref for RangedValue<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.value
}
}
impl<T> DerefMut for RangedValue<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.value
}
}
impl<T> IntoIterator for RangedValue<T>
where
T: IntoIterator,
{
type Item = T::Item;
type IntoIter = T::IntoIter;
fn into_iter(self) -> Self::IntoIter {
self.value.into_iter()
}
}
/// Target to which the editor can navigate to.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct NavigationTarget {
file: File,
/// The range that should be focused when navigating to the target.
///
/// This is typically not the full range of the node. For example, it's the range of the class's name in a class definition.
///
/// The `focus_range` must be fully covered by `full_range`.
focus_range: TextRange,
/// The range covering the entire target.
full_range: TextRange,
}
impl NavigationTarget {
pub fn file(&self) -> File {
self.file
}
pub fn focus_range(&self) -> TextRange {
self.focus_range
}
pub fn full_range(&self) -> TextRange {
self.full_range
}
}
#[derive(Debug, Clone)]
pub struct NavigationTargets(smallvec::SmallVec<[NavigationTarget; 1]>);
impl NavigationTargets {
fn single(target: NavigationTarget) -> Self {
Self(smallvec::smallvec![target])
}
fn empty() -> Self {
Self(smallvec::SmallVec::new())
}
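/// Deduplicates `targets` and sorts them by file and focus-range start so that
/// the resulting order is deterministic.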
fn unique(targets: impl IntoIterator<Item = NavigationTarget>) -> Self {
let unique: FxHashSet<_> = targets.into_iter().collect();
if unique.is_empty() {
Self::empty()
} else {
let mut targets = unique.into_iter().collect::<Vec<_>>();
targets.sort_by_key(|target| (target.file, target.focus_range.start()));
Self(targets.into())
}
}
fn iter(&self) -> std::slice::Iter<'_, NavigationTarget> {
self.0.iter()
}
#[cfg(test)]
fn is_empty(&self) -> bool {
self.0.is_empty()
}
}
impl IntoIterator for NavigationTargets {
type Item = NavigationTarget;
type IntoIter = smallvec::IntoIter<[NavigationTarget; 1]>;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
impl<'a> IntoIterator for &'a NavigationTargets {
type Item = &'a NavigationTarget;
type IntoIter = std::slice::Iter<'a, NavigationTarget>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl FromIterator<NavigationTarget> for NavigationTargets {
fn from_iter<T: IntoIterator<Item = NavigationTarget>>(iter: T) -> Self {
Self::unique(iter)
}
}
pub trait HasNavigationTargets {
fn navigation_targets(&self, db: &dyn Db) -> NavigationTargets;
}
impl HasNavigationTargets for Type<'_> {
fn navigation_targets(&self, db: &dyn Db) -> NavigationTargets {
match self {
Type::Union(union) => union
.iter(db.upcast())
.flat_map(|target| target.navigation_targets(db))
.collect(),
Type::Intersection(intersection) => {
// Only consider the positive elements because the negative elements are mainly from narrowing constraints.
let mut targets = intersection
.iter_positive(db.upcast())
.filter(|ty| !ty.is_unknown());
let Some(first) = targets.next() else {
return NavigationTargets::empty();
};
match targets.next() {
Some(_) => {
// If there are multiple types in the intersection, we can't navigate to a single one
// because the type is the intersection of all those types.
NavigationTargets::empty()
}
None => first.navigation_targets(db),
}
}
ty => ty
.definition(db.upcast())
.map(|definition| definition.navigation_targets(db))
.unwrap_or_else(NavigationTargets::empty),
}
}
}
impl HasNavigationTargets for TypeDefinition<'_> {
fn navigation_targets(&self, db: &dyn Db) -> NavigationTargets {
let full_range = self.full_range(db.upcast());
NavigationTargets::single(NavigationTarget {
file: full_range.file(),
focus_range: self.focus_range(db.upcast()).unwrap_or(full_range).range(),
full_range: full_range.range(),
})
}
}
#[cfg(test)]
mod tests {
use crate::db::tests::TestDb;
use insta::internals::SettingsBindDropGuard;
use red_knot_python_semantic::{
Program, ProgramSettings, PythonPath, PythonPlatform, SearchPathSettings,
};
use ruff_db::diagnostic::{Diagnostic, DiagnosticFormat, DisplayDiagnosticConfig};
use ruff_db::files::{system_path_to_file, File};
use ruff_db::system::{DbWithWritableSystem, SystemPath, SystemPathBuf};
use ruff_python_ast::PythonVersion;
use ruff_text_size::TextSize;
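/// Creates a single-file test fixture from `source`.
///
/// The required `<CURSOR>` marker is stripped from the written file and its
/// position is recorded as the cursor offset.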
pub(super) fn cursor_test(source: &str) -> CursorTest {
let mut db = TestDb::new();
let cursor_offset = source.find("<CURSOR>").expect(
"`source`` should contain a `<CURSOR>` marker, indicating the position of the cursor.",
);
let mut content = source[..cursor_offset].to_string();
content.push_str(&source[cursor_offset + "<CURSOR>".len()..]);
db.write_file("main.py", &content)
.expect("write to memory file system to be successful");
let file = system_path_to_file(&db, "main.py").expect("newly written file to exist");
Program::from_settings(
&db,
ProgramSettings {
python_version: PythonVersion::latest(),
python_platform: PythonPlatform::default(),
search_paths: SearchPathSettings {
extra_paths: vec![],
src_roots: vec![SystemPathBuf::from("/")],
custom_typeshed: None,
python_path: PythonPath::KnownSitePackages(vec![]),
},
},
)
.expect("Default settings to be valid");
let mut insta_settings = insta::Settings::clone_current();
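// Normalize Windows-style path separators so the snapshots render identically
// on all platforms.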
insta_settings.add_filter(r#"\\(\w\w|\s|\.|")"#, "/$1");
// Filter out TODO types because they are different between debug and release builds.
insta_settings.add_filter(r"@Todo\(.+\)", "@Todo");
let insta_settings_guard = insta_settings.bind_to_scope();
CursorTest {
db,
cursor_offset: TextSize::try_from(cursor_offset)
.expect("source to be smaller than 4GB"),
file,
_insta_settings_guard: insta_settings_guard,
}
}
pub(super) struct CursorTest {
pub(super) db: TestDb,
pub(super) cursor_offset: TextSize,
pub(super) file: File,
_insta_settings_guard: SettingsBindDropGuard,
}
impl CursorTest {
pub(super) fn write_file(
&mut self,
path: impl AsRef<SystemPath>,
content: &str,
) -> std::io::Result<()> {
self.db.write_file(path, content)
}
pub(super) fn render_diagnostics<I, D>(&self, diagnostics: I) -> String
where
I: IntoIterator<Item = D>,
D: IntoDiagnostic,
{
use std::fmt::Write;
let mut buf = String::new();
let config = DisplayDiagnosticConfig::default()
.color(false)
.format(DiagnosticFormat::Full);
for diagnostic in diagnostics {
let diag = diagnostic.into_diagnostic();
write!(buf, "{}", diag.display(&self.db, &config)).unwrap();
}
buf
}
}
pub(super) trait IntoDiagnostic {
fn into_diagnostic(self) -> Diagnostic;
}
}

View File

@@ -1,66 +0,0 @@
use std::fmt;
use std::fmt::Formatter;
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum MarkupKind {
PlainText,
Markdown,
}
impl MarkupKind {
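/// Wraps `code` in a fenced block tagged with `language` when rendering
/// Markdown; for plain text, the code is emitted unchanged.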
pub(crate) const fn fenced_code_block<T>(self, code: T, language: &str) -> FencedCodeBlock<T>
where
T: fmt::Display,
{
FencedCodeBlock {
language,
code,
kind: self,
}
}
pub(crate) const fn horizontal_line(self) -> HorizontalLine {
HorizontalLine { kind: self }
}
}
pub(crate) struct FencedCodeBlock<'a, T> {
language: &'a str,
code: T,
kind: MarkupKind,
}
impl<T> fmt::Display for FencedCodeBlock<'_, T>
where
T: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.kind {
MarkupKind::PlainText => self.code.fmt(f),
MarkupKind::Markdown => write!(
f,
"```{language}\n{code}\n```",
language = self.language,
code = self.code
),
}
}
}
#[derive(Debug, Copy, Clone)]
pub(crate) struct HorizontalLine {
kind: MarkupKind,
}
impl fmt::Display for HorizontalLine {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self.kind {
MarkupKind::PlainText => {
f.write_str("\n---------------------------------------------\n")
}
MarkupKind::Markdown => {
write!(f, "\n---\n")
}
}
}
}

View File

@@ -1,56 +0,0 @@
[package]
name = "red_knot_project"
version = "0.0.0"
edition.workspace = true
rust-version.workspace = true
homepage.workspace = true
documentation.workspace = true
repository.workspace = true
authors.workspace = true
license.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
ruff_cache = { workspace = true }
ruff_db = { workspace = true, features = ["cache", "serde"] }
ruff_macros = { workspace = true }
ruff_python_ast = { workspace = true, features = ["serde"] }
ruff_python_formatter = { workspace = true, optional = true }
ruff_text_size = { workspace = true }
red_knot_ide = { workspace = true }
red_knot_python_semantic = { workspace = true, features = ["serde"] }
red_knot_vendored = { workspace = true }
anyhow = { workspace = true }
crossbeam = { workspace = true }
glob = { workspace = true }
notify = { workspace = true }
pep440_rs = { workspace = true, features = ["version-ranges"] }
rayon = { workspace = true }
rustc-hash = { workspace = true }
salsa = { workspace = true }
schemars = { workspace = true, optional = true }
serde = { workspace = true }
thiserror = { workspace = true }
toml = { workspace = true }
tracing = { workspace = true }
[dev-dependencies]
ruff_db = { workspace = true, features = ["testing"] }
glob = { workspace = true }
insta = { workspace = true, features = ["redactions", "ron"] }
[features]
default = ["zstd"]
deflate = ["red_knot_vendored/deflate"]
schemars = [
"dep:schemars",
"ruff_db/schemars",
"red_knot_python_semantic/schemars",
]
zstd = ["red_knot_vendored/zstd"]
format = ["ruff_python_formatter"]
[lints]
workspace = true

View File

@@ -1,5 +0,0 @@
None
False
True
Ellipsis
...

View File

@@ -1,4 +0,0 @@
"str"
1
1.1
b"bin"

View File

@@ -1,4 +0,0 @@
-a
~a
+a
not a

View File

@@ -1,2 +0,0 @@
a.b
a.b.c.d

View File

@@ -1,3 +0,0 @@
foo(
bar=1
).attr

View File

@@ -1,3 +0,0 @@
foo(
bar=1
).attr = 1

View File

@@ -1,6 +0,0 @@
a or b or c
a and b and c
a or b and c
#a and b or c

View File

@@ -1,14 +0,0 @@
a + b
a - b
a * b
a @ b
a / b
a // b
a % b
a ** b
a << b
a >> b
a | b
a ^ b
a & b

View File

@@ -1,6 +0,0 @@
x == y or (
x is not None and x == z
)
x == y or \
x <= 65 or x >= 102

View File

@@ -1,5 +0,0 @@
(
(a and aa) or
(b and bb) or
(c and cc)
)

View File

@@ -1,10 +0,0 @@
a == b
a != b
a < b
a <= b
a > b
a >= b
a is b
a is not b
a in b
a not in b

View File

@@ -1,2 +0,0 @@
a == b == 0
0 < a < 10

View File

@@ -1,2 +0,0 @@
a[b]
a[b][c]

View File

@@ -1 +0,0 @@
{a: 1, b:2}

View File

@@ -1 +0,0 @@
{1: 2, **a, 3: 4, 5: 6}

View File

@@ -1,20 +0,0 @@
DATA = {
'a': 1,
'b': 1,
'c': 1,
'd': 1,
'e': 1,
'f': 1,
'g': 1,
'h': 1,
'i': 1,
'j': 1,
'k': 1,
'l': 1,
'm': 1,
'n': 1,
'o': 1,
'p': 1,
'q': 1,
'r': 1,
}

View File

@@ -1,2 +0,0 @@
{**{0:1},**{1:2},**{2:3},**{3:4},**{4:5},**{5:6},**{6:7},**{7:8},**{8:9},**{9:10},**{10:11},**{11:12},**{12:13},**{13:14},**{14:15},**{15:16},**{16:17},**{17:18},**{18:19},**{19:20},**{20:21},**{21:22},**{22:23},**{23:24},**{24:25},**{25:26},**{26:27},**{27:28},**{28:29},**{29:30},**{30:31},**{31:32},**{32:33},**{33:34},**{34:35},**{35:36},**{36:37},**{37:38},**{38:39},**{39:40},**{40:41},**{41:42},**{42:43},**{43:44},**{44:45},**{45:46},**{46:47},**{47:48},**{48:49},**{49:50},**{50:51},**{51:52},**{52:53},**{53:54},**{54:55},**{55:56},**{56:57},**{57:58},**{58:59},**{59:60},**{60:61},**{61:62},**{62:63},**{63:64},**{64:65},**{65:66},**{66:67},**{67:68},**{68:69},**{69:70},**{70:71},**{71:72},**{72:73},**{73:74},**{74:75},**{75:76},**{76:77},**{77:78},**{78:79},**{79:80},**{80:81},**{81:82},**{82:83},**{83:84},**{84:85},**{85:86},**{86:87},**{87:88},**{88:89},**{89:90},**{90:91},**{91:92},**{92:93},**{93:94},**{94:95},**{95:96},**{96:97},**{97:98},**{98:99},**{99:100},**{100:101},**{101:102},**{102:103},**{103:104},**{104:105},**{105:106},**{106:107},**{107:108},**{108:109},**{109:110},**{110:111},**{111:112},**{112:113},**{113:114},**{114:115},**{115:116},**{116:117},**{117:118},**{118:119},**{119:120},**{120:121},**{121:122},**{122:123},**{123:124},**{124:125},**{125:126},**{126:127},**{127:128},**{128:129},**{129:130},**{130:131},**{131:132},**{132:133},**{133:134},**{134:135},**{135:136},**{136:137},**{137:138},**{138:139},**{139:140},**{140:141},**{141:142},**{142:143},**{143:144},**{144:145},**{145:146},**{146:147},**{147:148},**{148:149},**{149:150},**{150:151},**{151:152},**{152:153},**{153:154},**{154:155},**{155:156},**{156:157},**{157:158},**{158:159},**{159:160},**{160:161},**{161:162},**{162:163},**{163:164},**{164:165},**{165:166},**{166:167},**{167:168},**{168:169},**{169:170},**{170:171},**{171:172},**{172:173},**{173:174},**{174:175},**{175:176},**{176:177},**{177:178},**{178:179},**{179:180},**{180:181},**{181:182},**{182:183},**{183:184},**{184:185},**{185:186},**{186:187},**{187:188},**{188:189},**{189:190},**{190:191},**{191:192},**{192:193},**{193:194},**{194:195},**{195:196},**{196:197},**{197:198},**{198:199},**{199:200},**{200:201},**{201:202},**{202:203},**{203:204},**{204:205},**{205:206},**{206:207},**{207:208},**{208:209},**{209:210},**{210:211},**{211:212},**{212:213},**{213:214},**{214:215},**{215:216},**{216:217},**{217:218},**{218:219},**{219:220},**{220:221},**{221:222},**{222:223},**{223:224},**{224:225},**{225:226},**{226:227},**{227:228},**{228:229},**{229:230},**{230:231},**{231:232},**{232:233},**{233:234},**{234:235},**{235:236},**{236:237},**{237:238},**{238:239},**{239:240},**{240:241},**{241:242},**{242:243},**{243:244},**{244:245},**{245:246},**{246:247},**{247:248},**{248:249},**{249:250},**{250:251},**{251:252},**{252:253},**{253:254},**{254:255},**{255:256},**{256:257},**{257:258},**{258:259},**{259:260}}

Some files were not shown because too many files have changed in this diff