Compare commits


2 Commits

Author            SHA1        Message                                         Date
Dhruv Manilawala  ce4d4ae6ac  Add methods to iter over format spec elements   2024-05-13 14:17:23 +05:30
Dhruv Manilawala  128414cd95  Add Iterator impl for StringLike parts          2024-05-13 13:48:54 +05:30
2115 changed files with 35779 additions and 70558 deletions


@@ -1,10 +1,3 @@
[alias]
dev = "run --package ruff_dev --bin ruff_dev"
benchmark = "bench -p ruff_benchmark --bench linter --bench formatter --"
# statically link the C runtime so the executable does not depend on
# that shared/dynamic library.
#
# See: https://github.com/astral-sh/ruff/issues/11503
[target.'cfg(all(target_env="msvc", target_os = "windows"))']
rustflags = ["-C", "target-feature=+crt-static"]

.gitattributes

@@ -8,10 +8,6 @@ crates/ruff_linter/resources/test/fixtures/pycodestyle/W391_3.py text eol=crlf
crates/ruff_python_formatter/resources/test/fixtures/ruff/docstring_code_examples_crlf.py text eol=crlf
crates/ruff_python_formatter/tests/snapshots/format@docstring_code_examples_crlf.py.snap text eol=crlf
crates/ruff_python_parser/resources/invalid/re_lexing/line_continuation_windows_eol.py text eol=crlf
crates/ruff_python_parser/resources/invalid/re_lex_logical_token_windows_eol.py text eol=crlf
crates/ruff_python_parser/resources/invalid/re_lex_logical_token_mac_eol.py text eol=cr
crates/ruff_python_parser/resources/inline linguist-generated=true
ruff.schema.json linguist-generated=true text=auto eol=lf

.github/CODEOWNERS

@@ -15,7 +15,3 @@
# Script for fuzzing the parser
/scripts/fuzz-parser/ @AlexWaygood
# red-knot
/crates/red_knot* @carljm @MichaReiser @AlexWaygood
/crates/ruff_db/ @carljm @MichaReiser @AlexWaygood


@@ -41,13 +41,6 @@
description: "Disable PRs updating GitHub runners (e.g. 'runs-on: macos-14')",
enabled: false,
},
{
// Disable updates of `zip-rs`; intentionally pinned for now due to ownership change
// See: https://github.com/astral-sh/uv/issues/3642
matchPackagePatterns: ["zip"],
matchManagers: ["cargo"],
enabled: false,
},
{
groupName: "pre-commit dependencies",
matchManagers: ["pre-commit"],


@@ -1,68 +0,0 @@
# Build and publish a Docker image.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a local
# artifacts job within `cargo-dist`.
#
# TODO(charlie): Ideally, the publish step would happen as a publish job within `cargo-dist`, but
# sharing the built image as an artifact between jobs is challenging.
name: "[ruff] Build Docker image"
on:
workflow_call:
inputs:
plan:
required: true
type: string
pull_request:
paths:
- .github/workflows/build-docker.yml
jobs:
docker-publish:
name: Build Docker image (ghcr.io/astral-sh/ruff)
runs-on: ubuntu-latest
environment:
name: release
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: docker/setup-buildx-action@v3
- uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/astral-sh/ruff
- name: Check tag consistency
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
run: |
version=$(grep "version = " pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g')
if [ "${{ fromJson(inputs.plan).announcement_tag }}" != "${version}" ]; then
echo "The input tag does not match the version from pyproject.toml:" >&2
echo "${{ fromJson(inputs.plan).announcement_tag }}" >&2
echo "${version}" >&2
exit 1
else
echo "Releasing ${version}"
fi
- name: "Build and push Docker image"
uses: docker/build-push-action@v6
with:
context: .
platforms: linux/amd64,linux/arm64
# Reuse the builder
cache-from: type=gha
cache-to: type=gha,mode=max
push: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
tags: ghcr.io/astral-sh/ruff:latest,ghcr.io/astral-sh/ruff:${{ (inputs.plan != '' && fromJson(inputs.plan).announcement_tag) || 'dry-run' }}
labels: ${{ steps.meta.outputs.labels }}
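The tag-consistency step above reduces to a grep/sed extraction of the version from pyproject.toml. A runnable sketch of just that extraction, using a made-up file and version:

    printf '[project]\nname = "ruff"\nversion = "0.5.3"\n' > /tmp/pyproject.toml
    version=$(grep "version = " /tmp/pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g')
    echo "$version"   # prints 0.5.3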


@@ -167,9 +167,6 @@ jobs:
- uses: Swatinem/rust-cache@v2
- name: "Run tests"
shell: bash
env:
# Workaround for <https://github.com/nextest-rs/nextest/issues/1493>.
RUSTUP_WINDOWS_PATH_ADD_BIN: 1
run: |
cargo nextest run --all-features --profile ci
cargo test --all-features --doc
@@ -212,38 +209,6 @@ jobs:
- name: "Build"
run: cargo build --release --locked
cargo-build-msrv:
name: "cargo build (msrv)"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@v4
- uses: SebRollen/toml-action@v1.2.0
id: msrv
with:
file: "Cargo.toml"
field: "workspace.package.rust-version"
- name: "Install Rust toolchain"
run: rustup default ${{ steps.msrv.outputs.value }}
- name: "Install mold"
uses: rui314/setup-mold@v1
- name: "Install cargo nextest"
uses: taiki-e/install-action@v2
with:
tool: cargo-nextest
- name: "Install cargo insta"
uses: taiki-e/install-action@v2
with:
tool: cargo-insta
- uses: Swatinem/rust-cache@v2
- name: "Run tests"
shell: bash
env:
NEXTEST_PROFILE: "ci"
run: cargo +${{ steps.msrv.outputs.value }} insta test --all-features --unreferenced reject --test-runner nextest
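The MSRV job reads `workspace.package.rust-version` from Cargo.toml with `SebRollen/toml-action` and pins the toolchain via `cargo +<version>`. A rough local equivalent, assuming the first `rust-version` line in Cargo.toml is that field and that cargo-nextest is already installed:

    msrv=$(grep -m1 'rust-version' Cargo.toml | sed -e 's/.*"\(.*\)".*/\1/')
    rustup toolchain install "$msrv"
    cargo "+$msrv" nextest run --all-features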
cargo-fuzz:
name: "cargo fuzz"
runs-on: ubuntu-latest
@@ -257,13 +222,10 @@ jobs:
- uses: Swatinem/rust-cache@v2
with:
workspaces: "fuzz -> target"
- name: "Install cargo-binstall"
uses: cargo-bins/cargo-binstall@main
- name: "Install cargo-fuzz"
uses: taiki-e/install-action@v2
with:
tool: cargo-fuzz@0.11.2
- name: "Install cargo-fuzz"
# Download the latest version via quickinstall rather than from the GitHub releases, because the GitHub releases only have MUSL targets.
run: cargo binstall cargo-fuzz --force --disable-strategies crate-meta-data --no-confirm
- run: cargo fuzz build -s none
fuzz-parser:
@@ -341,7 +303,7 @@ jobs:
name: ruff
path: target/debug
- uses: dawidd6/action-download-artifact@v6
- uses: dawidd6/action-download-artifact@v3
name: Download baseline Ruff binary
with:
name: ruff

.github/workflows/docs.yaml (new file)

@@ -0,0 +1,55 @@
name: mkdocs
on:
workflow_dispatch:
inputs:
ref:
description: "The commit SHA, tag, or branch to publish. Uses the default branch if not specified."
default: ""
type: string
release:
types: [published]
jobs:
mkdocs:
runs-on: ubuntu-latest
env:
CF_API_TOKEN_EXISTS: ${{ secrets.CF_API_TOKEN != '' }}
MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.ref }}
- uses: actions/setup-python@v5
- name: "Add SSH key"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY }}
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v2
- name: "Install Insiders dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: pip install -r docs/requirements-insiders.txt
- name: "Install dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: pip install -r docs/requirements.txt
- name: "Copy README File"
run: |
python scripts/transform_readme.py --target mkdocs
python scripts/generate_mkdocs.py
- name: "Build Insiders docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: mkdocs build --strict -f mkdocs.insiders.yml
- name: "Build docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: mkdocs build --strict -f mkdocs.public.yml
- name: "Deploy to Cloudflare Pages"
if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
uses: cloudflare/wrangler-action@v3.5.0
with:
apiToken: ${{ secrets.CF_API_TOKEN }}
accountId: ${{ secrets.CF_ACCOUNT_ID }}
# `github.head_ref` is only set during pull requests; for manual runs or tags we use `main` to deploy to production
command: pages deploy site --project-name=astral-docs --branch ${{ github.head_ref || 'main' }} --commit-hash ${GITHUB_SHA}


@@ -1,29 +0,0 @@
# Notify downstream repositories of a new release.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a post-announce
# job within `cargo-dist`.
name: "[ruff] Notify dependents"
on:
workflow_call:
inputs:
plan:
required: true
type: string
jobs:
update-dependents:
name: Notify dependents
runs-on: ubuntu-latest
steps:
- name: "Update pre-commit mirror"
uses: actions/github-script@v7
with:
github-token: ${{ secrets.RUFF_PRE_COMMIT_PAT }}
script: |
github.rest.actions.createWorkflowDispatch({
owner: 'astral-sh',
repo: 'ruff-pre-commit',
workflow_id: 'main.yml',
ref: 'main',
})
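The `createWorkflowDispatch` call above has a one-line `gh` CLI equivalent, handy for triggering the pre-commit mirror by hand (assumes a token with workflow permissions on the target repository):

    gh workflow run main.yml --repo astral-sh/ruff-pre-commit --ref main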


@@ -1,16 +1,9 @@
# Publish the Ruff playground.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a post-announce
# job within `cargo-dist`.
name: "[Playground] Release"
on:
workflow_dispatch:
workflow_call:
inputs:
plan:
required: true
type: string
release:
types: [published]
env:
CARGO_INCREMENTAL: 0
@@ -47,7 +40,7 @@ jobs:
working-directory: playground
- name: "Deploy to Cloudflare Pages"
if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
uses: cloudflare/wrangler-action@v3.7.0
uses: cloudflare/wrangler-action@v3.5.0
with:
apiToken: ${{ secrets.CF_API_TOKEN }}
accountId: ${{ secrets.CF_ACCOUNT_ID }}


@@ -17,7 +17,7 @@ jobs:
comment:
runs-on: ubuntu-latest
steps:
- uses: dawidd6/action-download-artifact@v6
- uses: dawidd6/action-download-artifact@v3
name: Download pull request number
with:
name: pr-number
@@ -32,7 +32,7 @@ jobs:
echo "pr-number=$(<pr-number)" >> $GITHUB_OUTPUT
fi
- uses: dawidd6/action-download-artifact@v6
- uses: dawidd6/action-download-artifact@v3
name: "Download ecosystem results"
id: download-ecosystem-result
if: steps.pr-number.outputs.pr-number
@@ -48,14 +48,6 @@ jobs:
id: generate-comment
if: steps.download-ecosystem-result.outputs.found_artifact == 'true'
run: |
# Guard against malicious ecosystem results that symlink to a secret
# file on this runner
if [[ -L pr/ecosystem/ecosystem-result ]]
then
echo "Error: ecosystem-result cannot be a symlink"
exit 1
fi
# Note this identifier is used to find the comment to update on
# subsequent runs
echo '<!-- generated-comment ecosystem -->' >> comment.txt
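The symlink guard removed in this hunk is easy to exercise locally; a small sketch with throwaway paths:

    mkdir -p /tmp/demo && echo secret > /tmp/demo/secret
    ln -sf /tmp/demo/secret /tmp/demo/ecosystem-result
    if [[ -L /tmp/demo/ecosystem-result ]]; then
        echo "Error: ecosystem-result cannot be a symlink" >&2
    fi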


@@ -1,151 +0,0 @@
# Publish the Ruff documentation.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a post-announce
# job within `cargo-dist`.
name: mkdocs
on:
workflow_dispatch:
inputs:
ref:
description: "The commit SHA, tag, or branch to publish. Uses the default branch if not specified."
default: ""
type: string
workflow_call:
inputs:
plan:
required: true
type: string
jobs:
mkdocs:
runs-on: ubuntu-latest
env:
MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.ref }}
- uses: actions/setup-python@v5
with:
python-version: 3.12
- name: "Set docs version"
run: |
version="${{ (inputs.plan != '' && fromJson(inputs.plan).announcement_tag) || inputs.ref }}"
# if version is missing, exit with error
if [[ -z "$version" ]]; then
echo "Can't build docs without a version."
exit 1
fi
# Use version as display name for now
display_name="$version"
echo "version=$version" >> $GITHUB_ENV
echo "display_name=$display_name" >> $GITHUB_ENV
- name: "Set branch name"
run: |
version="${{ env.version }}"
display_name="${{ env.display_name }}"
timestamp="$(date +%s)"
# create branch_display_name from display_name by replacing all
# characters disallowed in git branch names with hyphens
branch_display_name="$(echo "$display_name" | tr -c '[:alnum:]._' '-' | tr -s '-')"
echo "branch_name=update-docs-$branch_display_name-$timestamp" >> $GITHUB_ENV
echo "timestamp=$timestamp" >> $GITHUB_ENV
- name: "Add SSH key"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY }}
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v2
- name: "Install Insiders dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: pip install -r docs/requirements-insiders.txt
- name: "Install dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: pip install -r docs/requirements.txt
- name: "Copy README File"
run: |
python scripts/transform_readme.py --target mkdocs
python scripts/generate_mkdocs.py
- name: "Build Insiders docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: mkdocs build --strict -f mkdocs.insiders.yml
- name: "Build docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: mkdocs build --strict -f mkdocs.public.yml
- name: "Clone docs repo"
run: |
version="${{ env.version }}"
git clone https://${{ secrets.ASTRAL_DOCS_PAT }}@github.com/astral-sh/docs.git astral-docs
- name: "Copy docs"
run: rm -rf astral-docs/site/ruff && mkdir -p astral-docs/site && cp -r site/ruff astral-docs/site/
- name: "Commit docs"
working-directory: astral-docs
run: |
branch_name="${{ env.branch_name }}"
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
git checkout -b $branch_name
git add site/ruff
git commit -m "Update ruff documentation for $version"
- name: "Create Pull Request"
working-directory: astral-docs
env:
GITHUB_TOKEN: ${{ secrets.ASTRAL_DOCS_PAT }}
run: |
version="${{ env.version }}"
display_name="${{ env.display_name }}"
branch_name="${{ env.branch_name }}"
# set the PR title
pull_request_title="Update ruff documentation for $display_name"
# Delete any existing pull requests that are open for this version
# by checking against pull_request_title because the new PR will
# supersede the old one.
gh pr list --state open --json title --jq '.[] | select(.title == "$pull_request_title") | .number' | \
xargs -I {} gh pr close {}
# push the branch to GitHub
git push origin $branch_name
# create the PR
gh pr create --base main --head $branch_name \
--title "$pull_request_title" \
--body "Automated documentation update for $display_name" \
--label "documentation"
- name: "Merge Pull Request"
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
working-directory: astral-docs
env:
GITHUB_TOKEN: ${{ secrets.ASTRAL_DOCS_PAT }}
run: |
branch_name="${{ env.branch_name }}"
# auto-merge the PR if the build was triggered by a release. Manual builds should be reviewed by a human.
# give the PR a few seconds to be created before trying to auto-merge it
sleep 10
gh pr merge --squash $branch_name


@@ -1,34 +0,0 @@
# Publish a release to PyPI.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a publish job
# within `cargo-dist`.
name: "[ruff] Publish to PyPI"
on:
workflow_call:
inputs:
plan:
required: true
type: string
jobs:
pypi-publish:
name: Upload to PyPI
runs-on: ubuntu-latest
environment:
name: release
permissions:
# For PyPI's trusted publishing.
id-token: write
steps:
- uses: actions/download-artifact@v4
with:
pattern: wheels-*
path: wheels
merge-multiple: true
- name: Publish to PyPi
uses: pypa/gh-action-pypi-publish@release/v1
with:
skip-existing: true
packages-dir: wheels
verbose: true


@@ -1,55 +0,0 @@
# Build and publish ruff-api for wasm.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a publish
# job within `cargo-dist`.
name: "Build and publish wasm"
on:
workflow_dispatch:
workflow_call:
inputs:
plan:
required: true
type: string
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
jobs:
ruff_wasm:
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write
strategy:
matrix:
target: [web, bundler, nodejs]
fail-fast: false
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown
- uses: jetli/wasm-pack-action@v0.4.0
- uses: jetli/wasm-bindgen-action@v0.2.0
- name: "Run wasm-pack build"
run: wasm-pack build --target ${{ matrix.target }} crates/ruff_wasm
- name: "Rename generated package"
run: | # Replace the package name w/ jq
jq '.name="@astral-sh/ruff-wasm-${{ matrix.target }}"' crates/ruff_wasm/pkg/package.json > /tmp/package.json
mv /tmp/package.json crates/ruff_wasm/pkg
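jq has no in-place edit mode, hence the write-to-temp-then-move dance above. The same pattern on a scratch file:

    echo '{"name":"ruff-wasm"}' > /tmp/package.json
    jq '.name = "@astral-sh/ruff-wasm-web"' /tmp/package.json > /tmp/package.json.new
    mv /tmp/package.json.new /tmp/package.json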
- run: cp LICENSE crates/ruff_wasm/pkg # wasm-pack does not put the LICENSE file in the pkg
- uses: actions/setup-node@v4
with:
node-version: 18
registry-url: "https://registry.npmjs.org"
- name: "Publish (dry-run)"
if: ${{ inputs.plan == '' || fromJson(inputs.plan).announcement_tag_is_implicit }}
run: npm publish --dry-run crates/ruff_wasm/pkg
- name: "Publish"
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
run: npm publish --provenance --access public crates/ruff_wasm/pkg
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}


@@ -1,23 +1,21 @@
# Build ruff on all platforms.
#
# Generates both wheels (for PyPI) and archived binaries (for GitHub releases).
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a local
# artifacts job within `cargo-dist`.
name: "Build binaries"
name: "[ruff] Release"
on:
workflow_call:
workflow_dispatch:
inputs:
plan:
required: true
tag:
description: "The version to tag, without the leading 'v'. If omitted, will initiate a dry run (no uploads)."
type: string
sha:
description: "The full sha of the commit to be released. If omitted, the latest commit on the default branch will be used."
default: ""
type: string
pull_request:
paths:
# When we change pyproject.toml, we want to ensure that the maturin builds still work.
# When we change pyproject.toml, we want to ensure that the maturin builds still work
- pyproject.toml
# And when we change this workflow itself...
- .github/workflows/build-binaries.yml
- .github/workflows/release.yaml
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -25,7 +23,6 @@ concurrency:
env:
PACKAGE_NAME: ruff
MODULE_NAME: ruff
PYTHON_VERSION: "3.11"
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
@@ -34,12 +31,11 @@ env:
jobs:
sdist:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
@@ -53,8 +49,8 @@ jobs:
- name: "Test sdist"
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.tar.gz --force-reinstall
${{ env.MODULE_NAME }} --help
python -m ${{ env.MODULE_NAME }} --help
ruff --help
python -m ruff --help
- name: "Upload sdist"
uses: actions/upload-artifact@v4
with:
@@ -62,12 +58,11 @@ jobs:
path: dist
macos-x86_64:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: macos-12
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
@@ -79,6 +74,11 @@ jobs:
with:
target: x86_64
args: --release --locked --out dist
- name: "Test wheel - x86_64"
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
ruff --help
python -m ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
with:
@@ -86,29 +86,23 @@ jobs:
path: dist
- name: "Archive binary"
run: |
TARGET=x86_64-apple-darwin
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
ARCHIVE_FILE=ruff-${{ inputs.tag }}-x86_64-apple-darwin.tar.gz
tar czvf $ARCHIVE_FILE -C target/x86_64-apple-darwin/release ruff
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
with:
name: artifacts-macos-x86_64
name: binaries-macos-x86_64
path: |
*.tar.gz
*.sha256
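The two sides of this hunk produce different archive layouts, matching the BREAKING_CHANGES entries further down: one side writes a versionless `ruff-<target>.tar.gz` that wraps the binary in a `ruff-<target>/` directory, the other a versioned `ruff-<tag>-<target>.tar.gz` with `ruff` at the top level (via `tar -C`). Consuming the nested layout (file names illustrative):

    tar tzf ruff-x86_64-apple-darwin.tar.gz                        # ruff-x86_64-apple-darwin/ruff
    tar xzf ruff-x86_64-apple-darwin.tar.gz --strip-components=1   # extracts just `ruff`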
macos-aarch64:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: macos-14
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
@@ -132,24 +126,18 @@ jobs:
path: dist
- name: "Archive binary"
run: |
TARGET=aarch64-apple-darwin
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
ARCHIVE_FILE=ruff-${{ inputs.tag }}-aarch64-apple-darwin.tar.gz
tar czvf $ARCHIVE_FILE -C target/aarch64-apple-darwin/release ruff
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
with:
name: artifacts-aarch64-apple-darwin
name: binaries-aarch64-apple-darwin
path: |
*.tar.gz
*.sha256
windows:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: windows-latest
strategy:
matrix:
@@ -163,7 +151,7 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
@@ -175,16 +163,13 @@ jobs:
with:
target: ${{ matrix.platform.target }}
args: --release --locked --out dist
env:
# aarch64 build fails, see https://github.com/PyO3/maturin/issues/2110
XWIN_VERSION: 16
- name: "Test wheel"
if: ${{ !startsWith(matrix.platform.target, 'aarch64') }}
shell: bash
run: |
python -m pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
${{ env.MODULE_NAME }} --help
python -m ${{ env.MODULE_NAME }} --help
ruff --help
python -m ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
with:
@@ -193,19 +178,18 @@ jobs:
- name: "Archive binary"
shell: bash
run: |
ARCHIVE_FILE=ruff-${{ matrix.platform.target }}.zip
ARCHIVE_FILE=ruff-${{ inputs.tag }}-${{ matrix.platform.target }}.zip
7z a $ARCHIVE_FILE ./target/${{ matrix.platform.target }}/release/ruff.exe
sha256sum $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
with:
name: artifacts-${{ matrix.platform.target }}
name: binaries-${{ matrix.platform.target }}
path: |
*.zip
*.sha256
linux:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
strategy:
matrix:
@@ -215,7 +199,7 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
@@ -232,36 +216,27 @@ jobs:
if: ${{ startsWith(matrix.target, 'x86_64') }}
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
${{ env.MODULE_NAME }} --help
python -m ${{ env.MODULE_NAME }} --help
ruff --help
python -m ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
with:
name: wheels-${{ matrix.target }}
path: dist
- name: "Archive binary"
shell: bash
run: |
set -euo pipefail
TARGET=${{ matrix.target }}
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
ARCHIVE_FILE=ruff-${{ inputs.tag }}-${{ matrix.target }}.tar.gz
tar czvf $ARCHIVE_FILE -C target/${{ matrix.target }}/release ruff
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
with:
name: artifacts-${{ matrix.target }}
name: binaries-${{ matrix.target }}
path: |
*.tar.gz
*.sha256
linux-cross:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
strategy:
matrix:
@@ -283,13 +258,11 @@ jobs:
arch: ppc64
# see https://github.com/astral-sh/ruff/issues/10073
maturin_docker_options: -e JEMALLOC_SYS_WITH_LG_PAGE=16
- target: arm-unknown-linux-musleabihf
arch: arm
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
@@ -306,8 +279,8 @@ jobs:
if: matrix.platform.arch != 'ppc64'
name: Test wheel
with:
arch: ${{ matrix.platform.arch == 'arm' && 'armv6' || matrix.platform.arch }}
distro: ${{ matrix.platform.arch == 'arm' && 'bullseye' || 'ubuntu20.04' }}
arch: ${{ matrix.platform.arch }}
distro: ubuntu20.04
githubToken: ${{ github.token }}
install: |
apt-get update
@@ -322,28 +295,19 @@ jobs:
name: wheels-${{ matrix.platform.target }}
path: dist
- name: "Archive binary"
shell: bash
run: |
set -euo pipefail
TARGET=${{ matrix.platform.target }}
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
ARCHIVE_FILE=ruff-${{ inputs.tag }}-${{ matrix.platform.target }}.tar.gz
tar czvf $ARCHIVE_FILE -C target/${{ matrix.platform.target }}/release ruff
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
with:
name: artifacts-${{ matrix.platform.target }}
name: binaries-${{ matrix.platform.target }}
path: |
*.tar.gz
*.sha256
musllinux:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
strategy:
matrix:
@@ -353,7 +317,7 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
@@ -376,35 +340,26 @@ jobs:
apk add python3
python -m venv .venv
.venv/bin/pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
.venv/bin/${{ env.MODULE_NAME }} --help
.venv/bin/ruff check --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
with:
name: wheels-${{ matrix.target }}
path: dist
- name: "Archive binary"
shell: bash
run: |
set -euo pipefail
TARGET=${{ matrix.target }}
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
ARCHIVE_FILE=ruff-${{ inputs.tag }}-${{ matrix.target }}.tar.gz
tar czvf $ARCHIVE_FILE -C target/${{ matrix.target }}/release ruff
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
with:
name: artifacts-${{ matrix.target }}
name: binaries-${{ matrix.target }}
path: |
*.tar.gz
*.sha256
musllinux-cross:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
strategy:
matrix:
@@ -418,7 +373,7 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
@@ -442,29 +397,204 @@ jobs:
run: |
python -m venv .venv
.venv/bin/pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
.venv/bin/${{ env.MODULE_NAME }} --help
.venv/bin/ruff check --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
with:
name: wheels-${{ matrix.platform.target }}
path: dist
- name: "Archive binary"
shell: bash
run: |
set -euo pipefail
TARGET=${{ matrix.platform.target }}
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
ARCHIVE_FILE=ruff-${{ inputs.tag }}-${{ matrix.platform.target }}.tar.gz
tar czvf $ARCHIVE_FILE -C target/${{ matrix.platform.target }}/release ruff
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
with:
name: artifacts-${{ matrix.platform.target }}
name: binaries-${{ matrix.platform.target }}
path: |
*.tar.gz
*.sha256
validate-tag:
name: Validate tag
runs-on: ubuntu-latest
# If you don't set an input tag, it's a dry run (no uploads).
if: ${{ inputs.tag }}
steps:
- uses: actions/checkout@v4
with:
ref: main # We checkout the main branch to check for the commit
- name: Check main branch
if: ${{ inputs.sha }}
run: |
# Fetch the main branch since a shallow checkout is used by default
git fetch origin main --unshallow
if ! git branch --contains ${{ inputs.sha }} | grep -E '(^|\s)main$'; then
echo "The specified sha is not on the main branch" >&2
exit 1
fi
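`git branch --contains` piped through grep works, but git ships a purpose-built predicate for this check; an equivalent sketch (`sha` assumed to be set):

    git fetch origin main
    if ! git merge-base --is-ancestor "$sha" origin/main; then
        echo "The specified sha is not on the main branch" >&2
        exit 1
    fi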
- name: Check tag consistency
run: |
# Switch to the commit we want to release
git checkout ${{ inputs.sha }}
version=$(grep "version = " pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g')
if [ "${{ inputs.tag }}" != "${version}" ]; then
echo "The input tag does not match the version from pyproject.toml:" >&2
echo "${{ inputs.tag }}" >&2
echo "${version}" >&2
exit 1
else
echo "Releasing ${version}"
fi
upload-release:
name: Upload to PyPI
runs-on: ubuntu-latest
needs:
- macos-aarch64
- macos-x86_64
- windows
- linux
- linux-cross
- musllinux
- musllinux-cross
- validate-tag
# If you don't set an input tag, it's a dry run (no uploads).
if: ${{ inputs.tag }}
environment:
name: release
permissions:
# For pypi trusted publishing
id-token: write
steps:
- uses: actions/download-artifact@v4
with:
pattern: wheels-*
path: wheels
merge-multiple: true
- name: Publish to PyPi
uses: pypa/gh-action-pypi-publish@release/v1
with:
skip-existing: true
packages-dir: wheels
verbose: true
tag-release:
name: Tag release
runs-on: ubuntu-latest
needs: upload-release
# If you don't set an input tag, it's a dry run (no uploads).
if: ${{ inputs.tag }}
permissions:
# For git tag
contents: write
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- name: git tag
run: |
git config user.email "hey@astral.sh"
git config user.name "Ruff Release CI"
git tag -m "v${{ inputs.tag }}" "v${{ inputs.tag }}"
# If there is a duplicate tag, this will fail. The publish-to-PyPI action will have been a no-op (due to
# skip-existing), so we make a non-destructive exit here
git push --tags
publish-release:
name: Publish to GitHub
runs-on: ubuntu-latest
needs: tag-release
# If you don't set an input tag, it's a dry run (no uploads).
if: ${{ inputs.tag }}
permissions:
# For GitHub release publishing
contents: write
steps:
- uses: actions/download-artifact@v4
with:
pattern: binaries-*
path: binaries
merge-multiple: true
- name: "Publish to GitHub"
uses: softprops/action-gh-release@v2
with:
draft: true
files: binaries/*
tag_name: v${{ inputs.tag }}
docker-publish:
# This job doesn't need to wait on any other task: it's easy to re-tag if something failed, and we validate
# the tag here as well
name: Push Docker image ghcr.io/astral-sh/ruff
runs-on: ubuntu-latest
environment:
name: release
permissions:
# For the docker push
packages: write
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: docker/setup-buildx-action@v3
- uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/astral-sh/ruff
- name: Check tag consistency
# Unlike validate-tag we don't check if the commit is on the main branch, but it seems good enough since we can
# change docker tags
if: ${{ inputs.tag }}
run: |
version=$(grep "version = " pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g')
if [ "${{ inputs.tag }}" != "${version}" ]; then
echo "The input tag does not match the version from pyproject.toml:" >&2
echo "${{ inputs.tag }}" >&2
echo "${version}" >&2
exit 1
else
echo "Releasing ${version}"
fi
- name: "Build and push Docker image"
uses: docker/build-push-action@v5
with:
context: .
platforms: linux/amd64,linux/arm64
# Reuse the builder
cache-from: type=gha
cache-to: type=gha,mode=max
push: ${{ inputs.tag != '' }}
tags: ghcr.io/astral-sh/ruff:latest,ghcr.io/astral-sh/ruff:${{ inputs.tag || 'dry-run' }}
labels: ${{ steps.meta.outputs.labels }}
# After the release has been published, we update downstream repositories.
# This is a separate job: if it fails, the release is still fine; we just need to trigger some workflows manually
update-dependents:
name: Update dependents
runs-on: ubuntu-latest
needs: publish-release
steps:
- name: "Update pre-commit mirror"
uses: actions/github-script@v7
with:
github-token: ${{ secrets.RUFF_PRE_COMMIT_PAT }}
script: |
github.rest.actions.createWorkflowDispatch({
owner: 'astral-sh',
repo: 'ruff-pre-commit',
workflow_id: 'main.yml',
ref: 'main',
})


@@ -1,298 +0,0 @@
# Copyright 2022-2024, axodotdev
# SPDX-License-Identifier: MIT or Apache-2.0
#
# CI that:
#
# * checks for a Git Tag that looks like a release
# * builds artifacts with cargo-dist (archives, installers, hashes)
# * uploads those artifacts to temporary workflow zip
# * on success, uploads the artifacts to a GitHub Release
#
# Note that the GitHub Release will be created with a generated
# title/body based on your changelogs.
name: Release
permissions:
"contents": "write"
# This task will run whenever you workflow_dispatch with a tag that looks like a version
# like "1.0.0", "v0.1.0-prerelease.1", "my-app/0.1.0", "releases/v1.0.0", etc.
# Various formats will be parsed into a VERSION and an optional PACKAGE_NAME, where
# PACKAGE_NAME must be the name of a Cargo package in your workspace, and VERSION
# must be a Cargo-style SemVer Version (must have at least major.minor.patch).
#
# If PACKAGE_NAME is specified, then the announcement will be for that
# package (erroring out if it doesn't have the given version or isn't cargo-dist-able).
#
# If PACKAGE_NAME isn't specified, then the announcement will be for all
# (cargo-dist-able) packages in the workspace with that version (this mode is
# intended for workspaces with only one dist-able package, or with all dist-able
# packages versioned/released in lockstep).
#
# If you push multiple tags at once, separate instances of this workflow will
# spin up, creating an independent announcement for each one. However, GitHub
# will hard limit this to 3 tags per commit, as it will assume more tags is a
# mistake.
#
# If there's a prerelease-style suffix to the version, then the release(s)
# will be marked as a prerelease.
on:
workflow_dispatch:
inputs:
tag:
description: Release Tag
required: true
default: dry-run
type: string
jobs:
# Run 'cargo dist plan' (or host) to determine what tasks we need to do
plan:
runs-on: "ubuntu-20.04"
outputs:
val: ${{ steps.plan.outputs.manifest }}
tag: ${{ (inputs.tag != 'dry-run' && inputs.tag) || '' }}
tag-flag: ${{ inputs.tag && inputs.tag != 'dry-run' && format('--tag={0}', inputs.tag) || '' }}
publishing: ${{ inputs.tag && inputs.tag != 'dry-run' }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Install cargo-dist
# we specify bash to get pipefail; it guards against the `curl` command
# failing; otherwise `sh` won't catch that `curl` returned non-0
shell: bash
run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.18.0/cargo-dist-installer.sh | sh"
- name: Cache cargo-dist
uses: actions/upload-artifact@v4
with:
name: cargo-dist-cache
path: ~/.cargo/bin/cargo-dist
# sure would be cool if github gave us proper conditionals...
# so here's a doubly-nested ternary-via-truthiness to try to provide the best possible
# functionality based on whether this is a pull_request, and whether it's from a fork.
# (PRs run on the *source* but secrets are usually on the *target* -- that's *good*
# but also really annoying to build CI around when it needs secrets to work right.)
- id: plan
run: |
cargo dist ${{ (inputs.tag && inputs.tag != 'dry-run' && format('host --steps=create --tag={0}', inputs.tag)) || 'plan' }} --output-format=json > plan-dist-manifest.json
echo "cargo dist ran successfully"
cat plan-dist-manifest.json
echo "manifest=$(jq -c "." plan-dist-manifest.json)" >> "$GITHUB_OUTPUT"
- name: "Upload dist-manifest.json"
uses: actions/upload-artifact@v4
with:
name: artifacts-plan-dist-manifest
path: plan-dist-manifest.json
custom-build-binaries:
needs:
- plan
if: ${{ needs.plan.outputs.publishing == 'true' || fromJson(needs.plan.outputs.val).ci.github.pr_run_mode == 'upload' || inputs.tag == 'dry-run' }}
uses: ./.github/workflows/build-binaries.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
custom-build-docker:
needs:
- plan
if: ${{ needs.plan.outputs.publishing == 'true' || fromJson(needs.plan.outputs.val).ci.github.pr_run_mode == 'upload' || inputs.tag == 'dry-run' }}
uses: ./.github/workflows/build-docker.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
permissions:
"contents": "read"
"packages": "write"
# Build and package all the platform-agnostic(ish) things
build-global-artifacts:
needs:
- plan
- custom-build-binaries
- custom-build-docker
runs-on: "ubuntu-20.04"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BUILD_MANIFEST_NAME: target/distrib/global-dist-manifest.json
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Install cached cargo-dist
uses: actions/download-artifact@v4
with:
name: cargo-dist-cache
path: ~/.cargo/bin/
- run: chmod +x ~/.cargo/bin/cargo-dist
# Get all the local artifacts for the global tasks to use (for e.g. checksums)
- name: Fetch local artifacts
uses: actions/download-artifact@v4
with:
pattern: artifacts-*
path: target/distrib/
merge-multiple: true
- id: cargo-dist
shell: bash
run: |
cargo dist build ${{ needs.plan.outputs.tag-flag }} --output-format=json "--artifacts=global" > dist-manifest.json
echo "cargo dist ran successfully"
# Parse out what we just built and upload it to scratch storage
echo "paths<<EOF" >> "$GITHUB_OUTPUT"
jq --raw-output ".upload_files[]" dist-manifest.json >> "$GITHUB_OUTPUT"
echo "EOF" >> "$GITHUB_OUTPUT"
cp dist-manifest.json "$BUILD_MANIFEST_NAME"
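The `paths<<EOF` lines above use the delimiter syntax for writing a multiline value to `$GITHUB_OUTPUT`; the same idiom against a stand-in file:

    GITHUB_OUTPUT=/tmp/github_output   # stand-in for the runner-provided file
    {
        echo "paths<<EOF"
        printf 'dist/a.tar.gz\ndist/b.tar.gz\n'
        echo "EOF"
    } >> "$GITHUB_OUTPUT"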
- name: "Upload artifacts"
uses: actions/upload-artifact@v4
with:
name: artifacts-build-global
path: |
${{ steps.cargo-dist.outputs.paths }}
${{ env.BUILD_MANIFEST_NAME }}
# Determines if we should publish/announce
host:
needs:
- plan
- custom-build-binaries
- custom-build-docker
- build-global-artifacts
# Only run if we're "publishing", and only if local and global didn't fail (skipped is fine)
if: ${{ always() && needs.plan.outputs.publishing == 'true' && (needs.build-global-artifacts.result == 'skipped' || needs.build-global-artifacts.result == 'success') && (needs.custom-build-binaries.result == 'skipped' || needs.custom-build-binaries.result == 'success') && (needs.custom-build-docker.result == 'skipped' || needs.custom-build-docker.result == 'success') }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
runs-on: "ubuntu-20.04"
outputs:
val: ${{ steps.host.outputs.manifest }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Install cached cargo-dist
uses: actions/download-artifact@v4
with:
name: cargo-dist-cache
path: ~/.cargo/bin/
- run: chmod +x ~/.cargo/bin/cargo-dist
# Fetch artifacts from scratch-storage
- name: Fetch artifacts
uses: actions/download-artifact@v4
with:
pattern: artifacts-*
path: target/distrib/
merge-multiple: true
# This is a harmless no-op for GitHub Releases, hosting for that happens in "announce"
- id: host
shell: bash
run: |
cargo dist host ${{ needs.plan.outputs.tag-flag }} --steps=upload --steps=release --output-format=json > dist-manifest.json
echo "artifacts uploaded and released successfully"
cat dist-manifest.json
echo "manifest=$(jq -c "." dist-manifest.json)" >> "$GITHUB_OUTPUT"
- name: "Upload dist-manifest.json"
uses: actions/upload-artifact@v4
with:
# Overwrite the previous copy
name: artifacts-dist-manifest
path: dist-manifest.json
custom-publish-pypi:
needs:
- plan
- host
if: ${{ !fromJson(needs.plan.outputs.val).announcement_is_prerelease || fromJson(needs.plan.outputs.val).publish_prereleases }}
uses: ./.github/workflows/publish-pypi.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
# publish jobs get escalated permissions
permissions:
"id-token": "write"
"packages": "write"
custom-publish-wasm:
needs:
- plan
- host
if: ${{ !fromJson(needs.plan.outputs.val).announcement_is_prerelease || fromJson(needs.plan.outputs.val).publish_prereleases }}
uses: ./.github/workflows/publish-wasm.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
# publish jobs get escalated permissions
permissions:
"contents": "read"
"id-token": "write"
"packages": "write"
# Create a GitHub Release while uploading all files to it
announce:
needs:
- plan
- host
- custom-publish-pypi
- custom-publish-wasm
# use "always() && ..." to allow us to wait for all publish jobs while
# still allowing individual publish jobs to skip themselves (for prereleases).
# "host" however must run to completion, no skipping allowed!
if: ${{ always() && needs.host.result == 'success' && (needs.custom-publish-pypi.result == 'skipped' || needs.custom-publish-pypi.result == 'success') && (needs.custom-publish-wasm.result == 'skipped' || needs.custom-publish-wasm.result == 'success') }}
runs-on: "ubuntu-20.04"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
# Create a GitHub Release while uploading all files to it
- name: "Download GitHub Artifacts"
uses: actions/download-artifact@v4
with:
pattern: artifacts-*
path: artifacts
merge-multiple: true
- name: Cleanup
run: |
# Remove the granular manifests
rm -f artifacts/*-dist-manifest.json
- name: Create GitHub Release
env:
PRERELEASE_FLAG: "${{ fromJson(needs.host.outputs.val).announcement_is_prerelease && '--prerelease' || '' }}"
ANNOUNCEMENT_TITLE: "${{ fromJson(needs.host.outputs.val).announcement_title }}"
ANNOUNCEMENT_BODY: "${{ fromJson(needs.host.outputs.val).announcement_github_body }}"
RELEASE_COMMIT: "${{ github.sha }}"
run: |
# Write and read notes from a file to avoid quoting breaking things
echo "$ANNOUNCEMENT_BODY" > $RUNNER_TEMP/notes.txt
gh release create "${{ needs.plan.outputs.tag }}" --target "$RELEASE_COMMIT" $PRERELEASE_FLAG --title "$ANNOUNCEMENT_TITLE" --notes-file "$RUNNER_TEMP/notes.txt" artifacts/*
custom-notify-dependents:
needs:
- plan
- announce
uses: ./.github/workflows/notify-dependents.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
custom-publish-docs:
needs:
- plan
- announce
uses: ./.github/workflows/publish-docs.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
custom-publish-playground:
needs:
- plan
- announce
uses: ./.github/workflows/publish-playground.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit


@@ -1,80 +0,0 @@
name: Sync typeshed
on:
workflow_dispatch:
schedule:
# Run on the 1st and the 15th of every month:
- cron: "0 0 1,15 * *"
env:
FORCE_COLOR: 1
GH_TOKEN: ${{ github.token }}
jobs:
sync:
name: Sync typeshed
runs-on: ubuntu-latest
timeout-minutes: 20
# Don't run the cron job on forks:
if: ${{ github.repository == 'astral-sh/ruff' || github.event_name != 'schedule' }}
permissions:
contents: write
pull-requests: write
steps:
- uses: actions/checkout@v4
name: Checkout Ruff
with:
path: ruff
- uses: actions/checkout@v4
name: Checkout typeshed
with:
repository: python/typeshed
path: typeshed
- name: Setup git
run: |
git config --global user.name typeshedbot
git config --global user.email '<>'
- name: Sync typeshed
id: sync
run: |
rm -rf ruff/crates/red_knot_module_resolver/vendor/typeshed
mkdir ruff/crates/red_knot_module_resolver/vendor/typeshed
cp typeshed/README.md ruff/crates/red_knot_module_resolver/vendor/typeshed
cp typeshed/LICENSE ruff/crates/red_knot_module_resolver/vendor/typeshed
cp -r typeshed/stdlib ruff/crates/red_knot_module_resolver/vendor/typeshed/stdlib
rm -rf ruff/crates/red_knot_module_resolver/vendor/typeshed/stdlib/@tests
git -C typeshed rev-parse HEAD > ruff/crates/red_knot_module_resolver/vendor/typeshed/source_commit.txt
- name: Commit the changes
id: commit
if: ${{ steps.sync.outcome == 'success' }}
run: |
cd ruff
git checkout -b typeshedbot/sync-typeshed
git add .
git diff --staged --quiet || git commit -m "Sync typeshed. Source commit: https://github.com/python/typeshed/commit/$(git -C ../typeshed rev-parse HEAD)"
- name: Create a PR
if: ${{ steps.sync.outcome == 'success' && steps.commit.outcome == 'success' }}
run: |
cd ruff
git push --force origin typeshedbot/sync-typeshed
gh pr list --repo $GITHUB_REPOSITORY --head typeshedbot/sync-typeshed --json id --jq length | grep 1 && exit 0 # exit if there is existing pr
gh pr create --title "Sync vendored typeshed stubs" --body "Close and reopen this PR to trigger CI" --label "internal"
create-issue-on-failure:
name: Create an issue if the typeshed sync failed
runs-on: ubuntu-latest
needs: [sync]
if: ${{ github.repository == 'astral-sh/ruff' && always() && github.event_name == 'schedule' && needs.sync.result == 'failure' }}
permissions:
issues: write
steps:
- uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
await github.rest.issues.create({
owner: "astral-sh",
repo: "ruff",
title: `Automated typeshed sync failed on ${new Date().toDateString()}`,
body: "Runs are listed here: https://github.com/astral-sh/ruff/actions/workflows/sync_typeshed.yaml",
})


@@ -2,7 +2,7 @@ fail_fast: true
exclude: |
(?x)^(
crates/red_knot_module_resolver/vendor/.*|
crates/red_knot/vendor/.*|
crates/ruff_linter/resources/.*|
crates/ruff_linter/src/rules/.*/snapshots/.*|
crates/ruff/resources/.*|
@@ -14,7 +14,7 @@ exclude: |
repos:
- repo: https://github.com/abravalheri/validate-pyproject
rev: v0.18
rev: v0.17
hooks:
- id: validate-pyproject
@@ -32,7 +32,7 @@ repos:
)$
- repo: https://github.com/igorshubovych/markdownlint-cli
rev: v0.41.0
rev: v0.40.0
hooks:
- id: markdownlint-fix
exclude: |
@@ -42,7 +42,7 @@ repos:
)$
- repo: https://github.com/crate-ci/typos
rev: v1.23.2
rev: v1.21.0
hooks:
- id: typos
@@ -56,7 +56,7 @@ repos:
pass_filenames: false # This makes it a lot faster
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.5.2
rev: v0.4.4
hooks:
- id: ruff-format
- id: ruff


@@ -1,2 +0,0 @@
# Auto-generated by `cargo-dist`.
.github/workflows/release.yml


@@ -1,5 +0,0 @@
{
"recommendations": [
"rust-lang.rust-analyzer"
]
}


@@ -1,6 +0,0 @@
{
"rust-analyzer.check.extraArgs": [
"--all-features"
],
"rust-analyzer.check.command": "clippy",
}


@@ -1,12 +1,5 @@
# Breaking Changes
## 0.5.0
- Follow the XDG specification to discover user-level configurations on macOS (same as on other Unix platforms)
- Selecting `ALL` now excludes deprecated rules
- The released archives now include an extra level of nesting, which can be removed with `--strip-components=1` when untarring.
- The release artifact's file name no longer includes the version tag. This enables users to install via `/latest` URLs on GitHub.
## 0.3.0
### Ruff 2024.2 style


@@ -1,478 +1,5 @@
# Changelog
## 0.5.3
**Ruff 0.5.3 marks the stable release of the Ruff language server and introduces revamped
[documentation](https://docs.astral.sh/ruff/editors), including [setup guides for your editor of
choice](https://docs.astral.sh/ruff/editors/setup) and [the language server
itself](https://docs.astral.sh/ruff/editors/settings)**.
### Preview features
- Formatter: Insert empty line between suite and alternative branch after function/class definition ([#12294](https://github.com/astral-sh/ruff/pull/12294))
- \[`pyupgrade`\] Implement `unnecessary-default-type-args` (`UP043`) ([#12371](https://github.com/astral-sh/ruff/pull/12371))
### Rule changes
- \[`flake8-bugbear`\] Detect enumerate iterations in `loop-iterator-mutation` (`B909`) ([#12366](https://github.com/astral-sh/ruff/pull/12366))
- \[`flake8-bugbear`\] Remove `discard`, `remove`, and `pop` allowance for `loop-iterator-mutation` (`B909`) ([#12365](https://github.com/astral-sh/ruff/pull/12365))
- \[`pylint`\] Allow `repeated-equality-comparison` for mixed operations (`PLR1714`) ([#12369](https://github.com/astral-sh/ruff/pull/12369))
- \[`pylint`\] Ignore `self` and `cls` when counting arguments (`PLR0913`) ([#12367](https://github.com/astral-sh/ruff/pull/12367))
- \[`pylint`\] Use UTF-8 as default encoding in `unspecified-encoding` fix (`PLW1514`) ([#12370](https://github.com/astral-sh/ruff/pull/12370))
### Server
- Build settings index in parallel for the native server ([#12299](https://github.com/astral-sh/ruff/pull/12299))
- Use fallback settings when indexing the project ([#12362](https://github.com/astral-sh/ruff/pull/12362))
- Consider `--preview` flag for `server` subcommand for the linter and formatter ([#12208](https://github.com/astral-sh/ruff/pull/12208))
### Bug fixes
- \[`flake8-comprehensions`\] Allow additional arguments for `sum` and `max` comprehensions (`C419`) ([#12364](https://github.com/astral-sh/ruff/pull/12364))
- \[`pylint`\] Avoid dropping extra boolean operations in `repeated-equality-comparison` (`PLR1714`) ([#12368](https://github.com/astral-sh/ruff/pull/12368))
- \[`pylint`\] Consider expression before statement when determining binding kind (`PLR1704`) ([#12346](https://github.com/astral-sh/ruff/pull/12346))
### Documentation
- Add docs for Ruff language server ([#12344](https://github.com/astral-sh/ruff/pull/12344))
- Migrate to standalone docs repo ([#12341](https://github.com/astral-sh/ruff/pull/12341))
- Update versioning policy for editor integration ([#12375](https://github.com/astral-sh/ruff/pull/12375))
### Other changes
- Publish Wasm API to npm ([#12317](https://github.com/astral-sh/ruff/pull/12317))
## 0.5.2
### Preview features
- Use `space` separator before parenthesized expressions in comprehensions with leading comments ([#12282](https://github.com/astral-sh/ruff/pull/12282))
- \[`flake8-async`\] Update `ASYNC100` to include `anyio` and `asyncio` ([#12221](https://github.com/astral-sh/ruff/pull/12221))
- \[`flake8-async`\] Update `ASYNC109` to include `anyio` and `asyncio` ([#12236](https://github.com/astral-sh/ruff/pull/12236))
- \[`flake8-async`\] Update `ASYNC110` to include `anyio` and `asyncio` ([#12261](https://github.com/astral-sh/ruff/pull/12261))
- \[`flake8-async`\] Update `ASYNC115` to include `anyio` and `asyncio` ([#12262](https://github.com/astral-sh/ruff/pull/12262))
- \[`flake8-async`\] Update `ASYNC116` to include `anyio` and `asyncio` ([#12266](https://github.com/astral-sh/ruff/pull/12266))
### Rule changes
- \[`flake8-return`\] Exempt properties from explicit return rule (`RET501`) ([#12243](https://github.com/astral-sh/ruff/pull/12243))
- \[`numpy`\] Add `np.NAN`-to-`np.nan` diagnostic ([#12292](https://github.com/astral-sh/ruff/pull/12292))
- \[`refurb`\] Make `list-reverse-copy` an unsafe fix ([#12303](https://github.com/astral-sh/ruff/pull/12303))
### Server
- Consider `include` and `extend-include` settings in native server ([#12252](https://github.com/astral-sh/ruff/pull/12252))
- Include nested configurations in settings reloading ([#12253](https://github.com/astral-sh/ruff/pull/12253))
### CLI
- Omit code frames for fixes with empty ranges ([#12304](https://github.com/astral-sh/ruff/pull/12304))
- Warn about formatter incompatibility for `D203` ([#12238](https://github.com/astral-sh/ruff/pull/12238))
### Bug fixes
- Make cache-write failures non-fatal on Windows ([#12302](https://github.com/astral-sh/ruff/pull/12302))
- Treat `not` operations as boolean tests ([#12301](https://github.com/astral-sh/ruff/pull/12301))
- \[`flake8-bandit`\] Avoid `S310` violations for HTTP-safe f-strings ([#12305](https://github.com/astral-sh/ruff/pull/12305))
- \[`flake8-bandit`\] Support explicit string concatenations in S310 HTTP detection ([#12315](https://github.com/astral-sh/ruff/pull/12315))
- \[`flake8-bandit`\] fix S113 false positive for httpx without `timeout` argument ([#12213](https://github.com/astral-sh/ruff/pull/12213))
- \[`pycodestyle`\] Remove "non-obvious" allowance for E721 ([#12300](https://github.com/astral-sh/ruff/pull/12300))
- \[`pyflakes`\] Consider `with` blocks as single-item branches for redefinition analysis ([#12311](https://github.com/astral-sh/ruff/pull/12311))
- \[`refurb`\] Restrict forwarding for `newline` argument in `open()` calls to Python versions >= 3.10 ([#12244](https://github.com/astral-sh/ruff/pull/12244))
### Documentation
- Update help and documentation to reflect `--output-format full` default ([#12248](https://github.com/astral-sh/ruff/pull/12248))
### Performance
- Use more threads when discovering Python files ([#12258](https://github.com/astral-sh/ruff/pull/12258))
## 0.5.1
### Preview features
- \[`flake8-bugbear`\] Implement mutable-contextvar-default (B039) ([#12113](https://github.com/astral-sh/ruff/pull/12113))
- \[`pycodestyle`\] Whitespace after decorator (`E204`) ([#12140](https://github.com/astral-sh/ruff/pull/12140))
- \[`pytest`\] Reverse `PT001` and `PT023` defaults ([#12106](https://github.com/astral-sh/ruff/pull/12106))
### Rule changes
- Enable token-based rules on source with syntax errors ([#11950](https://github.com/astral-sh/ruff/pull/11950))
- \[`flake8-bandit`\] Detect `httpx` for `S113` ([#12174](https://github.com/astral-sh/ruff/pull/12174))
- \[`numpy`\] Update `NPY201` to include exception deprecations ([#12065](https://github.com/astral-sh/ruff/pull/12065))
- \[`pylint`\] Generate autofix for `duplicate-bases` (`PLE0241`) ([#12105](https://github.com/astral-sh/ruff/pull/12105))
### Server
- Avoid syntax error notification for source code actions ([#12148](https://github.com/astral-sh/ruff/pull/12148))
- Consider the content of the new cells during notebook sync ([#12203](https://github.com/astral-sh/ruff/pull/12203))
- Fix replacement edit range computation ([#12171](https://github.com/astral-sh/ruff/pull/12171))
### Bug fixes
- Disable auto-fix when source has syntax errors ([#12134](https://github.com/astral-sh/ruff/pull/12134))
- Fix cache key collisions for paths with separators ([#12159](https://github.com/astral-sh/ruff/pull/12159))
- Make `requires-python` inference robust to `==` ([#12091](https://github.com/astral-sh/ruff/pull/12091))
- Use char-wise width instead of `str`-width ([#12135](https://github.com/astral-sh/ruff/pull/12135))
- \[`pycodestyle`\] Avoid `E275` if keyword followed by comma ([#12136](https://github.com/astral-sh/ruff/pull/12136))
- \[`pycodestyle`\] Avoid `E275` if keyword is followed by a semicolon ([#12095](https://github.com/astral-sh/ruff/pull/12095))
- \[`pylint`\] Skip [dummy variables](https://docs.astral.sh/ruff/settings/#lint_dummy-variable-rgx) for `PLR1704` ([#12190](https://github.com/astral-sh/ruff/pull/12190))
### Performance
- Remove allocation in `parse_identifier` ([#12103](https://github.com/astral-sh/ruff/pull/12103))
- Use `CompactString` for `Identifier` AST node ([#12101](https://github.com/astral-sh/ruff/pull/12101))
## 0.5.0
Check out the [blog post](https://astral.sh/blog/ruff-v0.5.0) for a migration guide and overview of the changes!
### Breaking changes
See also, the "Remapped rules" section which may result in disabled rules.
- Follow the XDG specification to discover user-level configurations on macOS (same as on other Unix platforms)
- Selecting `ALL` now excludes deprecated rules
- The released archives now include an extra level of nesting, which can be removed with `--strip-components=1` when untarring.
- The release artifact's file name no longer includes the version tag. This enables users to install via `/latest` URLs on GitHub.
- The diagnostic ranges for some `flake8-bandit` rules were modified ([#10667](https://github.com/astral-sh/ruff/pull/10667)).
### Deprecations
The following rules are now deprecated:
- [`syntax-error`](https://docs.astral.sh/ruff/rules/syntax-error/) (`E999`): Syntax errors are now always shown
### Remapped rules
The following rules have been remapped to new rule codes:
- [`blocking-http-call-in-async-function`](https://docs.astral.sh/ruff/rules/blocking-http-call-in-async-function/): `ASYNC100` to `ASYNC210`
- [`open-sleep-or-subprocess-in-async-function`](https://docs.astral.sh/ruff/rules/open-sleep-or-subprocess-in-async-function/): `ASYNC101` split into `ASYNC220`, `ASYNC221`, `ASYNC230`, and `ASYNC251`
- [`blocking-os-call-in-async-function`](https://docs.astral.sh/ruff/rules/blocking-os-call-in-async-function/): `ASYNC102` has been merged into `ASYNC220` and `ASYNC221`
- [`trio-timeout-without-await`](https://docs.astral.sh/ruff/rules/trio-timeout-without-await/): `TRIO100` to `ASYNC100`
- [`trio-sync-call`](https://docs.astral.sh/ruff/rules/trio-sync-call/): `TRIO105` to `ASYNC105`
- [`trio-async-function-with-timeout`](https://docs.astral.sh/ruff/rules/trio-async-function-with-timeout/): `TRIO109` to `ASYNC109`
- [`trio-unneeded-sleep`](https://docs.astral.sh/ruff/rules/trio-unneeded-sleep/): `TRIO110` to `ASYNC110`
- [`trio-zero-sleep-call`](https://docs.astral.sh/ruff/rules/trio-zero-sleep-call/): `TRIO115` to `ASYNC115`
- [`repeated-isinstance-calls`](https://docs.astral.sh/ruff/rules/repeated-isinstance-calls/): `PLR1701` to `SIM101`
### Stabilization
The following rules have been stabilized and are no longer in preview:
- [`mutable-fromkeys-value`](https://docs.astral.sh/ruff/rules/mutable-fromkeys-value/) (`RUF024`)
- [`default-factory-kwarg`](https://docs.astral.sh/ruff/rules/default-factory-kwarg/) (`RUF026`)
- [`django-extra`](https://docs.astral.sh/ruff/rules/django-extra/) (`S610`)
- [`manual-dict-comprehension`](https://docs.astral.sh/ruff/rules/manual-dict-comprehension/) (`PERF403`)
- [`print-empty-string`](https://docs.astral.sh/ruff/rules/print-empty-string/) (`FURB105`)
- [`readlines-in-for`](https://docs.astral.sh/ruff/rules/readlines-in-for/) (`FURB129`)
- [`if-expr-min-max`](https://docs.astral.sh/ruff/rules/if-expr-min-max/) (`FURB136`)
- [`bit-count`](https://docs.astral.sh/ruff/rules/bit-count/) (`FURB161`)
- [`redundant-log-base`](https://docs.astral.sh/ruff/rules/redundant-log-base/) (`FURB163`)
- [`regex-flag-alias`](https://docs.astral.sh/ruff/rules/regex-flag-alias/) (`FURB167`)
- [`isinstance-type-none`](https://docs.astral.sh/ruff/rules/isinstance-type-none/) (`FURB168`)
- [`type-none-comparison`](https://docs.astral.sh/ruff/rules/type-none-comparison/) (`FURB169`)
- [`implicit-cwd`](https://docs.astral.sh/ruff/rules/implicit-cwd/) (`FURB177`)
- [`hashlib-digest-hex`](https://docs.astral.sh/ruff/rules/hashlib-digest-hex/) (`FURB181`)
- [`list-reverse-copy`](https://docs.astral.sh/ruff/rules/list-reverse-copy/) (`FURB187`)
- [`bad-open-mode`](https://docs.astral.sh/ruff/rules/bad-open-mode/) (`PLW1501`)
- [`empty-comment`](https://docs.astral.sh/ruff/rules/empty-comment/) (`PLR2044`)
- [`global-at-module-level`](https://docs.astral.sh/ruff/rules/global-at-module-level/) (`PLW0604`)
- [`misplaced-bare-raise`](https://docs.astral.sh/ruff/rules/misplaced-bare-raise/) (`PLE0744`)
- [`non-ascii-import-name`](https://docs.astral.sh/ruff/rules/non-ascii-import-name/) (`PLC2403`)
- [`non-ascii-name`](https://docs.astral.sh/ruff/rules/non-ascii-name/) (`PLC2401`)
- [`nonlocal-and-global`](https://docs.astral.sh/ruff/rules/nonlocal-and-global/) (`PLE0115`)
- [`potential-index-error`](https://docs.astral.sh/ruff/rules/potential-index-error/) (`PLE0643`)
- [`redeclared-assigned-name`](https://docs.astral.sh/ruff/rules/redeclared-assigned-name/) (`PLW0128`)
- [`redefined-argument-from-local`](https://docs.astral.sh/ruff/rules/redefined-argument-from-local/) (`PLR1704`)
- [`repeated-keyword-argument`](https://docs.astral.sh/ruff/rules/repeated-keyword-argument/) (`PLE1132`)
- [`super-without-brackets`](https://docs.astral.sh/ruff/rules/super-without-brackets/) (`PLW0245`)
- [`unnecessary-list-index-lookup`](https://docs.astral.sh/ruff/rules/unnecessary-list-index-lookup/) (`PLR1736`)
- [`useless-exception-statement`](https://docs.astral.sh/ruff/rules/useless-exception-statement/) (`PLW0133`)
- [`useless-with-lock`](https://docs.astral.sh/ruff/rules/useless-with-lock/) (`PLW2101`)
The following behaviors have been stabilized:
- [`is-literal`](https://docs.astral.sh/ruff/rules/is-literal/) (`F632`) now warns for identity checks against list, set or dictionary literals
- [`needless-bool`](https://docs.astral.sh/ruff/rules/needless-bool/) (`SIM103`) now detects `if` expressions with implicit `else` branches
- [`module-import-not-at-top-of-file`](https://docs.astral.sh/ruff/rules/module-import-not-at-top-of-file/) (`E402`) now allows `os.environ` modifications between import statements
- [`type-comparison`](https://docs.astral.sh/ruff/rules/type-comparison/) (`E721`) now allows idioms such as `type(x) is int`
- [`yoda-conditions`](https://docs.astral.sh/ruff/rules/yoda-conditions/) (`SIM300`) now flags a wider range of expressions
### Removals
The following deprecated settings have been removed:
- `output-format=text`; use `output-format=concise` or `output-format=full`
- `tab-size`; use `indent-width`
The following deprecated CLI options have been removed:
- `--show-source`; use `--output-format=full`
- `--no-show-source`; use `--output-format=concise`
The following deprecated CLI commands have been removed:
- `ruff <path>`; use `ruff check <path>`
- `ruff --clean`; use `ruff clean`
- `ruff --generate-shell-completion`; use `ruff generate-shell-completion`
### Preview features
- \[`ruff`\] Add `assert-with-print-message` rule ([#11981](https://github.com/astral-sh/ruff/pull/11981))
### CLI
- Use rule name rather than message in `--statistics` ([#11697](https://github.com/astral-sh/ruff/pull/11697))
- Use the output format `full` by default ([#12010](https://github.com/astral-sh/ruff/pull/12010))
- Don't log syntax errors to the console ([#11902](https://github.com/astral-sh/ruff/pull/11902))
### Rule changes
- \[`ruff`\] Fix false positives if `gettext` is imported using an alias (`RUF027`) ([#12025](https://github.com/astral-sh/ruff/pull/12025))
- \[`numpy`\] Update `trapz` and `in1d` deprecation (`NPY201`) ([#11948](https://github.com/astral-sh/ruff/pull/11948))
- \[`flake8-bandit`\] Modify diagnostic ranges for shell-related rules ([#10667](https://github.com/astral-sh/ruff/pull/10667))
### Server
- Closing an untitled, unsaved notebook document no longer throws an error ([#11942](https://github.com/astral-sh/ruff/pull/11942))
- Support the usage of tildes and environment variables in `logFile` ([#11945](https://github.com/astral-sh/ruff/pull/11945))
- Add option to configure whether to show syntax errors ([#12059](https://github.com/astral-sh/ruff/pull/12059))
### Bug fixes
- \[`pycodestyle`\] Avoid `E203` for f-string debug expression ([#12024](https://github.com/astral-sh/ruff/pull/12024))
- \[`pep8-naming`\] Match import-name ignores against both name and alias (`N812`, `N817`) ([#12033](https://github.com/astral-sh/ruff/pull/12033))
- \[`pyflakes`\] Detect assignments that shadow definitions (`F811`) ([#11961](https://github.com/astral-sh/ruff/pull/11961))
### Parser
- Emit a syntax error for an empty type parameter list ([#12030](https://github.com/astral-sh/ruff/pull/12030))
- Avoid consuming the newline for unterminated strings ([#12067](https://github.com/astral-sh/ruff/pull/12067))
- Do not include the newline in the unterminated string range ([#12017](https://github.com/astral-sh/ruff/pull/12017))
- Use the correct range to highlight line continuation errors ([#12016](https://github.com/astral-sh/ruff/pull/12016))
- Consider 2-character EOL before line continuations ([#12035](https://github.com/astral-sh/ruff/pull/12035))
- Consider line continuation character for re-lexing ([#12008](https://github.com/astral-sh/ruff/pull/12008))
### Other changes
- Upgrade the Unicode table used for measuring the line-length ([#11194](https://github.com/astral-sh/ruff/pull/11194))
- Remove the deprecation error message for the nursery selector ([#10172](https://github.com/astral-sh/ruff/pull/10172))
## 0.4.10
### Parser
- Implement re-lexing logic for better error recovery ([#11845](https://github.com/astral-sh/ruff/pull/11845))
### Rule changes
- \[`flake8-copyright`\] Update `CPY001` to check the first 4096 bytes instead of 1024 ([#11927](https://github.com/astral-sh/ruff/pull/11927))
- \[`pycodestyle`\] Update `E999` to show all syntax errors instead of just the first one ([#11900](https://github.com/astral-sh/ruff/pull/11900))
### Server
- Add tracing setup guide to Helix documentation ([#11883](https://github.com/astral-sh/ruff/pull/11883))
- Add tracing setup guide to Neovim documentation ([#11884](https://github.com/astral-sh/ruff/pull/11884))
- Defer notebook cell deletion to avoid an error message ([#11864](https://github.com/astral-sh/ruff/pull/11864))
### Security
- Guard against malicious ecosystem comment artifacts ([#11879](https://github.com/astral-sh/ruff/pull/11879))
## 0.4.9
### Preview features
- \[`pylint`\] Implement `consider-dict-items` (`C0206`) ([#11688](https://github.com/astral-sh/ruff/pull/11688))
- \[`refurb`\] Implement `repeated-global` (`FURB154`) ([#11187](https://github.com/astral-sh/ruff/pull/11187))
### Rule changes
- \[`pycodestyle`\] Adapt fix for `E203` to work identical to `ruff format` ([#10999](https://github.com/astral-sh/ruff/pull/10999))
### Formatter
- Fix formatter instability for lines only consisting of zero-width characters ([#11748](https://github.com/astral-sh/ruff/pull/11748))
### Server
- Add supported commands in server capabilities ([#11850](https://github.com/astral-sh/ruff/pull/11850))
- Use real file path when available in `ruff server` ([#11800](https://github.com/astral-sh/ruff/pull/11800))
- Improve error message when a command is run on an unavailable document ([#11823](https://github.com/astral-sh/ruff/pull/11823))
- Introduce the `ruff.printDebugInformation` command ([#11831](https://github.com/astral-sh/ruff/pull/11831))
- Tracing system now respects log level and trace level, with options to log to a file ([#11747](https://github.com/astral-sh/ruff/pull/11747))
### CLI
- Handle non-printable characters in diff view ([#11687](https://github.com/astral-sh/ruff/pull/11687))
### Bug fixes
- \[`refurb`\] Avoid suggesting starmap when arguments are used outside call (`FURB140`) ([#11830](https://github.com/astral-sh/ruff/pull/11830))
- \[`flake8-bugbear`\] Avoid panic in `B909` when checking large loop blocks ([#11772](https://github.com/astral-sh/ruff/pull/11772))
- \[`refurb`\] Fix misbehavior of `operator.itemgetter` when getter param is a tuple (`FURB118`) ([#11774](https://github.com/astral-sh/ruff/pull/11774))
## 0.4.8
### Performance
- Linter performance has been improved by around 10% on some microbenchmarks by refactoring the lexer and parser to maintain synchronicity between them ([#11457](https://github.com/astral-sh/ruff/pull/11457))
### Preview features
- \[`flake8-bugbear`\] Implement `return-in-generator` (`B901`) ([#11644](https://github.com/astral-sh/ruff/pull/11644))
- \[`flake8-pyi`\] Implement `PYI063` ([#11699](https://github.com/astral-sh/ruff/pull/11699))
- \[`pygrep-hooks`\] Check blanket ignores via file-level pragmas (`PGH004`) ([#11540](https://github.com/astral-sh/ruff/pull/11540))
### Rule changes
- \[`pyupgrade`\] Update `UP035` for Python 3.13 and the latest version of `typing_extensions` ([#11693](https://github.com/astral-sh/ruff/pull/11693))
- \[`numpy`\] Update `NPY001` rule for NumPy 2.0 ([#11735](https://github.com/astral-sh/ruff/pull/11735))
### Server
- Formatting a document with syntax problems no longer spams a visible error popup ([#11745](https://github.com/astral-sh/ruff/pull/11745))
### CLI
- Add RDJson support for `--output-format` flag ([#11682](https://github.com/astral-sh/ruff/pull/11682))
### Bug fixes
- \[`pyupgrade`\] Write empty string in lieu of panic when fixing `UP032` ([#11696](https://github.com/astral-sh/ruff/pull/11696))
- \[`flake8-simplify`\] Simplify double negatives in `SIM103` ([#11684](https://github.com/astral-sh/ruff/pull/11684))
- Ensure the expression generator adds a newline before `type` statements ([#11720](https://github.com/astral-sh/ruff/pull/11720))
- Respect per-file ignores for blanket and redirected noqa rules ([#11728](https://github.com/astral-sh/ruff/pull/11728))
## 0.4.7
### Preview features
- \[`flake8-pyi`\] Implement `PYI064` ([#11325](https://github.com/astral-sh/ruff/pull/11325))
- \[`flake8-pyi`\] Implement `PYI066` ([#11541](https://github.com/astral-sh/ruff/pull/11541))
- \[`flake8-pyi`\] Implement `PYI057` ([#11486](https://github.com/astral-sh/ruff/pull/11486))
- \[`pyflakes`\] Enable `F822` in `__init__.py` files by default ([#11370](https://github.com/astral-sh/ruff/pull/11370))
### Formatter
- Fix incorrect placement of trailing stub function comments ([#11632](https://github.com/astral-sh/ruff/pull/11632))
### Server
- Respect file exclusions in `ruff server` ([#11590](https://github.com/astral-sh/ruff/pull/11590))
- Add support for documents that do not exist on disk ([#11588](https://github.com/astral-sh/ruff/pull/11588))
- Add Vim and Kate setup guide for `ruff server` ([#11615](https://github.com/astral-sh/ruff/pull/11615))
### Bug fixes
- Avoid removing newlines between docstring headers and rST blocks ([#11609](https://github.com/astral-sh/ruff/pull/11609))
- Infer indentation with imports when logical indent is absent ([#11608](https://github.com/astral-sh/ruff/pull/11608))
- Use char index rather than position for indent slice ([#11645](https://github.com/astral-sh/ruff/pull/11645))
- \[`flake8-comprehensions`\] Strip parentheses around generators in `C400` ([#11607](https://github.com/astral-sh/ruff/pull/11607))
- Mark `repeated-isinstance-calls` as unsafe on Python 3.10 and later ([#11622](https://github.com/astral-sh/ruff/pull/11622))
## 0.4.6
### Breaking changes
- Use project-relative paths when calculating GitLab fingerprints ([#11532](https://github.com/astral-sh/ruff/pull/11532))
- Bump minimum supported Windows version to Windows 10 ([#11613](https://github.com/astral-sh/ruff/pull/11613))
### Preview features
- \[`flake8-async`\] Sleep with >24 hour interval should usually sleep forever (`ASYNC116`) ([#11498](https://github.com/astral-sh/ruff/pull/11498))
### Rule changes
- \[`numpy`\] Add missing functions to NumPy 2.0 migration rule ([#11528](https://github.com/astral-sh/ruff/pull/11528))
- \[`mccabe`\] Consider irrefutable pattern similar to `if .. else` for `C901` ([#11565](https://github.com/astral-sh/ruff/pull/11565))
- Consider `match`-`case` statements for `C901`, `PLR0912`, and `PLR0915` ([#11521](https://github.com/astral-sh/ruff/pull/11521))
- Remove empty strings when converting to f-string (`UP032`) ([#11524](https://github.com/astral-sh/ruff/pull/11524))
- \[`flake8-bandit`\] `request-without-timeout` should warn for `requests.request` ([#11548](https://github.com/astral-sh/ruff/pull/11548))
- \[`flake8-self`\] Ignore sunder accesses in `flake8-self` rules ([#11546](https://github.com/astral-sh/ruff/pull/11546))
- \[`pyupgrade`\] Lint for `TypeAliasType` usages (`UP040`) ([#11530](https://github.com/astral-sh/ruff/pull/11530))
### Server
- Respect excludes in `ruff server` configuration discovery ([#11551](https://github.com/astral-sh/ruff/pull/11551))
- Use default settings if initialization options is empty or not provided ([#11566](https://github.com/astral-sh/ruff/pull/11566))
- `ruff server` correctly treats `.pyi` files as stub files ([#11535](https://github.com/astral-sh/ruff/pull/11535))
- `ruff server` searches for configuration in parent directories ([#11537](https://github.com/astral-sh/ruff/pull/11537))
- `ruff server`: An empty code action filter no longer returns notebook source actions ([#11526](https://github.com/astral-sh/ruff/pull/11526))
### Bug fixes
- \[`flake8-logging-format`\] Fix autofix title in `logging-warn` (`G010`) ([#11514](https://github.com/astral-sh/ruff/pull/11514))
- \[`refurb`\] Avoid recommending `operator.itemgetter` with dependence on lambda arguments ([#11574](https://github.com/astral-sh/ruff/pull/11574))
- \[`flake8-simplify`\] Avoid recommending context manager in `__enter__` implementations ([#11575](https://github.com/astral-sh/ruff/pull/11575))
- Create intermediary directories for `--output-file` ([#11550](https://github.com/astral-sh/ruff/pull/11550))
- Propagate reads on global variables ([#11584](https://github.com/astral-sh/ruff/pull/11584))
- Treat all `singledispatch` arguments as runtime-required ([#11523](https://github.com/astral-sh/ruff/pull/11523))
## 0.4.5
### Ruff's language server is now in Beta
`v0.4.5` marks the official Beta release of `ruff server`, an integrated language server built into Ruff.
`ruff server` supports the same feature set as `ruff-lsp`, powering linting, formatting, and
code fixes in Ruff's editor integrations -- but with superior performance and
no installation required. We'd love your feedback!
You can enable `ruff server` in the [VS Code extension](https://github.com/astral-sh/ruff-vscode?tab=readme-ov-file#enabling-the-rust-based-language-server) today.
To read more about this exciting milestone, check out our [blog post](https://astral.sh/blog/ruff-v0.4.5)!
### Rule changes
- \[`flake8-future-annotations`\] Reword `future-rewritable-type-annotation` (`FA100`) message ([#11381](https://github.com/astral-sh/ruff/pull/11381))
- \[`isort`\] Expanded the set of standard-library modules to include `_string`, etc. ([#11374](https://github.com/astral-sh/ruff/pull/11374))
- \[`pycodestyle`\] Consider soft keywords for `E27` rules ([#11446](https://github.com/astral-sh/ruff/pull/11446))
- \[`pyflakes`\] Recommend adding unused import bindings to `__all__` ([#11314](https://github.com/astral-sh/ruff/pull/11314))
- \[`pyflakes`\] Update documentation and deprecate `ignore_init_module_imports` ([#11436](https://github.com/astral-sh/ruff/pull/11436))
- \[`pyupgrade`\] Mark quotes as unnecessary for non-evaluated annotations ([#11485](https://github.com/astral-sh/ruff/pull/11485))
### Formatter
- Avoid multiline quotes warning with `quote-style = preserve` ([#11490](https://github.com/astral-sh/ruff/pull/11490))
### Server
- Support Jupyter Notebook files ([#11206](https://github.com/astral-sh/ruff/pull/11206))
- Support `noqa` comment code actions ([#11276](https://github.com/astral-sh/ruff/pull/11276))
- Fix automatic configuration reloading ([#11492](https://github.com/astral-sh/ruff/pull/11492))
- Fix several issues with configuration in Neovim and Helix ([#11497](https://github.com/astral-sh/ruff/pull/11497))
### CLI
- Add `--output-format` as a CLI option for `ruff config` ([#11438](https://github.com/astral-sh/ruff/pull/11438))
### Bug fixes
- Avoid `PLE0237` for property with setter ([#11377](https://github.com/astral-sh/ruff/pull/11377))
- Avoid `TCH005` for `if` stmt with `elif`/`else` block ([#11376](https://github.com/astral-sh/ruff/pull/11376))
- Avoid flagging `__future__` annotations as required for non-evaluated type annotations ([#11414](https://github.com/astral-sh/ruff/pull/11414))
- Check for ruff executable in 'bin' directory as installed by 'pip install --target'. ([#11450](https://github.com/astral-sh/ruff/pull/11450))
- Sort edits prior to deduplicating in quotation fix ([#11452](https://github.com/astral-sh/ruff/pull/11452))
- Treat escaped newline as valid sequence ([#11465](https://github.com/astral-sh/ruff/pull/11465))
- \[`flake8-pie`\] Preserve parentheses in `unnecessary-dict-kwargs` ([#11372](https://github.com/astral-sh/ruff/pull/11372))
- \[`pylint`\] Ignore `__slots__` with dynamic values ([#11488](https://github.com/astral-sh/ruff/pull/11488))
- \[`pylint`\] Remove `try` body from branch counting ([#11487](https://github.com/astral-sh/ruff/pull/11487))
- \[`refurb`\] Respect operator precedence in `FURB110` ([#11464](https://github.com/astral-sh/ruff/pull/11464))
### Documentation
- Add `--preview` to the README ([#11395](https://github.com/astral-sh/ruff/pull/11395))
- Add Python 3.13 to list of allowed Python versions ([#11411](https://github.com/astral-sh/ruff/pull/11411))
- Simplify Neovim setup documentation ([#11489](https://github.com/astral-sh/ruff/pull/11489))
- Update CONTRIBUTING.md to reflect the new parser ([#11434](https://github.com/astral-sh/ruff/pull/11434))
- Update server documentation with new migration guide ([#11499](https://github.com/astral-sh/ruff/pull/11499))
- \[`pycodestyle`\] Clarify motivation for `E713` and `E714` ([#11483](https://github.com/astral-sh/ruff/pull/11483))
- \[`pyflakes`\] Update docs to describe WAI behavior (F541) ([#11362](https://github.com/astral-sh/ruff/pull/11362))
- \[`pylint`\] Clearly indicate what is counted as a branch ([#11423](https://github.com/astral-sh/ruff/pull/11423))
## 0.4.4
### Preview features
@@ -547,10 +74,6 @@ To read more about this exciting milestone, check out our [blog post](https://as
- Avoid allocations for isort module names ([#11251](https://github.com/astral-sh/ruff/pull/11251))
- Build a separate ARM wheel for macOS ([#11149](https://github.com/astral-sh/ruff/pull/11149))
### Windows
- Increase the minimum requirement to Windows 10.
## 0.4.2
### Rule changes


@@ -101,8 +101,6 @@ pre-commit run --all-files --show-diff-on-failure # Rust and Python formatting,
These checks will run on GitHub Actions when you open your pull request, but running them locally
will save you time and expedite the merge process.
If you're using VS Code, you can also install the recommended [rust-analyzer](https://marketplace.visualstudio.com/items?itemName=rust-lang.rust-analyzer) extension to get these checks while editing.
Note that many code changes also require updating the snapshot tests, which is done interactively
after running `cargo test` like so:
@@ -280,7 +278,7 @@ These represent, respectively: the schema used to parse the `pyproject.toml` fil
intermediate representation; and the final, internal representation used to power Ruff.
To add a new configuration option, you'll likely want to modify these latter few files (along with
`args.rs`, if appropriate). If you want to pattern-match against an existing example, grep for
`arg.rs`, if appropriate). If you want to pattern-match against an existing example, grep for
`dummy_variable_rgx`, which defines a regular expression to match against acceptable unused
variables (e.g., `_`).
@@ -333,7 +331,7 @@ even patch releases may contain [non-backwards-compatible changes](https://semve
### Creating a new release
1. Install `uv`: `curl -LsSf https://astral.sh/uv/install.sh | sh`
1. Run `./scripts/release.sh`; this command will:
1. Run `./scripts/release/bump.sh`; this command will:
- Generate a temporary virtual environment with `rooster`
- Generate a changelog entry in `CHANGELOG.md`
- Update versions in `pyproject.toml` and `Cargo.toml`
@@ -346,21 +344,22 @@ even patch releases may contain [non-backwards-compatible changes](https://semve
1. Run `cargo check`. This should update the lock file with new versions.
1. Create a pull request with the changelog and version updates
1. Merge the PR
1. Run the [release workflow](https://github.com/astral-sh/ruff/actions/workflows/release.yml) with:
1. Run the [release workflow](https://github.com/astral-sh/ruff/actions/workflows/release.yaml) with:
- The new version number (without starting `v`)
- The commit hash of the merged release pull request on `main`
1. The release workflow will do the following:
1. Build all the assets. If this fails (even though we tested in step 4), we haven't tagged or
uploaded anything, you can restart after pushing a fix. If you just need to rerun the build,
make sure you're [re-running all the failed
jobs](https://docs.github.com/en/actions/managing-workflow-runs/re-running-workflows-and-jobs#re-running-failed-jobs-in-a-workflow) and not just a single failed job.
uploaded anything, you can restart after pushing a fix.
1. Upload to PyPI.
1. Create and push the Git tag (as extracted from `pyproject.toml`). We create the Git tag only
after building the wheels and uploading to PyPI, since we can't delete or modify the tag ([#4468](https://github.com/astral-sh/ruff/issues/4468)).
1. Attach artifacts to draft GitHub release
1. Trigger downstream repositories. This can fail non-catastrophically, as we can run any
downstream jobs manually if needed.
1. Verify the GitHub release:
1. The Changelog should match the content of `CHANGELOG.md`
1. Publish the GitHub release
1. Open the draft release in the GitHub release section
1. Copy the changelog for the release into the GitHub release
- See previous releases for formatting of section headers
1. Append the contributors from the `bump.sh` script
1. If needed, [update the schemastore](https://github.com/astral-sh/ruff/blob/main/scripts/update_schemastore.py).
1. One can determine if an update is needed when
@@ -638,11 +637,11 @@ Otherwise, follow the instructions from the linux section.
`cargo dev` is a shortcut for `cargo run --package ruff_dev --bin ruff_dev`. You can run some useful
utils with it:
- `cargo dev print-ast <file>`: Print the AST of a python file using Ruff's
[Python parser](https://github.com/astral-sh/ruff/tree/main/crates/ruff_python_parser).
For `if True: pass # comment`, you can see the syntax tree, the byte offsets for start and
stop of each node and also how the `:` token, the comment and whitespace are not represented
anymore:
- `cargo dev print-ast <file>`: Print the AST of a python file using the
[RustPython parser](https://github.com/astral-sh/ruff/tree/main/crates/ruff_python_parser) that is
mainly used in Ruff. For `if True: pass # comment`, you can see the syntax tree, the byte offsets
for start and stop of each node and also how the `:` token, the comment and whitespace are not
represented anymore:
```text
[
...
```
Cargo.lock (generated): file diff suppressed because it is too large.


@@ -4,7 +4,7 @@ resolver = "2"
[workspace.package]
edition = "2021"
rust-version = "1.75"
rust-version = "1.71"
homepage = "https://docs.astral.sh/ruff"
documentation = "https://docs.astral.sh/ruff"
repository = "https://github.com/astral-sh/ruff"
@@ -12,33 +12,6 @@ authors = ["Charlie Marsh <charlie.r.marsh@gmail.com>"]
license = "MIT"
[workspace.dependencies]
ruff = { path = "crates/ruff" }
ruff_cache = { path = "crates/ruff_cache" }
ruff_db = { path = "crates/ruff_db" }
ruff_diagnostics = { path = "crates/ruff_diagnostics" }
ruff_formatter = { path = "crates/ruff_formatter" }
ruff_index = { path = "crates/ruff_index" }
ruff_linter = { path = "crates/ruff_linter" }
ruff_macros = { path = "crates/ruff_macros" }
ruff_notebook = { path = "crates/ruff_notebook" }
ruff_python_ast = { path = "crates/ruff_python_ast" }
ruff_python_codegen = { path = "crates/ruff_python_codegen" }
ruff_python_formatter = { path = "crates/ruff_python_formatter" }
ruff_python_index = { path = "crates/ruff_python_index" }
ruff_python_literal = { path = "crates/ruff_python_literal" }
ruff_python_parser = { path = "crates/ruff_python_parser" }
ruff_python_semantic = { path = "crates/ruff_python_semantic" }
ruff_python_stdlib = { path = "crates/ruff_python_stdlib" }
ruff_python_trivia = { path = "crates/ruff_python_trivia" }
ruff_server = { path = "crates/ruff_server" }
ruff_source_file = { path = "crates/ruff_source_file" }
ruff_text_size = { path = "crates/ruff_text_size" }
ruff_workspace = { path = "crates/ruff_workspace" }
red_knot = { path = "crates/red_knot" }
red_knot_module_resolver = { path = "crates/red_knot_module_resolver" }
red_knot_python_semantic = { path = "crates/red_knot_python_semantic" }
aho-corasick = { version = "1.1.3" }
annotate-snippets = { version = "0.9.2", features = ["color"] }
anyhow = { version = "1.0.80" }
@@ -47,57 +20,56 @@ bincode = { version = "1.3.3" }
bitflags = { version = "2.5.0" }
bstr = { version = "1.9.1" }
cachedir = { version = "0.3.1" }
camino = { version = "1.1.7" }
chrono = { version = "0.4.35", default-features = false, features = ["clock"] }
clap = { version = "4.5.3", features = ["derive"] }
clap_complete_command = { version = "0.6.0" }
clap_complete_command = { version = "0.5.1" }
clearscreen = { version = "3.0.0" }
codspeed-criterion-compat = { version = "2.6.0", default-features = false }
colored = { version = "2.1.0" }
console_error_panic_hook = { version = "0.1.7" }
console_log = { version = "1.0.0" }
countme = { version = "3.0.1" }
compact_str = "0.8.0"
criterion = { version = "0.5.1", default-features = false }
crossbeam = { version = "0.8.4" }
dashmap = { version = "6.0.1" }
dashmap = { version = "5.5.3" }
dirs = { version = "5.0.0" }
drop_bomb = { version = "0.1.5" }
env_logger = { version = "0.11.0" }
etcetera = { version = "0.8.0" }
fern = { version = "0.6.1" }
filetime = { version = "0.2.23" }
glob = { version = "0.3.1" }
globset = { version = "0.4.14" }
hashbrown = "0.14.3"
hexf-parse = { version = "0.2.1" }
ignore = { version = "0.4.22" }
imara-diff = { version = "0.1.5" }
imperative = { version = "1.0.4" }
indexmap = { version = "2.2.6" }
indicatif = { version = "0.17.8" }
indoc = { version = "2.0.4" }
insta = { version = "1.35.1" }
insta = { version = "1.35.1", feature = ["filters", "glob"] }
insta-cmd = { version = "0.6.0" }
is-macro = { version = "0.3.5" }
is-wsl = { version = "0.4.0" }
itertools = { version = "0.13.0" }
itertools = { version = "0.12.1" }
js-sys = { version = "0.3.69" }
jod-thread = { version = "0.1.2" }
lexical-parse-float = { version = "0.8.0", features = ["format"] }
libc = { version = "0.2.153" }
libcst = { version = "1.1.0", default-features = false }
log = { version = "0.4.17" }
lsp-server = { version = "0.7.6" }
lsp-types = { git = "https://github.com/astral-sh/lsp-types.git", rev = "3512a9f", features = [
"proposed",
] }
lsp-types = { version = "0.95.0", features = ["proposed"] }
matchit = { version = "0.8.1" }
memchr = { version = "2.7.1" }
mimalloc = { version = "0.1.39" }
natord = { version = "1.0.9" }
notify = { version = "6.1.1" }
once_cell = { version = "1.19.0" }
ordermap = { version = "0.5.0" }
path-absolutize = { version = "3.1.1" }
path-slash = { version = "0.2.1" }
pathdiff = { version = "0.2.1" }
parking_lot = "0.12.1"
pep440_rs = { version = "0.6.0", features = ["serde"] }
pretty_assertions = "1.3.0"
proc-macro2 = { version = "1.0.79" }
@@ -107,17 +79,15 @@ quote = { version = "1.0.23" }
rand = { version = "0.8.5" }
rayon = { version = "1.10.0" }
regex = { version = "1.10.2" }
rustc-hash = { version = "2.0.0" }
salsa = { git = "https://github.com/salsa-rs/salsa.git", rev = "a1bf3a613f451af7fc0a59411c56abc47fe8e8e1" }
result-like = { version = "0.5.0" }
rustc-hash = { version = "1.1.0" }
schemars = { version = "0.8.16" }
seahash = { version = "4.1.0" }
serde = { version = "1.0.197", features = ["derive"] }
serde-wasm-bindgen = { version = "0.6.4" }
serde_json = { version = "1.0.113" }
serde_test = { version = "1.0.152" }
serde_with = { version = "3.6.0", default-features = false, features = [
"macros",
] }
serde_with = { version = "3.6.0", default-features = false, features = ["macros"] }
shellexpand = { version = "3.0.0" }
similar = { version = "2.4.0", features = ["inline"] }
smallvec = { version = "1.13.2" }
@@ -128,7 +98,7 @@ syn = { version = "2.0.55" }
tempfile = { version = "3.9.0" }
test-case = { version = "3.3.1" }
thiserror = { version = "1.0.58" }
tikv-jemallocator = { version = "0.6.0" }
tikv-jemallocator = { version = "0.5.0" }
toml = { version = "0.8.11" }
tracing = { version = "0.1.40" }
tracing-indicatif = { version = "0.3.6" }
@@ -142,17 +112,11 @@ unicode_names2 = { version = "1.2.2" }
unicode-normalization = { version = "0.1.23" }
ureq = { version = "2.9.6" }
url = { version = "2.5.0" }
uuid = { version = "1.6.1", features = [
"v4",
"fast-rng",
"macro-diagnostics",
"js",
] }
uuid = { version = "1.6.1", features = ["v4", "fast-rng", "macro-diagnostics", "js"] }
walkdir = { version = "2.3.2" }
wasm-bindgen = { version = "0.2.92" }
wasm-bindgen-test = { version = "0.3.42" }
wild = { version = "2" }
zip = { version = "0.6.6", default-features = false, features = ["zstd"] }
[workspace.lints.rust]
unsafe_code = "warn"
@@ -220,62 +184,3 @@ opt-level = 1
[profile.profiling]
inherits = "release"
debug = 1
# The profile that 'cargo dist' will build with.
[profile.dist]
inherits = "release"
# Config for 'cargo dist'
[workspace.metadata.dist]
# The preferred cargo-dist version to use in CI (Cargo.toml SemVer syntax)
cargo-dist-version = "0.18.0"
# CI backends to support
ci = ["github"]
# The installers to generate for each app
installers = ["shell", "powershell"]
# The archive format to use for windows builds (defaults .zip)
windows-archive = ".zip"
# The archive format to use for non-windows builds (defaults .tar.xz)
unix-archive = ".tar.gz"
# Target platforms to build apps for (Rust target-triple syntax)
targets = [
"aarch64-apple-darwin",
"aarch64-pc-windows-msvc",
"aarch64-unknown-linux-gnu",
"aarch64-unknown-linux-musl",
"arm-unknown-linux-musleabihf",
"armv7-unknown-linux-gnueabihf",
"armv7-unknown-linux-musleabihf",
"i686-pc-windows-msvc",
"i686-unknown-linux-gnu",
"i686-unknown-linux-musl",
"powerpc64-unknown-linux-gnu",
"powerpc64le-unknown-linux-gnu",
"s390x-unknown-linux-gnu",
"x86_64-apple-darwin",
"x86_64-pc-windows-msvc",
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
]
# Whether to auto-include files like READMEs, LICENSEs, and CHANGELOGs (default true)
auto-includes = false
# Whether cargo-dist should create a GitHub Release or use an existing draft
create-release = true
# Publish jobs to run in CI
pr-run-mode = "skip"
# Whether CI should trigger releases with dispatches instead of tag pushes
dispatch-releases = true
# The stage during which the GitHub Release should be created
github-release = "announce"
# Whether CI should include auto-generated code to build local artifacts
build-local-artifacts = false
# Local artifacts jobs to run in CI
local-artifacts-jobs = ["./build-binaries", "./build-docker"]
# Publish jobs to run in CI
publish-jobs = ["./publish-pypi", "./publish-wasm"]
# Announcement jobs to run in CI
post-announce-jobs = ["./notify-dependents", "./publish-docs", "./publish-playground"]
# Custom permissions for GitHub Jobs
github-custom-job-permissions = { "build-docker" = { packages = "write", contents = "read" }, "publish-wasm" = { contents = "read", id-token = "write", packages = "write" } }
# Whether to install an updater program
install-updater = false


@@ -28,7 +28,7 @@ An extremely fast Python linter and code formatter, written in Rust.
- ⚡️ 10-100x faster than existing linters (like Flake8) and formatters (like Black)
- 🐍 Installable via `pip`
- 🛠️ `pyproject.toml` support
- 🤝 Python 3.13 compatibility
- 🤝 Python 3.12 compatibility
- ⚖️ Drop-in parity with [Flake8](https://docs.astral.sh/ruff/faq/#how-does-ruff-compare-to-flake8), isort, and Black
- 📦 Built-in caching, to avoid re-analyzing unchanged files
- 🔧 Fix support, for automatic error correction (e.g., automatically remove unused imports)
@@ -119,25 +119,7 @@ For more, see the [documentation](https://docs.astral.sh/ruff/).
Ruff is available as [`ruff`](https://pypi.org/project/ruff/) on PyPI:
```shell
# With pip.
pip install ruff
# With pipx.
pipx install ruff
```
Starting with version `0.5.0`, Ruff can be installed with our standalone installers:
```shell
# On macOS and Linux.
curl -LsSf https://astral.sh/ruff/install.sh | sh
# On Windows.
powershell -c "irm https://astral.sh/ruff/install.ps1 | iex"
# For a specific version.
curl -LsSf https://astral.sh/ruff/0.5.3/install.sh | sh
powershell -c "irm https://astral.sh/ruff/0.5.3/install.ps1 | iex"
```
You can also install Ruff via [Homebrew](https://formulae.brew.sh/formula/ruff), [Conda](https://anaconda.org/conda-forge/ruff),
@@ -170,7 +152,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com/) hook via [`ruff
```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.5.3
rev: v0.4.4
hooks:
# Run the linter.
- id: ruff
@@ -284,11 +266,6 @@ The remaining configuration options can be provided through a catch-all `--confi
ruff check --config "lint.per-file-ignores = {'some_file.py' = ['F841']}"
```
To opt in to the latest lint rules, formatter style changes, interface updates, and more, enable
[preview mode](https://docs.astral.sh/ruff/rules/) by setting `preview = true` in your configuration
file or passing `--preview` on the command line. Preview mode enables a collection of unstable
features that may change prior to stabilization.
See `ruff help` for more on Ruff's top-level commands, or `ruff help check` and `ruff help format`
for more on the linting and formatting commands, respectively.
@@ -352,6 +329,7 @@ quality tools, including:
- [flake8-super](https://pypi.org/project/flake8-super/)
- [flake8-tidy-imports](https://pypi.org/project/flake8-tidy-imports/)
- [flake8-todos](https://pypi.org/project/flake8-todos/)
- [flake8-trio](https://pypi.org/project/flake8-trio/)
- [flake8-type-checking](https://pypi.org/project/flake8-type-checking/)
- [flake8-use-pathlib](https://pypi.org/project/flake8-use-pathlib/)
- [flynt](https://pypi.org/project/flynt/) ([#2102](https://github.com/astral-sh/ruff/issues/2102))
@@ -425,7 +403,6 @@ Ruff is used by a number of major open-source projects and companies, including:
- [Dagster](https://github.com/dagster-io/dagster)
- Databricks ([MLflow](https://github.com/mlflow/mlflow))
- [FastAPI](https://github.com/tiangolo/fastapi)
- [Godot](https://github.com/godotengine/godot)
- [Gradio](https://github.com/gradio-app/gradio)
- [Great Expectations](https://github.com/great-expectations/great_expectations)
- [HTTPX](https://github.com/encode/httpx)
@@ -451,7 +428,6 @@ Ruff is used by a number of major open-source projects and companies, including:
- Modern Treasury ([Python SDK](https://github.com/Modern-Treasury/modern-treasury-python))
- Mozilla ([Firefox](https://github.com/mozilla/gecko-dev))
- [Mypy](https://github.com/python/mypy)
- [Nautobot](https://github.com/nautobot/nautobot)
- Netflix ([Dispatch](https://github.com/Netflix/dispatch))
- [Neon](https://github.com/neondatabase/neon)
- [Nokia](https://nokia.com/)
@@ -459,7 +435,6 @@ Ruff is used by a number of major open-source projects and companies, including:
- [NumPyro](https://github.com/pyro-ppl/numpyro)
- [ONNX](https://github.com/onnx/onnx)
- [OpenBB](https://github.com/OpenBB-finance/OpenBBTerminal)
- [Open Wine Components](https://github.com/Open-Wine-Components/umu-launcher)
- [PDM](https://github.com/pdm-project/pdm)
- [PaddlePaddle](https://github.com/PaddlePaddle/Paddle)
- [Pandas](https://github.com/pandas-dev/pandas)
@@ -487,7 +462,6 @@ Ruff is used by a number of major open-source projects and companies, including:
- [Sphinx](https://github.com/sphinx-doc/sphinx)
- [Stable Baselines3](https://github.com/DLR-RM/stable-baselines3)
- [Starlette](https://github.com/encode/starlette)
- [Streamlit](https://github.com/streamlit/streamlit)
- [The Algorithms](https://github.com/TheAlgorithms/Python)
- [Vega-Altair](https://github.com/altair-viz/altair)
- WordPress ([Openverse](https://github.com/WordPress/openverse))


@@ -1,6 +1,6 @@
[files]
# https://github.com/crate-ci/typos/issues/868
extend-exclude = ["crates/red_knot_module_resolver/vendor/**/*", "**/resources/**/*", "**/snapshots/**/*"]
extend-exclude = ["crates/red_knot/vendor/**/*", "**/resources/**/*", "**/snapshots/**/*"]
[default.extend-words]
"arange" = "arange" # e.g. `numpy.arange`
@@ -16,6 +16,5 @@ jod = "jod" # e.g., `jod-thread`
[default]
extend-ignore-re = [
# Line ignore with trailing "spellchecker:disable-line"
"(?Rm)^.*#\\s*spellchecker:disable-line$",
"LICENSEs",
"(?Rm)^.*#\\s*spellchecker:disable-line$"
]


@@ -1,6 +1,6 @@
[package]
name = "red_knot"
version = "0.0.0"
version = "0.1.0"
edition.workspace = true
rust-version.workspace = true
homepage.workspace = true
@@ -12,25 +12,31 @@ license.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
red_knot_module_resolver = { workspace = true }
red_knot_python_semantic = { workspace = true }
ruff_db = { workspace = true, features = ["os", "cache"] }
ruff_python_ast = { workspace = true }
ruff_python_parser = { path = "../ruff_python_parser" }
ruff_python_ast = { path = "../ruff_python_ast" }
ruff_text_size = { path = "../ruff_text_size" }
ruff_index = { path = "../ruff_index" }
ruff_notebook = { path = "../ruff_notebook" }
anyhow = { workspace = true }
clap = { workspace = true, features = ["wrap_help"] }
countme = { workspace = true, features = ["enable"] }
bitflags = { workspace = true }
crossbeam = { workspace = true }
ctrlc = { version = "3.4.4" }
dashmap = { workspace = true }
hashbrown = { workspace = true }
indexmap = { workspace = true }
notify = { workspace = true }
parking_lot = { workspace = true }
rayon = { workspace = true }
rustc-hash = { workspace = true }
salsa = { workspace = true }
smol_str = { version = "0.2.1" }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
tracing-tree = { workspace = true }
[dev-dependencies]
textwrap = { version = "0.16.1" }
tempfile = { workspace = true }
[lints]
workspace = true

crates/red_knot/README.md (new file)

@@ -0,0 +1,18 @@
# Red Knot
The Red Knot crate contains code working towards multifile analysis, type inference and, ultimately, type-checking. It's very much a work in progress for now.
## Vendored types for the stdlib
Red Knot vendors [typeshed](https://github.com/python/typeshed)'s stubs for the standard library. The vendored stubs can be found in `crates/red_knot/vendor/typeshed`. The file `crates/red_knot/vendor/typeshed/source_commit.txt` tells you the typeshed commit that our vendored stdlib stubs currently correspond to.
Updating the vendored stubs is currently done manually. On a Unix machine, follow these steps (assuming you have a typeshed clone in a `typeshed` directory and a Ruff clone in a `ruff` directory):
```shell
rm -rf ruff/crates/red_knot/vendor/typeshed
mkdir ruff/crates/red_knot/vendor/typeshed
cp typeshed/README.md ruff/crates/red_knot/vendor/typeshed
cp typeshed/LICENSE ruff/crates/red_knot/vendor/typeshed
cp -r typeshed/stdlib ruff/crates/red_knot/vendor/typeshed/stdlib
git -C typeshed rev-parse HEAD > ruff/crates/red_knot/vendor/typeshed/source_commit.txt
```


@@ -0,0 +1,415 @@
use std::any::type_name;
use std::fmt::{Debug, Formatter};
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use rustc_hash::FxHashMap;
use ruff_index::{Idx, IndexVec};
use ruff_python_ast::visitor::preorder;
use ruff_python_ast::visitor::preorder::{PreorderVisitor, TraversalSignal};
use ruff_python_ast::{
AnyNodeRef, AstNode, ExceptHandler, ExceptHandlerExceptHandler, Expr, MatchCase, ModModule,
NodeKind, Parameter, Stmt, StmtAnnAssign, StmtAssign, StmtAugAssign, StmtClassDef,
StmtFunctionDef, StmtGlobal, StmtImport, StmtImportFrom, StmtNonlocal, StmtTypeAlias,
TypeParam, TypeParamParamSpec, TypeParamTypeVar, TypeParamTypeVarTuple, WithItem,
};
use ruff_text_size::{Ranged, TextRange};
/// A type agnostic ID that uniquely identifies an AST node in a file.
#[ruff_index::newtype_index]
pub struct AstId;
/// A typed ID that uniquely identifies an AST node in a file.
///
/// This is different from [`AstId`] in that it is a combination of ID and the type of the node the ID identifies.
/// Typing the ID prevents mixing IDs of different node types and allows restricting the API to only accept
/// nodes for which an ID has been created (not all AST nodes get an ID).
pub struct TypedAstId<N: HasAstId> {
erased: AstId,
_marker: PhantomData<fn() -> N>,
}
impl<N: HasAstId> TypedAstId<N> {
/// Upcasts this ID from a more specific node type to a more general node type.
pub fn upcast<M: HasAstId>(self) -> TypedAstId<M>
where
N: Into<M>,
{
TypedAstId {
erased: self.erased,
_marker: PhantomData,
}
}
}
impl<N: HasAstId> Copy for TypedAstId<N> {}
impl<N: HasAstId> Clone for TypedAstId<N> {
fn clone(&self) -> Self {
*self
}
}
impl<N: HasAstId> PartialEq for TypedAstId<N> {
fn eq(&self, other: &Self) -> bool {
self.erased == other.erased
}
}
impl<N: HasAstId> Eq for TypedAstId<N> {}
impl<N: HasAstId> Hash for TypedAstId<N> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.erased.hash(state);
}
}
impl<N: HasAstId> Debug for TypedAstId<N> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_tuple("TypedAstId")
.field(&self.erased)
.field(&type_name::<N>())
.finish()
}
}
pub struct AstIds {
ids: IndexVec<AstId, NodeKey>,
reverse: FxHashMap<NodeKey, AstId>,
}
impl AstIds {
// TODO rust analyzer doesn't allocate an ID for every node. It only allocates ids for
// nodes with a corresponding HIR element, that is nodes that are definitions.
pub fn from_module(module: &ModModule) -> Self {
let mut visitor = AstIdsVisitor::default();
// TODO: visit_module?
// Make sure we visit the root
visitor.create_id(module);
visitor.visit_body(&module.body);
while let Some(deferred) = visitor.deferred.pop() {
match deferred {
DeferredNode::FunctionDefinition(def) => {
def.visit_preorder(&mut visitor);
}
DeferredNode::ClassDefinition(def) => def.visit_preorder(&mut visitor),
}
}
AstIds {
ids: visitor.ids,
reverse: visitor.reverse,
}
}
/// Returns the ID to the root node.
pub fn root(&self) -> NodeKey {
self.ids[AstId::new(0)]
}
/// Returns the [`TypedAstId`] for a node.
pub fn ast_id<N: HasAstId>(&self, node: &N) -> TypedAstId<N> {
let key = node.syntax_node_key();
TypedAstId {
erased: self.reverse.get(&key).copied().unwrap(),
_marker: PhantomData,
}
}
/// Returns the [`TypedAstId`] for the node identified with the given [`TypedNodeKey`].
pub fn ast_id_for_key<N: HasAstId>(&self, node: &TypedNodeKey<N>) -> TypedAstId<N> {
let ast_id = self.ast_id_for_node_key(node.inner);
TypedAstId {
erased: ast_id,
_marker: PhantomData,
}
}
/// Returns the untyped [`AstId`] for the node identified by the given `node` key.
pub fn ast_id_for_node_key(&self, node: NodeKey) -> AstId {
self.reverse
.get(&node)
.copied()
.expect("Can't find node in AstIds map.")
}
/// Returns the [`TypedNodeKey`] for the node identified by the given [`TypedAstId`].
pub fn key<N: HasAstId>(&self, id: TypedAstId<N>) -> TypedNodeKey<N> {
let syntax_key = self.ids[id.erased];
TypedNodeKey::new(syntax_key).unwrap()
}
pub fn node_key<H: HasAstId>(&self, id: TypedAstId<H>) -> NodeKey {
self.ids[id.erased]
}
}
impl std::fmt::Debug for AstIds {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let mut map = f.debug_map();
for (key, value) in self.ids.iter_enumerated() {
map.entry(&key, &value);
}
map.finish()
}
}
impl PartialEq for AstIds {
fn eq(&self, other: &Self) -> bool {
self.ids == other.ids
}
}
impl Eq for AstIds {}
#[derive(Default)]
struct AstIdsVisitor<'a> {
ids: IndexVec<AstId, NodeKey>,
reverse: FxHashMap<NodeKey, AstId>,
deferred: Vec<DeferredNode<'a>>,
}
impl<'a> AstIdsVisitor<'a> {
fn create_id<A: HasAstId>(&mut self, node: &A) {
let node_key = node.syntax_node_key();
let id = self.ids.push(node_key);
self.reverse.insert(node_key, id);
}
}
impl<'a> PreorderVisitor<'a> for AstIdsVisitor<'a> {
fn visit_stmt(&mut self, stmt: &'a Stmt) {
match stmt {
Stmt::FunctionDef(def) => {
self.create_id(def);
self.deferred.push(DeferredNode::FunctionDefinition(def));
return;
}
// TODO defer visiting the assignment body, type alias parameters etc?
Stmt::ClassDef(def) => {
self.create_id(def);
self.deferred.push(DeferredNode::ClassDefinition(def));
return;
}
Stmt::Expr(_) => {
// Skip
return;
}
Stmt::Return(_) => {}
Stmt::Delete(_) => {}
Stmt::Assign(assignment) => self.create_id(assignment),
Stmt::AugAssign(assignment) => {
self.create_id(assignment);
}
Stmt::AnnAssign(assignment) => self.create_id(assignment),
Stmt::TypeAlias(assignment) => self.create_id(assignment),
Stmt::For(_) => {}
Stmt::While(_) => {}
Stmt::If(_) => {}
Stmt::With(_) => {}
Stmt::Match(_) => {}
Stmt::Raise(_) => {}
Stmt::Try(_) => {}
Stmt::Assert(_) => {}
Stmt::Import(import) => self.create_id(import),
Stmt::ImportFrom(import_from) => self.create_id(import_from),
Stmt::Global(global) => self.create_id(global),
Stmt::Nonlocal(non_local) => self.create_id(non_local),
Stmt::Pass(_) => {}
Stmt::Break(_) => {}
Stmt::Continue(_) => {}
Stmt::IpyEscapeCommand(_) => {}
}
preorder::walk_stmt(self, stmt);
}
fn visit_expr(&mut self, _expr: &'a Expr) {}
fn visit_parameter(&mut self, parameter: &'a Parameter) {
self.create_id(parameter);
preorder::walk_parameter(self, parameter);
}
fn visit_except_handler(&mut self, except_handler: &'a ExceptHandler) {
match except_handler {
ExceptHandler::ExceptHandler(except_handler) => {
self.create_id(except_handler);
}
}
preorder::walk_except_handler(self, except_handler);
}
fn visit_with_item(&mut self, with_item: &'a WithItem) {
self.create_id(with_item);
preorder::walk_with_item(self, with_item);
}
fn visit_match_case(&mut self, match_case: &'a MatchCase) {
self.create_id(match_case);
preorder::walk_match_case(self, match_case);
}
fn visit_type_param(&mut self, type_param: &'a TypeParam) {
self.create_id(type_param);
}
}
enum DeferredNode<'a> {
FunctionDefinition(&'a StmtFunctionDef),
ClassDefinition(&'a StmtClassDef),
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct TypedNodeKey<N: AstNode> {
/// The type erased node key.
inner: NodeKey,
_marker: PhantomData<fn() -> N>,
}
impl<N: AstNode> TypedNodeKey<N> {
pub fn from_node(node: &N) -> Self {
let inner = NodeKey {
kind: node.as_any_node_ref().kind(),
range: node.range(),
};
Self {
inner,
_marker: PhantomData,
}
}
pub fn new(node_key: NodeKey) -> Option<Self> {
N::can_cast(node_key.kind).then_some(TypedNodeKey {
inner: node_key,
_marker: PhantomData,
})
}
pub fn resolve<'a>(&self, root: AnyNodeRef<'a>) -> Option<N::Ref<'a>> {
let node_ref = self.inner.resolve(root)?;
Some(N::cast_ref(node_ref).unwrap())
}
pub fn resolve_unwrap<'a>(&self, root: AnyNodeRef<'a>) -> N::Ref<'a> {
self.resolve(root).expect("node should resolve")
}
pub fn erased(&self) -> &NodeKey {
&self.inner
}
}
struct FindNodeKeyVisitor<'a> {
key: NodeKey,
result: Option<AnyNodeRef<'a>>,
}
impl<'a> PreorderVisitor<'a> for FindNodeKeyVisitor<'a> {
fn enter_node(&mut self, node: AnyNodeRef<'a>) -> TraversalSignal {
if self.result.is_some() {
return TraversalSignal::Skip;
}
if node.range() == self.key.range && node.kind() == self.key.kind {
self.result = Some(node);
TraversalSignal::Skip
} else if node.range().contains_range(self.key.range) {
TraversalSignal::Traverse
} else {
TraversalSignal::Skip
}
}
fn visit_body(&mut self, body: &'a [Stmt]) {
// TODO it would be more efficient to use binary search instead of linear
for stmt in body {
if stmt.range().start() > self.key.range.end() {
break;
}
self.visit_stmt(stmt);
}
}
}
// TODO an alternative to this is to have a `NodeId` on each node (in increasing order depending on the position).
// This would allow reducing the size of this struct to a u32.
// It would be nice if we could use an `Arc::weak_ref` here, but that only works if we use
// `Arc` internally.
// TODO: Implement the logic to resolve a node, given a db (and the correct file).
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct NodeKey {
kind: NodeKind,
range: TextRange,
}
impl NodeKey {
pub fn resolve<'a>(&self, root: AnyNodeRef<'a>) -> Option<AnyNodeRef<'a>> {
// Traverse the tree, descending into a node only if the key's range is within the node's range.
let mut visitor = FindNodeKeyVisitor {
key: *self,
result: None,
};
if visitor.enter_node(root) == TraversalSignal::Traverse {
root.visit_preorder(&mut visitor);
}
visitor.result
}
}
/// Marker trait implemented by AST nodes for which we extract the `AstId`.
pub trait HasAstId: AstNode {
fn node_key(&self) -> TypedNodeKey<Self>
where
Self: Sized,
{
TypedNodeKey {
inner: self.syntax_node_key(),
_marker: PhantomData,
}
}
fn syntax_node_key(&self) -> NodeKey {
NodeKey {
kind: self.as_any_node_ref().kind(),
range: self.range(),
}
}
}
impl HasAstId for StmtFunctionDef {}
impl HasAstId for StmtClassDef {}
impl HasAstId for StmtAnnAssign {}
impl HasAstId for StmtAugAssign {}
impl HasAstId for StmtAssign {}
impl HasAstId for StmtTypeAlias {}
impl HasAstId for ModModule {}
impl HasAstId for StmtImport {}
impl HasAstId for StmtImportFrom {}
impl HasAstId for Parameter {}
impl HasAstId for TypeParam {}
impl HasAstId for Stmt {}
impl HasAstId for TypeParamTypeVar {}
impl HasAstId for TypeParamTypeVarTuple {}
impl HasAstId for TypeParamParamSpec {}
impl HasAstId for StmtGlobal {}
impl HasAstId for StmtNonlocal {}
impl HasAstId for ExceptHandlerExceptHandler {}
impl HasAstId for WithItem {}
impl HasAstId for MatchCase {}
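
For orientation, here is a minimal sketch of how the pieces in this new file might fit together. It is not part of the diff: the `example` function is hypothetical, and it assumes the imports already at the top of this file, a `ModModule` produced by Ruff's Python parser, and a `&StmtFunctionDef` borrowed from that module's body.

```rust
// Hypothetical usage sketch (not part of the diff). Assumes `module` was
// produced by the Python parser and `func_def` borrows from `module.body`.
fn example(module: &ModModule, func_def: &StmtFunctionDef) {
    // Walk the module once to build the bidirectional ID map.
    let ids = AstIds::from_module(module);

    // Look up the stable, typed ID for the function definition.
    let id: TypedAstId<StmtFunctionDef> = ids.ast_id(func_def);

    // Upcast to the more general statement type (allowed because
    // `StmtFunctionDef: Into<Stmt>` and both implement `HasAstId`).
    let _stmt_id: TypedAstId<Stmt> = id.upcast();

    // Map the ID back to a (kind, range) key, which can later be
    // re-resolved against a freshly parsed tree of the same file
    // (assuming `AnyNodeRef: From<&ModModule>`, as in ruff_python_ast).
    let key: TypedNodeKey<StmtFunctionDef> = ids.key(id);
    let _resolved = key.resolve(AnyNodeRef::from(module));
}
```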


@@ -0,0 +1,165 @@
use std::fmt::Formatter;
use std::hash::Hash;
use std::sync::atomic::{AtomicUsize, Ordering};
use crate::db::QueryResult;
use dashmap::mapref::entry::Entry;
use crate::FxDashMap;
/// Simple key value cache that locks on a per-key level.
pub struct KeyValueCache<K, V> {
map: FxDashMap<K, V>,
statistics: CacheStatistics,
}
impl<K, V> KeyValueCache<K, V>
where
K: Eq + Hash + Clone,
V: Clone,
{
pub fn try_get(&self, key: &K) -> Option<V> {
if let Some(existing) = self.map.get(key) {
self.statistics.hit();
Some(existing.clone())
} else {
self.statistics.miss();
None
}
}
pub fn get<F>(&self, key: &K, compute: F) -> QueryResult<V>
where
F: FnOnce(&K) -> QueryResult<V>,
{
Ok(match self.map.entry(key.clone()) {
Entry::Occupied(cached) => {
self.statistics.hit();
cached.get().clone()
}
Entry::Vacant(vacant) => {
self.statistics.miss();
let value = compute(key)?;
vacant.insert(value.clone());
value
}
})
}
pub fn set(&mut self, key: K, value: V) {
self.map.insert(key, value);
}
pub fn remove(&mut self, key: &K) -> Option<V> {
self.map.remove(key).map(|(_, value)| value)
}
pub fn clear(&mut self) {
self.map.clear();
self.map.shrink_to_fit();
}
pub fn statistics(&self) -> Option<Statistics> {
self.statistics.to_statistics()
}
}
impl<K, V> Default for KeyValueCache<K, V>
where
K: Eq + Hash,
V: Clone,
{
fn default() -> Self {
Self {
map: FxDashMap::default(),
statistics: CacheStatistics::default(),
}
}
}
impl<K, V> std::fmt::Debug for KeyValueCache<K, V>
where
K: std::fmt::Debug + Eq + Hash,
V: std::fmt::Debug,
{
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let mut debug = f.debug_map();
for entry in &self.map {
debug.entry(&entry.value(), &entry.key());
}
debug.finish()
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Statistics {
pub hits: usize,
pub misses: usize,
}
impl Statistics {
#[allow(clippy::cast_precision_loss)]
pub fn hit_rate(&self) -> Option<f64> {
if self.hits + self.misses == 0 {
return None;
}
Some((self.hits as f64) / (self.hits + self.misses) as f64)
}
}
#[cfg(debug_assertions)]
pub type CacheStatistics = DebugStatistics;
#[cfg(not(debug_assertions))]
pub type CacheStatistics = ReleaseStatistics;
pub trait StatisticsRecorder {
fn hit(&self);
fn miss(&self);
fn to_statistics(&self) -> Option<Statistics>;
}
#[derive(Debug, Default)]
pub struct DebugStatistics {
hits: AtomicUsize,
misses: AtomicUsize,
}
impl StatisticsRecorder for DebugStatistics {
// TODO figure out appropriate Ordering
fn hit(&self) {
self.hits.fetch_add(1, Ordering::SeqCst);
}
fn miss(&self) {
self.misses.fetch_add(1, Ordering::SeqCst);
}
fn to_statistics(&self) -> Option<Statistics> {
let hits = self.hits.load(Ordering::SeqCst);
let misses = self.misses.load(Ordering::SeqCst);
Some(Statistics { hits, misses })
}
}
#[derive(Debug, Default)]
pub struct ReleaseStatistics;
impl StatisticsRecorder for ReleaseStatistics {
#[inline]
fn hit(&self) {}
#[inline]
fn miss(&self) {}
#[inline]
fn to_statistics(&self) -> Option<Statistics> {
None
}
}
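
As a rough usage sketch (not part of the diff): the cache is meant to be consumed from query functions. `FileId`, `Source`, and `load_source` below are hypothetical stand-ins; `QueryResult` is the alias imported at the top of this file.

```rust
// Hypothetical usage sketch (not part of the diff). `FileId` and `Source`
// are assumed to be `Clone`; `load_source` is a stand-in compute function.
fn cached_source(cache: &KeyValueCache<FileId, Source>, file: FileId) -> QueryResult<Source> {
    // A hit clones the stored value; a miss runs the closure while holding
    // only that key's entry lock, then stores and returns the result.
    let source = cache.get(&file, |file| load_source(file.clone()))?;

    // In debug builds `statistics()` reports hit/miss counts; in release
    // builds it returns `None` (see `ReleaseStatistics` above).
    if let Some(stats) = cache.statistics() {
        let _hit_rate = stats.hit_rate(); // hits / (hits + misses)
    }

    Ok(source)
}
```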


@@ -0,0 +1,42 @@
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
#[derive(Debug, Clone, Default)]
pub struct CancellationTokenSource {
signal: Arc<AtomicBool>,
}
impl CancellationTokenSource {
pub fn new() -> Self {
Self {
signal: Arc::new(AtomicBool::new(false)),
}
}
#[tracing::instrument(level = "trace", skip_all)]
pub fn cancel(&self) {
self.signal.store(true, std::sync::atomic::Ordering::SeqCst);
}
pub fn is_cancelled(&self) -> bool {
self.signal.load(std::sync::atomic::Ordering::SeqCst)
}
pub fn token(&self) -> CancellationToken {
CancellationToken {
signal: self.signal.clone(),
}
}
}
#[derive(Clone, Debug)]
pub struct CancellationToken {
signal: Arc<AtomicBool>,
}
impl CancellationToken {
/// Returns `true` if cancellation has been requested.
pub fn is_cancelled(&self) -> bool {
self.signal.load(std::sync::atomic::Ordering::SeqCst)
}
}
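
A small self-contained sketch of the intended usage (not part of the diff): the owner keeps the source and hands clones of the token to workers, which poll it cooperatively between units of work.

```rust
// Self-contained usage sketch (not part of the diff).
fn example() {
    let source = CancellationTokenSource::new();
    let token = source.token();

    let worker = std::thread::spawn(move || {
        let mut units_done = 0;
        // Poll the shared flag between units of work and bail out early.
        while !token.is_cancelled() {
            units_done += 1;
            if units_done > 1_000 {
                break; // bounded work so the sketch always terminates
            }
        }
        units_done
    });

    // Flip the shared `AtomicBool`; every token observes it.
    source.cancel();
    let _units = worker.join().unwrap();
}
```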


@@ -1,2 +0,0 @@
pub(crate) mod target_version;
pub(crate) mod verbosity;


@@ -1,34 +0,0 @@
/// Enumeration of all supported Python versions
///
/// TODO: unify with the `PythonVersion` enum in the linter/formatter crates?
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq, PartialOrd, Ord, Default, clap::ValueEnum)]
pub enum TargetVersion {
Py37,
#[default]
Py38,
Py39,
Py310,
Py311,
Py312,
Py313,
}
impl std::fmt::Display for TargetVersion {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
ruff_db::program::TargetVersion::from(*self).fmt(f)
}
}
impl From<TargetVersion> for ruff_db::program::TargetVersion {
fn from(value: TargetVersion) -> Self {
match value {
TargetVersion::Py37 => Self::Py37,
TargetVersion::Py38 => Self::Py38,
TargetVersion::Py39 => Self::Py39,
TargetVersion::Py310 => Self::Py310,
TargetVersion::Py311 => Self::Py311,
TargetVersion::Py312 => Self::Py312,
TargetVersion::Py313 => Self::Py313,
}
}
}


@@ -1,34 +0,0 @@
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub(crate) enum VerbosityLevel {
Info,
Debug,
Trace,
}
/// Logging flags to `#[command(flatten)]` into your CLI
#[derive(clap::Args, Debug, Clone, Default)]
#[command(about = None, long_about = None)]
pub(crate) struct Verbosity {
#[arg(
long,
short = 'v',
help = "Use verbose output (or `-vv` and `-vvv` for more verbose output)",
action = clap::ArgAction::Count,
global = true,
)]
verbose: u8,
}
impl Verbosity {
/// Returns the verbosity level based on the number of `-v` flags.
///
/// Returns `None` if the user did not specify any verbosity flags.
pub(crate) fn level(&self) -> Option<VerbosityLevel> {
match self.verbose {
0 => None,
1 => Some(VerbosityLevel::Info),
2 => Some(VerbosityLevel::Debug),
_ => Some(VerbosityLevel::Trace),
}
}
}


@@ -1,200 +1,248 @@
use std::panic::{AssertUnwindSafe, RefUnwindSafe};
use std::sync::Arc;
use salsa::{Cancelled, Database, DbWithJar};
pub use jars::{HasJar, HasJars};
pub use query::{QueryError, QueryResult};
pub use runtime::DbRuntime;
pub use storage::JarsStorage;
use red_knot_module_resolver::{vendored_typeshed_stubs, Db as ResolverDb, Jar as ResolverJar};
use red_knot_python_semantic::{Db as SemanticDb, Jar as SemanticJar};
use ruff_db::files::{system_path_to_file, File, Files};
use ruff_db::program::{Program, ProgramSettings};
use ruff_db::system::System;
use ruff_db::vendored::VendoredFileSystem;
use ruff_db::{Db as SourceDb, Jar as SourceJar, Upcast};
use crate::files::FileId;
use crate::lint::{LintSemanticStorage, LintSyntaxStorage};
use crate::module::ModuleResolver;
use crate::parse::ParsedStorage;
use crate::source::SourceStorage;
use crate::symbols::SymbolTablesStorage;
use crate::types::TypeStore;
use crate::lint::{lint_semantic, lint_syntax, unwind_if_cancelled, Diagnostics};
use crate::watch::{FileChangeKind, FileWatcherChange};
use crate::workspace::{check_file, Package, Workspace, WorkspaceMetadata};
mod jars;
mod query;
mod runtime;
mod storage;
pub trait Db: DbWithJar<Jar> + SemanticDb + Upcast<dyn SemanticDb> {}
pub trait Database {
/// Returns a reference to the runtime of the current worker.
fn runtime(&self) -> &DbRuntime;
#[salsa::jar(db=Db)]
pub struct Jar(
Workspace,
Package,
lint_syntax,
lint_semantic,
unwind_if_cancelled,
);
/// Returns a mutable reference to the runtime. Only one worker can hold a mutable reference to the runtime.
fn runtime_mut(&mut self) -> &mut DbRuntime;
#[salsa::db(SourceJar, ResolverJar, SemanticJar, Jar)]
pub struct RootDatabase {
workspace: Option<Workspace>,
storage: salsa::Storage<RootDatabase>,
files: Files,
system: Arc<dyn System + Send + Sync + RefUnwindSafe>,
/// Returns `Ok` if the queries have not been cancelled and `Err(QueryError::Cancelled)` otherwise.
fn cancelled(&self) -> QueryResult<()> {
self.runtime().cancelled()
}
/// Returns `true` if the queries have been cancelled.
fn is_cancelled(&self) -> bool {
self.runtime().is_cancelled()
}
}
impl RootDatabase {
pub fn new<S>(workspace: WorkspaceMetadata, settings: ProgramSettings, system: S) -> Self
where
S: System + 'static + Send + Sync + RefUnwindSafe,
{
let mut db = Self {
workspace: None,
storage: salsa::Storage::default(),
files: Files::default(),
system: Arc::new(system),
};
/// Database that supports running queries from multiple threads.
pub trait ParallelDatabase: Database + Send {
/// Creates a snapshot of the database state that can be used to query the database in another thread.
///
/// The snapshot is a read-only view of the database but query results are shared between threads.
/// All queries will be automatically cancelled when applying any mutations (calling [`HasJars::jars_mut`])
/// to the database (not the snapshot, which is read-only).
///
/// ## Creating a snapshot
///
/// Creating a snapshot of the database's jars is cheap but creating a snapshot of
/// other state stored on the database might require deep-cloning data. That's why you should
/// avoid creating snapshots in a hot function (e.g. don't create a snapshot for each file, instead
/// create a snapshot when scheduling the check of an entire program).
///
/// ## Salsa compatibility
/// Salsa prohibits creating a snapshot while running a local query (it's fine if other workers run a query) [[source](https://github.com/salsa-rs/salsa/issues/80)].
/// We should avoid creating snapshots while running a query because we might want to adopt Salsa in the future (if we can figure out persistent caching).
/// Unfortunately, the infrastructure doesn't provide an automated way of knowing when a query is running, which is
/// why we have to "enforce" this constraint manually.
#[must_use]
fn snapshot(&self) -> Snapshot<Self>;
}
let workspace = Workspace::from_metadata(&db, workspace);
// Initialize the `Program` singleton
Program::from_settings(&db, settings);
pub trait DbWithJar<Jar>: Database + HasJar<Jar> {}
db.workspace = Some(workspace);
db
/// Readonly snapshot of a database.
///
/// ## Deadlocks
/// A snapshot should always be dropped as soon as it is no longer necessary to run queries.
/// Storing the snapshot without running a query or periodically checking if cancellation was requested
/// can lead to deadlocks because mutating the [`Database`] requires cancelling all pending queries
/// and waiting for all [`Snapshot`]s to be dropped.
#[derive(Debug)]
pub struct Snapshot<DB: ?Sized>
where
DB: ParallelDatabase,
{
db: DB,
}
impl<DB> Snapshot<DB>
where
DB: ParallelDatabase,
{
pub fn new(db: DB) -> Self {
Snapshot { db }
}
}
impl<DB> std::ops::Deref for Snapshot<DB>
where
DB: ParallelDatabase,
{
type Target = DB;
fn deref(&self) -> &DB {
&self.db
}
}
pub trait Upcast<T: ?Sized> {
fn upcast(&self) -> &T;
}
// Red knot specific databases code.
pub trait SourceDb: DbWithJar<SourceJar> {
// queries
fn file_id(&self, path: &std::path::Path) -> FileId;
fn file_path(&self, file_id: FileId) -> Arc<std::path::Path>;
}
pub trait SemanticDb: SourceDb + DbWithJar<SemanticJar> + Upcast<dyn SourceDb> {}
pub trait LintDb: SemanticDb + DbWithJar<LintJar> + Upcast<dyn SemanticDb> {}
pub trait Db: LintDb + Upcast<dyn LintDb> {}
#[derive(Debug, Default)]
pub struct SourceJar {
pub sources: SourceStorage,
pub parsed: ParsedStorage,
}
#[derive(Debug, Default)]
pub struct SemanticJar {
pub module_resolver: ModuleResolver,
pub symbol_tables: SymbolTablesStorage,
pub type_store: TypeStore,
}
#[derive(Debug, Default)]
pub struct LintJar {
pub lint_syntax: LintSyntaxStorage,
pub lint_semantic: LintSemanticStorage,
}
#[cfg(test)]
pub(crate) mod tests {
use std::path::Path;
use std::sync::Arc;
use crate::db::{
Database, DbRuntime, DbWithJar, HasJar, HasJars, JarsStorage, LintDb, LintJar, QueryResult,
SourceDb, SourceJar, Upcast,
};
use crate::files::{FileId, Files};
use super::{SemanticDb, SemanticJar};
// This can be a partial database used in a single crate for testing.
// It would hold less data than the full database.
#[derive(Debug, Default)]
pub(crate) struct TestDb {
files: Files,
jars: JarsStorage<Self>,
}
pub fn workspace(&self) -> Workspace {
// SAFETY: The workspace is always initialized in `new`.
self.workspace.unwrap()
}
#[tracing::instrument(level = "debug", skip(self, changes))]
pub fn apply_changes(&mut self, changes: Vec<FileWatcherChange>) {
let workspace = self.workspace();
let workspace_path = workspace.root(self).to_path_buf();
// TODO: Optimize change tracking by only reloading a package if a file that is part of the package was changed.
let mut structural_change = false;
for change in changes {
if matches!(
change.path.file_name(),
Some(".gitignore" | ".ignore" | "ruff.toml" | ".ruff.toml" | "pyproject.toml")
) {
// Changes to ignore files or settings can change the workspace structure or add/remove files
// from packages.
structural_change = true;
} else {
match change.kind {
FileChangeKind::Created => {
// Reload the package when a new file was added. This is necessary because the file might be excluded
// by a gitignore.
if workspace.package(self, &change.path).is_some() {
structural_change = true;
}
}
FileChangeKind::Modified => {}
FileChangeKind::Deleted => {
if let Some(package) = workspace.package(self, &change.path) {
if let Some(file) = system_path_to_file(self, &change.path) {
package.remove_file(self, file);
}
}
}
}
}
File::touch_path(self, &change.path);
impl HasJar<SourceJar> for TestDb {
fn jar(&self) -> QueryResult<&SourceJar> {
Ok(&self.jars()?.0)
}
if structural_change {
match WorkspaceMetadata::from_path(&workspace_path, self.system()) {
Ok(metadata) => {
tracing::debug!("Reload workspace after structural change.");
// TODO: Handle changes in the program settings.
workspace.reload(self, metadata);
}
Err(error) => {
tracing::error!("Failed to load workspace, keep old workspace: {error}");
}
}
fn jar_mut(&mut self) -> &mut SourceJar {
&mut self.jars_mut().0
}
}
/// Checks all open files in the workspace and its dependencies.
pub fn check(&self) -> Result<Vec<String>, Cancelled> {
self.with_db(|db| db.workspace().check(db))
impl HasJar<SemanticJar> for TestDb {
fn jar(&self) -> QueryResult<&SemanticJar> {
Ok(&self.jars()?.1)
}
fn jar_mut(&mut self) -> &mut SemanticJar {
&mut self.jars_mut().1
}
}
pub fn check_file(&self, file: File) -> Result<Diagnostics, Cancelled> {
self.with_db(|db| check_file(db, file))
impl HasJar<LintJar> for TestDb {
fn jar(&self) -> QueryResult<&LintJar> {
Ok(&self.jars()?.2)
}
fn jar_mut(&mut self) -> &mut LintJar {
&mut self.jars_mut().2
}
}
pub(crate) fn with_db<F, T>(&self, f: F) -> Result<T, Cancelled>
where
F: FnOnce(&RootDatabase) -> T + std::panic::UnwindSafe,
{
// The `AssertUnwindSafe` here looks scary, but is a consequence of Salsa's design.
// Salsa uses panics to implement cancellation and to recover from cycles. However, the Salsa
// storage isn't `UnwindSafe` or `RefUnwindSafe` because its dependencies `DashMap` and `parking_lot::*` aren't
// unwind safe.
//
// Having to use `AssertUnwindSafe` isn't as big a deal as it might seem because
// the `UnwindSafe` and `RefUnwindSafe` traits are designed to catch logical bugs.
// They don't protect against [UB](https://internals.rust-lang.org/t/pre-rfc-deprecating-unwindsafe/15974).
// On top of that, `Cancelled` only catches specific Salsa-panics and propagates all other panics.
//
// That still leaves us with possible logical bugs in two sources:
// * In Salsa itself: This must be considered a bug in Salsa and needs fixing upstream.
// Reviewing Salsa code specifically around unwind safety seems doable.
// * Our code: This is the main concern. Luckily, it only involves code that uses internal mutability
// and calls into Salsa queries when mutating the internal state. Using `AssertUnwindSafe`
// certainly makes it harder to catch these issues in our user code.
//
// For now, this is the only solution at hand unless Salsa decides to change its design.
// [Zulip support thread](https://salsa.zulipchat.com/#narrow/stream/145099-general/topic/How.20to.20use.20.60Cancelled.3A.3Acatch.60)
let db = &AssertUnwindSafe(self);
Cancelled::catch(|| f(db))
}
}
impl Upcast<dyn SemanticDb> for RootDatabase {
fn upcast(&self) -> &(dyn SemanticDb + 'static) {
self
}
}
impl Upcast<dyn SourceDb> for RootDatabase {
fn upcast(&self) -> &(dyn SourceDb + 'static) {
self
}
}
impl Upcast<dyn ResolverDb> for RootDatabase {
fn upcast(&self) -> &(dyn ResolverDb + 'static) {
self
}
}
impl ResolverDb for RootDatabase {}
impl SemanticDb for RootDatabase {}
impl SourceDb for RootDatabase {
fn vendored(&self) -> &VendoredFileSystem {
vendored_typeshed_stubs()
}
fn system(&self) -> &dyn System {
&*self.system
}
fn files(&self) -> &Files {
&self.files
}
}
impl Database for RootDatabase {}
impl Db for RootDatabase {}
impl salsa::ParallelDatabase for RootDatabase {
fn snapshot(&self) -> salsa::Snapshot<Self> {
salsa::Snapshot::new(Self {
workspace: self.workspace,
storage: self.storage.snapshot(),
files: self.files.snapshot(),
system: self.system.clone(),
})
impl SourceDb for TestDb {
fn file_id(&self, path: &Path) -> FileId {
self.files.intern(path)
}
fn file_path(&self, file_id: FileId) -> Arc<Path> {
self.files.path(file_id)
}
}
impl DbWithJar<SourceJar> for TestDb {}
impl Upcast<dyn SourceDb> for TestDb {
fn upcast(&self) -> &(dyn SourceDb + 'static) {
self
}
}
impl SemanticDb for TestDb {}
impl DbWithJar<SemanticJar> for TestDb {}
impl Upcast<dyn SemanticDb> for TestDb {
fn upcast(&self) -> &(dyn SemanticDb + 'static) {
self
}
}
impl LintDb for TestDb {}
impl Upcast<dyn LintDb> for TestDb {
fn upcast(&self) -> &(dyn LintDb + 'static) {
self
}
}
impl DbWithJar<LintJar> for TestDb {}
impl HasJars for TestDb {
type Jars = (SourceJar, SemanticJar, LintJar);
fn jars(&self) -> QueryResult<&Self::Jars> {
self.jars.jars()
}
fn jars_mut(&mut self) -> &mut Self::Jars {
self.jars.jars_mut()
}
}
impl Database for TestDb {
fn runtime(&self) -> &DbRuntime {
self.jars.runtime()
}
fn runtime_mut(&mut self) -> &mut DbRuntime {
self.jars.runtime_mut()
}
}
}
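The `Upcast` trait used throughout this file works around Rust's lack of implicit trait-object upcasting: each concrete database implements it once, so a query written against `dyn SourceDb` can be called from code that only holds a `&dyn SemanticDb`. A standalone sketch of the pattern, with toy traits in place of the real ones:

trait Upcast<T: ?Sized> {
    fn upcast(&self) -> &T;
}

trait SourceDb {
    fn source_text(&self, id: u32) -> String;
}

trait SemanticDb: SourceDb + Upcast<dyn SourceDb> {}

struct Database;

impl SourceDb for Database {
    fn source_text(&self, id: u32) -> String { format!("source for file {id}") }
}

impl Upcast<dyn SourceDb> for Database {
    fn upcast(&self) -> &(dyn SourceDb + 'static) { self }
}

impl SemanticDb for Database {}

// A query written against the lower-level trait...
fn line_count(db: &dyn SourceDb, id: u32) -> usize {
    db.source_text(id).lines().count()
}

// ...called from code that only has the higher-level trait object.
fn check(db: &dyn SemanticDb, id: u32) -> usize {
    line_count(db.upcast(), id)
}

fn main() {
    let db = Database;
    assert_eq!(check(&db, 0), 1);
}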

View File

@@ -0,0 +1,37 @@
use crate::db::query::QueryResult;
/// Gives access to a specific jar in the database.
///
/// Nope, the terminology isn't borrowed from Java but from Salsa <https://salsa-rs.github.io/salsa/>,
/// which is an analogy to storing the salsa in different jars.
///
/// The basic idea is that each crate can define its own jar and the jars can be combined to a single
/// database in the top level crate. Each crate also defines its own `Database` trait. The combination of
/// `Database` trait and the jar allow writing queries in isolation, without having to know how they get composed at the upper levels.
///
/// Salsa further defines a `HasIngredient` trait which slices the jar to a specific storage (e.g. a specific cache).
/// We don't need this just yet because we write our queries by hand. We may want a similar trait if we decide
/// to use a macro to generate the queries.
pub trait HasJar<T> {
/// Gives a read-only reference to the jar.
fn jar(&self) -> QueryResult<&T>;
/// Gives a mutable reference to the jar.
fn jar_mut(&mut self) -> &mut T;
}
/// Gives access to the jars in a database.
pub trait HasJars {
/// A type storing the jars.
///
/// Most commonly, this is a tuple where each jar is a tuple element.
type Jars: Default;
/// Gives access to the underlying jars but tests if the queries have been cancelled.
///
/// Returns `Err(QueryError::Cancelled)` if the queries have been cancelled.
fn jars(&self) -> QueryResult<&Self::Jars>;
/// Gives mutable access to the underlying jars.
fn jars_mut(&mut self) -> &mut Self::Jars;
}
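A standalone sketch of how jars compose into one database; the cancellation plumbing is stripped out (so `jar` is infallible here) and the jar contents are illustrative:

trait HasJar<T> {
    fn jar(&self) -> &T;
    fn jar_mut(&mut self) -> &mut T;
}

#[derive(Default)]
struct SourceJar { sources: Vec<String> }

#[derive(Default)]
struct SemanticJar { symbols: Vec<String> }

// The top-level crate composes the jars into a tuple and slices it per trait.
#[derive(Default)]
struct Database {
    jars: (SourceJar, SemanticJar),
}

impl HasJar<SourceJar> for Database {
    fn jar(&self) -> &SourceJar { &self.jars.0 }
    fn jar_mut(&mut self) -> &mut SourceJar { &mut self.jars.0 }
}

impl HasJar<SemanticJar> for Database {
    fn jar(&self) -> &SemanticJar { &self.jars.1 }
    fn jar_mut(&mut self) -> &mut SemanticJar { &mut self.jars.1 }
}

// A query defined in the "source" crate only needs `HasJar<SourceJar>`,
// so it can be written without knowing about the full database.
fn add_source<Db: HasJar<SourceJar>>(db: &mut Db, text: &str) {
    db.jar_mut().sources.push(text.to_string());
}

fn main() {
    let mut db = Database::default();
    add_source(&mut db, "print('hello')");
    let jar = <Database as HasJar<SourceJar>>::jar(&db);
    assert_eq!(jar.sources.len(), 1);
}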

View File

@@ -0,0 +1,20 @@
use std::fmt::{Display, Formatter};
/// Reason why a db query operation failed.
#[derive(Debug, Clone, Copy)]
pub enum QueryError {
/// The query was cancelled because the DB was mutated or the query was cancelled by the host (e.g. on a file change or when pressing CTRL+C).
Cancelled,
}
impl Display for QueryError {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
QueryError::Cancelled => f.write_str("query was cancelled"),
}
}
}
impl std::error::Error for QueryError {}
pub type QueryResult<T> = Result<T, QueryError>;
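A minimal sketch of how a query threads this through: check for cancellation at convenient points and let `?` unwind the whole query chain. The `Db` struct and the lint logic here are stand-ins, not the real database.

#[derive(Debug, Clone, Copy)]
enum QueryError { Cancelled }

type QueryResult<T> = Result<T, QueryError>;

struct Db { cancelled: bool }

impl Db {
    fn cancelled(&self) -> QueryResult<()> {
        if self.cancelled { Err(QueryError::Cancelled) } else { Ok(()) }
    }
}

// A long-running query checks for cancellation on each iteration; the first
// `Err` propagates out of the whole query chain cheaply.
fn lint(db: &Db, lines: &[&str]) -> QueryResult<usize> {
    let mut diagnostics = 0;
    for line in lines {
        db.cancelled()?;
        if line.contains("TODO") {
            diagnostics += 1;
        }
    }
    Ok(diagnostics)
}

fn main() {
    let db = Db { cancelled: false };
    assert_eq!(lint(&db, &["x = 1", "# TODO"]).unwrap(), 1);
}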

View File

@@ -0,0 +1,41 @@
use crate::cancellation::CancellationTokenSource;
use crate::db::{QueryError, QueryResult};
/// Holds the jar agnostic state of the database.
#[derive(Debug, Default)]
pub struct DbRuntime {
/// The cancellation token source used to signal other workers that the queries should be aborted and
/// exit at the next possible point.
cancellation_token: CancellationTokenSource,
}
impl DbRuntime {
pub(super) fn snapshot(&self) -> Self {
Self {
cancellation_token: self.cancellation_token.clone(),
}
}
/// Cancels the pending queries of other workers. The current worker cannot have any pending
/// queries because we're holding a mutable reference to the runtime.
pub(super) fn cancel_other_workers(&mut self) {
self.cancellation_token.cancel();
// Set a new cancellation token so that we're in a non-cancelled state again when running the next
// query.
self.cancellation_token = CancellationTokenSource::default();
}
/// Returns `Ok` if the queries have not been cancelled and `Err(QueryError::Cancelled)` otherwise.
pub(super) fn cancelled(&self) -> QueryResult<()> {
if self.cancellation_token.is_cancelled() {
Err(QueryError::Cancelled)
} else {
Ok(())
}
}
/// Returns `true` if the queries have been cancelled.
pub(super) fn is_cancelled(&self) -> bool {
self.cancellation_token.is_cancelled()
}
}

View File

@@ -0,0 +1,117 @@
use std::fmt::Formatter;
use std::sync::Arc;
use crossbeam::sync::WaitGroup;
use crate::db::query::QueryResult;
use crate::db::runtime::DbRuntime;
use crate::db::{HasJars, ParallelDatabase};
/// Stores the jars of a database and the state for each worker.
///
/// Today, all state is shared across all workers, but we may want to store per-worker data in the future.
pub struct JarsStorage<T>
where
T: HasJars + Sized,
{
// It's important that `jars_wait_group` is declared after `jars` to ensure that `jars` is dropped first.
// See https://doc.rust-lang.org/reference/destructors.html
/// Stores the jars of the database.
jars: Arc<T::Jars>,
/// Used to count the references to `jars`. Allows implementing `jars_mut` without having to clone `jars`.
jars_wait_group: WaitGroup,
/// The data agnostic state.
runtime: DbRuntime,
}
impl<Db> JarsStorage<Db>
where
Db: HasJars,
{
pub(super) fn new() -> Self {
Self {
jars: Arc::new(Db::Jars::default()),
jars_wait_group: WaitGroup::default(),
runtime: DbRuntime::default(),
}
}
/// Creates a snapshot of the jars.
///
/// Creating the snapshot is cheap because it doesn't clone the jars, it only increments a ref counter.
#[must_use]
pub fn snapshot(&self) -> JarsStorage<Db>
where
Db: ParallelDatabase,
{
Self {
jars: self.jars.clone(),
jars_wait_group: self.jars_wait_group.clone(),
runtime: self.runtime.snapshot(),
}
}
pub(crate) fn jars(&self) -> QueryResult<&Db::Jars> {
self.runtime.cancelled()?;
Ok(&self.jars)
}
/// Returns a mutable reference to the jars without cloning their content.
///
/// The method cancels any pending queries of other workers and waits for them to complete so that
/// this instance is the only instance holding a reference to the jars.
pub(crate) fn jars_mut(&mut self) -> &mut Db::Jars {
// We have a mutable ref here, so no more workers can be spawned between calling this function and taking the mut ref below.
self.cancel_other_workers();
// Now all other references to `self.jars` should have been released. We can now safely return a mutable reference
// to the Arc's content.
let jars =
Arc::get_mut(&mut self.jars).expect("All references to jars should have been released");
jars
}
pub(crate) fn runtime(&self) -> &DbRuntime {
&self.runtime
}
pub(crate) fn runtime_mut(&mut self) -> &mut DbRuntime {
// Note: This method may need to use a similar trick to `jars_mut` if `DbRuntime` is ever to store data that is shared between workers.
&mut self.runtime
}
#[tracing::instrument(level = "trace", skip(self))]
fn cancel_other_workers(&mut self) {
self.runtime.cancel_other_workers();
// Wait for all other workers to complete.
let existing_wait = std::mem::take(&mut self.jars_wait_group);
existing_wait.wait();
}
}
impl<Db> Default for JarsStorage<Db>
where
Db: HasJars,
{
fn default() -> Self {
Self::new()
}
}
impl<T> std::fmt::Debug for JarsStorage<T>
where
T: HasJars,
<T as HasJars>::Jars: std::fmt::Debug,
{
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("SharedStorage")
.field("jars", &self.jars)
.field("jars_wait_group", &self.jars_wait_group)
.field("runtime", &self.runtime)
.finish()
}
}
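The `Arc` plus `WaitGroup` combination is the heart of `jars_mut`: snapshots clone both, the writer waits until every clone is dropped, and only then is `Arc::get_mut` guaranteed to succeed. A standalone sketch of that handshake, assuming the `crossbeam` crate as a dependency:

use std::sync::Arc;
use std::thread;
use crossbeam::sync::WaitGroup;

fn main() {
    let mut data = Arc::new(vec![1, 2, 3]);
    let wait_group = WaitGroup::new();

    // A snapshot holds a clone of the Arc *and* of the WaitGroup.
    let snapshot = (data.clone(), wait_group.clone());
    let reader = thread::spawn(move || {
        let (data, wg) = snapshot;
        let sum: i32 = data.iter().sum();
        drop(wg); // dropping the clone signals "this snapshot is done"
        sum
    });

    assert_eq!(reader.join().unwrap(), 6);

    // Wait until every snapshot has been dropped, then mutate in place:
    // `Arc::get_mut` only succeeds when ours is the sole reference.
    wait_group.wait();
    let data_mut = Arc::get_mut(&mut data).expect("all snapshots released");
    data_mut.push(4);
}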

View File

@@ -0,0 +1,180 @@
use std::fmt::{Debug, Formatter};
use std::hash::{Hash, Hasher};
use std::path::Path;
use std::sync::Arc;
use hashbrown::hash_map::RawEntryMut;
use parking_lot::RwLock;
use rustc_hash::FxHasher;
use ruff_index::{newtype_index, IndexVec};
type Map<K, V> = hashbrown::HashMap<K, V, ()>;
#[newtype_index]
pub struct FileId;
// TODO we'll need a higher level virtual file system abstraction that allows testing if a file exists
// or retrieving its content (ideally lazily and in a way that the memory can be retained later)
// I suspect that we'll end up with a FileSystem trait and our own Path abstraction.
#[derive(Default)]
pub struct Files {
inner: Arc<RwLock<FilesInner>>,
}
impl Files {
#[tracing::instrument(level = "debug", skip(self))]
pub fn intern(&self, path: &Path) -> FileId {
self.inner.write().intern(path)
}
pub fn try_get(&self, path: &Path) -> Option<FileId> {
self.inner.read().try_get(path)
}
#[tracing::instrument(level = "debug", skip(self))]
pub fn path(&self, id: FileId) -> Arc<Path> {
self.inner.read().path(id)
}
/// Snapshots files for a new database snapshot.
///
/// This method should not be used outside a database snapshot.
#[must_use]
pub fn snapshot(&self) -> Files {
Files {
inner: self.inner.clone(),
}
}
}
impl Debug for Files {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let files = self.inner.read();
let mut debug = f.debug_map();
for item in files.iter() {
debug.entry(&item.0, &item.1);
}
debug.finish()
}
}
impl PartialEq for Files {
fn eq(&self, other: &Self) -> bool {
self.inner.read().eq(&other.inner.read())
}
}
impl Eq for Files {}
#[derive(Default)]
struct FilesInner {
by_path: Map<FileId, ()>,
// TODO should we use a map here to reclaim the space for removed files?
// TODO I think we should use our own path abstraction here to avoid having to normalize paths
// and dealing with non-utf paths everywhere.
by_id: IndexVec<FileId, Arc<Path>>,
}
impl FilesInner {
/// Interns the path, returning a new id for it, or the existing id if the path was already interned.
// TODO should this accept Path or PathBuf?
pub(crate) fn intern(&mut self, path: &Path) -> FileId {
let hash = FilesInner::hash_path(path);
let entry = self
.by_path
.raw_entry_mut()
.from_hash(hash, |existing_file| &*self.by_id[*existing_file] == path);
match entry {
RawEntryMut::Occupied(entry) => *entry.key(),
RawEntryMut::Vacant(entry) => {
let id = self.by_id.push(Arc::from(path));
entry.insert_with_hasher(hash, id, (), |file| {
FilesInner::hash_path(&self.by_id[*file])
});
id
}
}
}
fn hash_path(path: &Path) -> u64 {
let mut hasher = FxHasher::default();
path.hash(&mut hasher);
hasher.finish()
}
pub(crate) fn try_get(&self, path: &Path) -> Option<FileId> {
let mut hasher = FxHasher::default();
path.hash(&mut hasher);
let hash = hasher.finish();
Some(
*self
.by_path
.raw_entry()
.from_hash(hash, |existing_file| &*self.by_id[*existing_file] == path)?
.0,
)
}
/// Returns the path for the file with the given id.
pub(crate) fn path(&self, id: FileId) -> Arc<Path> {
self.by_id[id].clone()
}
pub(crate) fn iter(&self) -> impl Iterator<Item = (FileId, Arc<Path>)> + '_ {
self.by_path.keys().map(|id| (*id, self.by_id[*id].clone()))
}
}
impl PartialEq for FilesInner {
fn eq(&self, other: &Self) -> bool {
self.by_id == other.by_id
}
}
impl Eq for FilesInner {}
#[cfg(test)]
mod tests {
use super::*;
use std::path::PathBuf;
#[test]
fn insert_path_twice_same_id() {
let files = Files::default();
let path = PathBuf::from("foo/bar");
let id1 = files.intern(&path);
let id2 = files.intern(&path);
assert_eq!(id1, id2);
}
#[test]
fn insert_different_paths_different_ids() {
let files = Files::default();
let path1 = PathBuf::from("foo/bar");
let path2 = PathBuf::from("foo/bar/baz");
let id1 = files.intern(&path1);
let id2 = files.intern(&path2);
assert_ne!(id1, id2);
}
#[test]
fn four_files() {
let files = Files::default();
let foo_path = PathBuf::from("foo");
let foo_id = files.intern(&foo_path);
let bar_path = PathBuf::from("bar");
files.intern(&bar_path);
let baz_path = PathBuf::from("baz");
files.intern(&baz_path);
let qux_path = PathBuf::from("qux");
files.intern(&qux_path);
let foo_id_2 = files.try_get(&foo_path).expect("foo_path to be found");
assert_eq!(foo_id_2, foo_id);
}
}

View File

@@ -0,0 +1,67 @@
//! Key observations
//!
//! The HIR (High-Level Intermediate Representation) avoids allocations to a large extent by:
//! * Using an arena per node type
//! * Using ids and id ranges to reference items.
//!
//! Using a separate arena per node type has the advantage that the IDs are relatively stable, because
//! they only change when a node of the same kind has been added or removed. (What's unclear is if that matters or if
//! it still triggers a re-compute because the AST-id in the node has changed).
//!
//! The HIR does not store all details. It mainly stores the *public* interface. There's a reference
//! back to the AST node to get more details.
//!
//!
use crate::ast_ids::{HasAstId, TypedAstId};
use crate::files::FileId;
use std::fmt::Formatter;
use std::hash::{Hash, Hasher};
pub struct HirAstId<N: HasAstId> {
file_id: FileId,
node_id: TypedAstId<N>,
}
impl<N: HasAstId> Copy for HirAstId<N> {}
impl<N: HasAstId> Clone for HirAstId<N> {
fn clone(&self) -> Self {
*self
}
}
impl<N: HasAstId> PartialEq for HirAstId<N> {
fn eq(&self, other: &Self) -> bool {
self.file_id == other.file_id && self.node_id == other.node_id
}
}
impl<N: HasAstId> Eq for HirAstId<N> {}
impl<N: HasAstId> std::fmt::Debug for HirAstId<N> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("HirAstId")
.field("file_id", &self.file_id)
.field("node_id", &self.node_id)
.finish()
}
}
impl<N: HasAstId> Hash for HirAstId<N> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.file_id.hash(state);
self.node_id.hash(state);
}
}
impl<N: HasAstId> HirAstId<N> {
pub fn upcast<M: HasAstId>(self) -> HirAstId<M>
where
N: Into<M>,
{
HirAstId {
file_id: self.file_id,
node_id: self.node_id.upcast(),
}
}
}
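The manual `Copy`/`Clone`/`PartialEq`/`Hash` impls above exist because `derive` would add bounds like `N: Clone` even though `HirAstId<N>` only stores ids. A standalone sketch of that pitfall, using a hypothetical `Id<N>` with an explicit `PhantomData` to mirror the pattern:

use std::marker::PhantomData;

// `#[derive(Clone)]` would require `N: Clone`, even though `Id<N>` stores no `N`.
struct Id<N> {
    index: u32,
    _marker: PhantomData<N>,
}

// Manual impls drop the unnecessary bound, mirroring `HirAstId` above.
impl<N> Copy for Id<N> {}
impl<N> Clone for Id<N> {
    fn clone(&self) -> Self { *self }
}

// A node type that is deliberately neither `Clone` nor `Copy`.
#[allow(dead_code)]
struct BigAstNode { source: String }

fn main() {
    let id = Id::<BigAstNode> { index: 7, _marker: PhantomData };
    let copy = id; // works: copying the id never touches `BigAstNode`
    assert_eq!(copy.index, id.index);
}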

View File

@@ -0,0 +1,556 @@
use std::ops::{Index, Range};
use ruff_index::{newtype_index, IndexVec};
use ruff_python_ast::visitor::preorder;
use ruff_python_ast::visitor::preorder::PreorderVisitor;
use ruff_python_ast::{
Decorator, ExceptHandler, ExceptHandlerExceptHandler, Expr, MatchCase, ModModule, Stmt,
StmtAnnAssign, StmtAssign, StmtClassDef, StmtFunctionDef, StmtGlobal, StmtImport,
StmtImportFrom, StmtNonlocal, StmtTypeAlias, TypeParam, TypeParamParamSpec, TypeParamTypeVar,
TypeParamTypeVarTuple, WithItem,
};
use crate::ast_ids::{AstIds, HasAstId};
use crate::files::FileId;
use crate::hir::HirAstId;
use crate::Name;
#[newtype_index]
pub struct FunctionId;
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Function {
ast_id: HirAstId<StmtFunctionDef>,
name: Name,
parameters: Range<ParameterId>,
type_parameters: Range<TypeParameterId>, // TODO: return expression, decorators
}
#[newtype_index]
pub struct ParameterId;
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Parameter {
kind: ParameterKind,
name: Name,
default: Option<()>, // TODO use expression HIR
ast_id: HirAstId<ruff_python_ast::Parameter>,
}
// TODO or should `Parameter` be an enum?
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub enum ParameterKind {
PositionalOnly,
Arguments,
Vararg,
KeywordOnly,
Kwarg,
}
#[newtype_index]
pub struct ClassId;
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Class {
name: Name,
ast_id: HirAstId<StmtClassDef>,
// TODO type parameters, inheritance, decorators, members
}
#[newtype_index]
pub struct AssignmentId;
// This can have more than one name...
// but that means we can't implement `name()` on `ModuleItem`.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Assignment {
// TODO: Handle multiple names / targets
name: Name,
ast_id: HirAstId<StmtAssign>,
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct AnnotatedAssignment {
name: Name,
ast_id: HirAstId<StmtAnnAssign>,
}
#[newtype_index]
pub struct AnnotatedAssignmentId;
#[newtype_index]
pub struct TypeAliasId;
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct TypeAlias {
name: Name,
ast_id: HirAstId<StmtTypeAlias>,
parameters: Range<TypeParameterId>,
}
#[newtype_index]
pub struct TypeParameterId;
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum TypeParameter {
TypeVar(TypeParameterTypeVar),
ParamSpec(TypeParameterParamSpec),
TypeVarTuple(TypeParameterTypeVarTuple),
}
impl TypeParameter {
pub fn ast_id(&self) -> HirAstId<TypeParam> {
match self {
TypeParameter::TypeVar(type_var) => type_var.ast_id.upcast(),
TypeParameter::ParamSpec(param_spec) => param_spec.ast_id.upcast(),
TypeParameter::TypeVarTuple(type_var_tuple) => type_var_tuple.ast_id.upcast(),
}
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct TypeParameterTypeVar {
name: Name,
ast_id: HirAstId<TypeParamTypeVar>,
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct TypeParameterParamSpec {
name: Name,
ast_id: HirAstId<TypeParamParamSpec>,
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct TypeParameterTypeVarTuple {
name: Name,
ast_id: HirAstId<TypeParamTypeVarTuple>,
}
#[newtype_index]
pub struct GlobalId;
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Global {
// TODO track names
ast_id: HirAstId<StmtGlobal>,
}
#[newtype_index]
pub struct NonLocalId;
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct NonLocal {
// TODO track names
ast_id: HirAstId<StmtNonlocal>,
}
pub enum DefinitionId {
Function(FunctionId),
Parameter(ParameterId),
Class(ClassId),
Assignment(AssignmentId),
AnnotatedAssignment(AnnotatedAssignmentId),
Global(GlobalId),
NonLocal(NonLocalId),
TypeParameter(TypeParameterId),
TypeAlias(TypeAlias),
}
pub enum DefinitionItem {
Function(Function),
Parameter(Parameter),
Class(Class),
Assignment(Assignment),
AnnotatedAssignment(AnnotatedAssignment),
Global(Global),
NonLocal(NonLocal),
TypeParameter(TypeParameter),
TypeAlias(TypeAlias),
}
// The closest analogue is rust-analyzer's item tree. It only represents "items", which make up the public interface of a module
// (it excludes any other statements or expressions). rust-analyzer uses it as the main input to the name resolution
// algorithm
// > It is the input to the name resolution algorithm, as well as to the queries defined in `adt.rs`,
// > `data.rs`, and most things in `attr.rs`.
//
// > One important purpose of this layer is to provide an "invalidation barrier" for incremental
// > computations: when typing inside an item body, the `ItemTree` of the modified file is typically
// > unaffected, so we don't have to recompute name resolution results or item data (see `data.rs`).
//
// I haven't fully figured this out but I think that this composes the "public" interface of a module?
// But maybe that's too optimistic.
//
//
#[derive(Debug, Clone, Default, Eq, PartialEq)]
pub struct Definitions {
functions: IndexVec<FunctionId, Function>,
parameters: IndexVec<ParameterId, Parameter>,
classes: IndexVec<ClassId, Class>,
assignments: IndexVec<AssignmentId, Assignment>,
annotated_assignments: IndexVec<AnnotatedAssignmentId, AnnotatedAssignment>,
type_aliases: IndexVec<TypeAliasId, TypeAlias>,
type_parameters: IndexVec<TypeParameterId, TypeParameter>,
globals: IndexVec<GlobalId, Global>,
non_locals: IndexVec<NonLocalId, NonLocal>,
}
impl Definitions {
pub fn from_module(module: &ModModule, ast_ids: &AstIds, file_id: FileId) -> Self {
let mut visitor = DefinitionsVisitor {
definitions: Definitions::default(),
ast_ids,
file_id,
};
visitor.visit_body(&module.body);
visitor.definitions
}
}
impl Index<FunctionId> for Definitions {
type Output = Function;
fn index(&self, index: FunctionId) -> &Self::Output {
&self.functions[index]
}
}
impl Index<ParameterId> for Definitions {
type Output = Parameter;
fn index(&self, index: ParameterId) -> &Self::Output {
&self.parameters[index]
}
}
impl Index<ClassId> for Definitions {
type Output = Class;
fn index(&self, index: ClassId) -> &Self::Output {
&self.classes[index]
}
}
impl Index<AssignmentId> for Definitions {
type Output = Assignment;
fn index(&self, index: AssignmentId) -> &Self::Output {
&self.assignments[index]
}
}
impl Index<AnnotatedAssignmentId> for Definitions {
type Output = AnnotatedAssignment;
fn index(&self, index: AnnotatedAssignmentId) -> &Self::Output {
&self.annotated_assignments[index]
}
}
impl Index<TypeAliasId> for Definitions {
type Output = TypeAlias;
fn index(&self, index: TypeAliasId) -> &Self::Output {
&self.type_aliases[index]
}
}
impl Index<GlobalId> for Definitions {
type Output = Global;
fn index(&self, index: GlobalId) -> &Self::Output {
&self.globals[index]
}
}
impl Index<NonLocalId> for Definitions {
type Output = NonLocal;
fn index(&self, index: NonLocalId) -> &Self::Output {
&self.non_locals[index]
}
}
impl Index<TypeParameterId> for Definitions {
type Output = TypeParameter;
fn index(&self, index: TypeParameterId) -> &Self::Output {
&self.type_parameters[index]
}
}
struct DefinitionsVisitor<'a> {
definitions: Definitions,
ast_ids: &'a AstIds,
file_id: FileId,
}
impl DefinitionsVisitor<'_> {
fn ast_id<N: HasAstId>(&self, node: &N) -> HirAstId<N> {
HirAstId {
file_id: self.file_id,
node_id: self.ast_ids.ast_id(node),
}
}
fn lower_function_def(&mut self, function: &StmtFunctionDef) -> FunctionId {
let name = Name::new(&function.name);
let first_type_parameter_id = self.definitions.type_parameters.next_index();
let mut last_type_parameter_id = first_type_parameter_id;
if let Some(type_params) = &function.type_params {
for parameter in &type_params.type_params {
let id = self.lower_type_parameter(parameter);
last_type_parameter_id = id;
}
}
let parameters = self.lower_parameters(&function.parameters);
self.definitions.functions.push(Function {
name,
ast_id: self.ast_id(function),
parameters,
type_parameters: first_type_parameter_id..last_type_parameter_id,
})
}
fn lower_parameters(&mut self, parameters: &ruff_python_ast::Parameters) -> Range<ParameterId> {
let first_parameter_id = self.definitions.parameters.next_index();
let mut last_parameter_id = first_parameter_id;
for parameter in &parameters.posonlyargs {
last_parameter_id = self.definitions.parameters.push(Parameter {
kind: ParameterKind::PositionalOnly,
name: Name::new(&parameter.parameter.name),
default: None,
ast_id: self.ast_id(&parameter.parameter),
});
}
if let Some(vararg) = &parameters.vararg {
last_parameter_id = self.definitions.parameters.push(Parameter {
kind: ParameterKind::Vararg,
name: Name::new(&vararg.name),
default: None,
ast_id: self.ast_id(vararg),
});
}
for parameter in &parameters.kwonlyargs {
last_parameter_id = self.definitions.parameters.push(Parameter {
kind: ParameterKind::KeywordOnly,
name: Name::new(&parameter.parameter.name),
default: None,
ast_id: self.ast_id(&parameter.parameter),
});
}
if let Some(kwarg) = &parameters.kwarg {
last_parameter_id = self.definitions.parameters.push(Parameter {
kind: ParameterKind::Kwarg,
name: Name::new(&kwarg.name),
default: None,
ast_id: self.ast_id(kwarg),
});
}
first_parameter_id..last_parameter_id
}
fn lower_class_def(&mut self, class: &StmtClassDef) -> ClassId {
let name = Name::new(&class.name);
self.definitions.classes.push(Class {
name,
ast_id: self.ast_id(class),
})
}
fn lower_assignment(&mut self, assignment: &StmtAssign) {
// FIXME handle multiple names
if let Some(Expr::Name(name)) = assignment.targets.first() {
self.definitions.assignments.push(Assignment {
name: Name::new(&name.id),
ast_id: self.ast_id(assignment),
});
}
}
fn lower_annotated_assignment(&mut self, annotated_assignment: &StmtAnnAssign) {
if let Expr::Name(name) = &*annotated_assignment.target {
self.definitions
.annotated_assignments
.push(AnnotatedAssignment {
name: Name::new(&name.id),
ast_id: self.ast_id(annotated_assignment),
});
}
}
fn lower_type_alias(&mut self, type_alias: &StmtTypeAlias) {
if let Expr::Name(name) = &*type_alias.name {
let name = Name::new(&name.id);
let lower_parameters_id = self.definitions.type_parameters.next_index();
let mut last_parameter_id = lower_parameters_id;
if let Some(type_params) = &type_alias.type_params {
for type_parameter in &type_params.type_params {
let id = self.lower_type_parameter(type_parameter);
last_parameter_id = id;
}
}
self.definitions.type_aliases.push(TypeAlias {
name,
ast_id: self.ast_id(type_alias),
parameters: lower_parameters_id..last_parameter_id,
});
}
}
fn lower_type_parameter(&mut self, type_parameter: &TypeParam) -> TypeParameterId {
match type_parameter {
TypeParam::TypeVar(type_var) => {
self.definitions
.type_parameters
.push(TypeParameter::TypeVar(TypeParameterTypeVar {
name: Name::new(&type_var.name),
ast_id: self.ast_id(type_var),
}))
}
TypeParam::ParamSpec(param_spec) => {
self.definitions
.type_parameters
.push(TypeParameter::ParamSpec(TypeParameterParamSpec {
name: Name::new(&param_spec.name),
ast_id: self.ast_id(param_spec),
}))
}
TypeParam::TypeVarTuple(type_var_tuple) => {
self.definitions
.type_parameters
.push(TypeParameter::TypeVarTuple(TypeParameterTypeVarTuple {
name: Name::new(&type_var_tuple.name),
ast_id: self.ast_id(type_var_tuple),
}))
}
}
}
fn lower_import(&mut self, _import: &StmtImport) {
// TODO
}
fn lower_import_from(&mut self, _import_from: &StmtImportFrom) {
// TODO
}
fn lower_global(&mut self, global: &StmtGlobal) -> GlobalId {
self.definitions.globals.push(Global {
ast_id: self.ast_id(global),
})
}
fn lower_non_local(&mut self, non_local: &StmtNonlocal) -> NonLocalId {
self.definitions.non_locals.push(NonLocal {
ast_id: self.ast_id(non_local),
})
}
fn lower_except_handler(&mut self, _except_handler: &ExceptHandlerExceptHandler) {
// TODO
}
fn lower_with_item(&mut self, _with_item: &WithItem) {
// TODO
}
fn lower_match_case(&mut self, _match_case: &MatchCase) {
// TODO
}
}
impl PreorderVisitor<'_> for DefinitionsVisitor<'_> {
fn visit_stmt(&mut self, stmt: &Stmt) {
match stmt {
// Definition statements
Stmt::FunctionDef(definition) => {
self.lower_function_def(definition);
self.visit_body(&definition.body);
}
Stmt::ClassDef(definition) => {
self.lower_class_def(definition);
self.visit_body(&definition.body);
}
Stmt::Assign(assignment) => {
self.lower_assignment(assignment);
}
Stmt::AnnAssign(annotated_assignment) => {
self.lower_annotated_assignment(annotated_assignment);
}
Stmt::TypeAlias(type_alias) => {
self.lower_type_alias(type_alias);
}
Stmt::Import(import) => self.lower_import(import),
Stmt::ImportFrom(import_from) => self.lower_import_from(import_from),
Stmt::Global(global) => {
self.lower_global(global);
}
Stmt::Nonlocal(non_local) => {
self.lower_non_local(non_local);
}
// Visit the compound statement bodies because they can contain other definitions.
Stmt::For(_)
| Stmt::While(_)
| Stmt::If(_)
| Stmt::With(_)
| Stmt::Match(_)
| Stmt::Try(_) => {
preorder::walk_stmt(self, stmt);
}
// Skip over simple statements because they can't contain any other definitions.
Stmt::Return(_)
| Stmt::Delete(_)
| Stmt::AugAssign(_)
| Stmt::Raise(_)
| Stmt::Assert(_)
| Stmt::Expr(_)
| Stmt::Pass(_)
| Stmt::Break(_)
| Stmt::Continue(_)
| Stmt::IpyEscapeCommand(_) => {
// No op
}
}
}
fn visit_expr(&mut self, _: &'_ Expr) {}
fn visit_decorator(&mut self, _decorator: &'_ Decorator) {}
fn visit_except_handler(&mut self, except_handler: &'_ ExceptHandler) {
match except_handler {
ExceptHandler::ExceptHandler(except_handler) => {
self.lower_except_handler(except_handler);
}
}
}
fn visit_with_item(&mut self, with_item: &'_ WithItem) {
self.lower_with_item(with_item);
}
fn visit_match_case(&mut self, match_case: &'_ MatchCase) {
self.lower_match_case(match_case);
self.visit_body(&match_case.body);
}
}
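The lowering above allocates each definition kind into its own arena and refers to runs of children with `Range<Id>`. A simplified sketch of that allocation pattern, using plain `Vec` indices in place of `newtype_index` ids; note that the ranges are half-open, so the end index is captured *after* the last push:

use std::ops::Range;

#[derive(Debug)]
struct Parameter { name: String }

#[derive(Debug)]
struct Function { name: String, parameters: Range<usize> }

#[derive(Default)]
struct Definitions {
    functions: Vec<Function>,
    parameters: Vec<Parameter>,
}

impl Definitions {
    fn lower_function(&mut self, name: &str, params: &[&str]) -> usize {
        let first = self.parameters.len();
        for param in params {
            self.parameters.push(Parameter { name: (*param).to_string() });
        }
        let end = self.parameters.len(); // one past the last parameter
        self.functions.push(Function {
            name: name.to_string(),
            parameters: first..end,
        });
        self.functions.len() - 1
    }
}

fn main() {
    let mut definitions = Definitions::default();
    let id = definitions.lower_function("f", &["x", "y"]);
    let function = &definitions.functions[id];
    // The id range slices the parameter arena directly.
    let params = &definitions.parameters[function.parameters.clone()];
    assert_eq!(params.len(), 2);
}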

View File

@@ -1,6 +1,109 @@
use crate::db::Jar;
use std::fmt::Formatter;
use std::hash::BuildHasherDefault;
use std::ops::Deref;
use std::path::{Path, PathBuf};
use rustc_hash::{FxHashSet, FxHasher};
use crate::files::FileId;
pub mod ast_ids;
pub mod cache;
pub mod cancellation;
pub mod db;
pub mod files;
pub mod hir;
pub mod lint;
pub mod module;
mod parse;
pub mod program;
pub mod source;
mod symbols;
mod types;
pub mod watch;
pub mod workspace;
pub(crate) type FxDashMap<K, V> = dashmap::DashMap<K, V, BuildHasherDefault<FxHasher>>;
#[allow(unused)]
pub(crate) type FxDashSet<V> = dashmap::DashSet<V, BuildHasherDefault<FxHasher>>;
pub(crate) type FxIndexSet<V> = indexmap::set::IndexSet<V, BuildHasherDefault<FxHasher>>;
#[derive(Debug, Clone)]
pub struct Workspace {
/// TODO this should be a resolved path. We should probably use a newtype wrapper that guarantees that
/// the path is UTF-8 and normalized.
root: PathBuf,
/// The files that are open in the workspace.
///
/// * Editor: The files that are actively being edited in the editor (the user has a tab open with the file).
/// * CLI: The resolved files passed as arguments to the CLI.
open_files: FxHashSet<FileId>,
}
impl Workspace {
pub fn new(root: PathBuf) -> Self {
Self {
root,
open_files: FxHashSet::default(),
}
}
pub fn root(&self) -> &Path {
self.root.as_path()
}
// TODO having the content in workspace feels wrong.
pub fn open_file(&mut self, file_id: FileId) {
self.open_files.insert(file_id);
}
pub fn close_file(&mut self, file_id: FileId) {
self.open_files.remove(&file_id);
}
// TODO introduce an `OpenFile` type instead of using an anonymous tuple.
pub fn open_files(&self) -> impl Iterator<Item = FileId> + '_ {
self.open_files.iter().copied()
}
pub fn is_file_open(&self, file_id: FileId) -> bool {
self.open_files.contains(&file_id)
}
}
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub struct Name(smol_str::SmolStr);
impl Name {
#[inline]
pub fn new(name: &str) -> Self {
Self(smol_str::SmolStr::new(name))
}
pub fn as_str(&self) -> &str {
self.0.as_str()
}
}
impl Deref for Name {
type Target = str;
#[inline]
fn deref(&self) -> &Self::Target {
self.as_str()
}
}
impl<T> From<T> for Name
where
T: Into<smol_str::SmolStr>,
{
fn from(value: T) -> Self {
Self(value.into())
}
}
impl std::fmt::Display for Name {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(self.as_str())
}
}

View File

@@ -1,59 +1,59 @@
use std::cell::RefCell;
use std::ops::Deref;
use std::ops::{Deref, DerefMut};
use std::sync::Arc;
use std::time::Duration;
use tracing::trace_span;
use ruff_python_ast::visitor::Visitor;
use ruff_python_ast::{ModModule, StringLiteral};
use red_knot_module_resolver::ModuleName;
use red_knot_python_semantic::types::Type;
use red_knot_python_semantic::{HasTy, SemanticModel};
use ruff_db::files::File;
use ruff_db::parsed::{parsed_module, ParsedModule};
use ruff_db::source::{source_text, SourceText};
use ruff_python_ast as ast;
use ruff_python_ast::visitor::{walk_stmt, Visitor};
use crate::cache::KeyValueCache;
use crate::db::{LintDb, LintJar, QueryResult};
use crate::files::FileId;
use crate::module::ModuleName;
use crate::parse::{parse, Parsed};
use crate::source::{source_text, Source};
use crate::symbols::{
resolve_global_symbol, symbol_table, Definition, GlobalSymbolId, SymbolId, SymbolTable,
};
use crate::types::{infer_definition_type, infer_symbol_type, Type};
use crate::db::Db;
#[tracing::instrument(level = "debug", skip(db))]
pub(crate) fn lint_syntax(db: &dyn LintDb, file_id: FileId) -> QueryResult<Diagnostics> {
let lint_jar: &LintJar = db.jar()?;
let storage = &lint_jar.lint_syntax;
/// Workaround query to test whether the computation should be cancelled.
/// Ideally, push for Salsa to expose an API for testing if cancellation was requested.
#[salsa::tracked]
#[allow(unused_variables)]
pub(crate) fn unwind_if_cancelled(db: &dyn Db) {}
#[salsa::tracked(return_ref)]
pub(crate) fn lint_syntax(db: &dyn Db, file_id: File) -> Diagnostics {
#[allow(clippy::print_stdout)]
if std::env::var("RED_KNOT_SLOW_LINT").is_ok() {
for i in 0..10 {
unwind_if_cancelled(db);
db.cancelled()?;
println!("RED_KNOT_SLOW_LINT is set, sleeping for {i}/10 seconds");
std::thread::sleep(Duration::from_secs(1));
}
}
let mut diagnostics = Vec::new();
storage.get(&file_id, |file_id| {
let mut diagnostics = Vec::new();
let source = source_text(db.upcast(), file_id);
lint_lines(&source, &mut diagnostics);
let source = source_text(db.upcast(), *file_id)?;
lint_lines(source.text(), &mut diagnostics);
let parsed = parsed_module(db.upcast(), file_id);
let parsed = parse(db.upcast(), *file_id)?;
if parsed.errors().is_empty() {
let ast = parsed.syntax();
if parsed.errors().is_empty() {
let ast = parsed.ast();
let mut visitor = SyntaxLintVisitor {
diagnostics,
source: &source,
};
visitor.visit_body(&ast.body);
diagnostics = visitor.diagnostics;
} else {
diagnostics.extend(parsed.errors().iter().map(ToString::to_string));
}
let mut visitor = SyntaxLintVisitor {
diagnostics,
source: source.text(),
};
visitor.visit_body(&ast.body);
diagnostics = visitor.diagnostics;
} else {
diagnostics.extend(parsed.errors().iter().map(std::string::ToString::to_string));
}
Diagnostics::from(diagnostics)
Ok(Diagnostics::from(diagnostics))
})
}
fn lint_lines(source: &str, diagnostics: &mut Vec<String>) {
@@ -73,123 +73,165 @@ fn lint_lines(source: &str, diagnostics: &mut Vec<String>) {
}
}
#[salsa::tracked(return_ref)]
pub(crate) fn lint_semantic(db: &dyn Db, file_id: File) -> Diagnostics {
let _span = trace_span!("lint_semantic", ?file_id).entered();
#[tracing::instrument(level = "debug", skip(db))]
pub(crate) fn lint_semantic(db: &dyn LintDb, file_id: FileId) -> QueryResult<Diagnostics> {
let lint_jar: &LintJar = db.jar()?;
let storage = &lint_jar.lint_semantic;
let source = source_text(db.upcast(), file_id);
let parsed = parsed_module(db.upcast(), file_id);
let semantic = SemanticModel::new(db.upcast(), file_id);
storage.get(&file_id, |file_id| {
let source = source_text(db.upcast(), *file_id)?;
let parsed = parse(db.upcast(), *file_id)?;
let symbols = symbol_table(db.upcast(), *file_id)?;
if !parsed.is_valid() {
return Diagnostics::Empty;
}
let context = SemanticLintContext {
source,
parsed,
semantic,
diagnostics: RefCell::new(Vec::new()),
};
SemanticVisitor { context: &context }.visit_body(parsed.suite());
Diagnostics::from(context.diagnostics.take())
}
fn lint_unresolved_imports(context: &SemanticLintContext, import: AnyImportRef) {
match import {
AnyImportRef::Import(import) => {
for alias in &import.names {
let ty = alias.ty(&context.semantic);
if ty.is_unbound() {
context.push_diagnostic(format!("Unresolved import '{}'", &alias.name));
}
}
}
AnyImportRef::ImportFrom(import) => {
for alias in &import.names {
let ty = alias.ty(&context.semantic);
if ty.is_unbound() {
context.push_diagnostic(format!("Unresolved import '{}'", &alias.name));
}
}
}
}
}
fn lint_bad_override(context: &SemanticLintContext, class: &ast::StmtClassDef) {
let semantic = &context.semantic;
// TODO we should have a special marker on the real typing module (from typeshed) so if you
// have your own "typing" module in your project, we don't consider it THE typing module (and
// same for other stdlib modules that our lint rules care about)
let Some(typing) = semantic.resolve_module(ModuleName::new("typing").unwrap()) else {
return;
};
let override_ty = semantic.global_symbol_ty(&typing, "override");
let Type::Class(class_ty) = class.ty(semantic) else {
return;
};
for function in class
.body
.iter()
.filter_map(|stmt| stmt.as_function_def_stmt())
{
let Type::Function(ty) = function.ty(semantic) else {
return;
let context = SemanticLintContext {
file_id: *file_id,
source,
parsed,
symbols,
db,
diagnostics: RefCell::new(Vec::new()),
};
// TODO this shouldn't make direct use of the Db; see comment on SemanticModel::db
let db = semantic.db();
lint_unresolved_imports(&context)?;
lint_bad_overrides(&context)?;
if ty.has_decorator(db, override_ty) {
let method_name = ty.name(db);
if class_ty
.inherited_class_member(db, &method_name)
.is_unbound()
Ok(Diagnostics::from(context.diagnostics.take()))
})
}
fn lint_unresolved_imports(context: &SemanticLintContext) -> QueryResult<()> {
// TODO: Consider iterating over the dependencies (imports) only instead of all definitions.
for (symbol, definition) in context.symbols().all_definitions() {
match definition {
Definition::Import(import) => {
let ty = context.infer_symbol_type(symbol)?;
if ty.is_unknown() {
context.push_diagnostic(format!("Unresolved module {}", import.module));
}
}
Definition::ImportFrom(import) => {
let ty = context.infer_symbol_type(symbol)?;
if ty.is_unknown() {
let module_name = import.module().map(Deref::deref).unwrap_or_default();
let message = if import.level() > 0 {
format!(
"Unresolved relative import '{}' from {}{}",
import.name(),
".".repeat(import.level() as usize),
module_name
)
} else {
format!(
"Unresolved import '{}' from '{}'",
import.name(),
module_name
)
};
context.push_diagnostic(message);
}
}
_ => {}
}
}
Ok(())
}
fn lint_bad_overrides(context: &SemanticLintContext) -> QueryResult<()> {
// TODO we should have a special marker on the real typing module (from typeshed) so if you
// have your own "typing" module in your project, we don't consider it THE typing module (and
// same for other stdlib modules that our lint rules care about)
let Some(typing_override) =
resolve_global_symbol(context.db.upcast(), ModuleName::new("typing"), "override")?
else {
// TODO once we bundle typeshed, this should be unreachable!()
return Ok(());
};
// TODO we should maybe index definitions by type instead of iterating all, or else iterate all
// just once, match, and branch to all lint rules that care about a type of definition
for (symbol, definition) in context.symbols().all_definitions() {
if !matches!(definition, Definition::FunctionDef(_)) {
continue;
}
let ty = infer_definition_type(
context.db.upcast(),
GlobalSymbolId {
file_id: context.file_id,
symbol_id: symbol,
},
definition.clone(),
)?;
let Type::Function(func) = ty else {
unreachable!("type of a FunctionDef should always be a Function");
};
let Some(class) = func.get_containing_class(context.db.upcast())? else {
// not a method of a class
continue;
};
if func.has_decorator(context.db.upcast(), typing_override)? {
let method_name = func.name(context.db.upcast())?;
if class
.get_super_class_member(context.db.upcast(), &method_name)?
.is_none()
{
// TODO should have a qualname() method to support nested classes
context.push_diagnostic(
format!(
"Method {}.{} is decorated with `typing.override` but does not override any base class method",
class_ty.name(db),
class.name(context.db.upcast())?,
method_name,
));
}
}
}
Ok(())
}
pub(crate) struct SemanticLintContext<'a> {
source: SourceText,
parsed: &'a ParsedModule,
semantic: SemanticModel<'a>,
pub struct SemanticLintContext<'a> {
file_id: FileId,
source: Source,
parsed: Parsed,
symbols: Arc<SymbolTable>,
db: &'a dyn LintDb,
diagnostics: RefCell<Vec<String>>,
}
impl<'db> SemanticLintContext<'db> {
#[allow(unused)]
pub(crate) fn source_text(&self) -> &str {
self.source.as_str()
impl<'a> SemanticLintContext<'a> {
pub fn source_text(&self) -> &str {
self.source.text()
}
#[allow(unused)]
pub(crate) fn ast(&self) -> &'db ast::ModModule {
self.parsed.syntax()
pub fn file_id(&self) -> FileId {
self.file_id
}
pub(crate) fn push_diagnostic(&self, diagnostic: String) {
pub fn ast(&self) -> &ModModule {
self.parsed.ast()
}
pub fn symbols(&self) -> &SymbolTable {
&self.symbols
}
pub fn infer_symbol_type(&self, symbol_id: SymbolId) -> QueryResult<Type> {
infer_symbol_type(
self.db.upcast(),
GlobalSymbolId {
file_id: self.file_id,
symbol_id,
},
)
}
pub fn push_diagnostic(&self, diagnostic: String) {
self.diagnostics.borrow_mut().push(diagnostic);
}
#[allow(unused)]
pub(crate) fn extend_diagnostics(&mut self, diagnostics: impl IntoIterator<Item = String>) {
pub fn extend_diagnostics(&mut self, diagnostics: impl IntoIterator<Item = String>) {
self.diagnostics.get_mut().extend(diagnostics);
}
}
@@ -201,7 +243,7 @@ struct SyntaxLintVisitor<'a> {
}
impl Visitor<'_> for SyntaxLintVisitor<'_> {
fn visit_string_literal(&mut self, string_literal: &'_ ast::StringLiteral) {
fn visit_string_literal(&mut self, string_literal: &'_ StringLiteral) {
// A very naive implementation of the "use double quotes" rule
let text = &self.source[string_literal.range];
@@ -212,33 +254,10 @@ impl Visitor<'_> for SyntaxLintVisitor<'_> {
}
}
struct SemanticVisitor<'a> {
context: &'a SemanticLintContext<'a>,
}
impl Visitor<'_> for SemanticVisitor<'_> {
fn visit_stmt(&mut self, stmt: &ast::Stmt) {
match stmt {
ast::Stmt::ClassDef(class) => {
lint_bad_override(self.context, class);
}
ast::Stmt::Import(import) => {
lint_unresolved_imports(self.context, AnyImportRef::Import(import));
}
ast::Stmt::ImportFrom(import) => {
lint_unresolved_imports(self.context, AnyImportRef::ImportFrom(import));
}
_ => {}
}
walk_stmt(self, stmt);
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone)]
pub enum Diagnostics {
Empty,
List(Vec<String>),
List(Arc<Vec<String>>),
}
impl Diagnostics {
@@ -262,13 +281,41 @@ impl From<Vec<String>> for Diagnostics {
if value.is_empty() {
Diagnostics::Empty
} else {
Diagnostics::List(value)
Diagnostics::List(Arc::new(value))
}
}
}
#[derive(Copy, Clone, Debug)]
enum AnyImportRef<'a> {
Import(&'a ast::StmtImport),
ImportFrom(&'a ast::StmtImportFrom),
#[derive(Default, Debug)]
pub struct LintSyntaxStorage(KeyValueCache<FileId, Diagnostics>);
impl Deref for LintSyntaxStorage {
type Target = KeyValueCache<FileId, Diagnostics>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for LintSyntaxStorage {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Default, Debug)]
pub struct LintSemanticStorage(KeyValueCache<FileId, Diagnostics>);
impl Deref for LintSemanticStorage {
type Target = KeyValueCache<FileId, Diagnostics>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for LintSemanticStorage {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}

View File

@@ -1,8 +1,9 @@
#![allow(clippy::dbg_macro)]
use std::path::Path;
use std::sync::Mutex;
use clap::Parser;
use crossbeam::channel as crossbeam_channel;
use salsa::ParallelDatabase;
use tracing::subscriber::Interest;
use tracing::{Level, Metadata};
use tracing_subscriber::filter::LevelFilter;
@@ -10,102 +11,50 @@ use tracing_subscriber::layer::{Context, Filter, SubscriberExt};
use tracing_subscriber::{Layer, Registry};
use tracing_tree::time::Uptime;
use red_knot::db::RootDatabase;
use red_knot::db::{HasJar, ParallelDatabase, QueryError, SourceDb, SourceJar};
use red_knot::module::{set_module_search_paths, ModuleSearchPath, ModuleSearchPathKind};
use red_knot::program::check::ExecutionMode;
use red_knot::program::{FileWatcherChange, Program};
use red_knot::watch::FileWatcher;
use red_knot::watch::FileWatcherChange;
use red_knot::workspace::WorkspaceMetadata;
use ruff_db::program::{ProgramSettings, SearchPathSettings};
use ruff_db::system::{OsSystem, System, SystemPathBuf};
use red_knot::Workspace;
use cli::target_version::TargetVersion;
use cli::verbosity::{Verbosity, VerbosityLevel};
#[allow(clippy::print_stdout, clippy::unnecessary_wraps, clippy::print_stderr)]
fn main() -> anyhow::Result<()> {
setup_tracing();
mod cli;
let arguments: Vec<_> = std::env::args().collect();
#[derive(Debug, Parser)]
#[command(
author,
name = "red-knot",
about = "An experimental multifile analysis backend for Ruff"
)]
#[command(version)]
struct Args {
#[arg(
long,
help = "Changes the current working directory.",
long_help = "Changes the current working directory before any specified operations. This affects the workspace and configuration discovery.",
value_name = "PATH"
)]
current_directory: Option<SystemPathBuf>,
if arguments.len() < 2 {
eprintln!("Usage: red_knot <path>");
return Err(anyhow::anyhow!("Invalid arguments"));
}
#[arg(
long,
value_name = "DIRECTORY",
help = "Custom directory to use for stdlib typeshed stubs"
)]
custom_typeshed_dir: Option<SystemPathBuf>,
let entry_point = Path::new(&arguments[1]);
#[arg(
long,
value_name = "PATH",
help = "Additional path to use as a module-resolution source (can be passed multiple times)"
)]
extra_search_path: Vec<SystemPathBuf>,
if !entry_point.exists() {
eprintln!("The entry point does not exist.");
return Err(anyhow::anyhow!("Invalid arguments"));
}
#[arg(long, help = "Python version to assume when resolving types", default_value_t = TargetVersion::default(), value_name="VERSION")]
target_version: TargetVersion,
if !entry_point.is_file() {
eprintln!("The entry point is not a file.");
return Err(anyhow::anyhow!("Invalid arguments"));
}
#[clap(flatten)]
verbosity: Verbosity,
}
let workspace_folder = entry_point.parent().unwrap();
let workspace = Workspace::new(workspace_folder.to_path_buf());
#[allow(
clippy::print_stdout,
clippy::unnecessary_wraps,
clippy::print_stderr,
clippy::dbg_macro
)]
pub fn main() -> anyhow::Result<()> {
let Args {
current_directory,
custom_typeshed_dir,
extra_search_path: extra_paths,
target_version,
verbosity,
} = Args::parse_from(std::env::args().collect::<Vec<_>>());
let workspace_search_path = ModuleSearchPath::new(
workspace.root().to_path_buf(),
ModuleSearchPathKind::FirstParty,
);
let mut program = Program::new(workspace);
set_module_search_paths(&mut program, vec![workspace_search_path]);
let verbosity = verbosity.level();
countme::enable(verbosity == Some(VerbosityLevel::Trace));
setup_tracing(verbosity);
let entry_id = program.file_id(entry_point);
program.workspace_mut().open_file(entry_id);
let cwd = if let Some(cwd) = current_directory {
let canonicalized = cwd.as_utf8_path().canonicalize_utf8().unwrap();
SystemPathBuf::from_utf8_path_buf(canonicalized)
} else {
let cwd = std::env::current_dir().unwrap();
SystemPathBuf::from_path_buf(cwd).unwrap()
};
let system = OsSystem::new(cwd.clone());
let workspace_metadata =
WorkspaceMetadata::from_path(system.current_directory(), &system).unwrap();
// TODO: Respect the settings from the workspace metadata. when resolving the program settings.
let program_settings = ProgramSettings {
target_version: target_version.into(),
search_paths: SearchPathSettings {
extra_paths,
workspace_root: workspace_metadata.root().to_path_buf(),
custom_typeshed: custom_typeshed_dir,
site_packages: None,
},
};
// TODO: Use the `program_settings` to compute the key for the database's persistent
// cache and load the cache if it exists.
let mut db = RootDatabase::new(workspace_metadata, program_settings, system);
let (main_loop, main_loop_cancellation_token) = MainLoop::new(verbosity);
let (main_loop, main_loop_cancellation_token) = MainLoop::new();
// Listen to Ctrl+C and abort the watch mode.
let main_loop_cancellation_token = Mutex::new(Some(main_loop_cancellation_token));
@@ -124,29 +73,31 @@ pub fn main() -> anyhow::Result<()> {
file_changes_notifier.notify(changes);
})?;
file_watcher.watch_folder(db.workspace().root(&db).as_std_path())?;
file_watcher.watch_folder(workspace_folder)?;
main_loop.run(&mut db);
main_loop.run(&mut program);
println!("{}", countme::get_all());
let source_jar: &SourceJar = program.jar().unwrap();
dbg!(source_jar.parsed.statistics());
dbg!(source_jar.sources.statistics());
Ok(())
}
struct MainLoop {
verbosity: Option<VerbosityLevel>,
orchestrator: crossbeam_channel::Sender<OrchestratorMessage>,
receiver: crossbeam_channel::Receiver<MainLoopMessage>,
orchestrator_sender: crossbeam_channel::Sender<OrchestratorMessage>,
main_loop_receiver: crossbeam_channel::Receiver<MainLoopMessage>,
}
impl MainLoop {
fn new(verbosity: Option<VerbosityLevel>) -> (Self, MainLoopCancellationToken) {
fn new() -> (Self, MainLoopCancellationToken) {
let (orchestrator_sender, orchestrator_receiver) = crossbeam_channel::bounded(1);
let (main_loop_sender, main_loop_receiver) = crossbeam_channel::bounded(1);
let mut orchestrator = Orchestrator {
receiver: orchestrator_receiver,
main_loop: main_loop_sender.clone(),
sender: main_loop_sender.clone(),
revision: 0,
};
@@ -156,9 +107,8 @@ impl MainLoop {
(
Self {
verbosity,
orchestrator: orchestrator_sender,
receiver: main_loop_receiver,
orchestrator_sender,
main_loop_receiver,
},
MainLoopCancellationToken {
sender: main_loop_sender,
@@ -168,49 +118,45 @@ impl MainLoop {
fn file_changes_notifier(&self) -> FileChangesNotifier {
FileChangesNotifier {
sender: self.orchestrator.clone(),
sender: self.orchestrator_sender.clone(),
}
}
#[allow(clippy::print_stderr)]
fn run(self, db: &mut RootDatabase) {
self.orchestrator.send(OrchestratorMessage::Run).unwrap();
fn run(self, program: &mut Program) {
self.orchestrator_sender
.send(OrchestratorMessage::Run)
.unwrap();
for message in &self.receiver {
for message in &self.main_loop_receiver {
tracing::trace!("Main Loop: Tick");
match message {
MainLoopMessage::CheckWorkspace { revision } => {
let db = db.snapshot();
let orchestrator = self.orchestrator.clone();
MainLoopMessage::CheckProgram { revision } => {
let program = program.snapshot();
let sender = self.orchestrator_sender.clone();
// Spawn a new task that checks the workspace. This needs to be done in a separate thread
// Spawn a new task that checks the program. This needs to be done in a separate thread
// to prevent blocking the main loop here.
rayon::spawn(move || {
if let Ok(result) = db.check() {
orchestrator
.send(OrchestratorMessage::CheckCompleted {
rayon::spawn(move || match program.check(ExecutionMode::ThreadPool) {
Ok(result) => {
sender
.send(OrchestratorMessage::CheckProgramCompleted {
diagnostics: result,
revision,
})
.unwrap();
}
Err(QueryError::Cancelled) => {}
});
}
MainLoopMessage::ApplyChanges(changes) => {
// Automatically cancels any pending queries and waits for them to complete.
db.apply_changes(changes);
program.apply_changes(changes);
}
MainLoopMessage::CheckCompleted(diagnostics) => {
eprintln!("{}", diagnostics.join("\n"));
if self.verbosity == Some(VerbosityLevel::Trace) {
eprintln!("{}", countme::get_all());
}
dbg!(diagnostics);
}
MainLoopMessage::Exit => {
if self.verbosity == Some(VerbosityLevel::Trace) {
eprintln!("{}", countme::get_all());
}
return;
}
}
@@ -220,7 +166,7 @@ impl MainLoop {
impl Drop for MainLoop {
fn drop(&mut self) {
self.orchestrator
self.orchestrator_sender
.send(OrchestratorMessage::Shutdown)
.unwrap();
}
@@ -252,32 +198,31 @@ impl MainLoopCancellationToken {
struct Orchestrator {
/// Sends messages to the main loop.
main_loop: crossbeam_channel::Sender<MainLoopMessage>,
sender: crossbeam_channel::Sender<MainLoopMessage>,
/// Receives messages from the main loop.
receiver: crossbeam_channel::Receiver<OrchestratorMessage>,
revision: usize,
}
impl Orchestrator {
#[allow(clippy::print_stderr)]
fn run(&mut self) {
while let Ok(message) = self.receiver.recv() {
match message {
OrchestratorMessage::Run => {
self.main_loop
.send(MainLoopMessage::CheckWorkspace {
self.sender
.send(MainLoopMessage::CheckProgram {
revision: self.revision,
})
.unwrap();
}
OrchestratorMessage::CheckCompleted {
OrchestratorMessage::CheckProgramCompleted {
diagnostics,
revision,
} => {
// Only take the diagnostics if they are for the latest revision.
if self.revision == revision {
self.main_loop
self.sender
.send(MainLoopMessage::CheckCompleted(diagnostics))
.unwrap();
} else {
@@ -312,7 +257,7 @@ impl Orchestrator {
changes.extend(file_changes);
}
Ok(OrchestratorMessage::CheckCompleted { .. }) => {
Ok(OrchestratorMessage::CheckProgramCompleted { .. }) => {
// Disregard any outdated completion message.
}
Ok(OrchestratorMessage::Run) => unreachable!("The orchestrator is already running."),
@@ -325,8 +270,8 @@ impl Orchestrator {
},
default(std::time::Duration::from_millis(10)) => {
// No more file changes after 10 ms, send the changes and schedule a new analysis
self.main_loop.send(MainLoopMessage::ApplyChanges(changes)).unwrap();
self.main_loop.send(MainLoopMessage::CheckWorkspace { revision: self.revision}).unwrap();
self.sender.send(MainLoopMessage::ApplyChanges(changes)).unwrap();
self.sender.send(MainLoopMessage::CheckProgram { revision: self.revision}).unwrap();
return;
}
}
@@ -342,7 +287,7 @@ impl Orchestrator {
/// Message sent from the orchestrator to the main loop.
#[derive(Debug)]
enum MainLoopMessage {
CheckWorkspace { revision: usize },
CheckProgram { revision: usize },
CheckCompleted(Vec<String>),
ApplyChanges(Vec<FileWatcherChange>),
Exit,
@@ -353,7 +298,7 @@ enum OrchestratorMessage {
Run,
Shutdown,
CheckCompleted {
CheckProgramCompleted {
diagnostics: Vec<String>,
revision: usize,
},
@@ -361,14 +306,7 @@ enum OrchestratorMessage {
FileChanges(Vec<FileWatcherChange>),
}
fn setup_tracing(verbosity: Option<VerbosityLevel>) {
let trace_level = match verbosity {
None => Level::WARN,
Some(VerbosityLevel::Info) => Level::INFO,
Some(VerbosityLevel::Debug) => Level::DEBUG,
Some(VerbosityLevel::Trace) => Level::TRACE,
};
fn setup_tracing() {
let subscriber = Registry::default().with(
tracing_tree::HierarchicalLayer::default()
.with_indent_lines(true)
@@ -378,7 +316,9 @@ fn setup_tracing(verbosity: Option<VerbosityLevel>) {
.with_targets(true)
.with_writer(|| Box::new(std::io::stderr()))
.with_timer(Uptime::default())
.with_filter(LoggingFilter { trace_level }),
.with_filter(LoggingFilter {
trace_level: Level::TRACE,
}),
);
tracing::subscriber::set_global_default(subscriber).unwrap();
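
One detail of the main-loop setup above is worth spelling out: the cancellation token is stored in a `Mutex<Option<...>>` because the Ctrl+C handler may fire more than once while the token must be consumed exactly once. A minimal standalone sketch of that take-once pattern (the type name here is illustrative, not this crate's actual token):

use std::sync::Mutex;

struct CancellationToken;

fn main() {
    // Wrap the token so a re-entrant signal handler can consume it only once.
    let slot = Mutex::new(Some(CancellationToken));

    // First signal: takes the token and triggers cancellation.
    if let Some(_token) = slot.lock().unwrap().take() {
        println!("stopping main loop");
    }

    // Any later signal finds the slot empty and does nothing.
    assert!(slot.lock().unwrap().take().is_none());
}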

File diff suppressed because it is too large.


@@ -0,0 +1,93 @@
use std::ops::{Deref, DerefMut};
use std::sync::Arc;
use ruff_python_ast as ast;
use ruff_python_parser::{Mode, ParseError};
use ruff_text_size::{Ranged, TextRange};
use crate::cache::KeyValueCache;
use crate::db::{QueryResult, SourceDb};
use crate::files::FileId;
use crate::source::source_text;
#[derive(Debug, Clone, PartialEq)]
pub struct Parsed {
inner: Arc<ParsedInner>,
}
#[derive(Debug, PartialEq)]
struct ParsedInner {
ast: ast::ModModule,
errors: Vec<ParseError>,
}
impl Parsed {
fn new(ast: ast::ModModule, errors: Vec<ParseError>) -> Self {
Self {
inner: Arc::new(ParsedInner { ast, errors }),
}
}
pub(crate) fn from_text(text: &str) -> Self {
let result = ruff_python_parser::parse(text, Mode::Module);
let (module, errors) = match result {
Ok(ast::Mod::Module(module)) => (module, vec![]),
Ok(ast::Mod::Expression(expression)) => (
ast::ModModule {
range: expression.range(),
body: vec![ast::Stmt::Expr(ast::StmtExpr {
range: expression.range(),
value: expression.body,
})],
},
vec![],
),
Err(errors) => (
ast::ModModule {
range: TextRange::default(),
body: Vec::new(),
},
vec![errors],
),
};
Parsed::new(module, errors)
}
pub fn ast(&self) -> &ast::ModModule {
&self.inner.ast
}
pub fn errors(&self) -> &[ParseError] {
&self.inner.errors
}
}
#[tracing::instrument(level = "debug", skip(db))]
pub(crate) fn parse(db: &dyn SourceDb, file_id: FileId) -> QueryResult<Parsed> {
let jar = db.jar()?;
jar.parsed.get(&file_id, |file_id| {
let source = source_text(db, *file_id)?;
Ok(Parsed::from_text(source.text()))
})
}
#[derive(Debug, Default)]
pub struct ParsedStorage(KeyValueCache<FileId, Parsed>);
impl Deref for ParsedStorage {
type Target = KeyValueCache<FileId, Parsed>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for ParsedStorage {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
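
Because `from_text` folds parse failures into the `errors` list and always yields a module, callers can treat parsing as infallible. A brief usage sketch of the `Parsed` API above (the input strings are illustrative):

let ok = Parsed::from_text("x = 1");
assert!(ok.errors().is_empty());

// A syntax error still produces a (possibly empty) module; the error is recorded.
let broken = Parsed::from_text("def f(:");
assert!(!broken.errors().is_empty());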


@@ -0,0 +1,413 @@
use rayon::{current_num_threads, yield_local};
use rustc_hash::FxHashSet;
use crate::db::{Database, QueryError, QueryResult};
use crate::files::FileId;
use crate::lint::{lint_semantic, lint_syntax, Diagnostics};
use crate::module::{file_to_module, resolve_module};
use crate::program::Program;
use crate::symbols::{symbol_table, Dependency};
impl Program {
/// Checks all open files in the workspace and its dependencies.
#[tracing::instrument(level = "debug", skip_all)]
pub fn check(&self, mode: ExecutionMode) -> QueryResult<Vec<String>> {
self.cancelled()?;
let mut context = CheckContext::new(self);
match mode {
ExecutionMode::SingleThreaded => SingleThreadedExecutor.run(&mut context)?,
ExecutionMode::ThreadPool => ThreadPoolExecutor.run(&mut context)?,
};
Ok(context.finish())
}
#[tracing::instrument(level = "debug", skip(self, context))]
fn check_file(&self, file: FileId, context: &CheckFileContext) -> QueryResult<Diagnostics> {
self.cancelled()?;
let symbol_table = symbol_table(self, file)?;
let dependencies = symbol_table.dependencies();
if !dependencies.is_empty() {
let module = file_to_module(self, file)?;
// TODO scheduling all dependencies here is wasteful if we don't infer any types on them
// but I think that's unlikely, so it's probably okay.
// Anyway, we need to figure out a way to retrieve the dependencies of a module
// from the persistent cache. So maybe it should be a separate query after all.
for dependency in dependencies {
let dependency_name = match dependency {
Dependency::Module(name) => Some(name.clone()),
Dependency::Relative { .. } => match &module {
Some(module) => module.resolve_dependency(self, dependency)?,
None => None,
},
};
if let Some(dependency_name) = dependency_name {
// TODO We may want to have different check functions for non-first-party
// files because we only need to index them, not check them.
// Supporting non-first-party code also requires supporting typing stubs.
if let Some(dependency) = resolve_module(self, dependency_name)? {
if dependency.path(self)?.root().kind().is_first_party() {
context.schedule_dependency(dependency.path(self)?.file());
}
}
}
}
}
let mut diagnostics = Vec::new();
if self.workspace().is_file_open(file) {
diagnostics.extend_from_slice(&lint_syntax(self, file)?);
diagnostics.extend_from_slice(&lint_semantic(self, file)?);
}
Ok(Diagnostics::from(diagnostics))
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ExecutionMode {
SingleThreaded,
ThreadPool,
}
/// Context that stores state information about the entire check operation.
struct CheckContext<'a> {
/// IDs of the files that have been queued for checking.
///
/// Used to avoid queuing the same file twice.
scheduled_files: FxHashSet<FileId>,
/// Reference to the program that is checked.
program: &'a Program,
/// The aggregated diagnostics
diagnostics: Vec<String>,
}
impl<'a> CheckContext<'a> {
fn new(program: &'a Program) -> Self {
Self {
scheduled_files: FxHashSet::default(),
program,
diagnostics: Vec::new(),
}
}
/// Returns the tasks to check all open files in the workspace.
fn check_open_files(&mut self) -> Vec<CheckOpenFileTask> {
self.scheduled_files
.extend(self.program.workspace().open_files());
self.program
.workspace()
.open_files()
.map(|file_id| CheckOpenFileTask { file_id })
.collect()
}
/// Returns the task to check a dependency.
fn check_dependency(&mut self, file_id: FileId) -> Option<CheckDependencyTask> {
if self.scheduled_files.insert(file_id) {
Some(CheckDependencyTask { file_id })
} else {
None
}
}
/// Pushes the result for a single file check operation
fn push_diagnostics(&mut self, diagnostics: &Diagnostics) {
self.diagnostics.extend_from_slice(diagnostics);
}
/// Returns a reference to the program that is being checked.
fn program(&self) -> &'a Program {
self.program
}
/// Creates a task context that is used to check a single file.
fn task_context<'b, S>(&self, dependency_scheduler: &'b S) -> CheckTaskContext<'a, 'b, S>
where
S: ScheduleDependency,
{
CheckTaskContext {
program: self.program,
dependency_scheduler,
}
}
fn finish(self) -> Vec<String> {
self.diagnostics
}
}
/// Trait that abstracts away how a dependency of a file gets scheduled for checking.
trait ScheduleDependency {
/// Schedules the file with the given ID for checking.
fn schedule(&self, file_id: FileId);
}
impl<T> ScheduleDependency for T
where
T: Fn(FileId),
{
fn schedule(&self, file_id: FileId) {
let f = self;
f(file_id);
}
}
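// Illustrative usage (hypothetical snippet): thanks to the blanket impl above,
// any plain closure can act as the scheduler, e.g.
//
//     let schedule = |file_id: FileId| sender.send(file_id).unwrap();
//     let task_context = context.task_context(&schedule);
//
// so callers don't need to define dedicated scheduler types.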
/// Context that is used to run a single file check task.
///
/// The task is generic over `S` because it is passed across thread boundaries and
/// we don't want to add the requirement that [`ScheduleDependency`] must be [`Send`].
struct CheckTaskContext<'a, 'scheduler, S>
where
S: ScheduleDependency,
{
dependency_scheduler: &'scheduler S,
program: &'a Program,
}
impl<'a, 'scheduler, S> CheckTaskContext<'a, 'scheduler, S>
where
S: ScheduleDependency,
{
fn as_file_context(&self) -> CheckFileContext<'scheduler> {
CheckFileContext {
dependency_scheduler: self.dependency_scheduler,
}
}
}
/// Context passed when checking a single file.
///
/// This is a trimmed down version of [`CheckTaskContext`] with the type parameter `S` erased
/// to avoid monomorphization of [`Program::check_file`].
struct CheckFileContext<'a> {
dependency_scheduler: &'a dyn ScheduleDependency,
}
impl<'a> CheckFileContext<'a> {
fn schedule_dependency(&self, file_id: FileId) {
self.dependency_scheduler.schedule(file_id);
}
}
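// Note: `as_file_context` above is where the generic parameter is erased. The
// `&'scheduler S` reference coerces to `&dyn ScheduleDependency`, so
// `Program::check_file` is compiled once rather than once per concrete `S`.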
#[derive(Debug)]
enum CheckFileTask {
OpenFile(CheckOpenFileTask),
Dependency(CheckDependencyTask),
}
impl CheckFileTask {
/// Runs the task and returns the results for checking this file.
fn run<S>(&self, context: &CheckTaskContext<S>) -> QueryResult<Diagnostics>
where
S: ScheduleDependency,
{
match self {
Self::OpenFile(task) => task.run(context),
Self::Dependency(task) => task.run(context),
}
}
fn file_id(&self) -> FileId {
match self {
CheckFileTask::OpenFile(task) => task.file_id,
CheckFileTask::Dependency(task) => task.file_id,
}
}
}
/// Task to check an open file.
#[derive(Debug)]
struct CheckOpenFileTask {
file_id: FileId,
}
impl CheckOpenFileTask {
fn run<S>(&self, context: &CheckTaskContext<S>) -> QueryResult<Diagnostics>
where
S: ScheduleDependency,
{
context
.program
.check_file(self.file_id, &context.as_file_context())
}
}
/// Task to check a dependency file.
#[derive(Debug)]
struct CheckDependencyTask {
file_id: FileId,
}
impl CheckDependencyTask {
fn run<S>(&self, context: &CheckTaskContext<S>) -> QueryResult<Diagnostics>
where
S: ScheduleDependency,
{
context
.program
.check_file(self.file_id, &context.as_file_context())
}
}
/// Executor that schedules the checking of individual program files.
trait CheckExecutor {
fn run(self, context: &mut CheckContext) -> QueryResult<()>;
}
/// Executor that runs all check operations on the current thread.
///
/// The executor does not schedule dependencies for checking.
/// The main motivation for scheduling dependencies
/// in a multithreaded environment is to parse and index the dependencies concurrently.
/// However, that doesn't make sense in a single-threaded environment, because checking the dependencies then competes
/// with checking the open files. Checking dependencies in a single-threaded environment is more likely
/// to hurt performance because we end up analyzing files in their entirety, even if we only need to type check parts of them.
#[derive(Debug, Default)]
struct SingleThreadedExecutor;
impl CheckExecutor for SingleThreadedExecutor {
fn run(self, context: &mut CheckContext) -> QueryResult<()> {
let mut queue = context.check_open_files();
let noop_schedule_dependency = |_| {};
while let Some(file) = queue.pop() {
context.program().cancelled()?;
let task_context = context.task_context(&noop_schedule_dependency);
context.push_diagnostics(&file.run(&task_context)?);
}
Ok(())
}
}
/// Executor that runs the check operations on a thread pool.
///
/// The executor runs each check operation as its own task using a thread pool.
///
/// Unlike [`SingleThreadedExecutor`], this executor schedules dependencies for checking. It
/// even schedules dependencies for checking when the thread pool size is 1 for a better debugging experience.
#[derive(Debug, Default)]
struct ThreadPoolExecutor;
impl CheckExecutor for ThreadPoolExecutor {
fn run(self, context: &mut CheckContext) -> QueryResult<()> {
let num_threads = current_num_threads();
let single_threaded = num_threads == 1;
let span = tracing::trace_span!("ThreadPoolExecutor::run", num_threads);
let _ = span.enter();
let mut queue: Vec<_> = context
.check_open_files()
.into_iter()
.map(CheckFileTask::OpenFile)
.collect();
let (sender, receiver) = if single_threaded {
// Use an unbounded queue for single threaded execution to prevent deadlocks
// when a single file schedules multiple dependencies.
crossbeam::channel::unbounded()
} else {
// Use a bounded queue to apply backpressure when the orchestration thread isn't able to keep
// up processing messages from the worker threads.
crossbeam::channel::bounded(num_threads)
};
let schedule_sender = sender.clone();
let schedule_dependency = move |file_id| {
schedule_sender
.send(ThreadPoolMessage::ScheduleDependency(file_id))
.unwrap();
};
let result = rayon::in_place_scope(|scope| {
let mut pending = 0usize;
loop {
context.program().cancelled()?;
// 1. Try to get a queued message first so that there is always remaining space in the channel, which prevents the worker threads from blocking.
// 2. Try to process a queued file
// 3. If there's no queued file, wait for the next incoming message.
// 4. Exit if there are no more messages and no senders.
let message = if let Ok(message) = receiver.try_recv() {
message
} else if let Some(task) = queue.pop() {
pending += 1;
let task_context = context.task_context(&schedule_dependency);
let sender = sender.clone();
let task_span = tracing::trace_span!(
parent: &span,
"CheckFileTask::run",
file_id = task.file_id().as_u32(),
);
scope.spawn(move |_| {
task_span.in_scope(|| match task.run(&task_context) {
Ok(result) => {
sender.send(ThreadPoolMessage::Completed(result)).unwrap();
}
Err(err) => sender.send(ThreadPoolMessage::Errored(err)).unwrap(),
});
});
// If this is a single threaded rayon thread pool, yield the current thread
// or we never start processing the work items.
if single_threaded {
yield_local();
}
continue;
} else if let Ok(message) = receiver.recv() {
message
} else {
break;
};
match message {
ThreadPoolMessage::ScheduleDependency(dependency) => {
if let Some(task) = context.check_dependency(dependency) {
queue.push(CheckFileTask::Dependency(task));
}
}
ThreadPoolMessage::Completed(diagnostics) => {
context.push_diagnostics(&diagnostics);
pending -= 1;
if pending == 0 && queue.is_empty() {
break;
}
}
ThreadPoolMessage::Errored(err) => {
return Err(err);
}
}
}
Ok(())
});
result
}
}
#[derive(Debug)]
enum ThreadPoolMessage {
ScheduleDependency(FileId),
Completed(Diagnostics),
Errored(QueryError),
}
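
The bounded-versus-unbounded channel choice above is what gives the executor backpressure: with a bounded crossbeam channel, `send` blocks once the channel holds `num_threads` messages, so worker threads pause whenever the orchestration loop falls behind. A minimal standalone demonstration of that property (assuming only the `crossbeam` crate, which this code already uses):

fn main() {
    // Capacity 2: the third `send` blocks until the receiver drains a message.
    let (sender, receiver) = crossbeam::channel::bounded::<u32>(2);

    let producer = std::thread::spawn(move || {
        for i in 0..5 {
            // Blocks while the channel is full, throttling this "worker".
            sender.send(i).unwrap();
        }
        // `sender` is dropped here, which closes the channel below.
    });

    for message in receiver {
        println!("processed {message}");
    }
    producer.join().unwrap();
}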


@@ -0,0 +1,275 @@
use std::collections::hash_map::Entry;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use rustc_hash::FxHashMap;
use crate::db::{
Database, Db, DbRuntime, DbWithJar, HasJar, HasJars, JarsStorage, LintDb, LintJar,
ParallelDatabase, QueryResult, SemanticDb, SemanticJar, Snapshot, SourceDb, SourceJar, Upcast,
};
use crate::files::{FileId, Files};
use crate::Workspace;
pub mod check;
#[derive(Debug)]
pub struct Program {
jars: JarsStorage<Program>,
files: Files,
workspace: Workspace,
}
impl Program {
pub fn new(workspace: Workspace) -> Self {
Self {
jars: JarsStorage::default(),
files: Files::default(),
workspace,
}
}
pub fn apply_changes<I>(&mut self, changes: I)
where
I: IntoIterator<Item = FileWatcherChange>,
{
let mut aggregated_changes = AggregatedChanges::default();
aggregated_changes.extend(changes.into_iter().map(|change| FileChange {
id: self.files.intern(&change.path),
kind: change.kind,
}));
let (source, semantic, lint) = self.jars_mut();
for change in aggregated_changes.iter() {
semantic.module_resolver.remove_module(change.id);
semantic.symbol_tables.remove(&change.id);
source.sources.remove(&change.id);
source.parsed.remove(&change.id);
// TODO: remove all dependent modules as well
semantic.type_store.remove_module(change.id);
lint.lint_syntax.remove(&change.id);
lint.lint_semantic.remove(&change.id);
}
}
pub fn files(&self) -> &Files {
&self.files
}
pub fn workspace(&self) -> &Workspace {
&self.workspace
}
pub fn workspace_mut(&mut self) -> &mut Workspace {
&mut self.workspace
}
}
impl SourceDb for Program {
fn file_id(&self, path: &Path) -> FileId {
self.files.intern(path)
}
fn file_path(&self, file_id: FileId) -> Arc<Path> {
self.files.path(file_id)
}
}
impl DbWithJar<SourceJar> for Program {}
impl SemanticDb for Program {}
impl DbWithJar<SemanticJar> for Program {}
impl LintDb for Program {}
impl DbWithJar<LintJar> for Program {}
impl Upcast<dyn SemanticDb> for Program {
fn upcast(&self) -> &(dyn SemanticDb + 'static) {
self
}
}
impl Upcast<dyn SourceDb> for Program {
fn upcast(&self) -> &(dyn SourceDb + 'static) {
self
}
}
impl Upcast<dyn LintDb> for Program {
fn upcast(&self) -> &(dyn LintDb + 'static) {
self
}
}
impl Db for Program {}
impl Database for Program {
fn runtime(&self) -> &DbRuntime {
self.jars.runtime()
}
fn runtime_mut(&mut self) -> &mut DbRuntime {
self.jars.runtime_mut()
}
}
impl ParallelDatabase for Program {
fn snapshot(&self) -> Snapshot<Self> {
Snapshot::new(Self {
jars: self.jars.snapshot(),
files: self.files.snapshot(),
workspace: self.workspace.clone(),
})
}
}
impl HasJars for Program {
type Jars = (SourceJar, SemanticJar, LintJar);
fn jars(&self) -> QueryResult<&Self::Jars> {
self.jars.jars()
}
fn jars_mut(&mut self) -> &mut Self::Jars {
self.jars.jars_mut()
}
}
impl HasJar<SourceJar> for Program {
fn jar(&self) -> QueryResult<&SourceJar> {
Ok(&self.jars()?.0)
}
fn jar_mut(&mut self) -> &mut SourceJar {
&mut self.jars_mut().0
}
}
impl HasJar<SemanticJar> for Program {
fn jar(&self) -> QueryResult<&SemanticJar> {
Ok(&self.jars()?.1)
}
fn jar_mut(&mut self) -> &mut SemanticJar {
&mut self.jars_mut().1
}
}
impl HasJar<LintJar> for Program {
fn jar(&self) -> QueryResult<&LintJar> {
Ok(&self.jars()?.2)
}
fn jar_mut(&mut self) -> &mut LintJar {
&mut self.jars_mut().2
}
}
#[derive(Clone, Debug)]
pub struct FileWatcherChange {
path: PathBuf,
kind: FileChangeKind,
}
impl FileWatcherChange {
pub fn new(path: PathBuf, kind: FileChangeKind) -> Self {
Self { path, kind }
}
}
#[derive(Copy, Clone, Debug)]
struct FileChange {
id: FileId,
kind: FileChangeKind,
}
impl FileChange {
fn file_id(self) -> FileId {
self.id
}
fn kind(self) -> FileChangeKind {
self.kind
}
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum FileChangeKind {
Created,
Modified,
Deleted,
}
#[derive(Default, Debug)]
struct AggregatedChanges {
changes: FxHashMap<FileId, FileChangeKind>,
}
impl AggregatedChanges {
fn add(&mut self, change: FileChange) {
match self.changes.entry(change.file_id()) {
Entry::Occupied(mut entry) => {
let merged = entry.get_mut();
match (merged, change.kind()) {
(FileChangeKind::Created, FileChangeKind::Deleted) => {
// Deletion after creation means that ruff never saw the file.
entry.remove();
}
(FileChangeKind::Created, FileChangeKind::Modified) => {
// No-op: for ruff, modifying a file that it doesn't yet know exists is still considered a creation.
}
(FileChangeKind::Modified, FileChangeKind::Created) => {
// Uhh, that should probably not happen. Continue considering it a modification.
}
(FileChangeKind::Modified, FileChangeKind::Deleted) => {
*entry.get_mut() = FileChangeKind::Deleted;
}
(FileChangeKind::Deleted, FileChangeKind::Created) => {
*entry.get_mut() = FileChangeKind::Modified;
}
(FileChangeKind::Deleted, FileChangeKind::Modified) => {
// That's weird, but let's consider it a modification.
*entry.get_mut() = FileChangeKind::Modified;
}
(FileChangeKind::Created, FileChangeKind::Created)
| (FileChangeKind::Modified, FileChangeKind::Modified)
| (FileChangeKind::Deleted, FileChangeKind::Deleted) => {
// No-op transitions. Some of them should be impossible but we handle them anyway.
}
}
}
Entry::Vacant(entry) => {
entry.insert(change.kind());
}
}
}
fn extend<I>(&mut self, changes: I)
where
I: IntoIterator<Item = FileChange>,
{
let iter = changes.into_iter();
let (lower, _) = iter.size_hint();
self.changes.reserve(lower);
for change in iter {
self.add(change);
}
}
fn iter(&self) -> impl Iterator<Item = FileChange> + '_ {
self.changes.iter().map(|(id, kind)| FileChange {
id: *id,
kind: *kind,
})
}
}
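
The merge table in `add` above is easiest to sanity-check in isolation. The following is a simplified, self-contained mirror of the same collapsing rules (a sketch, not the crate's actual types):

#[derive(Copy, Clone, Debug, PartialEq)]
enum Kind {
    Created,
    Modified,
    Deleted,
}

/// Collapses two consecutive events for the same file. `None` means the entry
/// can be dropped entirely (created and deleted before ruff ever saw it).
fn merge(old: Kind, new: Kind) -> Option<Kind> {
    use Kind::*;
    match (old, new) {
        (Created, Deleted) => None,
        (Created, Modified) => Some(Created),
        (Modified, Deleted) => Some(Deleted),
        (Deleted, Created) => Some(Modified),
        (Deleted, Modified) => Some(Modified),
        // `(Modified, Created)` and the identical-kind pairs stay as-is.
        (old, _) => Some(old),
    }
}

fn main() {
    assert_eq!(merge(Kind::Deleted, Kind::Created), Some(Kind::Modified));
    assert_eq!(merge(Kind::Created, Kind::Deleted), None);
}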


@@ -0,0 +1,95 @@
use std::ops::{Deref, DerefMut};
use std::sync::Arc;
use ruff_notebook::Notebook;
use ruff_python_ast::PySourceType;
use crate::cache::KeyValueCache;
use crate::db::{QueryResult, SourceDb};
use crate::files::FileId;
#[tracing::instrument(level = "debug", skip(db))]
pub(crate) fn source_text(db: &dyn SourceDb, file_id: FileId) -> QueryResult<Source> {
let jar = db.jar()?;
let sources = &jar.sources;
sources.get(&file_id, |file_id| {
let path = db.file_path(*file_id);
let source_text = std::fs::read_to_string(&path).unwrap_or_else(|err| {
tracing::error!("Failed to read file '{path:?}': {err}. Falling back to empty text");
String::new()
});
let python_ty = PySourceType::from(&path);
let kind = match python_ty {
PySourceType::Python => SourceKind::Python(Arc::from(source_text)),
PySourceType::Stub => SourceKind::Stub(Arc::from(source_text)),
PySourceType::Ipynb => {
let notebook = Notebook::from_source_code(&source_text).unwrap_or_else(|err| {
// TODO should this be changed to never fail?
// or should we instead add a diagnostic somewhere? But what would we return in this case?
tracing::error!(
"Failed to parse notebook '{path:?}: {err}'. Falling back to an empty notebook"
);
Notebook::from_source_code("").unwrap()
});
SourceKind::IpyNotebook(Arc::new(notebook))
}
};
Ok(Source { kind })
})
}
#[derive(Debug, Clone, PartialEq)]
pub enum SourceKind {
Python(Arc<str>),
Stub(Arc<str>),
IpyNotebook(Arc<Notebook>),
}
#[derive(Debug, Clone, PartialEq)]
pub struct Source {
kind: SourceKind,
}
impl Source {
pub fn python<T: Into<Arc<str>>>(source: T) -> Self {
Self {
kind: SourceKind::Python(source.into()),
}
}
pub fn kind(&self) -> &SourceKind {
&self.kind
}
pub fn text(&self) -> &str {
match &self.kind {
SourceKind::Python(text) => text,
SourceKind::Stub(text) => text,
SourceKind::IpyNotebook(notebook) => notebook.source_code(),
}
}
}
#[derive(Debug, Default)]
pub struct SourceStorage(pub(crate) KeyValueCache<FileId, Source>);
impl Deref for SourceStorage {
type Target = KeyValueCache<FileId, Source>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for SourceStorage {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
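
For completeness, a short usage sketch of the `Source` API above: notebooks are stored as parsed `Notebook` values while Python and stub files keep their raw text, and `text()` papers over the difference (the input string is illustrative):

let source = Source::python("print('hi')\n");
assert!(matches!(source.kind(), SourceKind::Python(_)));
assert_eq!(source.text(), "print('hi')\n");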

File diff suppressed because it is too large.


@@ -0,0 +1,745 @@
#![allow(dead_code)]
use crate::ast_ids::NodeKey;
use crate::db::{QueryResult, SemanticDb, SemanticJar};
use crate::files::FileId;
use crate::symbols::{symbol_table, GlobalSymbolId, ScopeId, ScopeKind, SymbolId};
use crate::{FxDashMap, FxIndexSet, Name};
use ruff_index::{newtype_index, IndexVec};
use rustc_hash::FxHashMap;
pub(crate) mod infer;
pub(crate) use infer::{infer_definition_type, infer_symbol_type};
/// unique ID for a type
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum Type {
/// the dynamic or gradual type: a statically-unknown set of values
Any,
/// the empty set of values
Never,
/// unknown type (no annotation)
/// equivalent to Any, or to object in strict mode
Unknown,
/// name is not bound to any value
Unbound,
/// a specific function object
Function(FunctionTypeId),
/// a specific class object
Class(ClassTypeId),
/// the set of Python objects with the given class in their __class__'s method resolution order
Instance(ClassTypeId),
Union(UnionTypeId),
Intersection(IntersectionTypeId),
// TODO protocols, callable types, overloads, generics, type vars
}
impl Type {
fn display<'a>(&'a self, store: &'a TypeStore) -> DisplayType<'a> {
DisplayType { ty: self, store }
}
pub const fn is_unbound(&self) -> bool {
matches!(self, Type::Unbound)
}
pub const fn is_unknown(&self) -> bool {
matches!(self, Type::Unknown)
}
}
impl From<FunctionTypeId> for Type {
fn from(id: FunctionTypeId) -> Self {
Type::Function(id)
}
}
impl From<UnionTypeId> for Type {
fn from(id: UnionTypeId) -> Self {
Type::Union(id)
}
}
impl From<IntersectionTypeId> for Type {
fn from(id: IntersectionTypeId) -> Self {
Type::Intersection(id)
}
}
// TODO: currently calling `get_function` et al and holding on to the `FunctionTypeRef` will lock a
// shard of this dashmap, for as long as you hold the reference. This may be a problem. We could
// switch to having all the arenas hold Arc, or we could see if we can split up ModuleTypeStore,
// and/or give it inner mutability and finer-grained internal locking.
#[derive(Debug, Default)]
pub struct TypeStore {
modules: FxDashMap<FileId, ModuleTypeStore>,
}
impl TypeStore {
pub fn remove_module(&mut self, file_id: FileId) {
self.modules.remove(&file_id);
}
pub fn cache_symbol_type(&self, symbol: GlobalSymbolId, ty: Type) {
self.add_or_get_module(symbol.file_id)
.symbol_types
.insert(symbol.symbol_id, ty);
}
pub fn cache_node_type(&self, file_id: FileId, node_key: NodeKey, ty: Type) {
self.add_or_get_module(file_id)
.node_types
.insert(node_key, ty);
}
pub fn get_cached_symbol_type(&self, symbol: GlobalSymbolId) -> Option<Type> {
self.try_get_module(symbol.file_id)?
.symbol_types
.get(&symbol.symbol_id)
.copied()
}
pub fn get_cached_node_type(&self, file_id: FileId, node_key: &NodeKey) -> Option<Type> {
self.try_get_module(file_id)?
.node_types
.get(node_key)
.copied()
}
fn add_or_get_module(&self, file_id: FileId) -> ModuleStoreRefMut {
self.modules
.entry(file_id)
.or_insert_with(|| ModuleTypeStore::new(file_id))
}
fn get_module(&self, file_id: FileId) -> ModuleStoreRef {
self.try_get_module(file_id).expect("module should exist")
}
fn try_get_module(&self, file_id: FileId) -> Option<ModuleStoreRef> {
self.modules.get(&file_id)
}
fn add_function(
&self,
file_id: FileId,
name: &str,
symbol_id: SymbolId,
scope_id: ScopeId,
decorators: Vec<Type>,
) -> FunctionTypeId {
self.add_or_get_module(file_id)
.add_function(name, symbol_id, scope_id, decorators)
}
fn add_class(
&self,
file_id: FileId,
name: &str,
scope_id: ScopeId,
bases: Vec<Type>,
) -> ClassTypeId {
self.add_or_get_module(file_id)
.add_class(name, scope_id, bases)
}
fn add_union(&mut self, file_id: FileId, elems: &[Type]) -> UnionTypeId {
self.add_or_get_module(file_id).add_union(elems)
}
fn add_intersection(
&mut self,
file_id: FileId,
positive: &[Type],
negative: &[Type],
) -> IntersectionTypeId {
self.add_or_get_module(file_id)
.add_intersection(positive, negative)
}
fn get_function(&self, id: FunctionTypeId) -> FunctionTypeRef {
FunctionTypeRef {
module_store: self.get_module(id.file_id),
function_id: id.func_id,
}
}
fn get_class(&self, id: ClassTypeId) -> ClassTypeRef {
ClassTypeRef {
module_store: self.get_module(id.file_id),
class_id: id.class_id,
}
}
fn get_union(&self, id: UnionTypeId) -> UnionTypeRef {
UnionTypeRef {
module_store: self.get_module(id.file_id),
union_id: id.union_id,
}
}
fn get_intersection(&self, id: IntersectionTypeId) -> IntersectionTypeRef {
IntersectionTypeRef {
module_store: self.get_module(id.file_id),
intersection_id: id.intersection_id,
}
}
}
type ModuleStoreRef<'a> = dashmap::mapref::one::Ref<
'a,
FileId,
ModuleTypeStore,
std::hash::BuildHasherDefault<rustc_hash::FxHasher>,
>;
type ModuleStoreRefMut<'a> = dashmap::mapref::one::RefMut<
'a,
FileId,
ModuleTypeStore,
std::hash::BuildHasherDefault<rustc_hash::FxHasher>,
>;
#[derive(Debug)]
pub(crate) struct FunctionTypeRef<'a> {
module_store: ModuleStoreRef<'a>,
function_id: ModuleFunctionTypeId,
}
impl<'a> std::ops::Deref for FunctionTypeRef<'a> {
type Target = FunctionType;
fn deref(&self) -> &Self::Target {
self.module_store.get_function(self.function_id)
}
}
#[derive(Debug)]
pub(crate) struct ClassTypeRef<'a> {
module_store: ModuleStoreRef<'a>,
class_id: ModuleClassTypeId,
}
impl<'a> std::ops::Deref for ClassTypeRef<'a> {
type Target = ClassType;
fn deref(&self) -> &Self::Target {
self.module_store.get_class(self.class_id)
}
}
#[derive(Debug)]
pub(crate) struct UnionTypeRef<'a> {
module_store: ModuleStoreRef<'a>,
union_id: ModuleUnionTypeId,
}
impl<'a> std::ops::Deref for UnionTypeRef<'a> {
type Target = UnionType;
fn deref(&self) -> &Self::Target {
self.module_store.get_union(self.union_id)
}
}
#[derive(Debug)]
pub(crate) struct IntersectionTypeRef<'a> {
module_store: ModuleStoreRef<'a>,
intersection_id: ModuleIntersectionTypeId,
}
impl<'a> std::ops::Deref for IntersectionTypeRef<'a> {
type Target = IntersectionType;
fn deref(&self) -> &Self::Target {
self.module_store.get_intersection(self.intersection_id)
}
}
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub struct FunctionTypeId {
file_id: FileId,
func_id: ModuleFunctionTypeId,
}
impl FunctionTypeId {
fn function(self, db: &dyn SemanticDb) -> QueryResult<FunctionTypeRef> {
let jar: &SemanticJar = db.jar()?;
Ok(jar.type_store.get_function(self))
}
pub(crate) fn name(self, db: &dyn SemanticDb) -> QueryResult<Name> {
Ok(self.function(db)?.name().into())
}
pub(crate) fn global_symbol(self, db: &dyn SemanticDb) -> QueryResult<GlobalSymbolId> {
Ok(GlobalSymbolId {
file_id: self.file(),
symbol_id: self.symbol(db)?,
})
}
pub(crate) fn file(self) -> FileId {
self.file_id
}
pub(crate) fn symbol(self, db: &dyn SemanticDb) -> QueryResult<SymbolId> {
let FunctionType { symbol_id, .. } = *self.function(db)?;
Ok(symbol_id)
}
pub(crate) fn get_containing_class(
self,
db: &dyn SemanticDb,
) -> QueryResult<Option<ClassTypeId>> {
let table = symbol_table(db, self.file_id)?;
let FunctionType { symbol_id, .. } = *self.function(db)?;
let scope_id = symbol_id.symbol(&table).scope_id();
let scope = scope_id.scope(&table);
if !matches!(scope.kind(), ScopeKind::Class) {
return Ok(None);
};
let Some(def) = scope.definition() else {
return Ok(None);
};
let Some(symbol_id) = scope.defining_symbol() else {
return Ok(None);
};
let Type::Class(class) = infer_definition_type(
db,
GlobalSymbolId {
file_id: self.file_id,
symbol_id,
},
def,
)?
else {
return Ok(None);
};
Ok(Some(class))
}
pub(crate) fn has_decorator(
self,
db: &dyn SemanticDb,
decorator_symbol: GlobalSymbolId,
) -> QueryResult<bool> {
for deco_ty in self.function(db)?.decorators() {
let Type::Function(deco_func) = deco_ty else {
continue;
};
if deco_func.global_symbol(db)? == decorator_symbol {
return Ok(true);
}
}
Ok(false)
}
}
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub struct ClassTypeId {
file_id: FileId,
class_id: ModuleClassTypeId,
}
impl ClassTypeId {
fn class(self, db: &dyn SemanticDb) -> QueryResult<ClassTypeRef> {
let jar: &SemanticJar = db.jar()?;
Ok(jar.type_store.get_class(self))
}
pub(crate) fn name(self, db: &dyn SemanticDb) -> QueryResult<Name> {
Ok(self.class(db)?.name().into())
}
pub(crate) fn get_super_class_member(
self,
db: &dyn SemanticDb,
name: &Name,
) -> QueryResult<Option<Type>> {
// TODO we should linearize the MRO instead of doing this recursively
let class = self.class(db)?;
for base in class.bases() {
if let Type::Class(base) = base {
if let Some(own_member) = base.get_own_class_member(db, name)? {
return Ok(Some(own_member));
}
if let Some(base_member) = base.get_super_class_member(db, name)? {
return Ok(Some(base_member));
}
}
}
Ok(None)
}
fn get_own_class_member(self, db: &dyn SemanticDb, name: &Name) -> QueryResult<Option<Type>> {
// TODO: this should distinguish instance-only members (e.g. `x: int`) and not return them
let ClassType { scope_id, .. } = *self.class(db)?;
let table = symbol_table(db, self.file_id)?;
if let Some(symbol_id) = table.symbol_id_by_name(scope_id, name) {
Ok(Some(infer_symbol_type(
db,
GlobalSymbolId {
file_id: self.file_id,
symbol_id,
},
)?))
} else {
Ok(None)
}
}
// TODO: get_own_instance_member, get_class_member, get_instance_member
}
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub struct UnionTypeId {
file_id: FileId,
union_id: ModuleUnionTypeId,
}
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub struct IntersectionTypeId {
file_id: FileId,
intersection_id: ModuleIntersectionTypeId,
}
#[newtype_index]
struct ModuleFunctionTypeId;
#[newtype_index]
struct ModuleClassTypeId;
#[newtype_index]
struct ModuleUnionTypeId;
#[newtype_index]
struct ModuleIntersectionTypeId;
#[derive(Debug)]
struct ModuleTypeStore {
file_id: FileId,
/// arena of all function types defined in this module
functions: IndexVec<ModuleFunctionTypeId, FunctionType>,
/// arena of all class types defined in this module
classes: IndexVec<ModuleClassTypeId, ClassType>,
/// arena of all union types created in this module
unions: IndexVec<ModuleUnionTypeId, UnionType>,
/// arena of all intersection types created in this module
intersections: IndexVec<ModuleIntersectionTypeId, IntersectionType>,
/// cached types of symbols in this module
symbol_types: FxHashMap<SymbolId, Type>,
/// cached types of AST nodes in this module
node_types: FxHashMap<NodeKey, Type>,
}
impl ModuleTypeStore {
fn new(file_id: FileId) -> Self {
Self {
file_id,
functions: IndexVec::default(),
classes: IndexVec::default(),
unions: IndexVec::default(),
intersections: IndexVec::default(),
symbol_types: FxHashMap::default(),
node_types: FxHashMap::default(),
}
}
fn add_function(
&mut self,
name: &str,
symbol_id: SymbolId,
scope_id: ScopeId,
decorators: Vec<Type>,
) -> FunctionTypeId {
let func_id = self.functions.push(FunctionType {
name: Name::new(name),
symbol_id,
scope_id,
decorators,
});
FunctionTypeId {
file_id: self.file_id,
func_id,
}
}
fn add_class(&mut self, name: &str, scope_id: ScopeId, bases: Vec<Type>) -> ClassTypeId {
let class_id = self.classes.push(ClassType {
name: Name::new(name),
scope_id,
// TODO: if no bases are given, that should imply [object]
bases,
});
ClassTypeId {
file_id: self.file_id,
class_id,
}
}
fn add_union(&mut self, elems: &[Type]) -> UnionTypeId {
let union_id = self.unions.push(UnionType {
elements: elems.iter().copied().collect(),
});
UnionTypeId {
file_id: self.file_id,
union_id,
}
}
fn add_intersection(&mut self, positive: &[Type], negative: &[Type]) -> IntersectionTypeId {
let intersection_id = self.intersections.push(IntersectionType {
positive: positive.iter().copied().collect(),
negative: negative.iter().copied().collect(),
});
IntersectionTypeId {
file_id: self.file_id,
intersection_id,
}
}
fn get_function(&self, func_id: ModuleFunctionTypeId) -> &FunctionType {
&self.functions[func_id]
}
fn get_class(&self, class_id: ModuleClassTypeId) -> &ClassType {
&self.classes[class_id]
}
fn get_union(&self, union_id: ModuleUnionTypeId) -> &UnionType {
&self.unions[union_id]
}
fn get_intersection(&self, intersection_id: ModuleIntersectionTypeId) -> &IntersectionType {
&self.intersections[intersection_id]
}
}
#[derive(Copy, Clone, Debug)]
struct DisplayType<'a> {
ty: &'a Type,
store: &'a TypeStore,
}
impl std::fmt::Display for DisplayType<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self.ty {
Type::Any => f.write_str("Any"),
Type::Never => f.write_str("Never"),
Type::Unknown => f.write_str("Unknown"),
Type::Unbound => f.write_str("Unbound"),
// TODO functions and classes should display using a fully qualified name
Type::Class(class_id) => {
f.write_str("Literal[")?;
f.write_str(self.store.get_class(*class_id).name())?;
f.write_str("]")
}
Type::Instance(class_id) => f.write_str(self.store.get_class(*class_id).name()),
Type::Function(func_id) => f.write_str(self.store.get_function(*func_id).name()),
Type::Union(union_id) => self
.store
.get_module(union_id.file_id)
.get_union(union_id.union_id)
.display(f, self.store),
Type::Intersection(int_id) => self
.store
.get_module(int_id.file_id)
.get_intersection(int_id.intersection_id)
.display(f, self.store),
}
}
}
#[derive(Debug)]
pub(crate) struct ClassType {
/// Name of the class at definition
name: Name,
/// `ScopeId` of the class body
scope_id: ScopeId,
/// Types of all class bases
bases: Vec<Type>,
}
impl ClassType {
fn name(&self) -> &str {
self.name.as_str()
}
fn bases(&self) -> &[Type] {
self.bases.as_slice()
}
}
#[derive(Debug)]
pub(crate) struct FunctionType {
/// name of the function at definition
name: Name,
/// symbol which this function is a definition of
symbol_id: SymbolId,
/// scope of this function's body
scope_id: ScopeId,
/// types of all decorators on this function
decorators: Vec<Type>,
}
impl FunctionType {
fn name(&self) -> &str {
self.name.as_str()
}
fn scope_id(&self) -> ScopeId {
self.scope_id
}
pub(crate) fn decorators(&self) -> &[Type] {
self.decorators.as_slice()
}
}
#[derive(Debug)]
pub(crate) struct UnionType {
// the union type includes values in any of these types
elements: FxIndexSet<Type>,
}
impl UnionType {
fn display(&self, f: &mut std::fmt::Formatter<'_>, store: &TypeStore) -> std::fmt::Result {
f.write_str("(")?;
let mut first = true;
for ty in &self.elements {
if !first {
f.write_str(" | ")?;
};
first = false;
write!(f, "{}", ty.display(store))?;
}
f.write_str(")")
}
}
// Negation types aren't expressible in annotations, and are most likely to arise from type
// narrowing along with intersections (e.g. `if not isinstance(...)`), so we represent them
// directly in intersections rather than as a separate type. This sacrifices some efficiency in the
// case where a Not appears outside an intersection (unclear when that could even happen, but we'd
// have to represent it as a single-element intersection if it did) in exchange for better
// efficiency in the within-intersection case.
#[derive(Debug)]
pub(crate) struct IntersectionType {
// the intersection type includes only values in all of these types
positive: FxIndexSet<Type>,
// the intersection type does not include any value in any of these types
negative: FxIndexSet<Type>,
}
impl IntersectionType {
fn display(&self, f: &mut std::fmt::Formatter<'_>, store: &TypeStore) -> std::fmt::Result {
f.write_str("(")?;
let mut first = true;
for (neg, ty) in self
.positive
.iter()
.map(|ty| (false, ty))
.chain(self.negative.iter().map(|ty| (true, ty)))
{
if !first {
f.write_str(" & ")?;
};
first = false;
if neg {
f.write_str("~")?;
};
write!(f, "{}", ty.display(store))?;
}
f.write_str(")")
}
}
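// Illustrative example (hypothetical code): narrowing `x: C1` with
// `if not isinstance(x, C3): ...` would be represented directly as the
// intersection `(C1 & ~C3)`, i.e. positive = {C1} and negative = {C3},
// instead of introducing a standalone negation type.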
#[cfg(test)]
mod tests {
use std::path::Path;
use crate::files::Files;
use crate::symbols::{SymbolFlags, SymbolTable};
use crate::types::{Type, TypeStore};
use crate::FxIndexSet;
#[test]
fn add_class() {
let store = TypeStore::default();
let files = Files::default();
let file_id = files.intern(Path::new("/foo"));
let id = store.add_class(file_id, "C", SymbolTable::root_scope_id(), Vec::new());
assert_eq!(store.get_class(id).name(), "C");
let inst = Type::Instance(id);
assert_eq!(format!("{}", inst.display(&store)), "C");
}
#[test]
fn add_function() {
let store = TypeStore::default();
let files = Files::default();
let file_id = files.intern(Path::new("/foo"));
let mut table = SymbolTable::new();
let func_symbol = table.add_or_update_symbol(
SymbolTable::root_scope_id(),
"func",
SymbolFlags::IS_DEFINED,
);
let id = store.add_function(
file_id,
"func",
func_symbol,
SymbolTable::root_scope_id(),
vec![Type::Unknown],
);
assert_eq!(store.get_function(id).name(), "func");
assert_eq!(store.get_function(id).decorators(), vec![Type::Unknown]);
let func = Type::Function(id);
assert_eq!(format!("{}", func.display(&store)), "func");
}
#[test]
fn add_union() {
let mut store = TypeStore::default();
let files = Files::default();
let file_id = files.intern(Path::new("/foo"));
let c1 = store.add_class(file_id, "C1", SymbolTable::root_scope_id(), Vec::new());
let c2 = store.add_class(file_id, "C2", SymbolTable::root_scope_id(), Vec::new());
let elems = vec![Type::Instance(c1), Type::Instance(c2)];
let id = store.add_union(file_id, &elems);
assert_eq!(
store.get_union(id).elements,
elems.into_iter().collect::<FxIndexSet<_>>()
);
let union = Type::Union(id);
assert_eq!(format!("{}", union.display(&store)), "(C1 | C2)");
}
#[test]
fn add_intersection() {
let mut store = TypeStore::default();
let files = Files::default();
let file_id = files.intern(Path::new("/foo"));
let c1 = store.add_class(file_id, "C1", SymbolTable::root_scope_id(), Vec::new());
let c2 = store.add_class(file_id, "C2", SymbolTable::root_scope_id(), Vec::new());
let c3 = store.add_class(file_id, "C3", SymbolTable::root_scope_id(), Vec::new());
let pos = vec![Type::Instance(c1), Type::Instance(c2)];
let neg = vec![Type::Instance(c3)];
let id = store.add_intersection(file_id, &pos, &neg);
assert_eq!(
store.get_intersection(id).positive,
pos.into_iter().collect::<FxIndexSet<_>>()
);
assert_eq!(
store.get_intersection(id).negative,
neg.into_iter().collect::<FxIndexSet<_>>()
);
let intersection = Type::Intersection(id);
assert_eq!(
format!("{}", intersection.display(&store)),
"(C1 & C2 & ~C3)"
);
}
}
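
The `DisplayType` wrapper used above is a general Rust pattern worth spelling out: `Type` is a plain copyable ID, so it cannot implement `Display` on its own; `display(&store)` instead returns a borrowing adapter that carries the arena needed to resolve names. A stripped-down, self-contained sketch of the pattern (illustrative names only):

use std::fmt;

struct Store {
    names: Vec<String>,
}

#[derive(Copy, Clone)]
struct Id(usize);

impl Id {
    fn display(self, store: &Store) -> DisplayId<'_> {
        DisplayId { id: self, store }
    }
}

struct DisplayId<'a> {
    id: Id,
    store: &'a Store,
}

impl fmt::Display for DisplayId<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.store.names[self.id.0])
    }
}

fn main() {
    let store = Store { names: vec!["C".to_string()] };
    println!("{}", Id(0).display(&store)); // prints "C"
}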


@@ -0,0 +1,292 @@
#![allow(dead_code)]
use ruff_python_ast as ast;
use ruff_python_ast::AstNode;
use crate::db::{QueryResult, SemanticDb, SemanticJar};
use crate::module::ModuleName;
use crate::parse::parse;
use crate::symbols::{
resolve_global_symbol, symbol_table, Definition, GlobalSymbolId, ImportFromDefinition,
};
use crate::types::Type;
use crate::FileId;
// FIXME: Figure out proper deadlock-free synchronization now that this takes `&db` instead of `&mut db`.
#[tracing::instrument(level = "trace", skip(db))]
pub fn infer_symbol_type(db: &dyn SemanticDb, symbol: GlobalSymbolId) -> QueryResult<Type> {
let symbols = symbol_table(db, symbol.file_id)?;
let defs = symbols.definitions(symbol.symbol_id);
let jar: &SemanticJar = db.jar()?;
if let Some(ty) = jar.type_store.get_cached_symbol_type(symbol) {
return Ok(ty);
}
// TODO handle multiple defs, conditional defs...
assert_eq!(defs.len(), 1);
let ty = infer_definition_type(db, symbol, defs[0].clone())?;
jar.type_store.cache_symbol_type(symbol, ty);
// TODO record dependencies
Ok(ty)
}
#[tracing::instrument(level = "trace", skip(db))]
pub fn infer_definition_type(
db: &dyn SemanticDb,
symbol: GlobalSymbolId,
definition: Definition,
) -> QueryResult<Type> {
let jar: &SemanticJar = db.jar()?;
let type_store = &jar.type_store;
let file_id = symbol.file_id;
match definition {
Definition::ImportFrom(ImportFromDefinition {
module,
name,
level,
}) => {
// TODO relative imports
assert!(matches!(level, 0));
let module_name = ModuleName::new(module.as_ref().expect("TODO relative imports"));
if let Some(remote_symbol) = resolve_global_symbol(db, module_name, &name)? {
infer_symbol_type(db, remote_symbol)
} else {
Ok(Type::Unknown)
}
}
Definition::ClassDef(node_key) => {
if let Some(ty) = type_store.get_cached_node_type(file_id, node_key.erased()) {
Ok(ty)
} else {
let parsed = parse(db.upcast(), file_id)?;
let ast = parsed.ast();
let table = symbol_table(db, file_id)?;
let node = node_key.resolve_unwrap(ast.as_any_node_ref());
let mut bases = Vec::with_capacity(node.bases().len());
for base in node.bases() {
bases.push(infer_expr_type(db, file_id, base)?);
}
let scope_id = table.scope_id_for_node(node_key.erased());
let ty = Type::Class(type_store.add_class(file_id, &node.name.id, scope_id, bases));
type_store.cache_node_type(file_id, *node_key.erased(), ty);
Ok(ty)
}
}
Definition::FunctionDef(node_key) => {
if let Some(ty) = type_store.get_cached_node_type(file_id, node_key.erased()) {
Ok(ty)
} else {
let parsed = parse(db.upcast(), file_id)?;
let ast = parsed.ast();
let table = symbol_table(db, file_id)?;
let node = node_key
.resolve(ast.as_any_node_ref())
.expect("node key should resolve");
let decorator_tys = node
.decorator_list
.iter()
.map(|decorator| infer_expr_type(db, file_id, &decorator.expression))
.collect::<QueryResult<_>>()?;
let scope_id = table.scope_id_for_node(node_key.erased());
let ty = type_store
.add_function(
file_id,
&node.name.id,
symbol.symbol_id,
scope_id,
decorator_tys,
)
.into();
type_store.cache_node_type(file_id, *node_key.erased(), ty);
Ok(ty)
}
}
Definition::Assignment(node_key) => {
let parsed = parse(db.upcast(), file_id)?;
let ast = parsed.ast();
let node = node_key.resolve_unwrap(ast.as_any_node_ref());
// TODO handle unpacking assignment correctly
infer_expr_type(db, file_id, &node.value)
}
_ => todo!("other kinds of definitions"),
}
}
fn infer_expr_type(db: &dyn SemanticDb, file_id: FileId, expr: &ast::Expr) -> QueryResult<Type> {
// TODO cache the resolution of the type on the node
let symbols = symbol_table(db, file_id)?;
match expr {
ast::Expr::Name(name) => {
// TODO look up in the correct scope, don't assume global
if let Some(symbol_id) = symbols.root_symbol_id_by_name(&name.id) {
infer_symbol_type(db, GlobalSymbolId { file_id, symbol_id })
} else {
Ok(Type::Unknown)
}
}
_ => todo!("full expression type resolution"),
}
}
#[cfg(test)]
mod tests {
use crate::db::tests::TestDb;
use crate::db::{HasJar, SemanticJar};
use crate::module::{
resolve_module, set_module_search_paths, ModuleName, ModuleSearchPath, ModuleSearchPathKind,
};
use crate::symbols::{symbol_table, GlobalSymbolId};
use crate::types::{infer_symbol_type, Type};
use crate::Name;
// TODO with virtual filesystem we shouldn't have to write files to disk for these
// tests
struct TestCase {
temp_dir: tempfile::TempDir,
db: TestDb,
src: ModuleSearchPath,
}
fn create_test() -> std::io::Result<TestCase> {
let temp_dir = tempfile::tempdir()?;
let src = temp_dir.path().join("src");
std::fs::create_dir(&src)?;
let src = ModuleSearchPath::new(src.canonicalize()?, ModuleSearchPathKind::FirstParty);
let roots = vec![src.clone()];
let mut db = TestDb::default();
set_module_search_paths(&mut db, roots);
Ok(TestCase { temp_dir, db, src })
}
#[test]
fn follow_import_to_class() -> anyhow::Result<()> {
let case = create_test()?;
let db = &case.db;
let a_path = case.src.path().join("a.py");
let b_path = case.src.path().join("b.py");
std::fs::write(a_path, "from b import C as D; E = D")?;
std::fs::write(b_path, "class C: pass")?;
let a_file = resolve_module(db, ModuleName::new("a"))?
.expect("module should be found")
.path(db)?
.file();
let a_syms = symbol_table(db, a_file)?;
let e_sym = a_syms
.root_symbol_id_by_name("E")
.expect("E symbol should be found");
let ty = infer_symbol_type(
db,
GlobalSymbolId {
file_id: a_file,
symbol_id: e_sym,
},
)?;
let jar = HasJar::<SemanticJar>::jar(db)?;
assert!(matches!(ty, Type::Class(_)));
assert_eq!(format!("{}", ty.display(&jar.type_store)), "Literal[C]");
Ok(())
}
#[test]
fn resolve_base_class_by_name() -> anyhow::Result<()> {
let case = create_test()?;
let db = &case.db;
let path = case.src.path().join("mod.py");
std::fs::write(path, "class Base: pass\nclass Sub(Base): pass")?;
let file = resolve_module(db, ModuleName::new("mod"))?
.expect("module should be found")
.path(db)?
.file();
let syms = symbol_table(db, file)?;
let sym = syms
.root_symbol_id_by_name("Sub")
.expect("Sub symbol should be found");
let ty = infer_symbol_type(
db,
GlobalSymbolId {
file_id: file,
symbol_id: sym,
},
)?;
let Type::Class(class_id) = ty else {
panic!("Sub is not a Class")
};
let jar = HasJar::<SemanticJar>::jar(db)?;
let base_names: Vec<_> = jar
.type_store
.get_class(class_id)
.bases()
.iter()
.map(|base_ty| format!("{}", base_ty.display(&jar.type_store)))
.collect();
assert_eq!(base_names, vec!["Literal[Base]"]);
Ok(())
}
#[test]
fn resolve_method() -> anyhow::Result<()> {
let case = create_test()?;
let db = &case.db;
let path = case.src.path().join("mod.py");
std::fs::write(path, "class C:\n def f(self): pass")?;
let file = resolve_module(db, ModuleName::new("mod"))?
.expect("module should be found")
.path(db)?
.file();
let syms = symbol_table(db, file)?;
let sym = syms
.root_symbol_id_by_name("C")
.expect("C symbol should be found");
let ty = infer_symbol_type(
db,
GlobalSymbolId {
file_id: file,
symbol_id: sym,
},
)?;
let Type::Class(class_id) = ty else {
panic!("C is not a Class");
};
let member_ty = class_id
.get_own_class_member(db, &Name::new("f"))
.expect("C.f to resolve");
let Some(Type::Function(func_id)) = member_ty else {
panic!("C.f is not a Function");
};
let jar = HasJar::<SemanticJar>::jar(db)?;
let function = jar.type_store.get_function(func_id);
assert_eq!(function.name(), "f");
Ok(())
}
}
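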


@@ -1,10 +1,10 @@
use std::path::Path;
use anyhow::Context;
use notify::event::{CreateKind, ModifyKind, RemoveKind};
use notify::event::{CreateKind, RemoveKind};
use notify::{recommended_watcher, Event, EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use ruff_db::system::{SystemPath, SystemPathBuf};
use crate::program::{FileChangeKind, FileWatcherChange};
pub struct FileWatcher {
watcher: RecommendedWatcher,
@@ -33,25 +33,12 @@ impl FileWatcher {
}
fn from_handler(handler: Box<dyn EventHandler>) -> anyhow::Result<Self> {
let watcher = recommended_watcher(move |event: notify::Result<Event>| {
match event {
let watcher = recommended_watcher(move |changes: notify::Result<Event>| {
match changes {
Ok(event) => {
// TODO verify that this handles all events correctly
let change_kind = match event.kind {
EventKind::Create(CreateKind::File) => FileChangeKind::Created,
EventKind::Modify(ModifyKind::Name(notify::event::RenameMode::From)) => {
FileChangeKind::Deleted
}
EventKind::Modify(ModifyKind::Name(notify::event::RenameMode::To)) => {
FileChangeKind::Created
}
EventKind::Modify(ModifyKind::Name(notify::event::RenameMode::Any)) => {
// TODO Introduce a better catch-all event for cases that we don't understand.
FileChangeKind::Created
}
EventKind::Modify(ModifyKind::Name(notify::event::RenameMode::Both)) => {
todo!("Handle both create and delete event.");
}
EventKind::Modify(_) => FileChangeKind::Modified,
EventKind::Remove(RemoveKind::File) => FileChangeKind::Deleted,
_ => {
@@ -62,9 +49,8 @@ impl FileWatcher {
let mut changes = Vec::new();
for path in event.paths {
if let Some(fs_path) = SystemPath::from_std_path(&path) {
changes
.push(FileWatcherChange::new(fs_path.to_path_buf(), change_kind));
if path.is_file() {
changes.push(FileWatcherChange::new(path, change_kind));
}
}
@@ -89,23 +75,3 @@ impl FileWatcher {
Ok(())
}
}
#[derive(Clone, Debug)]
pub struct FileWatcherChange {
pub path: SystemPathBuf,
#[allow(unused)]
pub kind: FileChangeKind,
}
impl FileWatcherChange {
pub fn new(path: SystemPathBuf, kind: FileChangeKind) -> Self {
Self { path, kind }
}
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum FileChangeKind {
Created,
Modified,
Deleted,
}
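
The watcher construction above is easier to follow outside the diff. A minimal, self-contained sketch of the same shape using the `notify` crate (path and timing are illustrative):

use notify::{recommended_watcher, Event, RecursiveMode, Watcher};
use std::path::Path;

fn main() -> notify::Result<()> {
    // The closure runs on notify's internal thread for every captured event.
    let mut watcher = recommended_watcher(|event: notify::Result<Event>| {
        if let Ok(event) = event {
            println!("{:?}: {:?}", event.kind, event.paths);
        }
    })?;
    watcher.watch(Path::new("."), RecursiveMode::Recursive)?;
    // Keep the watcher alive long enough to observe some events.
    std::thread::sleep(std::time::Duration::from_secs(5));
    Ok(())
}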


@@ -1,344 +0,0 @@
// TODO: Fix clippy warnings created by salsa macros
#![allow(clippy::used_underscore_binding)]
use std::{collections::BTreeMap, sync::Arc};
use rustc_hash::{FxBuildHasher, FxHashSet};
pub use metadata::{PackageMetadata, WorkspaceMetadata};
use ruff_db::{
files::{system_path_to_file, File},
system::{walk_directory::WalkState, SystemPath, SystemPathBuf},
};
use ruff_python_ast::{name::Name, PySourceType};
use crate::{
db::Db,
lint::{lint_semantic, lint_syntax, Diagnostics},
};
mod metadata;
/// The project workspace as a Salsa ingredient.
///
/// A workspace consists of one or multiple packages. Packages can be nested. A file in a workspace
/// belongs to at most one package (files can't belong to multiple packages).
///
/// How workspaces and packages are discovered is TBD. For now, a workspace can be any directory,
/// and it always contains a single package which has the same root as the workspace.
///
/// ## Examples
///
/// ```text
/// app-1/
/// pyproject.toml
/// src/
/// ... python files
///
/// app-2/
/// pyproject.toml
/// src/
/// ... python files
///
/// shared/
/// pyproject.toml
/// src/
/// ... python files
///
/// pyproject.toml
/// ```
///
/// The above project structure has three packages: `app-1`, `app-2`, and `shared`.
/// Each of the packages can define their own settings in their `pyproject.toml` file, but
/// they must be compatible. For example, each package can define a different `requires-python` range,
/// but the ranges must overlap.
///
/// ## How is a workspace different from a program?
/// There are two (related) motivations:
///
/// 1. Program is defined in `ruff_db` and it can't reference the settings types for the linter and formatter
/// without introducing a cyclic dependency. The workspace is defined in a higher level crate
/// where it can reference these setting types.
/// 2. Running `ruff check` with different target versions results in different programs (settings) but
/// it remains the same workspace. That's why a program is a narrowed view of the workspace,
/// holding on to only the most fundamental settings required for checking.
#[salsa::input]
pub struct Workspace {
#[id]
#[return_ref]
root_buf: SystemPathBuf,
/// The files that are open in the workspace.
///
/// Setting the open files to a non-`None` value changes `check` to only check the
/// open files rather than all files in the workspace.
#[return_ref]
open_file_set: Option<Arc<FxHashSet<File>>>,
/// The (first-party) packages in this workspace.
#[return_ref]
package_tree: BTreeMap<SystemPathBuf, Package>,
}
/// A first-party package in a workspace.
#[salsa::input]
pub struct Package {
#[return_ref]
pub name: Name,
/// The path to the root directory of the package.
#[id]
#[return_ref]
root_buf: SystemPathBuf,
/// The files that are part of this package.
#[return_ref]
file_set: Arc<FxHashSet<File>>,
// TODO: Add the loaded settings.
}
impl Workspace {
/// Creates a workspace from its discovered metadata.
pub fn from_metadata(db: &dyn Db, metadata: WorkspaceMetadata) -> Self {
let mut packages = BTreeMap::new();
for package in metadata.packages {
packages.insert(package.root.clone(), Package::from_metadata(db, package));
}
Workspace::new(db, metadata.root, None, packages)
}
pub fn root(self, db: &dyn Db) -> &SystemPath {
self.root_buf(db)
}
pub fn packages(self, db: &dyn Db) -> impl Iterator<Item = Package> + '_ {
self.package_tree(db).values().copied()
}
pub fn reload(self, db: &mut dyn Db, metadata: WorkspaceMetadata) {
assert_eq!(self.root(db), metadata.root());
let mut old_packages = self.package_tree(db).clone();
let mut new_packages = BTreeMap::new();
for package_metadata in metadata.packages {
let path = package_metadata.root().to_path_buf();
let package = if let Some(old_package) = old_packages.remove(&path) {
old_package.update(db, package_metadata);
old_package
} else {
Package::from_metadata(db, package_metadata)
};
new_packages.insert(path, package);
}
self.set_package_tree(db).to(new_packages);
}
pub fn update_package(self, db: &mut dyn Db, metadata: PackageMetadata) -> anyhow::Result<()> {
let path = metadata.root().to_path_buf();
if let Some(package) = self.package_tree(db).get(&path).copied() {
package.update(db, metadata);
Ok(())
} else {
Err(anyhow::anyhow!("Package {path} not found"))
}
}
/// Returns the closest package to which the first-party `path` belongs.
///
/// Returns `None` if `path` is outside of any package or if it isn't a first-party file
/// (e.g. a third-party dependency or an excluded file).
pub fn package(self, db: &dyn Db, path: &SystemPath) -> Option<Package> {
let packages = self.package_tree(db);
let (package_path, package) = packages.range(..=path.to_path_buf()).next_back()?;
if path.starts_with(package_path) {
Some(*package)
} else {
None
}
}
/// Checks all open files in the workspace and their dependencies, or every file in every package if no files are open.
#[tracing::instrument(level = "debug", skip_all)]
pub fn check(self, db: &dyn Db) -> Vec<String> {
let mut result = Vec::new();
if let Some(open_files) = self.open_files(db) {
for file in open_files {
result.extend_from_slice(&check_file(db, *file));
}
} else {
for package in self.packages(db) {
result.extend(package.check(db));
}
}
result
}
/// Opens a file in the workspace.
///
/// This changes the behavior of `check` to only check the open files rather than all files in the workspace.
#[tracing::instrument(level = "debug", skip(self, db))]
pub fn open_file(self, db: &mut dyn Db, file: File) {
let mut open_files = self.take_open_files(db);
open_files.insert(file);
self.set_open_files(db, open_files);
}
/// Closes a file in the workspace.
#[tracing::instrument(level = "debug", skip(self, db))]
pub fn close_file(self, db: &mut dyn Db, file: File) -> bool {
let mut open_files = self.take_open_files(db);
let removed = open_files.remove(&file);
if removed {
self.set_open_files(db, open_files);
}
removed
}
/// Returns the open files in the workspace or `None` if the entire workspace should be checked.
pub fn open_files(self, db: &dyn Db) -> Option<&FxHashSet<File>> {
self.open_file_set(db).as_deref()
}
/// Sets the open files in the workspace.
///
/// This changes the behavior of `check` to only check the open files rather than all files in the workspace.
#[tracing::instrument(level = "debug", skip(self, db))]
pub fn set_open_files(self, db: &mut dyn Db, open_files: FxHashSet<File>) {
self.set_open_file_set(db).to(Some(Arc::new(open_files)));
}
/// Takes the open files from the workspace and returns them.
///
/// This changes the behavior of `check` to check all files in the workspace instead of just the open files.
pub fn take_open_files(self, db: &mut dyn Db) -> FxHashSet<File> {
let open_files = self.open_file_set(db).clone();
if let Some(open_files) = open_files {
// Salsa will cancel any pending queries and remove its own reference to `open_files`
// so that the reference count of `open_files` drops to 1.
self.set_open_file_set(db).to(None);
Arc::try_unwrap(open_files).unwrap()
} else {
FxHashSet::default()
}
}
}
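The `package` lookup above leans on `BTreeMap` ordering: `range(..=path).next_back()` yields the greatest package root that sorts at or before `path`, and the `starts_with` check rejects roots that merely sort earlier without being ancestors. A minimal sketch of the same idea, with plain `String` keys standing in for `SystemPathBuf` (note that the real code's path-aware `starts_with` also avoids false prefixes such as `app-10` matching `app-1`, which this string version does not):

```rust
use std::collections::BTreeMap;

// Packages keyed by their root path; values are just names here.
fn package_for(packages: &BTreeMap<String, &'static str>, path: &str) -> Option<&'static str> {
    // Greatest key that is <= `path`: the only possible ancestor root.
    let (root, name) = packages.range(..=path.to_string()).next_back()?;
    // Reject keys that merely sort before `path` without being a prefix of it.
    path.starts_with(root.as_str()).then_some(*name)
}

fn main() {
    let mut packages = BTreeMap::new();
    packages.insert("app-1".to_string(), "app-1");
    packages.insert("app-2".to_string(), "app-2");
    packages.insert("shared".to_string(), "shared");

    assert_eq!(package_for(&packages, "app-1/src/main.py"), Some("app-1"));
    assert_eq!(package_for(&packages, "docs/readme.md"), None);
}
```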
impl Package {
pub fn root(self, db: &dyn Db) -> &SystemPath {
self.root_buf(db)
}
/// Returns `true` if `file` is a first-party file that belongs to this package.
pub fn contains_file(self, db: &dyn Db, file: File) -> bool {
self.files(db).contains(&file)
}
pub fn files(self, db: &dyn Db) -> &FxHashSet<File> {
self.file_set(db)
}
pub fn remove_file(self, db: &mut dyn Db, file: File) -> bool {
let mut files_arc = self.file_set(db).clone();
// Set a dummy value. Salsa will cancel any pending queries and remove its own reference to `files`
// so that the reference count of `files` drops to 1.
self.set_file_set(db).to(Arc::new(FxHashSet::default()));
let files = Arc::get_mut(&mut files_arc).unwrap();
let removed = files.remove(&file);
self.set_file_set(db).to(files_arc);
removed
}
pub(crate) fn check(self, db: &dyn Db) -> Vec<String> {
let mut result = Vec::new();
for file in self.files(db) {
let diagnostics = check_file(db, *file);
result.extend_from_slice(&diagnostics);
}
result
}
fn from_metadata(db: &dyn Db, metadata: PackageMetadata) -> Self {
let files = discover_package_files(db, metadata.root());
Self::new(db, metadata.name, metadata.root, Arc::new(files))
}
fn update(self, db: &mut dyn Db, metadata: PackageMetadata) {
let root = self.root(db);
assert_eq!(root, metadata.root());
let files = discover_package_files(db, root);
self.set_name(db).to(metadata.name);
self.set_file_set(db).to(Arc::new(files));
}
}
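`take_open_files` and `remove_file` above share the same `Arc` maneuver: clone the `Arc`, replace the Salsa input's reference with a placeholder so the local clone becomes the last reference, then mutate (or unwrap) it without copying the set. A minimal sketch with a plain struct standing in for the Salsa input:

```rust
use std::collections::HashSet;
use std::sync::Arc;

// Plain struct standing in for the Salsa input that owns the file set.
struct Owner {
    files: Arc<HashSet<&'static str>>,
}

fn remove_file(owner: &mut Owner, file: &str) -> bool {
    // Keep the data alive through a local clone (strong count: 2)...
    let mut files = Arc::clone(&owner.files);
    // ...then drop the owner's reference by installing a placeholder,
    // so the local clone is the last reference (strong count: 1).
    owner.files = Arc::new(HashSet::new());
    // `get_mut` succeeds because no other reference exists; no copy is made.
    let set = Arc::get_mut(&mut files).expect("last remaining reference");
    let removed = set.remove(file);
    // Hand the mutated set back to the owner.
    owner.files = files;
    removed
}

fn main() {
    let mut owner = Owner {
        files: Arc::new(HashSet::from(["a.py", "b.py"])),
    };
    assert!(remove_file(&mut owner, "a.py"));
    assert!(!owner.files.contains("a.py"));
}
```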
pub(super) fn check_file(db: &dyn Db, file: File) -> Diagnostics {
let mut diagnostics = Vec::new();
diagnostics.extend_from_slice(lint_syntax(db, file));
diagnostics.extend_from_slice(lint_semantic(db, file));
Diagnostics::from(diagnostics)
}
fn discover_package_files(db: &dyn Db, path: &SystemPath) -> FxHashSet<File> {
let paths = std::sync::Mutex::new(Vec::new());
db.system().walk_directory(path).run(|| {
Box::new(|entry| {
match entry {
Ok(entry) => {
// Skip over any non-Python files to avoid creating too many entries in `Files`.
if entry.file_type().is_file()
&& entry
.path()
.extension()
.and_then(PySourceType::try_from_extension)
.is_some()
{
let mut paths = paths.lock().unwrap();
paths.push(entry.into_path());
}
}
Err(error) => {
// TODO Handle error
tracing::error!("Failed to walk path: {error}");
}
}
WalkState::Continue
})
});
let paths = paths.into_inner().unwrap();
let mut files = FxHashSet::with_capacity_and_hasher(paths.len(), FxBuildHasher);
for path in paths {
// If this returns `None`, then the file was deleted between the `walk_directory` call and now.
// We can ignore this.
if let Some(file) = system_path_to_file(db.upcast(), &path) {
files.insert(file);
}
}
files
}

View File

@@ -1,68 +0,0 @@
use ruff_db::system::{System, SystemPath, SystemPathBuf};
use ruff_python_ast::name::Name;
#[derive(Debug)]
pub struct WorkspaceMetadata {
pub(super) root: SystemPathBuf,
/// The (first-party) packages in this workspace.
pub(super) packages: Vec<PackageMetadata>,
}
/// A first-party package in a workspace.
#[derive(Debug)]
pub struct PackageMetadata {
pub(super) name: Name,
/// The path to the root directory of the package.
pub(super) root: SystemPathBuf,
// TODO: Add the loaded package configuration (not the nested ruff settings)
}
impl WorkspaceMetadata {
/// Discovers the closest workspace at `path` and returns its metadata.
pub fn from_path(path: &SystemPath, system: &dyn System) -> anyhow::Result<WorkspaceMetadata> {
let root = if system.is_file(path) {
path.parent().unwrap().to_path_buf()
} else {
path.to_path_buf()
};
if !system.is_directory(&root) {
anyhow::bail!("no workspace found at {:?}", root);
}
// TODO: Discover package name from `pyproject.toml`.
let package_name: Name = root.file_name().unwrap_or("<root>").into();
let package = PackageMetadata {
name: package_name,
root: root.clone(),
};
let workspace = WorkspaceMetadata {
root,
packages: vec![package],
};
Ok(workspace)
}
pub fn root(&self) -> &SystemPath {
&self.root
}
pub fn packages(&self) -> &[PackageMetadata] {
&self.packages
}
}
impl PackageMetadata {
pub fn name(&self) -> &Name {
&self.name
}
pub fn root(&self) -> &SystemPath {
&self.root
}
}
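A small point worth spelling out from `from_path` above: a file path resolves to its parent directory, a directory is used as-is, and anything else is an error. A self-contained sketch of that resolution, using `std::path` in place of `SystemPath`:

```rust
use std::path::{Path, PathBuf};

/// Resolves the workspace root for `path`: files map to their parent
/// directory, directories are used as-is, anything else is an error.
fn workspace_root(path: &Path) -> Result<PathBuf, String> {
    let root = if path.is_file() {
        // `parent()` is only `None` for a bare root like `/`, which is never a file.
        path.parent().expect("a file always has a parent").to_path_buf()
    } else {
        path.to_path_buf()
    };
    if root.is_dir() {
        Ok(root)
    } else {
        Err(format!("no workspace found at {root:?}"))
    }
}

fn main() {
    // A directory resolves to itself.
    println!("{:?}", workspace_root(Path::new(".")));
}
```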

View File

@@ -0,0 +1 @@
2d33fe212221a05661c0db5215a91cf3d7b7f072

View File

@@ -34,9 +34,6 @@ _dummy_thread: 3.0-3.8
_dummy_threading: 3.0-3.8
_heapq: 3.0-
_imp: 3.0-
_interpchannels: 3.13-
_interpqueues: 3.13-
_interpreters: 3.13-
_json: 3.0-
_locale: 3.0-
_lsprof: 3.0-
@@ -68,9 +65,9 @@ array: 3.0-
ast: 3.0-
asynchat: 3.0-3.11
asyncio: 3.4-
asyncio.mixins: 3.10-
asyncio.exceptions: 3.8-
asyncio.format_helpers: 3.7-
asyncio.mixins: 3.10-
asyncio.runners: 3.7-
asyncio.staggered: 3.8-
asyncio.taskgroups: 3.11-
@@ -114,7 +111,6 @@ curses: 3.0-
dataclasses: 3.7-
datetime: 3.0-
dbm: 3.0-
dbm.sqlite3: 3.13-
decimal: 3.0-
difflib: 3.0-
dis: 3.0-
@@ -158,7 +154,6 @@ importlib: 3.0-
importlib._abc: 3.10-
importlib.metadata: 3.8-
importlib.metadata._meta: 3.10-
importlib.metadata.diagnose: 3.13-
importlib.readers: 3.10-
importlib.resources: 3.7-
importlib.resources.abc: 3.11-
@@ -171,7 +166,7 @@ ipaddress: 3.3-
itertools: 3.0-
json: 3.0-
keyword: 3.0-
lib2to3: 3.0-3.12
lib2to3: 3.0-
linecache: 3.0-
locale: 3.0-
logging: 3.0-
@@ -275,7 +270,6 @@ threading: 3.0-
time: 3.0-
timeit: 3.0-
tkinter: 3.0-
tkinter.tix: 3.0-3.12
token: 3.0-
tokenize: 3.0-
tomllib: 3.11-
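For context on the entries above: typeshed's `VERSIONS` file lists one module per line as `module: MIN-` or `module: MIN-MAX`, where `MIN` is the Python version in which the module was added and the optional `MAX` is the last version that ships it. A hypothetical parser for a single line (the helper names and the omission of trailing comments are simplifications, not typeshed's actual tooling):

```rust
// Parses a (MAJOR, MINOR) pair like "3.11".
fn parse_version(v: &str) -> Option<(u32, u32)> {
    let (major, minor) = v.split_once('.')?;
    Some((major.parse().ok()?, minor.parse().ok()?))
}

/// Parses a line like "asynchat: 3.0-3.11" into (module, added, removed-after).
fn parse_line(line: &str) -> Option<(&str, (u32, u32), Option<(u32, u32)>)> {
    let (module, range) = line.split_once(": ")?;
    let (min, max) = range.split_once('-')?;
    let max = if max.is_empty() { None } else { Some(parse_version(max)?) };
    Some((module, parse_version(min)?, max))
}

fn main() {
    assert_eq!(
        parse_line("asynchat: 3.0-3.11"),
        Some(("asynchat", (3, 0), Some((3, 11))))
    );
    assert_eq!(parse_line("asyncio: 3.4-"), Some(("asyncio", (3, 4), None)));
}
```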

View File

@@ -0,0 +1,591 @@
import sys
import typing_extensions
from typing import Any, ClassVar, Literal
PyCF_ONLY_AST: Literal[1024]
PyCF_TYPE_COMMENTS: Literal[4096]
PyCF_ALLOW_TOP_LEVEL_AWAIT: Literal[8192]
# Alias used for fields that must always be valid identifiers
# A string `x` counts as a valid identifier if both of the following are True
# (1) `x.isidentifier()` evaluates to `True`
# (2) `keyword.iskeyword(x)` evaluates to `False`
_Identifier: typing_extensions.TypeAlias = str
class AST:
if sys.version_info >= (3, 10):
__match_args__ = ()
_attributes: ClassVar[tuple[str, ...]]
_fields: ClassVar[tuple[str, ...]]
def __init__(self, *args: Any, **kwargs: Any) -> None: ...
# TODO: Not all nodes have all of the following attributes
lineno: int
col_offset: int
end_lineno: int | None
end_col_offset: int | None
type_comment: str | None
class mod(AST): ...
class type_ignore(AST): ...
class TypeIgnore(type_ignore):
if sys.version_info >= (3, 10):
__match_args__ = ("lineno", "tag")
tag: str
class FunctionType(mod):
if sys.version_info >= (3, 10):
__match_args__ = ("argtypes", "returns")
argtypes: list[expr]
returns: expr
class Module(mod):
if sys.version_info >= (3, 10):
__match_args__ = ("body", "type_ignores")
body: list[stmt]
type_ignores: list[TypeIgnore]
class Interactive(mod):
if sys.version_info >= (3, 10):
__match_args__ = ("body",)
body: list[stmt]
class Expression(mod):
if sys.version_info >= (3, 10):
__match_args__ = ("body",)
body: expr
class stmt(AST): ...
class FunctionDef(stmt):
if sys.version_info >= (3, 12):
__match_args__ = ("name", "args", "body", "decorator_list", "returns", "type_comment", "type_params")
elif sys.version_info >= (3, 10):
__match_args__ = ("name", "args", "body", "decorator_list", "returns", "type_comment")
name: _Identifier
args: arguments
body: list[stmt]
decorator_list: list[expr]
returns: expr | None
if sys.version_info >= (3, 12):
type_params: list[type_param]
class AsyncFunctionDef(stmt):
if sys.version_info >= (3, 12):
__match_args__ = ("name", "args", "body", "decorator_list", "returns", "type_comment", "type_params")
elif sys.version_info >= (3, 10):
__match_args__ = ("name", "args", "body", "decorator_list", "returns", "type_comment")
name: _Identifier
args: arguments
body: list[stmt]
decorator_list: list[expr]
returns: expr | None
if sys.version_info >= (3, 12):
type_params: list[type_param]
class ClassDef(stmt):
if sys.version_info >= (3, 12):
__match_args__ = ("name", "bases", "keywords", "body", "decorator_list", "type_params")
elif sys.version_info >= (3, 10):
__match_args__ = ("name", "bases", "keywords", "body", "decorator_list")
name: _Identifier
bases: list[expr]
keywords: list[keyword]
body: list[stmt]
decorator_list: list[expr]
if sys.version_info >= (3, 12):
type_params: list[type_param]
class Return(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("value",)
value: expr | None
class Delete(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("targets",)
targets: list[expr]
class Assign(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("targets", "value", "type_comment")
targets: list[expr]
value: expr
class AugAssign(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("target", "op", "value")
target: Name | Attribute | Subscript
op: operator
value: expr
class AnnAssign(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("target", "annotation", "value", "simple")
target: Name | Attribute | Subscript
annotation: expr
value: expr | None
simple: int
class For(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("target", "iter", "body", "orelse", "type_comment")
target: expr
iter: expr
body: list[stmt]
orelse: list[stmt]
class AsyncFor(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("target", "iter", "body", "orelse", "type_comment")
target: expr
iter: expr
body: list[stmt]
orelse: list[stmt]
class While(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("test", "body", "orelse")
test: expr
body: list[stmt]
orelse: list[stmt]
class If(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("test", "body", "orelse")
test: expr
body: list[stmt]
orelse: list[stmt]
class With(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("items", "body", "type_comment")
items: list[withitem]
body: list[stmt]
class AsyncWith(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("items", "body", "type_comment")
items: list[withitem]
body: list[stmt]
class Raise(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("exc", "cause")
exc: expr | None
cause: expr | None
class Try(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("body", "handlers", "orelse", "finalbody")
body: list[stmt]
handlers: list[ExceptHandler]
orelse: list[stmt]
finalbody: list[stmt]
if sys.version_info >= (3, 11):
class TryStar(stmt):
__match_args__ = ("body", "handlers", "orelse", "finalbody")
body: list[stmt]
handlers: list[ExceptHandler]
orelse: list[stmt]
finalbody: list[stmt]
class Assert(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("test", "msg")
test: expr
msg: expr | None
class Import(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("names",)
names: list[alias]
class ImportFrom(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("module", "names", "level")
module: str | None
names: list[alias]
level: int
class Global(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("names",)
names: list[_Identifier]
class Nonlocal(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("names",)
names: list[_Identifier]
class Expr(stmt):
if sys.version_info >= (3, 10):
__match_args__ = ("value",)
value: expr
class Pass(stmt): ...
class Break(stmt): ...
class Continue(stmt): ...
class expr(AST): ...
class BoolOp(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("op", "values")
op: boolop
values: list[expr]
class BinOp(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("left", "op", "right")
left: expr
op: operator
right: expr
class UnaryOp(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("op", "operand")
op: unaryop
operand: expr
class Lambda(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("args", "body")
args: arguments
body: expr
class IfExp(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("test", "body", "orelse")
test: expr
body: expr
orelse: expr
class Dict(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("keys", "values")
keys: list[expr | None]
values: list[expr]
class Set(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("elts",)
elts: list[expr]
class ListComp(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("elt", "generators")
elt: expr
generators: list[comprehension]
class SetComp(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("elt", "generators")
elt: expr
generators: list[comprehension]
class DictComp(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("key", "value", "generators")
key: expr
value: expr
generators: list[comprehension]
class GeneratorExp(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("elt", "generators")
elt: expr
generators: list[comprehension]
class Await(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("value",)
value: expr
class Yield(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("value",)
value: expr | None
class YieldFrom(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("value",)
value: expr
class Compare(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("left", "ops", "comparators")
left: expr
ops: list[cmpop]
comparators: list[expr]
class Call(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("func", "args", "keywords")
func: expr
args: list[expr]
keywords: list[keyword]
class FormattedValue(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("value", "conversion", "format_spec")
value: expr
conversion: int
format_spec: expr | None
class JoinedStr(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("values",)
values: list[expr]
class Constant(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("value", "kind")
value: Any # None, str, bytes, bool, int, float, complex, Ellipsis
kind: str | None
# Aliases for value, for backwards compatibility
s: Any
n: int | float | complex
class NamedExpr(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("target", "value")
target: Name
value: expr
class Attribute(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("value", "attr", "ctx")
value: expr
attr: _Identifier
ctx: expr_context
if sys.version_info >= (3, 9):
_Slice: typing_extensions.TypeAlias = expr
else:
class slice(AST): ...
_Slice: typing_extensions.TypeAlias = slice
class Slice(_Slice):
if sys.version_info >= (3, 10):
__match_args__ = ("lower", "upper", "step")
lower: expr | None
upper: expr | None
step: expr | None
if sys.version_info < (3, 9):
class ExtSlice(slice):
dims: list[slice]
class Index(slice):
value: expr
class Subscript(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("value", "slice", "ctx")
value: expr
slice: _Slice
ctx: expr_context
class Starred(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("value", "ctx")
value: expr
ctx: expr_context
class Name(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("id", "ctx")
id: _Identifier
ctx: expr_context
class List(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("elts", "ctx")
elts: list[expr]
ctx: expr_context
class Tuple(expr):
if sys.version_info >= (3, 10):
__match_args__ = ("elts", "ctx")
elts: list[expr]
ctx: expr_context
if sys.version_info >= (3, 9):
dims: list[expr]
class expr_context(AST): ...
if sys.version_info < (3, 9):
class AugLoad(expr_context): ...
class AugStore(expr_context): ...
class Param(expr_context): ...
class Suite(mod):
body: list[stmt]
class Del(expr_context): ...
class Load(expr_context): ...
class Store(expr_context): ...
class boolop(AST): ...
class And(boolop): ...
class Or(boolop): ...
class operator(AST): ...
class Add(operator): ...
class BitAnd(operator): ...
class BitOr(operator): ...
class BitXor(operator): ...
class Div(operator): ...
class FloorDiv(operator): ...
class LShift(operator): ...
class Mod(operator): ...
class Mult(operator): ...
class MatMult(operator): ...
class Pow(operator): ...
class RShift(operator): ...
class Sub(operator): ...
class unaryop(AST): ...
class Invert(unaryop): ...
class Not(unaryop): ...
class UAdd(unaryop): ...
class USub(unaryop): ...
class cmpop(AST): ...
class Eq(cmpop): ...
class Gt(cmpop): ...
class GtE(cmpop): ...
class In(cmpop): ...
class Is(cmpop): ...
class IsNot(cmpop): ...
class Lt(cmpop): ...
class LtE(cmpop): ...
class NotEq(cmpop): ...
class NotIn(cmpop): ...
class comprehension(AST):
if sys.version_info >= (3, 10):
__match_args__ = ("target", "iter", "ifs", "is_async")
target: expr
iter: expr
ifs: list[expr]
is_async: int
class excepthandler(AST): ...
class ExceptHandler(excepthandler):
if sys.version_info >= (3, 10):
__match_args__ = ("type", "name", "body")
type: expr | None
name: _Identifier | None
body: list[stmt]
class arguments(AST):
if sys.version_info >= (3, 10):
__match_args__ = ("posonlyargs", "args", "vararg", "kwonlyargs", "kw_defaults", "kwarg", "defaults")
posonlyargs: list[arg]
args: list[arg]
vararg: arg | None
kwonlyargs: list[arg]
kw_defaults: list[expr | None]
kwarg: arg | None
defaults: list[expr]
class arg(AST):
if sys.version_info >= (3, 10):
__match_args__ = ("arg", "annotation", "type_comment")
arg: _Identifier
annotation: expr | None
class keyword(AST):
if sys.version_info >= (3, 10):
__match_args__ = ("arg", "value")
arg: _Identifier | None
value: expr
class alias(AST):
if sys.version_info >= (3, 10):
__match_args__ = ("name", "asname")
name: str
asname: _Identifier | None
class withitem(AST):
if sys.version_info >= (3, 10):
__match_args__ = ("context_expr", "optional_vars")
context_expr: expr
optional_vars: expr | None
if sys.version_info >= (3, 10):
class Match(stmt):
__match_args__ = ("subject", "cases")
subject: expr
cases: list[match_case]
class pattern(AST): ...
# Without the alias, Pyright complains that variables named `pattern` are recursively defined
_Pattern: typing_extensions.TypeAlias = pattern
class match_case(AST):
__match_args__ = ("pattern", "guard", "body")
pattern: _Pattern
guard: expr | None
body: list[stmt]
class MatchValue(pattern):
__match_args__ = ("value",)
value: expr
class MatchSingleton(pattern):
__match_args__ = ("value",)
value: Literal[True, False] | None
class MatchSequence(pattern):
__match_args__ = ("patterns",)
patterns: list[pattern]
class MatchStar(pattern):
__match_args__ = ("name",)
name: _Identifier | None
class MatchMapping(pattern):
__match_args__ = ("keys", "patterns", "rest")
keys: list[expr]
patterns: list[pattern]
rest: _Identifier | None
class MatchClass(pattern):
__match_args__ = ("cls", "patterns", "kwd_attrs", "kwd_patterns")
cls: expr
patterns: list[pattern]
kwd_attrs: list[_Identifier]
kwd_patterns: list[pattern]
class MatchAs(pattern):
__match_args__ = ("pattern", "name")
pattern: _Pattern | None
name: _Identifier | None
class MatchOr(pattern):
__match_args__ = ("patterns",)
patterns: list[pattern]
if sys.version_info >= (3, 12):
class type_param(AST):
end_lineno: int
end_col_offset: int
class TypeVar(type_param):
__match_args__ = ("name", "bound")
name: _Identifier
bound: expr | None
class ParamSpec(type_param):
__match_args__ = ("name",)
name: _Identifier
class TypeVarTuple(type_param):
__match_args__ = ("name",)
name: _Identifier
class TypeAlias(stmt):
__match_args__ = ("name", "type_params", "value")
name: Name
type_params: list[type_param]
value: expr

View File

@@ -70,8 +70,6 @@ _VT_co = TypeVar("_VT_co", covariant=True) # Value type covariant containers.
@final
class dict_keys(KeysView[_KT_co], Generic[_KT_co, _VT_co]): # undocumented
def __eq__(self, value: object, /) -> bool: ...
if sys.version_info >= (3, 13):
def isdisjoint(self, other: Iterable[_KT_co], /) -> bool: ...
if sys.version_info >= (3, 10):
@property
def mapping(self) -> MappingProxyType[_KT_co, _VT_co]: ...
@@ -85,8 +83,6 @@ class dict_values(ValuesView[_VT_co], Generic[_KT_co, _VT_co]): # undocumented
@final
class dict_items(ItemsView[_KT_co, _VT_co]): # undocumented
def __eq__(self, value: object, /) -> bool: ...
if sys.version_info >= (3, 13):
def isdisjoint(self, other: Iterable[tuple[_KT_co, _VT_co]], /) -> bool: ...
if sys.version_info >= (3, 10):
@property
def mapping(self) -> MappingProxyType[_KT_co, _VT_co]: ...

View File

@@ -64,6 +64,7 @@ class _CData(metaclass=_CDataMeta):
# Structure.from_buffer(...) # valid at runtime
# Structure(...).from_buffer(...) # invalid at runtime
#
@classmethod
def from_buffer(cls, source: WriteableBuffer, offset: int = ...) -> Self: ...
@classmethod
@@ -99,8 +100,8 @@ class _Pointer(_PointerLike, _CData, Generic[_CT]):
def __getitem__(self, key: slice, /) -> list[Any]: ...
def __setitem__(self, key: int, value: Any, /) -> None: ...
def POINTER(type: type[_CT], /) -> type[_Pointer[_CT]]: ...
def pointer(obj: _CT, /) -> _Pointer[_CT]: ...
def POINTER(type: type[_CT]) -> type[_Pointer[_CT]]: ...
def pointer(arg: _CT, /) -> _Pointer[_CT]: ...
class _CArgObject: ...
@@ -200,11 +201,11 @@ class Array(_CData, Generic[_CT]):
# Sized and _CData prevents using _CDataMeta.
def __len__(self) -> int: ...
if sys.version_info >= (3, 9):
def __class_getitem__(cls, item: Any, /) -> GenericAlias: ...
def __class_getitem__(cls, item: Any) -> GenericAlias: ...
def addressof(obj: _CData, /) -> int: ...
def alignment(obj_or_type: _CData | type[_CData], /) -> int: ...
def addressof(obj: _CData) -> int: ...
def alignment(obj_or_type: _CData | type[_CData]) -> int: ...
def get_errno() -> int: ...
def resize(obj: _CData, size: int, /) -> None: ...
def set_errno(value: int, /) -> int: ...
def sizeof(obj_or_type: _CData | type[_CData], /) -> int: ...
def resize(obj: _CData, size: int) -> None: ...
def set_errno(value: int) -> int: ...
def sizeof(obj_or_type: _CData | type[_CData]) -> int: ...

View File

@@ -63,7 +63,8 @@ A_COLOR: int
A_DIM: int
A_HORIZONTAL: int
A_INVIS: int
A_ITALIC: int
if sys.platform != "darwin":
A_ITALIC: int
A_LEFT: int
A_LOW: int
A_NORMAL: int

View File

@@ -45,5 +45,5 @@ class make_scanner:
def __init__(self, context: make_scanner) -> None: ...
def __call__(self, string: str, index: int) -> tuple[Any, int]: ...
def encode_basestring_ascii(s: str, /) -> str: ...
def encode_basestring_ascii(s: str) -> str: ...
def scanstring(string: str, end: int, strict: bool = ...) -> tuple[str, int]: ...

View File

@@ -783,7 +783,7 @@ def ntohl(x: int, /) -> int: ... # param & ret val are 32-bit ints
def ntohs(x: int, /) -> int: ... # param & ret val are 16-bit ints
def htonl(x: int, /) -> int: ... # param & ret val are 32-bit ints
def htons(x: int, /) -> int: ... # param & ret val are 16-bit ints
def inet_aton(ip_addr: str, /) -> bytes: ... # ret val 4 bytes in length
def inet_aton(ip_string: str, /) -> bytes: ... # ret val 4 bytes in length
def inet_ntoa(packed_ip: ReadableBuffer, /) -> str: ...
def inet_pton(address_family: int, ip_string: str, /) -> bytes: ...
def inet_ntop(address_family: int, packed_ip: ReadableBuffer, /) -> str: ...
@@ -797,7 +797,7 @@ if sys.platform != "win32":
def socketpair(family: int = ..., type: int = ..., proto: int = ..., /) -> tuple[socket, socket]: ...
def if_nameindex() -> list[tuple[int, str]]: ...
def if_nametoindex(oname: str, /) -> int: ...
def if_nametoindex(name: str, /) -> int: ...
def if_indextoname(index: int, /) -> str: ...
CAPI: object

View File

@@ -64,19 +64,19 @@ UF_NODUMP: Literal[0x00000001]
UF_NOUNLINK: Literal[0x00000010]
UF_OPAQUE: Literal[0x00000008]
def S_IMODE(mode: int, /) -> int: ...
def S_IFMT(mode: int, /) -> int: ...
def S_ISBLK(mode: int, /) -> bool: ...
def S_ISCHR(mode: int, /) -> bool: ...
def S_ISDIR(mode: int, /) -> bool: ...
def S_ISDOOR(mode: int, /) -> bool: ...
def S_ISFIFO(mode: int, /) -> bool: ...
def S_ISLNK(mode: int, /) -> bool: ...
def S_ISPORT(mode: int, /) -> bool: ...
def S_ISREG(mode: int, /) -> bool: ...
def S_ISSOCK(mode: int, /) -> bool: ...
def S_ISWHT(mode: int, /) -> bool: ...
def filemode(mode: int, /) -> str: ...
def S_IMODE(mode: int) -> int: ...
def S_IFMT(mode: int) -> int: ...
def S_ISBLK(mode: int) -> bool: ...
def S_ISCHR(mode: int) -> bool: ...
def S_ISDIR(mode: int) -> bool: ...
def S_ISDOOR(mode: int) -> bool: ...
def S_ISFIFO(mode: int) -> bool: ...
def S_ISLNK(mode: int) -> bool: ...
def S_ISPORT(mode: int) -> bool: ...
def S_ISREG(mode: int) -> bool: ...
def S_ISSOCK(mode: int) -> bool: ...
def S_ISWHT(mode: int) -> bool: ...
def filemode(mode: int) -> str: ...
if sys.platform == "win32":
IO_REPARSE_TAG_SYMLINK: int
@@ -101,17 +101,3 @@ if sys.platform == "win32":
FILE_ATTRIBUTE_SYSTEM: Literal[4]
FILE_ATTRIBUTE_TEMPORARY: Literal[256]
FILE_ATTRIBUTE_VIRTUAL: Literal[65536]
if sys.version_info >= (3, 13):
SF_SETTABLE: Literal[0x3FFF0000]
# https://github.com/python/cpython/issues/114081#issuecomment-2119017790
# SF_RESTRICTED: Literal[0x00080000]
SF_FIRMLINK: Literal[0x00800000]
SF_DATALESS: Literal[0x40000000]
SF_SUPPORTED: Literal[0x9F0000]
SF_SYNTHETIC: Literal[0xC0000000]
UF_TRACKED: Literal[0x00000040]
UF_DATAVAULT: Literal[0x00000080]
UF_SETTABLE: Literal[0x0000FFFF]

View File

@@ -13,7 +13,7 @@ error = RuntimeError
def _count() -> int: ...
@final
class LockType:
def acquire(self, blocking: bool = True, timeout: float = -1) -> bool: ...
def acquire(self, blocking: bool = ..., timeout: float = ...) -> bool: ...
def release(self) -> None: ...
def locked(self) -> bool: ...
def __enter__(self) -> bool: ...
@@ -22,14 +22,14 @@ class LockType:
) -> None: ...
@overload
def start_new_thread(function: Callable[[Unpack[_Ts]], object], args: tuple[Unpack[_Ts]], /) -> int: ...
def start_new_thread(function: Callable[[Unpack[_Ts]], object], args: tuple[Unpack[_Ts]]) -> int: ...
@overload
def start_new_thread(function: Callable[..., object], args: tuple[Any, ...], kwargs: dict[str, Any], /) -> int: ...
def start_new_thread(function: Callable[..., object], args: tuple[Any, ...], kwargs: dict[str, Any]) -> int: ...
def interrupt_main() -> None: ...
def exit() -> NoReturn: ...
def allocate_lock() -> LockType: ...
def get_ident() -> int: ...
def stack_size(size: int = 0, /) -> int: ...
def stack_size(size: int = ...) -> int: ...
TIMEOUT_MAX: float

View File

@@ -1,7 +1,5 @@
import sys
from collections.abc import Callable
from typing import Any, ClassVar, Literal, final
from typing_extensions import TypeAlias
# _tkinter is meant to be only used internally by tkinter, but some tkinter
# functions e.g. return _tkinter.Tcl_Obj objects. Tcl_Obj represents a Tcl
@@ -32,8 +30,6 @@ class Tcl_Obj:
class TclError(Exception): ...
_TkinterTraceFunc: TypeAlias = Callable[[tuple[str, ...]], object]
# This class allows running Tcl code. Tkinter uses it internally a lot, and
# it's often handy to drop a piece of Tcl code into a tkinter program. Example:
#
@@ -90,9 +86,6 @@ class TkappType:
def unsetvar(self, *args, **kwargs): ...
def wantobjects(self, *args, **kwargs): ...
def willdispatch(self): ...
if sys.version_info >= (3, 12):
def gettrace(self, /) -> _TkinterTraceFunc | None: ...
def settrace(self, func: _TkinterTraceFunc | None, /) -> None: ...
# These should be kept in sync with tkinter.tix constants, except ALL_EVENTS which doesn't match TCL_ALL_EVENTS
ALL_EVENTS: Literal[-3]

View File

@@ -326,8 +326,6 @@ class structseq(Generic[_T_co]):
# but only has any meaning if you supply it a dict where the keys are strings.
# https://github.com/python/typeshed/pull/6560#discussion_r767149830
def __new__(cls: type[Self], sequence: Iterable[_T_co], dict: dict[str, Any] = ...) -> Self: ...
if sys.version_info >= (3, 13):
def __replace__(self: Self, **kwargs: Any) -> Self: ...
# Superset of typing.AnyStr that also includes LiteralString
AnyOrLiteralStr = TypeVar("AnyOrLiteralStr", str, bytes, LiteralString) # noqa: Y001

Some files were not shown because too many files have changed in this diff.