Compare commits


3 Commits

Author          SHA1          Message                                       Date
Micha Reiser    dc24d01b2e    Implicit string concat formatting             2024-02-14 17:54:12 +01:00
Micha Reiser    5a9d656bc4    Extract normalize into its own submodule      2024-02-14 17:22:45 +01:00
Micha Reiser    33184dc6a4    Extract AnyString nodes from string/mod       2024-02-14 17:14:28 +01:00
4315 changed files with 147998 additions and 339506 deletions


@@ -1,10 +1,3 @@
[alias]
dev = "run --package ruff_dev --bin ruff_dev"
benchmark = "bench -p ruff_benchmark --bench linter --bench formatter --"
# statically link the C runtime so the executable does not depend on
# that shared/dynamic library.
#
# See: https://github.com/astral-sh/ruff/issues/11503
[target.'cfg(all(target_env="msvc", target_os = "windows"))']
rustflags = ["-C", "target-feature=+crt-static"]
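
The `[target.'cfg(...)']` table above makes the static-CRT flag permanent for MSVC Windows builds. As a minimal sketch, the same flag can also be applied to a single build through the `RUSTFLAGS` environment variable (assuming the MSVC target toolchain is installed; this one-liner is illustrative and not part of the config file):

# Hypothetical one-off equivalent of the rustflags entry above
RUSTFLAGS="-C target-feature=+crt-static" cargo build --release --target x86_64-pc-windows-msvc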

.gitattributes (vendored, 8 lines changed)

@@ -2,17 +2,9 @@
crates/ruff_linter/resources/test/fixtures/isort/line_ending_crlf.py text eol=crlf
crates/ruff_linter/resources/test/fixtures/pycodestyle/W605_1.py text eol=crlf
crates/ruff_linter/resources/test/fixtures/pycodestyle/W391_2.py text eol=crlf
crates/ruff_linter/resources/test/fixtures/pycodestyle/W391_3.py text eol=crlf
crates/ruff_python_formatter/resources/test/fixtures/ruff/docstring_code_examples_crlf.py text eol=crlf
crates/ruff_python_formatter/tests/snapshots/format@docstring_code_examples_crlf.py.snap text eol=crlf
crates/ruff_python_parser/resources/invalid/re_lexing/line_continuation_windows_eol.py text eol=crlf
crates/ruff_python_parser/resources/invalid/re_lex_logical_token_windows_eol.py text eol=crlf
crates/ruff_python_parser/resources/invalid/re_lex_logical_token_mac_eol.py text eol=cr
crates/ruff_python_parser/resources/inline linguist-generated=true
ruff.schema.json linguist-generated=true text=auto eol=lf
*.md.snap linguist-language=Markdown

.github/CODEOWNERS (vendored, 16 lines changed)

@@ -5,17 +5,5 @@
# - The '*' pattern is global owners.
# - Order is important. The last matching pattern has the most precedence.
/crates/ruff_notebook/ @dhruvmanila
/crates/ruff_formatter/ @MichaReiser
/crates/ruff_python_formatter/ @MichaReiser
/crates/ruff_python_parser/ @MichaReiser @dhruvmanila
# flake8-pyi
/crates/ruff_linter/src/rules/flake8_pyi/ @AlexWaygood
# Script for fuzzing the parser
/scripts/fuzz-parser/ @AlexWaygood
# red-knot
/crates/red_knot* @carljm @MichaReiser @AlexWaygood
/crates/ruff_db/ @carljm @MichaReiser @AlexWaygood
# Jupyter
/crates/ruff_linter/src/jupyter/ @dhruvmanila


@@ -3,8 +3,6 @@ Thank you for taking the time to report an issue! We're glad to have you involve
If you're filing a bug report, please consider including the following information:
* List of keywords you searched for before creating this issue. Write them down here so that others can find this issue more easily and help provide feedback.
e.g. "RUF001", "unused variable", "Jupyter notebook"
* A minimal code snippet that reproduces the bug.
* The command you invoked (e.g., `ruff /path/to/file.py --fix`), ideally including the `--isolated` flag.
* The current Ruff settings (any relevant sections from your `pyproject.toml`).

.github/dependabot.yml (vendored, new file, 21 lines changed)

@@ -0,0 +1,21 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
labels: ["internal"]
groups:
actions:
patterns:
- "*"
ignore:
# The latest versions of these are not compatible with our release workflow
- dependency-name: "actions/upload-artifact"
- dependency-name: "actions/download-artifact"
- package-ecosystem: "cargo"
directory: "/"
schedule:
interval: "weekly"
labels: ["internal"]

.github/renovate.json5 (vendored, 111 lines changed)

@@ -1,111 +0,0 @@
{
$schema: "https://docs.renovatebot.com/renovate-schema.json",
dependencyDashboard: true,
suppressNotifications: ["prEditedNotification"],
extends: ["config:recommended"],
labels: ["internal"],
schedule: ["before 4am on Monday"],
semanticCommits: "disabled",
separateMajorMinor: false,
prHourlyLimit: 10,
enabledManagers: ["github-actions", "pre-commit", "cargo", "pep621", "pip_requirements", "npm"],
cargo: {
// See https://docs.renovatebot.com/configuration-options/#rangestrategy
rangeStrategy: "update-lockfile",
},
pep621: {
// The default for this package manager is to only search for `pyproject.toml` files
// found at the repository root: https://docs.renovatebot.com/modules/manager/pep621/#file-matching
fileMatch: ["^(python|scripts)/.*pyproject\\.toml$"],
},
pip_requirements: {
// The default for this package manager is to run on all requirements.txt files:
// https://docs.renovatebot.com/modules/manager/pip_requirements/#file-matching
// `fileMatch` doesn't work for excluding files; to exclude `requirements.txt` files
// outside the `doc/` directory, we instead have to use `ignorePaths`. Unlike `fileMatch`,
// which takes a regex string, `ignorePaths` takes a glob string, so we have to use
// a "negative glob pattern".
// See:
// - https://docs.renovatebot.com/modules/manager/#ignoring-files-that-match-the-default-filematch
// - https://docs.renovatebot.com/configuration-options/#ignorepaths
// - https://docs.renovatebot.com/string-pattern-matching/#negative-matching
ignorePaths: ["!docs/requirements*.txt"]
},
npm: {
// The default for this package manager is to only search for `package.json` files
// found at the repository root: https://docs.renovatebot.com/modules/manager/npm/#file-matching
fileMatch: ["^playground/.*package\\.json$"],
},
"pre-commit": {
enabled: true,
},
packageRules: [
{
// Group upload/download artifact updates; their versions depend on each other
groupName: "Artifact GitHub Actions dependencies",
matchManagers: ["github-actions"],
matchDatasources: ["gitea-tags", "github-tags"],
matchPackagePatterns: ["actions/.*-artifact"],
description: "Weekly update of artifact-related GitHub Actions dependencies",
},
{
// This package rule disables updates for GitHub runners:
// we'd only pin them to a specific version
// if there was a deliberate reason to do so
groupName: "GitHub runners",
matchManagers: ["github-actions"],
matchDatasources: ["github-runners"],
description: "Disable PRs updating GitHub runners (e.g. 'runs-on: macos-14')",
enabled: false,
},
{
// Disable updates of `zip-rs`; intentionally pinned for now due to ownership change
// See: https://github.com/astral-sh/uv/issues/3642
matchPackagePatterns: ["zip"],
matchManagers: ["cargo"],
enabled: false,
},
{
// `mkdocs-material` requires a manual update to keep the version in sync
// with `mkdocs-material-insider`.
// See: https://squidfunk.github.io/mkdocs-material/insiders/upgrade/
matchManagers: ["pip_requirements"],
matchPackagePatterns: ["mkdocs-material"],
enabled: false,
},
{
groupName: "pre-commit dependencies",
matchManagers: ["pre-commit"],
description: "Weekly update of pre-commit dependencies",
},
{
groupName: "NPM Development dependencies",
matchManagers: ["npm"],
matchDepTypes: ["devDependencies"],
description: "Weekly update of NPM development dependencies",
},
{
groupName: "Monaco",
matchManagers: ["npm"],
matchPackagePatterns: ["monaco"],
description: "Weekly update of the Monaco editor",
},
{
groupName: "strum",
matchManagers: ["cargo"],
matchPackagePatterns: ["strum"],
description: "Weekly update of strum dependencies",
},
{
groupName: "ESLint",
matchManagers: ["npm"],
matchPackageNames: ["eslint"],
allowedVersions: "<9",
description: "Constraint ESLint to version 8 until TypeScript-eslint supports ESLint 9", // https://github.com/typescript-eslint/typescript-eslint/issues/8211
},
],
vulnerabilityAlerts: {
commitMessageSuffix: "",
labels: ["internal", "security"],
},
}


@@ -1,68 +0,0 @@
# Build and publish a Docker image.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a local
# artifacts job within `cargo-dist`.
#
# TODO(charlie): Ideally, the publish step would happen as a publish job within `cargo-dist`, but
# sharing the built image as an artifact between jobs is challenging.
name: "[ruff] Build Docker image"
on:
workflow_call:
inputs:
plan:
required: true
type: string
pull_request:
paths:
- .github/workflows/build-docker.yml
jobs:
docker-publish:
name: Build Docker image (ghcr.io/astral-sh/ruff)
runs-on: ubuntu-latest
environment:
name: release
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: docker/setup-buildx-action@v3
- uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/astral-sh/ruff
- name: Check tag consistency
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
run: |
version=$(grep "version = " pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g')
if [ "${{ fromJson(inputs.plan).announcement_tag }}" != "${version}" ]; then
echo "The input tag does not match the version from pyproject.toml:" >&2
echo "${{ fromJson(inputs.plan).announcement_tag }}" >&2
echo "${version}" >&2
exit 1
else
echo "Releasing ${version}"
fi
- name: "Build and push Docker image"
uses: docker/build-push-action@v6
with:
context: .
platforms: linux/amd64,linux/arm64
# Reuse the builder
cache-from: type=gha
cache-to: type=gha,mode=max
push: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
tags: ghcr.io/astral-sh/ruff:latest,ghcr.io/astral-sh/ruff:${{ (inputs.plan != '' && fromJson(inputs.plan).announcement_tag) || 'dry-run' }}
labels: ${{ steps.meta.outputs.labels }}
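
The `push:` and `tags:` expressions above, like the tag-consistency step, read fields out of the cargo-dist plan passed in via the `plan` input. A minimal sketch of inspecting those fields locally, assuming the plan JSON has been saved as `plan-dist-manifest.json` (the file name used by the release workflow further below):

# Illustrative only: look at the plan fields this workflow consumes
jq -r '.announcement_tag' plan-dist-manifest.json               # e.g. "0.4.0"
jq -r '.announcement_tag_is_implicit' plan-dist-manifest.json   # "true" means a dry run
# The same version check the step performs, runnable outside CI:
version=$(grep "version = " pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g')
[ "$(jq -r '.announcement_tag' plan-dist-manifest.json)" = "$version" ] || echo "tag/version mismatch" >&2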


@@ -23,8 +23,6 @@ jobs:
name: "Determine changes"
runs-on: ubuntu-latest
outputs:
# Flag that is raised when any code that affects parser is changed
parser: ${{ steps.changed.outputs.parser_any_changed }}
# Flag that is raised when any code that affects linter is changed
linter: ${{ steps.changed.outputs.linter_any_changed }}
# Flag that is raised when any code that affects formatter is changed
@@ -37,21 +35,10 @@ jobs:
with:
fetch-depth: 0
- uses: tj-actions/changed-files@v44
- uses: tj-actions/changed-files@v42
id: changed
with:
files_yaml: |
parser:
- Cargo.toml
- Cargo.lock
- crates/ruff_python_trivia/**
- crates/ruff_source_file/**
- crates/ruff_text_size/**
- crates/ruff_python_ast/**
- crates/ruff_python_parser/**
- scripts/fuzz-parser/**
- .github/workflows/ci.yaml
linter:
- Cargo.toml
- Cargo.lock
@@ -59,6 +46,7 @@ jobs:
- "!crates/ruff_python_formatter/**"
- "!crates/ruff_formatter/**"
- "!crates/ruff_dev/**"
- "!crates/ruff_shrinking/**"
- scripts/*
- python/**
- .github/workflows/ci.yaml
@@ -111,7 +99,7 @@ jobs:
- name: "Clippy"
run: cargo clippy --workspace --all-targets --all-features --locked -- -D warnings
- name: "Clippy (wasm)"
run: cargo clippy -p ruff_wasm -p red_knot_wasm --target wasm32-unknown-unknown --all-features --locked -- -D warnings
run: cargo clippy -p ruff_wasm --target wasm32-unknown-unknown --all-features --locked -- -D warnings
cargo-test-linux:
name: "cargo test (linux)"
@@ -142,17 +130,10 @@ jobs:
# Check for broken links in the documentation.
- run: cargo doc --all --no-deps
env:
RUSTDOCFLAGS: "-D warnings"
# Use --document-private-items so that all our doc comments are kept in
# sync, not just public items. Eventually we should do this for all
# crates; for now add crates here as they are warning-clean to prevent
# regression.
- run: cargo doc --no-deps -p red_knot_python_semantic -p red_knot -p ruff_db --document-private-items
env:
# Setting RUSTDOCFLAGS because `cargo doc --check` isn't yet implemented (https://github.com/rust-lang/cargo/issues/10025).
RUSTDOCFLAGS: "-D warnings"
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@v3
with:
name: ruff
path: target/debug/ruff
@@ -174,9 +155,6 @@ jobs:
- uses: Swatinem/rust-cache@v2
- name: "Run tests"
shell: bash
env:
# Workaround for <https://github.com/nextest-rs/nextest/issues/1493>.
RUSTUP_WINDOWS_PATH_ADD_BIN: 1
run: |
cargo nextest run --all-features --profile ci
cargo test --all-features --doc
@@ -198,62 +176,10 @@ jobs:
cache-dependency-path: playground/package-lock.json
- uses: jetli/wasm-pack-action@v0.4.0
- uses: Swatinem/rust-cache@v2
- name: "Test ruff_wasm"
- name: "Run wasm-pack"
run: |
cd crates/ruff_wasm
wasm-pack test --node
- name: "Test red_knot_wasm"
run: |
cd crates/red_knot_wasm
wasm-pack test --node
cargo-build-release:
name: "cargo build (release)"
runs-on: macos-latest
needs: determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@v1
- uses: Swatinem/rust-cache@v2
- name: "Build"
run: cargo build --release --locked
cargo-build-msrv:
name: "cargo build (msrv)"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@v4
- uses: SebRollen/toml-action@v1.2.0
id: msrv
with:
file: "Cargo.toml"
field: "workspace.package.rust-version"
- name: "Install Rust toolchain"
run: rustup default ${{ steps.msrv.outputs.value }}
- name: "Install mold"
uses: rui314/setup-mold@v1
- name: "Install cargo nextest"
uses: taiki-e/install-action@v2
with:
tool: cargo-nextest
- name: "Install cargo insta"
uses: taiki-e/install-action@v2
with:
tool: cargo-insta
- uses: Swatinem/rust-cache@v2
- name: "Run tests"
shell: bash
env:
NEXTEST_PROFILE: "ci"
run: cargo +${{ steps.msrv.outputs.value }} insta test --all-features --unreferenced reject --test-runner nextest
cargo-fuzz:
name: "cargo fuzz"
@@ -268,47 +194,12 @@ jobs:
- uses: Swatinem/rust-cache@v2
with:
workspaces: "fuzz -> target"
- name: "Install cargo-binstall"
uses: cargo-bins/cargo-binstall@main
- name: "Install cargo-fuzz"
uses: taiki-e/install-action@v2
with:
tool: cargo-fuzz@0.11.2
- name: "Install cargo-fuzz"
# Download the latest version via quickinstall rather than the GitHub releases, because the GitHub releases only have MUSL targets.
run: cargo binstall cargo-fuzz --force --disable-strategies crate-meta-data --no-confirm
- run: cargo fuzz build -s none
fuzz-parser:
name: "Fuzz the parser"
runs-on: ubuntu-latest
needs:
- cargo-test-linux
- determine_changes
if: ${{ needs.determine_changes.outputs.parser == 'true' }}
timeout-minutes: 20
env:
FORCE_COLOR: 1
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Install uv
run: curl -LsSf https://astral.sh/uv/install.sh | sh
- name: Install Python requirements
run: uv pip install -r scripts/fuzz-parser/requirements.txt --system
- uses: actions/download-artifact@v4
name: Download Ruff binary to test
id: download-cached-binary
with:
name: ruff
path: ruff-to-test
- name: Fuzz
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ${{ steps.download-cached-binary.outputs.download-path }}/ruff
python scripts/fuzz-parser/fuzz.py 0-500 --test-executable ${{ steps.download-cached-binary.outputs.download-path }}/ruff
scripts:
name: "test scripts"
runs-on: ubuntu-latest
@@ -337,7 +228,9 @@ jobs:
- determine_changes
# Only runs on pull requests, since that is the only way we can find the base version for comparison.
# Ecosystem check needs linter and/or formatter changes.
if: ${{ github.event_name == 'pull_request' && needs.determine_changes.outputs.code == 'true' }}
if: github.event_name == 'pull_request' && ${{
needs.determine_changes.outputs.code == 'true'
}}
timeout-minutes: 20
steps:
- uses: actions/checkout@v4
@@ -345,19 +238,18 @@ jobs:
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: actions/download-artifact@v4
- uses: actions/download-artifact@v3
name: Download comparison Ruff binary
id: ruff-target
with:
name: ruff
path: target/debug
- uses: dawidd6/action-download-artifact@v6
- uses: dawidd6/action-download-artifact@v3
name: Download baseline Ruff binary
with:
name: ruff
branch: ${{ github.event.pull_request.base.ref }}
workflow: "ci.yaml"
check_artifacts: true
- name: Install ruff-ecosystem
@@ -432,28 +324,34 @@ jobs:
run: |
echo ${{ github.event.number }} > pr-number
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@v3
name: Upload PR Number
with:
name: pr-number
path: pr-number
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@v3
name: Upload Results
with:
name: ecosystem-result
path: ecosystem-result
cargo-shear:
name: "cargo shear"
cargo-udeps:
name: "cargo udeps"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@v4
- uses: cargo-bins/cargo-binstall@main
- run: cargo binstall --no-confirm cargo-shear
- run: cargo shear
- name: "Install nightly Rust toolchain"
# Only pinned to make caching work, update freely
run: rustup toolchain install nightly-2023-10-15
- uses: Swatinem/rust-cache@v2
- name: "Install cargo-udeps"
uses: taiki-e/install-action@cargo-udeps
- name: "Run cargo-udeps"
run: cargo +nightly-2023-10-15 udeps
python-package:
name: "python package"
@@ -573,7 +471,7 @@ jobs:
- determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
steps:
- uses: extractions/setup-just@v2
- uses: extractions/setup-just@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -586,7 +484,7 @@ jobs:
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: actions/download-artifact@v4
- uses: actions/download-artifact@v3
name: Download development ruff binary
id: ruff-target
with:
@@ -610,7 +508,7 @@ jobs:
benchmarks:
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ github.repository == 'astral-sh/ruff' && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- name: "Checkout Branch"
@@ -630,7 +528,7 @@ jobs:
run: cargo codspeed build --features codspeed -p ruff_benchmark
- name: "Run benchmarks"
uses: CodSpeedHQ/action@v3
uses: CodSpeedHQ/action@v2
with:
run: cargo codspeed run
token: ${{ secrets.CODSPEED_TOKEN }}


@@ -1,72 +0,0 @@
name: Daily parser fuzz
on:
workflow_dispatch:
schedule:
- cron: "0 0 * * *"
pull_request:
paths:
- ".github/workflows/daily_fuzz.yaml"
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
PACKAGE_NAME: ruff
FORCE_COLOR: 1
jobs:
fuzz:
name: Fuzz
runs-on: ubuntu-latest
timeout-minutes: 20
# Don't run the cron job on forks:
if: ${{ github.repository == 'astral-sh/ruff' || github.event_name != 'schedule' }}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: "3.12"
- name: Install uv
run: curl -LsSf https://astral.sh/uv/install.sh | sh
- name: Install Python requirements
run: uv pip install -r scripts/fuzz-parser/requirements.txt --system
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@v1
- uses: Swatinem/rust-cache@v2
- name: Build ruff
# A debug build means the script runs slower once it gets started,
# but this is outweighed by the fact that a release build takes *much* longer to compile in CI
run: cargo build --locked
- name: Fuzz
run: python scripts/fuzz-parser/fuzz.py $(shuf -i 0-9999999999999999999 -n 1000) --test-executable target/debug/ruff
create-issue-on-failure:
name: Create an issue if the daily fuzz surfaced any bugs
runs-on: ubuntu-latest
needs: fuzz
if: ${{ github.repository == 'astral-sh/ruff' && always() && github.event_name == 'schedule' && needs.fuzz.result == 'failure' }}
permissions:
issues: write
steps:
- uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
await github.rest.issues.create({
owner: "astral-sh",
repo: "ruff",
title: `Daily parser fuzz failed on ${new Date().toDateString()}`,
body: "Runs listed here: https://github.com/astral-sh/ruff/actions/workflows/daily_fuzz.yml",
labels: ["bug", "parser", "fuzzer"],
})

.github/workflows/docs.yaml (vendored, new file, 55 lines changed)

@@ -0,0 +1,55 @@
name: mkdocs
on:
workflow_dispatch:
inputs:
ref:
description: "The commit SHA, tag, or branch to publish. Uses the default branch if not specified."
default: ""
type: string
release:
types: [published]
jobs:
mkdocs:
runs-on: ubuntu-latest
env:
CF_API_TOKEN_EXISTS: ${{ secrets.CF_API_TOKEN != '' }}
MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.ref }}
- uses: actions/setup-python@v5
- name: "Add SSH key"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY }}
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v2
- name: "Install Insiders dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: pip install -r docs/requirements-insiders.txt
- name: "Install dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: pip install -r docs/requirements.txt
- name: "Copy README File"
run: |
python scripts/transform_readme.py --target mkdocs
python scripts/generate_mkdocs.py
- name: "Build Insiders docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: mkdocs build --strict -f mkdocs.insiders.yml
- name: "Build docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: mkdocs build --strict -f mkdocs.public.yml
- name: "Deploy to Cloudflare Pages"
if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
uses: cloudflare/wrangler-action@v3.4.1
with:
apiToken: ${{ secrets.CF_API_TOKEN }}
accountId: ${{ secrets.CF_ACCOUNT_ID }}
# `github.head_ref` is only set during pull requests; for manual runs or tags we use `main` to deploy to production
command: pages deploy site --project-name=astral-docs --branch ${{ github.head_ref || 'main' }} --commit-hash ${GITHUB_SHA}


@@ -1,29 +0,0 @@
# Notify downstream repositories of a new release.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a post-announce
# job within `cargo-dist`.
name: "[ruff] Notify dependents"
on:
workflow_call:
inputs:
plan:
required: true
type: string
jobs:
update-dependents:
name: Notify dependents
runs-on: ubuntu-latest
steps:
- name: "Update pre-commit mirror"
uses: actions/github-script@v7
with:
github-token: ${{ secrets.RUFF_PRE_COMMIT_PAT }}
script: |
github.rest.actions.createWorkflowDispatch({
owner: 'astral-sh',
repo: 'ruff-pre-commit',
workflow_id: 'main.yml',
ref: 'main',
})


@@ -1,16 +1,9 @@
# Publish the Ruff playground.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a post-announce
# job within `cargo-dist`.
name: "[Playground] Release"
on:
workflow_dispatch:
workflow_call:
inputs:
plan:
required: true
type: string
release:
types: [published]
env:
CARGO_INCREMENTAL: 0
@@ -47,7 +40,7 @@ jobs:
working-directory: playground
- name: "Deploy to Cloudflare Pages"
if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
uses: cloudflare/wrangler-action@v3.7.0
uses: cloudflare/wrangler-action@v3.4.1
with:
apiToken: ${{ secrets.CF_API_TOKEN }}
accountId: ${{ secrets.CF_ACCOUNT_ID }}


@@ -17,13 +17,12 @@ jobs:
comment:
runs-on: ubuntu-latest
steps:
- uses: dawidd6/action-download-artifact@v6
- uses: dawidd6/action-download-artifact@v3
name: Download pull request number
with:
name: pr-number
run_id: ${{ github.event.workflow_run.id || github.event.inputs.workflow_run_id }}
if_no_artifact_found: ignore
allow_forks: true
- name: Parse pull request number
id: pr-number
@@ -33,7 +32,7 @@ jobs:
echo "pr-number=$(<pr-number)" >> $GITHUB_OUTPUT
fi
- uses: dawidd6/action-download-artifact@v6
- uses: dawidd6/action-download-artifact@v3
name: "Download ecosystem results"
id: download-ecosystem-result
if: steps.pr-number.outputs.pr-number
@@ -44,20 +43,11 @@ jobs:
path: pr/ecosystem
workflow_conclusion: completed
if_no_artifact_found: ignore
allow_forks: true
- name: Generate comment content
id: generate-comment
if: steps.download-ecosystem-result.outputs.found_artifact == 'true'
run: |
# Guard against malicious ecosystem results that symlink to a secret
# file on this runner
if [[ -L pr/ecosystem/ecosystem-result ]]
then
echo "Error: ecosystem-result cannot be a symlink"
exit 1
fi
# Note this identifier is used to find the comment to update on
# subsequent runs
echo '<!-- generated-comment ecosystem -->' >> comment.txt


@@ -1,151 +0,0 @@
# Publish the Ruff documentation.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a post-announce
# job within `cargo-dist`.
name: mkdocs
on:
workflow_dispatch:
inputs:
ref:
description: "The commit SHA, tag, or branch to publish. Uses the default branch if not specified."
default: ""
type: string
workflow_call:
inputs:
plan:
required: true
type: string
jobs:
mkdocs:
runs-on: ubuntu-latest
env:
MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.ref }}
- uses: actions/setup-python@v5
with:
python-version: 3.12
- name: "Set docs version"
run: |
version="${{ (inputs.plan != '' && fromJson(inputs.plan).announcement_tag) || inputs.ref }}"
# if version is missing, exit with error
if [[ -z "$version" ]]; then
echo "Can't build docs without a version."
exit 1
fi
# Use version as display name for now
display_name="$version"
echo "version=$version" >> $GITHUB_ENV
echo "display_name=$display_name" >> $GITHUB_ENV
- name: "Set branch name"
run: |
version="${{ env.version }}"
display_name="${{ env.display_name }}"
timestamp="$(date +%s)"
# create branch_display_name from display_name by replacing all
# characters disallowed in git branch names with hyphens
branch_display_name="$(echo "$display_name" | tr -c '[:alnum:]._' '-' | tr -s '-')"
echo "branch_name=update-docs-$branch_display_name-$timestamp" >> $GITHUB_ENV
echo "timestamp=$timestamp" >> $GITHUB_ENV
- name: "Add SSH key"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY }}
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v2
- name: "Install Insiders dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: pip install -r docs/requirements-insiders.txt
- name: "Install dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: pip install -r docs/requirements.txt
- name: "Copy README File"
run: |
python scripts/transform_readme.py --target mkdocs
python scripts/generate_mkdocs.py
- name: "Build Insiders docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: mkdocs build --strict -f mkdocs.insiders.yml
- name: "Build docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: mkdocs build --strict -f mkdocs.public.yml
- name: "Clone docs repo"
run: |
version="${{ env.version }}"
git clone https://${{ secrets.ASTRAL_DOCS_PAT }}@github.com/astral-sh/docs.git astral-docs
- name: "Copy docs"
run: rm -rf astral-docs/site/ruff && mkdir -p astral-docs/site && cp -r site/ruff astral-docs/site/
- name: "Commit docs"
working-directory: astral-docs
run: |
branch_name="${{ env.branch_name }}"
git config user.name "astral-docs-bot"
git config user.email "176161322+astral-docs-bot@users.noreply.github.com"
git checkout -b $branch_name
git add site/ruff
git commit -m "Update ruff documentation for $version"
- name: "Create Pull Request"
working-directory: astral-docs
env:
GITHUB_TOKEN: ${{ secrets.ASTRAL_DOCS_PAT }}
run: |
version="${{ env.version }}"
display_name="${{ env.display_name }}"
branch_name="${{ env.branch_name }}"
# set the PR title
pull_request_title="Update ruff documentation for $display_name"
# Delete any existing pull requests that are open for this version
# by checking against pull_request_title because the new PR will
# supersede the old one.
gh pr list --state open --json title --jq '.[] | select(.title == "$pull_request_title") | .number' | \
xargs -I {} gh pr close {}
# push the branch to GitHub
git push origin $branch_name
# create the PR
gh pr create --base main --head $branch_name \
--title "$pull_request_title" \
--body "Automated documentation update for $display_name" \
--label "documentation"
- name: "Merge Pull Request"
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
working-directory: astral-docs
env:
GITHUB_TOKEN: ${{ secrets.ASTRAL_DOCS_PAT }}
run: |
branch_name="${{ env.branch_name }}"
# auto-merge the PR if the build was triggered by a release. Manual builds should be reviewed by a human.
# give the PR a few seconds to be created before trying to auto-merge it
sleep 10
gh pr merge --squash $branch_name


@@ -1,34 +0,0 @@
# Publish a release to PyPI.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a publish job
# within `cargo-dist`.
name: "[ruff] Publish to PyPI"
on:
workflow_call:
inputs:
plan:
required: true
type: string
jobs:
pypi-publish:
name: Upload to PyPI
runs-on: ubuntu-latest
environment:
name: release
permissions:
# For PyPI's trusted publishing.
id-token: write
steps:
- uses: actions/download-artifact@v4
with:
pattern: wheels-*
path: wheels
merge-multiple: true
- name: Publish to PyPi
uses: pypa/gh-action-pypi-publish@release/v1
with:
skip-existing: true
packages-dir: wheels
verbose: true


@@ -1,55 +0,0 @@
# Build and publish ruff-api for wasm.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a publish
# job within `cargo-dist`.
name: "Build and publish wasm"
on:
workflow_dispatch:
workflow_call:
inputs:
plan:
required: true
type: string
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
jobs:
ruff_wasm:
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write
strategy:
matrix:
target: [web, bundler, nodejs]
fail-fast: false
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown
- uses: jetli/wasm-pack-action@v0.4.0
- uses: jetli/wasm-bindgen-action@v0.2.0
- name: "Run wasm-pack build"
run: wasm-pack build --target ${{ matrix.target }} crates/ruff_wasm
- name: "Rename generated package"
run: | # Replace the package name w/ jq
jq '.name="@astral-sh/ruff-wasm-${{ matrix.target }}"' crates/ruff_wasm/pkg/package.json > /tmp/package.json
mv /tmp/package.json crates/ruff_wasm/pkg
- run: cp LICENSE crates/ruff_wasm/pkg # wasm-pack does not put the LICENSE file in the pkg
- uses: actions/setup-node@v4
with:
node-version: 18
registry-url: "https://registry.npmjs.org"
- name: "Publish (dry-run)"
if: ${{ inputs.plan == '' || fromJson(inputs.plan).announcement_tag_is_implicit }}
run: npm publish --dry-run crates/ruff_wasm/pkg
- name: "Publish"
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
run: npm publish --provenance --access public crates/ruff_wasm/pkg
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}


@@ -1,23 +1,21 @@
# Build ruff on all platforms.
#
# Generates both wheels (for PyPI) and archived binaries (for GitHub releases).
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a local
# artifacts job within `cargo-dist`.
name: "Build binaries"
name: "[ruff] Release"
on:
workflow_call:
workflow_dispatch:
inputs:
plan:
required: true
tag:
description: "The version to tag, without the leading 'v'. If omitted, will initiate a dry run (no uploads)."
type: string
sha:
description: "The full sha of the commit to be released. If omitted, the latest commit on the default branch will be used."
default: ""
type: string
pull_request:
paths:
# When we change pyproject.toml, we want to ensure that the maturin builds still work.
# When we change pyproject.toml, we want to ensure that the maturin builds still work
- pyproject.toml
# And when we change this workflow itself...
- .github/workflows/build-binaries.yml
- .github/workflows/release.yaml
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -25,7 +23,6 @@ concurrency:
env:
PACKAGE_NAME: ruff
MODULE_NAME: ruff
PYTHON_VERSION: "3.11"
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
@@ -34,12 +31,11 @@ env:
jobs:
sdist:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
@@ -53,21 +49,20 @@ jobs:
- name: "Test sdist"
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.tar.gz --force-reinstall
${{ env.MODULE_NAME }} --help
python -m ${{ env.MODULE_NAME }} --help
ruff --help
python -m ruff --help
- name: "Upload sdist"
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: wheels-sdist
name: wheels
path: dist
macos-x86_64:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: macos-12
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
@@ -79,77 +74,69 @@ jobs:
with:
target: x86_64
args: --release --locked --out dist
- name: "Upload wheels"
uses: actions/upload-artifact@v4
with:
name: wheels-macos-x86_64
path: dist
- name: "Archive binary"
run: |
TARGET=x86_64-apple-darwin
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
with:
name: artifacts-macos-x86_64
path: |
*.tar.gz
*.sha256
macos-aarch64:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: macos-14
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: arm64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels - aarch64"
uses: PyO3/maturin-action@v1
with:
target: aarch64
args: --release --locked --out dist
- name: "Test wheel - aarch64"
- name: "Test wheel - x86_64"
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
ruff --help
python -m ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: wheels-aarch64-apple-darwin
name: wheels
path: dist
- name: "Archive binary"
run: |
TARGET=aarch64-apple-darwin
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
ARCHIVE_FILE=ruff-${{ inputs.tag }}-x86_64-apple-darwin.tar.gz
tar czvf $ARCHIVE_FILE -C target/x86_64-apple-darwin/release ruff
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: artifacts-aarch64-apple-darwin
name: binaries
path: |
*.tar.gz
*.sha256
macos-universal:
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels - universal2"
uses: PyO3/maturin-action@v1
with:
args: --release --locked --target universal2-apple-darwin --out dist
- name: "Test wheel - universal2"
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*universal2.whl --force-reinstall
ruff --help
python -m ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@v3
with:
name: wheels
path: dist
- name: "Archive binary"
run: |
ARCHIVE_FILE=ruff-${{ inputs.tag }}-aarch64-apple-darwin.tar.gz
tar czvf $ARCHIVE_FILE -C target/aarch64-apple-darwin/release ruff
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v3
with:
name: binaries
path: |
*.tar.gz
*.sha256
windows:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: windows-latest
strategy:
matrix:
@@ -163,7 +150,7 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
@@ -175,37 +162,33 @@ jobs:
with:
target: ${{ matrix.platform.target }}
args: --release --locked --out dist
env:
# aarch64 build fails, see https://github.com/PyO3/maturin/issues/2110
XWIN_VERSION: 16
- name: "Test wheel"
if: ${{ !startsWith(matrix.platform.target, 'aarch64') }}
shell: bash
run: |
python -m pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
${{ env.MODULE_NAME }} --help
python -m ${{ env.MODULE_NAME }} --help
ruff --help
python -m ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: wheels-${{ matrix.platform.target }}
name: wheels
path: dist
- name: "Archive binary"
shell: bash
run: |
ARCHIVE_FILE=ruff-${{ matrix.platform.target }}.zip
ARCHIVE_FILE=ruff-${{ inputs.tag }}-${{ matrix.platform.target }}.zip
7z a $ARCHIVE_FILE ./target/${{ matrix.platform.target }}/release/ruff.exe
sha256sum $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: artifacts-${{ matrix.platform.target }}
name: binaries
path: |
*.zip
*.sha256
linux:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
strategy:
matrix:
@@ -215,7 +198,7 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
@@ -232,36 +215,27 @@ jobs:
if: ${{ startsWith(matrix.target, 'x86_64') }}
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
${{ env.MODULE_NAME }} --help
python -m ${{ env.MODULE_NAME }} --help
ruff --help
python -m ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: wheels-${{ matrix.target }}
name: wheels
path: dist
- name: "Archive binary"
shell: bash
run: |
set -euo pipefail
TARGET=${{ matrix.target }}
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
ARCHIVE_FILE=ruff-${{ inputs.tag }}-${{ matrix.target }}.tar.gz
tar czvf $ARCHIVE_FILE -C target/${{ matrix.target }}/release ruff
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: artifacts-${{ matrix.target }}
name: binaries
path: |
*.tar.gz
*.sha256
linux-cross:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
strategy:
matrix:
@@ -277,19 +251,13 @@ jobs:
arch: s390x
- target: powerpc64le-unknown-linux-gnu
arch: ppc64le
# see https://github.com/astral-sh/ruff/issues/10073
maturin_docker_options: -e JEMALLOC_SYS_WITH_LG_PAGE=16
- target: powerpc64-unknown-linux-gnu
arch: ppc64
# see https://github.com/astral-sh/ruff/issues/10073
maturin_docker_options: -e JEMALLOC_SYS_WITH_LG_PAGE=16
- target: arm-unknown-linux-musleabihf
arch: arm
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
@@ -306,8 +274,8 @@ jobs:
if: matrix.platform.arch != 'ppc64'
name: Test wheel
with:
arch: ${{ matrix.platform.arch == 'arm' && 'armv6' || matrix.platform.arch }}
distro: ${{ matrix.platform.arch == 'arm' && 'bullseye' || 'ubuntu20.04' }}
arch: ${{ matrix.platform.arch }}
distro: ubuntu20.04
githubToken: ${{ github.token }}
install: |
apt-get update
@@ -317,33 +285,24 @@ jobs:
pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: wheels-${{ matrix.platform.target }}
name: wheels
path: dist
- name: "Archive binary"
shell: bash
run: |
set -euo pipefail
TARGET=${{ matrix.platform.target }}
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
ARCHIVE_FILE=ruff-${{ inputs.tag }}-${{ matrix.platform.target }}.tar.gz
tar czvf $ARCHIVE_FILE -C target/${{ matrix.platform.target }}/release ruff
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: artifacts-${{ matrix.platform.target }}
name: binaries
path: |
*.tar.gz
*.sha256
musllinux:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
strategy:
matrix:
@@ -353,7 +312,7 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
@@ -376,35 +335,26 @@ jobs:
apk add python3
python -m venv .venv
.venv/bin/pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
.venv/bin/${{ env.MODULE_NAME }} --help
.venv/bin/ruff check --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: wheels-${{ matrix.target }}
name: wheels
path: dist
- name: "Archive binary"
shell: bash
run: |
set -euo pipefail
TARGET=${{ matrix.target }}
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
ARCHIVE_FILE=ruff-${{ inputs.tag }}-${{ matrix.target }}.tar.gz
tar czvf $ARCHIVE_FILE -C target/${{ matrix.target }}/release ruff
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: artifacts-${{ matrix.target }}
name: binaries
path: |
*.tar.gz
*.sha256
musllinux-cross:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
strategy:
matrix:
@@ -418,7 +368,7 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
@@ -442,29 +392,202 @@ jobs:
run: |
python -m venv .venv
.venv/bin/pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
.venv/bin/${{ env.MODULE_NAME }} --help
.venv/bin/ruff check --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: wheels-${{ matrix.platform.target }}
name: wheels
path: dist
- name: "Archive binary"
shell: bash
run: |
set -euo pipefail
TARGET=${{ matrix.platform.target }}
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
ARCHIVE_FILE=ruff-${{ inputs.tag }}-${{ matrix.platform.target }}.tar.gz
tar czvf $ARCHIVE_FILE -C target/${{ matrix.platform.target }}/release ruff
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: artifacts-${{ matrix.platform.target }}
name: binaries
path: |
*.tar.gz
*.sha256
validate-tag:
name: Validate tag
runs-on: ubuntu-latest
# If you don't set an input tag, it's a dry run (no uploads).
if: ${{ inputs.tag }}
steps:
- uses: actions/checkout@v4
with:
ref: main # We checkout the main branch to check for the commit
- name: Check main branch
if: ${{ inputs.sha }}
run: |
# Fetch the main branch since a shallow checkout is used by default
git fetch origin main --unshallow
if ! git branch --contains ${{ inputs.sha }} | grep -E '(^|\s)main$'; then
echo "The specified sha is not on the main branch" >&2
exit 1
fi
- name: Check tag consistency
run: |
# Switch to the commit we want to release
git checkout ${{ inputs.sha }}
version=$(grep "version = " pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g')
if [ "${{ inputs.tag }}" != "${version}" ]; then
echo "The input tag does not match the version from pyproject.toml:" >&2
echo "${{ inputs.tag }}" >&2
echo "${version}" >&2
exit 1
else
echo "Releasing ${version}"
fi
upload-release:
name: Upload to PyPI
runs-on: ubuntu-latest
needs:
- macos-universal
- macos-x86_64
- windows
- linux
- linux-cross
- musllinux
- musllinux-cross
- validate-tag
# If you don't set an input tag, it's a dry run (no uploads).
if: ${{ inputs.tag }}
environment:
name: release
permissions:
# For pypi trusted publishing
id-token: write
steps:
- uses: actions/download-artifact@v3
with:
name: wheels
path: wheels
- name: Publish to PyPi
uses: pypa/gh-action-pypi-publish@release/v1
with:
skip-existing: true
packages-dir: wheels
verbose: true
tag-release:
name: Tag release
runs-on: ubuntu-latest
needs: upload-release
# If you don't set an input tag, it's a dry run (no uploads).
if: ${{ inputs.tag }}
permissions:
# For git tag
contents: write
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- name: git tag
run: |
git config user.email "hey@astral.sh"
git config user.name "Ruff Release CI"
git tag -m "v${{ inputs.tag }}" "v${{ inputs.tag }}"
# If there is a duplicate tag, this will fail. The publish-to-PyPI action will have been a no-op (due to
# skip-existing), so we make a non-destructive exit here
git push --tags
publish-release:
name: Publish to GitHub
runs-on: ubuntu-latest
needs: tag-release
# If you don't set an input tag, it's a dry run (no uploads).
if: ${{ inputs.tag }}
permissions:
# For GitHub release publishing
contents: write
steps:
- uses: actions/download-artifact@v3
with:
name: binaries
path: binaries
- name: "Publish to GitHub"
uses: softprops/action-gh-release@v1
with:
draft: true
files: binaries/*
tag_name: v${{ inputs.tag }}
docker-publish:
# This job doesn't need to wait on any other task; it's easy to re-tag if something failed, and we validate
# the tag here as well
name: Push Docker image ghcr.io/astral-sh/ruff
runs-on: ubuntu-latest
environment:
name: release
permissions:
# For the docker push
packages: write
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: docker/setup-buildx-action@v3
- uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/astral-sh/ruff
- name: Check tag consistency
# Unlike validate-tag, we don't check whether the commit is on the main branch, but that seems good enough
# since we can change Docker tags
if: ${{ inputs.tag }}
run: |
version=$(grep "version = " pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g')
if [ "${{ inputs.tag }}" != "${version}" ]; then
echo "The input tag does not match the version from pyproject.toml:" >&2
echo "${{ inputs.tag }}" >&2
echo "${version}" >&2
exit 1
else
echo "Releasing ${version}"
fi
- name: "Build and push Docker image"
uses: docker/build-push-action@v5
with:
context: .
platforms: linux/amd64,linux/arm64
# Reuse the builder
cache-from: type=gha
cache-to: type=gha,mode=max
push: ${{ inputs.tag != '' }}
tags: ghcr.io/astral-sh/ruff:latest,ghcr.io/astral-sh/ruff:${{ inputs.tag || 'dry-run' }}
labels: ${{ steps.meta.outputs.labels }}
# After the release has been published, we update downstream repositories
# This is separate because if this fails the release is still fine, we just need to do some manual workflow triggers
update-dependents:
name: Update dependents
runs-on: ubuntu-latest
needs: publish-release
steps:
- name: "Update pre-commit mirror"
uses: actions/github-script@v7
with:
github-token: ${{ secrets.RUFF_PRE_COMMIT_PAT }}
script: |
github.rest.actions.createWorkflowDispatch({
owner: 'astral-sh',
repo: 'ruff-pre-commit',
workflow_id: 'main.yml',
ref: 'main',
})
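
Every "Archive binary" step above writes a `.sha256` file alongside the tarball (`shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256`). As a hedged sketch of how such an archive could be checked after download, assuming both files sit in the current directory and using the newer `ruff-$TARGET.tar.gz` naming (older steps embed the release tag in the name):

# Hypothetical local verification of a downloaded release archive
shasum -a 256 -c ruff-x86_64-apple-darwin.tar.gz.sha256   # the .sha256 file references the tarball by name
tar tzf ruff-x86_64-apple-darwin.tar.gz                   # list the contents before extracting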


@@ -1,298 +0,0 @@
# Copyright 2022-2024, axodotdev
# SPDX-License-Identifier: MIT or Apache-2.0
#
# CI that:
#
# * checks for a Git Tag that looks like a release
# * builds artifacts with cargo-dist (archives, installers, hashes)
# * uploads those artifacts to temporary workflow zip
# * on success, uploads the artifacts to a GitHub Release
#
# Note that the GitHub Release will be created with a generated
# title/body based on your changelogs.
name: Release
permissions:
"contents": "write"
# This task will run whenever you workflow_dispatch with a tag that looks like a version
# like "1.0.0", "v0.1.0-prerelease.1", "my-app/0.1.0", "releases/v1.0.0", etc.
# Various formats will be parsed into a VERSION and an optional PACKAGE_NAME, where
# PACKAGE_NAME must be the name of a Cargo package in your workspace, and VERSION
# must be a Cargo-style SemVer Version (must have at least major.minor.patch).
#
# If PACKAGE_NAME is specified, then the announcement will be for that
# package (erroring out if it doesn't have the given version or isn't cargo-dist-able).
#
# If PACKAGE_NAME isn't specified, then the announcement will be for all
# (cargo-dist-able) packages in the workspace with that version (this mode is
# intended for workspaces with only one dist-able package, or with all dist-able
# packages versioned/released in lockstep).
#
# If you push multiple tags at once, separate instances of this workflow will
# spin up, creating an independent announcement for each one. However, GitHub
# will hard limit this to 3 tags per commit, as it will assume more tags is a
# mistake.
#
# If there's a prerelease-style suffix to the version, then the release(s)
# will be marked as a prerelease.
on:
workflow_dispatch:
inputs:
tag:
description: Release Tag
required: true
default: dry-run
type: string
jobs:
# Run 'cargo dist plan' (or host) to determine what tasks we need to do
plan:
runs-on: "ubuntu-20.04"
outputs:
val: ${{ steps.plan.outputs.manifest }}
tag: ${{ (inputs.tag != 'dry-run' && inputs.tag) || '' }}
tag-flag: ${{ inputs.tag && inputs.tag != 'dry-run' && format('--tag={0}', inputs.tag) || '' }}
publishing: ${{ inputs.tag && inputs.tag != 'dry-run' }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Install cargo-dist
# we specify bash to get pipefail; it guards against the `curl` command
# failing. otherwise `sh` won't catch that `curl` returned non-0
shell: bash
run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.18.0/cargo-dist-installer.sh | sh"
- name: Cache cargo-dist
uses: actions/upload-artifact@v4
with:
name: cargo-dist-cache
path: ~/.cargo/bin/cargo-dist
# sure would be cool if github gave us proper conditionals...
# so here's a doubly-nested ternary-via-truthiness to try to provide the best possible
# functionality based on whether this is a pull_request, and whether it's from a fork.
# (PRs run on the *source* but secrets are usually on the *target* -- that's *good*
# but also really annoying to build CI around when it needs secrets to work right.)
- id: plan
run: |
cargo dist ${{ (inputs.tag && inputs.tag != 'dry-run' && format('host --steps=create --tag={0}', inputs.tag)) || 'plan' }} --output-format=json > plan-dist-manifest.json
echo "cargo dist ran successfully"
cat plan-dist-manifest.json
echo "manifest=$(jq -c "." plan-dist-manifest.json)" >> "$GITHUB_OUTPUT"
- name: "Upload dist-manifest.json"
uses: actions/upload-artifact@v4
with:
name: artifacts-plan-dist-manifest
path: plan-dist-manifest.json
custom-build-binaries:
needs:
- plan
if: ${{ needs.plan.outputs.publishing == 'true' || fromJson(needs.plan.outputs.val).ci.github.pr_run_mode == 'upload' || inputs.tag == 'dry-run' }}
uses: ./.github/workflows/build-binaries.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
custom-build-docker:
needs:
- plan
if: ${{ needs.plan.outputs.publishing == 'true' || fromJson(needs.plan.outputs.val).ci.github.pr_run_mode == 'upload' || inputs.tag == 'dry-run' }}
uses: ./.github/workflows/build-docker.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
permissions:
"contents": "read"
"packages": "write"
# Build and package all the platform-agnostic(ish) things
build-global-artifacts:
needs:
- plan
- custom-build-binaries
- custom-build-docker
runs-on: "ubuntu-20.04"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BUILD_MANIFEST_NAME: target/distrib/global-dist-manifest.json
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Install cached cargo-dist
uses: actions/download-artifact@v4
with:
name: cargo-dist-cache
path: ~/.cargo/bin/
- run: chmod +x ~/.cargo/bin/cargo-dist
# Get all the local artifacts for the global tasks to use (for e.g. checksums)
- name: Fetch local artifacts
uses: actions/download-artifact@v4
with:
pattern: artifacts-*
path: target/distrib/
merge-multiple: true
- id: cargo-dist
shell: bash
run: |
cargo dist build ${{ needs.plan.outputs.tag-flag }} --output-format=json "--artifacts=global" > dist-manifest.json
echo "cargo dist ran successfully"
# Parse out what we just built and upload it to scratch storage
echo "paths<<EOF" >> "$GITHUB_OUTPUT"
jq --raw-output ".upload_files[]" dist-manifest.json >> "$GITHUB_OUTPUT"
echo "EOF" >> "$GITHUB_OUTPUT"
cp dist-manifest.json "$BUILD_MANIFEST_NAME"
- name: "Upload artifacts"
uses: actions/upload-artifact@v4
with:
name: artifacts-build-global
path: |
${{ steps.cargo-dist.outputs.paths }}
${{ env.BUILD_MANIFEST_NAME }}
# Determines if we should publish/announce
host:
needs:
- plan
- custom-build-binaries
- custom-build-docker
- build-global-artifacts
# Only run if we're "publishing", and only if local and global didn't fail (skipped is fine)
if: ${{ always() && needs.plan.outputs.publishing == 'true' && (needs.build-global-artifacts.result == 'skipped' || needs.build-global-artifacts.result == 'success') && (needs.custom-build-binaries.result == 'skipped' || needs.custom-build-binaries.result == 'success') && (needs.custom-build-docker.result == 'skipped' || needs.custom-build-docker.result == 'success') }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
runs-on: "ubuntu-20.04"
outputs:
val: ${{ steps.host.outputs.manifest }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Install cached cargo-dist
uses: actions/download-artifact@v4
with:
name: cargo-dist-cache
path: ~/.cargo/bin/
- run: chmod +x ~/.cargo/bin/cargo-dist
# Fetch artifacts from scratch-storage
- name: Fetch artifacts
uses: actions/download-artifact@v4
with:
pattern: artifacts-*
path: target/distrib/
merge-multiple: true
# This is a harmless no-op for GitHub Releases, hosting for that happens in "announce"
- id: host
shell: bash
run: |
cargo dist host ${{ needs.plan.outputs.tag-flag }} --steps=upload --steps=release --output-format=json > dist-manifest.json
echo "artifacts uploaded and released successfully"
cat dist-manifest.json
echo "manifest=$(jq -c "." dist-manifest.json)" >> "$GITHUB_OUTPUT"
- name: "Upload dist-manifest.json"
uses: actions/upload-artifact@v4
with:
# Overwrite the previous copy
name: artifacts-dist-manifest
path: dist-manifest.json
custom-publish-pypi:
needs:
- plan
- host
if: ${{ !fromJson(needs.plan.outputs.val).announcement_is_prerelease || fromJson(needs.plan.outputs.val).publish_prereleases }}
uses: ./.github/workflows/publish-pypi.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
# publish jobs get escalated permissions
permissions:
"id-token": "write"
"packages": "write"
custom-publish-wasm:
needs:
- plan
- host
if: ${{ !fromJson(needs.plan.outputs.val).announcement_is_prerelease || fromJson(needs.plan.outputs.val).publish_prereleases }}
uses: ./.github/workflows/publish-wasm.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
# publish jobs get escalated permissions
permissions:
"contents": "read"
"id-token": "write"
"packages": "write"
# Create a GitHub Release while uploading all files to it
announce:
needs:
- plan
- host
- custom-publish-pypi
- custom-publish-wasm
# use "always() && ..." to allow us to wait for all publish jobs while
# still allowing individual publish jobs to skip themselves (for prereleases).
# "host" however must run to completion, no skipping allowed!
if: ${{ always() && needs.host.result == 'success' && (needs.custom-publish-pypi.result == 'skipped' || needs.custom-publish-pypi.result == 'success') && (needs.custom-publish-wasm.result == 'skipped' || needs.custom-publish-wasm.result == 'success') }}
runs-on: "ubuntu-20.04"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
# Create a GitHub Release while uploading all files to it
- name: "Download GitHub Artifacts"
uses: actions/download-artifact@v4
with:
pattern: artifacts-*
path: artifacts
merge-multiple: true
- name: Cleanup
run: |
# Remove the granular manifests
rm -f artifacts/*-dist-manifest.json
- name: Create GitHub Release
env:
PRERELEASE_FLAG: "${{ fromJson(needs.host.outputs.val).announcement_is_prerelease && '--prerelease' || '' }}"
ANNOUNCEMENT_TITLE: "${{ fromJson(needs.host.outputs.val).announcement_title }}"
ANNOUNCEMENT_BODY: "${{ fromJson(needs.host.outputs.val).announcement_github_body }}"
RELEASE_COMMIT: "${{ github.sha }}"
run: |
# Write and read notes from a file to avoid quoting breaking things
echo "$ANNOUNCEMENT_BODY" > $RUNNER_TEMP/notes.txt
gh release create "${{ needs.plan.outputs.tag }}" --target "$RELEASE_COMMIT" $PRERELEASE_FLAG --title "$ANNOUNCEMENT_TITLE" --notes-file "$RUNNER_TEMP/notes.txt" artifacts/*
custom-notify-dependents:
needs:
- plan
- announce
uses: ./.github/workflows/notify-dependents.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
custom-publish-docs:
needs:
- plan
- announce
uses: ./.github/workflows/publish-docs.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
custom-publish-playground:
needs:
- plan
- announce
uses: ./.github/workflows/publish-playground.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit


@@ -1,80 +0,0 @@
name: Sync typeshed
on:
workflow_dispatch:
schedule:
# Run on the 1st and the 15th of every month:
- cron: "0 0 1,15 * *"
env:
FORCE_COLOR: 1
GH_TOKEN: ${{ github.token }}
jobs:
sync:
name: Sync typeshed
runs-on: ubuntu-latest
timeout-minutes: 20
# Don't run the cron job on forks:
if: ${{ github.repository == 'astral-sh/ruff' || github.event_name != 'schedule' }}
permissions:
contents: write
pull-requests: write
steps:
- uses: actions/checkout@v4
name: Checkout Ruff
with:
path: ruff
- uses: actions/checkout@v4
name: Checkout typeshed
with:
repository: python/typeshed
path: typeshed
- name: Setup git
run: |
git config --global user.name typeshedbot
git config --global user.email '<>'
- name: Sync typeshed
id: sync
run: |
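# Replace the vendored typeshed wholesale: copy the README, LICENSE, and stdlib stubs, drop typeshed's tests, and record the upstream commit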
rm -rf ruff/crates/red_knot_python_semantic/vendor/typeshed
mkdir ruff/crates/red_knot_python_semantic/vendor/typeshed
cp typeshed/README.md ruff/crates/red_knot_python_semantic/vendor/typeshed
cp typeshed/LICENSE ruff/crates/red_knot_python_semantic/vendor/typeshed
cp -r typeshed/stdlib ruff/crates/red_knot_python_semantic/vendor/typeshed/stdlib
rm -rf ruff/crates/red_knot_python_semantic/vendor/typeshed/stdlib/@tests
git -C typeshed rev-parse HEAD > ruff/crates/red_knot_python_semantic/vendor/typeshed/source_commit.txt
- name: Commit the changes
id: commit
if: ${{ steps.sync.outcome == 'success' }}
run: |
cd ruff
git checkout -b typeshedbot/sync-typeshed
git add .
git diff --staged --quiet || git commit -m "Sync typeshed. Source commit: https://github.com/python/typeshed/commit/$(git -C ../typeshed rev-parse HEAD)"
- name: Create a PR
if: ${{ steps.sync.outcome == 'success' && steps.commit.outcome == 'success' }}
run: |
cd ruff
git push --force origin typeshedbot/sync-typeshed
gh pr list --repo $GITHUB_REPOSITORY --head typeshedbot/sync-typeshed --json id --jq length | grep 1 && exit 0 # exit if there is existing pr
gh pr create --title "Sync vendored typeshed stubs" --body "Close and reopen this PR to trigger CI" --label "internal"
create-issue-on-failure:
name: Create an issue if the typeshed sync failed
runs-on: ubuntu-latest
needs: [sync]
if: ${{ github.repository == 'astral-sh/ruff' && always() && github.event_name == 'schedule' && needs.sync.result == 'failure' }}
permissions:
issues: write
steps:
- uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
await github.rest.issues.create({
owner: "astral-sh",
repo: "ruff",
title: `Automated typeshed sync failed on ${new Date().toDateString()}`,
body: "Runs are listed here: https://github.com/astral-sh/ruff/actions/workflows/sync_typeshed.yaml",
})

.gitignore (vendored): 9 changes

@@ -21,14 +21,6 @@ flamegraph.svg
# `CARGO_TARGET_DIR=target-llvm-lines RUSTFLAGS="-Csymbol-mangling-version=v0" cargo llvm-lines -p ruff --lib`
/target*
# samply profiles
profile.json
# tracing-flame traces
tracing.folded
tracing-flamechart.svg
tracing-flamegraph.svg
###
# Rust.gitignore
###
@@ -100,7 +92,6 @@ coverage.xml
.hypothesis/
.pytest_cache/
cover/
repos/
# Translations
*.mo


@@ -14,10 +14,7 @@ MD041: false
# MD013/line-length
MD013: false
# MD014/commands-show-output
MD014: false
# MD024/no-duplicate-heading
MD024:
# Allow when nested under different parents e.g. CHANGELOG.md
siblings_only: true
allow_different_nesting: true


@@ -2,12 +2,8 @@ fail_fast: true
exclude: |
(?x)^(
crates/red_knot_python_semantic/vendor/.*|
crates/red_knot_workspace/resources/.*|
crates/ruff_linter/resources/.*|
crates/ruff_linter/src/rules/.*/snapshots/.*|
crates/ruff_notebook/resources/.*|
crates/ruff_server/resources/.*|
crates/ruff/resources/.*|
crates/ruff_python_formatter/resources/.*|
crates/ruff_python_formatter/tests/snapshots/.*|
@@ -17,7 +13,7 @@ exclude: |
repos:
- repo: https://github.com/abravalheri/validate-pyproject
rev: v0.19
rev: v0.15
hooks:
- id: validate-pyproject
@@ -35,7 +31,7 @@ repos:
)$
- repo: https://github.com/igorshubovych/markdownlint-cli
rev: v0.41.0
rev: v0.37.0
hooks:
- id: markdownlint-fix
exclude: |
@@ -45,7 +41,7 @@ repos:
)$
- repo: https://github.com/crate-ci/typos
rev: v1.23.6
rev: v1.16.22
hooks:
- id: typos
@@ -59,17 +55,22 @@ repos:
pass_filenames: false # This makes it a lot faster
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.6.1
rev: v0.1.4
hooks:
- id: ruff-format
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
types_or: [python, pyi]
require_serial: true
exclude: |
(?x)^(
crates/ruff_linter/resources/.*|
crates/ruff_python_formatter/resources/.*
)$
# Prettier
- repo: https://github.com/pre-commit/mirrors-prettier
rev: v3.1.0
rev: v3.0.3
hooks:
- id: prettier
types: [yaml]


@@ -1,2 +0,0 @@
# Auto-generated by `cargo-dist`.
.github/workflows/release.yml


@@ -1,5 +0,0 @@
{
"recommendations": [
"rust-lang.rust-analyzer"
]
}


@@ -1,6 +0,0 @@
{
"rust-analyzer.check.extraArgs": [
"--all-features"
],
"rust-analyzer.check.command": "clippy",
}


@@ -1,101 +1,5 @@
# Breaking Changes
## 0.6.0
- Detect imports in `src` layouts by default for `isort` rules ([#12848](https://github.com/astral-sh/ruff/pull/12848))
- The pytest rules `PT001` and `PT023` now default to omitting the decorator parentheses when there are no arguments ([#12838](https://github.com/astral-sh/ruff/pull/12838)).
- Lint and format Jupyter Notebook by default ([#12878](https://github.com/astral-sh/ruff/pull/12878)).
You can disable specific rules for notebooks using [`per-file-ignores`](https://docs.astral.sh/ruff/settings/#lint_per-file-ignores):
```toml
[tool.ruff.lint.per-file-ignores]
"*.ipynb" = ["E501"] # disable line-too-long in notebooks
```
If you'd prefer to either only lint or only format Jupyter Notebook files, you can use the
section-specific `exclude` option to do so. For example, the following would only lint Jupyter
Notebook files and not format them:
```toml
[tool.ruff.format]
exclude = ["*.ipynb"]
```
And, conversely, the following would only format Jupyter Notebook files and not lint them:
```toml
[tool.ruff.lint]
exclude = ["*.ipynb"]
```
You can completely disable Jupyter Notebook support by updating the [`extend-exclude`](https://docs.astral.sh/ruff/settings/#extend-exclude) setting:
```toml
[tool.ruff]
extend-exclude = ["*.ipynb"]
```
## 0.5.0
- Follow the XDG specification to discover user-level configurations on macOS (same as on other Unix platforms)
- Selecting `ALL` now excludes deprecated rules
- The released archives now include an extra level of nesting, which can be removed with `--strip-components=1` when untarring.
- The release artifact's file name no longer includes the version tag. This enables users to install via `/latest` URLs on GitHub.
## 0.3.0
### Ruff 2024.2 style
The formatter now formats code according to the Ruff 2024.2 style guide. Read the [changelog](./CHANGELOG.md#030) for a detailed list of stabilized style changes.
### `isort`: Use one blank line after imports in typing stub files ([#9971](https://github.com/astral-sh/ruff/pull/9971))
Previously, Ruff used one or two blank lines (or the number configured by `isort.lines-after-imports`) after imports in typing stub files (`.pyi` files).
The [typing style guide for stubs](https://typing.readthedocs.io/en/latest/source/stubs.html#style-guide) recommends using at most 1 blank line for grouping.
As of this release, `isort` now always uses one blank line after imports in stub files, the same as the formatter.
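For illustration, here is a hypothetical stub (file name and contents are made up) laid out the way `isort` and the formatter now both expect:
```python
# widgets.pyi (hypothetical): exactly one blank line separates the import
# block from the first definition in a stub file.
from collections.abc import Sequence

class Widget:
    def resize(self, sizes: Sequence[int]) -> None: ...
```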
### `build` is no longer excluded by default ([#10093](https://github.com/astral-sh/ruff/pull/10093))
Ruff maintains a list of directories and files that are excluded by default. This list now consists of the following patterns:
- `.bzr`
- `.direnv`
- `.eggs`
- `.git`
- `.git-rewrite`
- `.hg`
- `.ipynb_checkpoints`
- `.mypy_cache`
- `.nox`
- `.pants.d`
- `.pyenv`
- `.pytest_cache`
- `.pytype`
- `.ruff_cache`
- `.svn`
- `.tox`
- `.venv`
- `.vscode`
- `__pypackages__`
- `_build`
- `buck-out`
- `dist`
- `node_modules`
- `site-packages`
- `venv`
Previously, the `build` directory was included in this list. However, the `build` directory tends to be a not-unpopular directory
name, and excluding it by default caused confusion. Ruff now no longer excludes `build` except if it is excluded by a `.gitignore` file
or because it is listed in `extend-exclude`.
### `--format` is no longer a valid `rule` or `linter` command option
Previously, `ruff rule` and `ruff linter` accepted the `--format <FORMAT>` option as an alias for `--output-format`. Ruff no longer
supports this alias. Please use `ruff rule --output-format <FORMAT>` and `ruff linter --output-format <FORMAT>` instead.
## 0.1.9
### `site-packages` is now excluded by default ([#5513](https://github.com/astral-sh/ruff/pull/5513))

File diff suppressed because it is too large.


@@ -2,20 +2,58 @@
Welcome! We're happy to have you here. Thank you in advance for your contribution to Ruff.
- [The Basics](#the-basics)
- [Prerequisites](#prerequisites)
- [Development](#development)
- [Project Structure](#project-structure)
- [Example: Adding a new lint rule](#example-adding-a-new-lint-rule)
- [Rule naming convention](#rule-naming-convention)
- [Rule testing: fixtures and snapshots](#rule-testing-fixtures-and-snapshots)
- [Example: Adding a new configuration option](#example-adding-a-new-configuration-option)
- [MkDocs](#mkdocs)
- [Release Process](#release-process)
- [Creating a new release](#creating-a-new-release)
- [Ecosystem CI](#ecosystem-ci)
- [Benchmarking and Profiling](#benchmarking-and-profiling)
- [CPython Benchmark](#cpython-benchmark)
- [Microbenchmarks](#microbenchmarks)
- [Benchmark-driven Development](#benchmark-driven-development)
- [PR Summary](#pr-summary)
- [Tips](#tips)
- [Profiling Projects](#profiling-projects)
- [Linux](#linux)
- [Mac](#mac)
- [`cargo dev`](#cargo-dev)
- [Subsystems](#subsystems)
- [Compilation Pipeline](#compilation-pipeline)
- [Import Categorization](#import-categorization)
- [Project root](#project-root)
- [Package root](#package-root)
- [Import categorization](#import-categorization-1)
## The Basics
Ruff welcomes contributions in the form of pull requests.
Ruff welcomes contributions in the form of Pull Requests.
For small changes (e.g., bug fixes), feel free to submit a PR.
For larger changes (e.g., new lint rules, new functionality, new configuration options), consider
creating an [**issue**](https://github.com/astral-sh/ruff/issues) outlining your proposed change.
You can also join us on [Discord](https://discord.com/invite/astral-sh) to discuss your idea with the
You can also join us on [**Discord**](https://discord.gg/c9MhzV8aU5) to discuss your idea with the
community. We've labeled [beginner-friendly tasks](https://github.com/astral-sh/ruff/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
in the issue tracker, along with [bugs](https://github.com/astral-sh/ruff/issues?q=is%3Aissue+is%3Aopen+label%3Abug)
and [improvements](https://github.com/astral-sh/ruff/issues?q=is%3Aissue+is%3Aopen+label%3Aaccepted)
that are ready for contributions.
If you're looking for a place to start, we recommend implementing a new lint rule (see:
[_Adding a new lint rule_](#example-adding-a-new-lint-rule), which will allow you to learn from and
pattern-match against the examples in the existing codebase. Many lint rules are inspired by
existing Python plugins, which can be used as a reference implementation.
As a concrete example: consider taking on one of the rules from the [`flake8-pyi`](https://github.com/astral-sh/ruff/issues/848)
plugin, and looking to the originating [Python source](https://github.com/PyCQA/flake8-pyi) for
guidance.
If you have suggestions on how we might improve the contributing documentation, [let us know](https://github.com/astral-sh/ruff/discussions/5693)!
### Prerequisites
@@ -69,11 +107,9 @@ RUFF_UPDATE_SCHEMA=1 cargo test # Rust testing and updating ruff.schema.json
pre-commit run --all-files --show-diff-on-failure # Rust and Python formatting, Markdown and Python linting, etc.
```
These checks will run on GitHub Actions when you open your pull request, but running them locally
These checks will run on GitHub Actions when you open your Pull Request, but running them locally
will save you time and expedite the merge process.
If you're using VS Code, you can also install the recommended [rust-analyzer](https://marketplace.visualstudio.com/items?itemName=rust-lang.rust-analyzer) extension to get these checks while editing.
Note that many code changes also require updating the snapshot tests, which is done interactively
after running `cargo test` like so:
@@ -81,14 +117,7 @@ after running `cargo test` like so:
cargo insta review
```
If your pull request relates to a specific lint rule, include the category and rule code in the
title, as in the following examples:
- \[`flake8-bugbear`\] Avoid false positive for usage after `continue` (`B031`)
- \[`flake8-simplify`\] Detect implicit `else` cases in `needless-bool` (`SIM103`)
- \[`pycodestyle`\] Implement `redundant-backslash` (`E502`)
Your pull request will be reviewed by a maintainer, which may involve a few rounds of iteration
Your Pull Request will be reviewed by a maintainer, which may involve a few rounds of iteration
prior to merging.
### Project Structure
@@ -96,8 +125,8 @@ prior to merging.
Ruff is structured as a monorepo with a [flat crate structure](https://matklad.github.io/2021/08/22/large-rust-workspaces.html),
such that all crates are contained in a flat `crates` directory.
The vast majority of the code, including all lint rules, lives in the `ruff_linter` crate (located
at `crates/ruff_linter`). As a contributor, that's the crate that'll be most relevant to you.
The vast majority of the code, including all lint rules, lives in the `ruff` crate (located at
`crates/ruff_linter`). As a contributor, that's the crate that'll be most relevant to you.
At the time of writing, the repository includes the following crates:
@@ -170,14 +199,11 @@ and calling out to lint rule analyzer functions as it goes.
If you need to inspect the AST, you can run `cargo dev print-ast` with a Python file. Grep
for the `Diagnostic::new` invocations to understand how other, similar rules are implemented.
Once you're satisfied with your code, add tests for your rule
(see: [rule testing](#rule-testing-fixtures-and-snapshots)), and regenerate the documentation and
associated assets (like our JSON Schema) with `cargo dev generate-all`.
Once you're satisfied with your code, add tests for your rule. See [rule testing](#rule-testing-fixtures-and-snapshots)
for more details.
Finally, submit a pull request, and include the category, rule name, and rule code in the title, as
in:
> \[`pycodestyle`\] Implement `redundant-backslash` (`E502`)
Finally, regenerate the documentation and other generated assets (like our JSON Schema) with:
`cargo dev generate-all`.
#### Rule naming convention
@@ -251,7 +277,7 @@ These represent, respectively: the schema used to parse the `pyproject.toml` fil
intermediate representation; and the final, internal representation used to power Ruff.
To add a new configuration option, you'll likely want to modify these latter few files (along with
`args.rs`, if appropriate). If you want to pattern-match against an existing example, grep for
`arg.rs`, if appropriate). If you want to pattern-match against an existing example, grep for
`dummy_variable_rgx`, which defines a regular expression to match against acceptable unused
variables (e.g., `_`).
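As a rough illustration of what that option controls (a made-up snippet, not taken from the test fixtures), with the default pattern an unused binding named `_` is tolerated while other names are flagged:
```python
def example() -> int:
    _ = 42       # allowed: `_` matches the default dummy-variable regex
    unused = 42  # flagged by F841: assigned but never used
    return 0
```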
@@ -290,7 +316,7 @@ To preview any changes to the documentation locally:
```
The documentation should then be available locally at
[http://127.0.0.1:8000/ruff/](http://127.0.0.1:8000/ruff/).
[http://127.0.0.1:8000/docs/](http://127.0.0.1:8000/docs/).
## Release Process
@@ -303,64 +329,42 @@ even patch releases may contain [non-backwards-compatible changes](https://semve
### Creating a new release
1. Install `uv`: `curl -LsSf https://astral.sh/uv/install.sh | sh`
We use an experimental in-house tool for managing releases.
1. Run `./scripts/release.sh`; this command will:
- Generate a temporary virtual environment with `rooster`
1. Install `rooster`: `pip install git+https://github.com/zanieb/rooster@main`
1. Run `rooster release`; this command will:
- Generate a changelog entry in `CHANGELOG.md`
- Update versions in `pyproject.toml` and `Cargo.toml`
- Update references to versions in the `README.md` and documentation
- Display contributors for the release
1. The changelog should then be editorialized for consistency
- Often labels will be missing from pull requests, so they will need to be manually organized into the proper section
- Changes should be edited to be user-facing descriptions, avoiding internal details
1. Highlight any breaking changes in `BREAKING_CHANGES.md`
1. Run `cargo check`. This should update the lock file with new versions.
1. Create a pull request with the changelog and version updates
1. Merge the PR
1. Run the [release workflow](https://github.com/astral-sh/ruff/actions/workflows/release.yml) with:
1. Run the [release workflow](https://github.com/astral-sh/ruff/actions/workflows/release.yaml) with:
- The new version number (without starting `v`)
- The commit hash of the merged release pull request on `main`
1. The release workflow will do the following:
1. Build all the assets. If this fails (even though we tested in step 4), we haven't tagged or
uploaded anything, you can restart after pushing a fix. If you just need to rerun the build,
make sure you're [re-running all the failed
jobs](https://docs.github.com/en/actions/managing-workflow-runs/re-running-workflows-and-jobs#re-running-failed-jobs-in-a-workflow) and not just a single failed job.
uploaded anything, you can restart after pushing a fix.
1. Upload to PyPI.
1. Create and push the Git tag (as extracted from `pyproject.toml`). We create the Git tag only
after building the wheels and uploading to PyPI, since we can't delete or modify the tag ([#4468](https://github.com/astral-sh/ruff/issues/4468)).
1. Attach artifacts to draft GitHub release
1. Trigger downstream repositories. This can fail non-catastrophically, as we can run any
downstream jobs manually if needed.
1. Verify the GitHub release:
1. The Changelog should match the content of `CHANGELOG.md`
1. Append the contributors from the `scripts/release.sh` script
1. Publish the GitHub release
1. Open the draft release in the GitHub release section
1. Copy the changelog for the release into the GitHub release
- See previous releases for formatting of section headers
1. Generate the contributor list with `rooster contributors` and add to the release notes
1. If needed, [update the schemastore](https://github.com/astral-sh/ruff/blob/main/scripts/update_schemastore.py).
1. One can determine if an update is needed when
`git diff old-version-tag new-version-tag -- ruff.schema.json` returns a non-empty diff.
1. Once run successfully, you should follow the link in the output to create a PR.
1. If needed, update the [`ruff-lsp`](https://github.com/astral-sh/ruff-lsp) and
[`ruff-vscode`](https://github.com/astral-sh/ruff-vscode) repositories and follow
the release instructions in those repositories. `ruff-lsp` should always be updated
before `ruff-vscode`.
This step is generally not required for a patch release, but should always be done
for a minor release.
1. If needed, update the `ruff-lsp` and `ruff-vscode` repositories.
## Ecosystem CI
@@ -383,7 +387,7 @@ We have several ways of benchmarking and profiling Ruff:
- Microbenchmarks which run the linter or the formatter on individual files. These run on pull requests.
- Profiling the linter on either the microbenchmarks or entire projects
> **Note**
> \[!NOTE\]
> When running benchmarks, ensure that your CPU is otherwise idle (e.g., close any background
> applications, like web browsers). You may also want to switch your CPU to a "performance"
> mode, if it exists, especially when benchmarking short-lived processes.
@@ -397,18 +401,12 @@ which makes it a good target for benchmarking.
git clone --branch 3.10 https://github.com/python/cpython.git crates/ruff_linter/resources/test/cpython
```
Install `hyperfine`:
```shell
cargo install hyperfine
```
To benchmark the release build:
```shell
cargo build --release && hyperfine --warmup 10 \
"./target/release/ruff check ./crates/ruff_linter/resources/test/cpython/ --no-cache -e" \
"./target/release/ruff check ./crates/ruff_linter/resources/test/cpython/ -e"
"./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache -e" \
"./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ -e"
Benchmark 1: ./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache
Time (mean ± σ): 293.8 ms ± 3.2 ms [User: 2384.6 ms, System: 90.3 ms]
@@ -427,7 +425,7 @@ To benchmark against the ecosystem's existing tools:
```shell
hyperfine --ignore-failure --warmup 5 \
"./target/release/ruff check ./crates/ruff_linter/resources/test/cpython/ --no-cache" \
"./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache" \
"pyflakes crates/ruff_linter/resources/test/cpython" \
"autoflake --recursive --expand-star-imports --remove-all-unused-imports --remove-unused-variables --remove-duplicate-keys resources/test/cpython" \
"pycodestyle crates/ruff_linter/resources/test/cpython" \
@@ -473,7 +471,7 @@ To benchmark a subset of rules, e.g. `LineTooLong` and `DocLineTooLong`:
```shell
cargo build --release && hyperfine --warmup 10 \
"./target/release/ruff check ./crates/ruff_linter/resources/test/cpython/ --no-cache -e --select W505,E501"
"./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache -e --select W505,E501"
```
You can run `poetry install` from `./scripts/benchmarks` to create a working environment for the
@@ -638,11 +636,11 @@ Otherwise, follow the instructions from the linux section.
`cargo dev` is a shortcut for `cargo run --package ruff_dev --bin ruff_dev`. You can run some useful
utils with it:
- `cargo dev print-ast <file>`: Print the AST of a python file using Ruff's
[Python parser](https://github.com/astral-sh/ruff/tree/main/crates/ruff_python_parser).
For `if True: pass # comment`, you can see the syntax tree, the byte offsets for start and
stop of each node and also how the `:` token, the comment and whitespace are not represented
anymore:
- `cargo dev print-ast <file>`: Print the AST of a python file using the
[RustPython parser](https://github.com/astral-sh/ruff/tree/main/crates/ruff_python_parser) that is
mainly used in Ruff. For `if True: pass # comment`, you can see the syntax tree, the byte offsets
for start and stop of each node and also how the `:` token, the comment and whitespace are not
represented anymore:
```text
[
@@ -815,8 +813,8 @@ To understand Ruff's import categorization system, we first need to define two c
"project root".)
- "Package root": The top-most directory defining the Python package that includes a given Python
file. To find the package root for a given Python file, traverse up its parent directories until
you reach a parent directory that doesn't contain an `__init__.py` file (and isn't in a subtree
marked as a [namespace package](https://docs.astral.sh/ruff/settings/#namespace-packages)); take the directory
you reach a parent directory that doesn't contain an `__init__.py` file (and isn't marked as
a [namespace package](https://docs.astral.sh/ruff/settings/#namespace-packages)); take the directory
just before that, i.e., the first directory in the package.
For example, given:
@@ -905,11 +903,15 @@ There are three ways in which an import can be categorized as "first-party":
package (e.g., `from foo import bar` or `import foo.bar`), they'll be classified as first-party
automatically. This check is as simple as comparing the first segment of the current file's
module path to the first segment of the import (see the sketch below).
1. **Source roots**: Ruff supports a [`src`](https://docs.astral.sh/ruff/settings/#src) setting, which
1. **Source roots**: Ruff supports a `[src](https://docs.astral.sh/ruff/settings/#src)` setting, which
sets the directories to scan when identifying first-party imports. The algorithm is
straightforward: given an import, like `import foo`, iterate over the directories enumerated in
the `src` setting and, for each directory, check for the existence of a subdirectory `foo` or a
file `foo.py`.
By default, `src` is set to the project root, along with `"src"` subdirectory in the project root.
This ensures that Ruff supports both flat and "src" layouts out of the box.
By default, `src` is set to the project root. In the above example, we'd want to set
`src = ["./src"]` to ensure that we locate `./my_project/src/foo` and thus categorize `import foo`
as first-party in `baz.py`. In practice, for this limited example, setting `src = ["./src"]` is
unnecessary, as all imports within `./my_project/src/foo` would be categorized as first-party via
the same-package heuristic; but if your project contains multiple packages, you'll want to set `src`
explicitly.
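As a rough sketch (not Ruff's actual implementation; names and signatures are hypothetical), the same-package and source-root checks described above amount to something like:
```python
from pathlib import Path

def same_package(module_path: tuple[str, ...], import_path: tuple[str, ...]) -> bool:
    # A file in `foo.bar` importing `foo.baz` is first-party: both start with `foo`.
    return bool(module_path) and bool(import_path) and module_path[0] == import_path[0]

def found_in_src_roots(import_path: tuple[str, ...], src_roots: list[Path]) -> bool:
    # `import foo` is first-party if some source root contains `foo/` or `foo.py`.
    if not import_path:
        return False
    head = import_path[0]
    return any((root / head).is_dir() or (root / f"{head}.py").is_file() for root in src_roots)
```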

Cargo.lock (generated): 1820 changes

File diff suppressed because it is too large.


@@ -4,7 +4,7 @@ resolver = "2"
[workspace.package]
edition = "2021"
rust-version = "1.76"
rust-version = "1.71"
homepage = "https://docs.astral.sh/ruff"
documentation = "https://docs.astral.sh/ruff"
repository = "https://github.com/astral-sh/ruff"
@@ -12,153 +12,108 @@ authors = ["Charlie Marsh <charlie.r.marsh@gmail.com>"]
license = "MIT"
[workspace.dependencies]
ruff = { path = "crates/ruff" }
ruff_cache = { path = "crates/ruff_cache" }
ruff_db = { path = "crates/ruff_db" }
ruff_diagnostics = { path = "crates/ruff_diagnostics" }
ruff_formatter = { path = "crates/ruff_formatter" }
ruff_index = { path = "crates/ruff_index" }
ruff_linter = { path = "crates/ruff_linter" }
ruff_macros = { path = "crates/ruff_macros" }
ruff_notebook = { path = "crates/ruff_notebook" }
ruff_python_ast = { path = "crates/ruff_python_ast" }
ruff_python_codegen = { path = "crates/ruff_python_codegen" }
ruff_python_formatter = { path = "crates/ruff_python_formatter" }
ruff_python_index = { path = "crates/ruff_python_index" }
ruff_python_literal = { path = "crates/ruff_python_literal" }
ruff_python_parser = { path = "crates/ruff_python_parser" }
ruff_python_semantic = { path = "crates/ruff_python_semantic" }
ruff_python_stdlib = { path = "crates/ruff_python_stdlib" }
ruff_python_trivia = { path = "crates/ruff_python_trivia" }
ruff_server = { path = "crates/ruff_server" }
ruff_source_file = { path = "crates/ruff_source_file" }
ruff_text_size = { path = "crates/ruff_text_size" }
ruff_workspace = { path = "crates/ruff_workspace" }
red_knot_python_semantic = { path = "crates/red_knot_python_semantic" }
red_knot_server = { path = "crates/red_knot_server" }
red_knot_workspace = { path = "crates/red_knot_workspace" }
aho-corasick = { version = "1.1.3" }
aho-corasick = { version = "1.1.2" }
annotate-snippets = { version = "0.9.2", features = ["color"] }
anyhow = { version = "1.0.80" }
argfile = { version = "0.2.0" }
anyhow = { version = "1.0.79" }
argfile = { version = "0.1.6" }
assert_cmd = { version = "2.0.13" }
bincode = { version = "1.3.3" }
bitflags = { version = "2.5.0" }
bstr = { version = "1.9.1" }
bitflags = { version = "2.4.1" }
bstr = { version = "1.9.0" }
cachedir = { version = "0.3.1" }
camino = { version = "1.1.7" }
chrono = { version = "0.4.35", default-features = false, features = ["clock"] }
clap = { version = "4.5.3", features = ["derive"] }
clap_complete_command = { version = "0.6.0" }
clearscreen = { version = "3.0.0" }
codspeed-criterion-compat = { version = "2.6.0", default-features = false }
chrono = { version = "0.4.34", default-features = false, features = ["clock"] }
clap = { version = "4.4.18", features = ["derive"] }
clap_complete_command = { version = "0.5.1" }
clearscreen = { version = "2.0.0" }
codspeed-criterion-compat = { version = "2.3.3", default-features = false }
colored = { version = "2.1.0" }
configparser = { version = "3.0.3" }
console_error_panic_hook = { version = "0.1.7" }
console_log = { version = "1.0.0" }
countme = { version = "3.0.1" }
compact_str = "0.8.0"
countme = { version ="3.0.1"}
criterion = { version = "0.5.1", default-features = false }
crossbeam = { version = "0.8.4" }
dashmap = { version = "6.0.1" }
dirs = { version = "5.0.0" }
drop_bomb = { version = "0.1.5" }
env_logger = { version = "0.11.0" }
etcetera = { version = "0.8.0" }
env_logger = { version ="0.10.1"}
fern = { version = "0.6.1" }
filetime = { version = "0.2.23" }
fs-err = { version ="2.11.0"}
glob = { version = "0.3.1" }
globset = { version = "0.4.14" }
hashbrown = "0.14.3"
hexf-parse = { version ="0.2.1"}
ignore = { version = "0.4.22" }
imara-diff = { version = "0.1.5" }
imara-diff ={ version = "0.1.5"}
imperative = { version = "1.0.4" }
indicatif = { version = "0.17.8" }
indoc = { version = "2.0.4" }
insta = { version = "1.35.1" }
insta-cmd = { version = "0.6.0" }
indicatif ={ version = "0.17.8"}
indoc ={ version = "2.0.4"}
insta = { version = "1.34.0", feature = ["filters", "glob"] }
insta-cmd = { version = "0.4.0" }
is-macro = { version = "0.3.5" }
is-wsl = { version = "0.4.0" }
itertools = { version = "0.13.0" }
js-sys = { version = "0.3.69" }
jod-thread = { version = "0.1.2" }
libc = { version = "0.2.153" }
itertools = { version = "0.12.1" }
js-sys = { version = "0.3.67" }
lalrpop-util = { version = "0.20.0", default-features = false }
lexical-parse-float = { version = "0.8.0", features = ["format"] }
libcst = { version = "1.1.0", default-features = false }
log = { version = "0.4.17" }
lsp-server = { version = "0.7.6" }
lsp-types = { git = "https://github.com/astral-sh/lsp-types.git", rev = "3512a9f", features = [
"proposed",
] }
matchit = { version = "0.8.1" }
memchr = { version = "2.7.1" }
mimalloc = { version = "0.1.39" }
mimalloc = { version ="0.1.39"}
natord = { version = "1.0.9" }
notify = { version = "6.1.1" }
once_cell = { version = "1.19.0" }
ordermap = { version = "0.5.0" }
path-absolutize = { version = "3.1.1" }
path-slash = { version = "0.2.1" }
pathdiff = { version = "0.2.1" }
pep440_rs = { version = "0.6.0", features = ["serde"] }
pep440_rs = { version = "0.4.0", features = ["serde"] }
pretty_assertions = "1.3.0"
proc-macro2 = { version = "1.0.79" }
proc-macro2 = { version = "1.0.78" }
pyproject-toml = { version = "0.9.0" }
quick-junit = { version = "0.4.0" }
quick-junit = { version = "0.3.5" }
quote = { version = "1.0.23" }
rand = { version = "0.8.5" }
rayon = { version = "1.10.0" }
rayon = { version = "1.8.1" }
regex = { version = "1.10.2" }
rustc-hash = { version = "2.0.0" }
salsa = { git = "https://github.com/salsa-rs/salsa.git", rev = "f608ff8b24f07706492027199f51132244034f29" }
result-like = { version = "0.5.0" }
rustc-hash = { version = "1.1.0" }
schemars = { version = "0.8.16" }
seahash = { version = "4.1.0" }
serde = { version = "1.0.197", features = ["derive"] }
serde-wasm-bindgen = { version = "0.6.4" }
seahash = { version ="4.1.0"}
semver = { version = "1.0.21" }
serde = { version = "1.0.196", features = ["derive"] }
serde-wasm-bindgen = { version = "0.6.3" }
serde_json = { version = "1.0.113" }
serde_test = { version = "1.0.152" }
serde_with = { version = "3.6.0", default-features = false, features = [
"macros",
] }
serde_with = { version = "3.6.0", default-features = false, features = ["macros"] }
shellexpand = { version = "3.0.0" }
shlex = { version ="1.3.0"}
similar = { version = "2.4.0", features = ["inline"] }
smallvec = { version = "1.13.2" }
smallvec = { version = "1.13.1" }
static_assertions = "1.1.0"
strum = { version = "0.26.0", features = ["strum_macros"] }
strum_macros = { version = "0.26.0" }
syn = { version = "2.0.55" }
tempfile = { version = "3.9.0" }
strum = { version = "0.25.0", features = ["strum_macros"] }
strum_macros = { version = "0.25.3" }
syn = { version = "2.0.40" }
tempfile = { version ="3.9.0"}
test-case = { version = "3.3.1" }
thiserror = { version = "1.0.58" }
tikv-jemallocator = { version = "0.6.0" }
toml = { version = "0.8.11" }
thiserror = { version = "1.0.57" }
tikv-jemallocator = { version ="0.5.0"}
toml = { version = "0.8.9" }
tracing = { version = "0.1.40" }
tracing-flame = { version = "0.2.0" }
tracing-indicatif = { version = "0.3.6" }
tracing-subscriber = { version = "0.3.18", default-features = false, features = ["env-filter", "fmt"] }
tracing-tree = { version = "0.4.0" }
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
typed-arena = { version = "2.0.2" }
unic-ucd-category = { version = "0.9" }
unic-ucd-category = { version ="0.9"}
unicode-ident = { version = "1.0.12" }
unicode-width = { version = "0.1.11" }
unicode_names2 = { version = "1.2.2" }
unicode-normalization = { version = "0.1.23" }
ureq = { version = "2.9.6" }
unicode_names2 = { version = "1.2.1" }
ureq = { version = "2.9.1" }
url = { version = "2.5.0" }
uuid = { version = "1.6.1", features = [
"v4",
"fast-rng",
"macro-diagnostics",
"js",
] }
uuid = { version = "1.6.1", features = ["v4", "fast-rng", "macro-diagnostics", "js"] }
walkdir = { version = "2.3.2" }
wasm-bindgen = { version = "0.2.92" }
wasm-bindgen-test = { version = "0.3.42" }
wasm-bindgen = { version = "0.2.84" }
wasm-bindgen-test = { version = "0.3.40" }
wild = { version = "2" }
zip = { version = "0.6.6", default-features = false }
[workspace.lints.rust]
unsafe_code = "warn"
unreachable_pub = "warn"
unexpected_cfgs = { level = "warn", check-cfg = ["cfg(fuzzing)", "cfg(codspeed)"] }
[workspace.lints.clippy]
pedantic = { level = "warn", priority = -2 }
@@ -167,7 +122,6 @@ char_lit_as_u8 = "allow"
collapsible_else_if = "allow"
collapsible_if = "allow"
implicit_hasher = "allow"
map_unwrap_or = "allow"
match_same_arms = "allow"
missing_errors_doc = "allow"
missing_panics_doc = "allow"
@@ -222,62 +176,3 @@ opt-level = 1
[profile.profiling]
inherits = "release"
debug = 1
# The profile that 'cargo dist' will build with.
[profile.dist]
inherits = "release"
# Config for 'cargo dist'
[workspace.metadata.dist]
# The preferred cargo-dist version to use in CI (Cargo.toml SemVer syntax)
cargo-dist-version = "0.18.0"
# CI backends to support
ci = ["github"]
# The installers to generate for each app
installers = ["shell", "powershell"]
# The archive format to use for windows builds (defaults .zip)
windows-archive = ".zip"
# The archive format to use for non-windows builds (defaults .tar.xz)
unix-archive = ".tar.gz"
# Target platforms to build apps for (Rust target-triple syntax)
targets = [
"aarch64-apple-darwin",
"aarch64-pc-windows-msvc",
"aarch64-unknown-linux-gnu",
"aarch64-unknown-linux-musl",
"arm-unknown-linux-musleabihf",
"armv7-unknown-linux-gnueabihf",
"armv7-unknown-linux-musleabihf",
"i686-pc-windows-msvc",
"i686-unknown-linux-gnu",
"i686-unknown-linux-musl",
"powerpc64-unknown-linux-gnu",
"powerpc64le-unknown-linux-gnu",
"s390x-unknown-linux-gnu",
"x86_64-apple-darwin",
"x86_64-pc-windows-msvc",
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
]
# Whether to auto-include files like READMEs, LICENSEs, and CHANGELOGs (default true)
auto-includes = false
# Whether cargo-dist should create a GitHub Release or use an existing draft
create-release = true
# Publish jobs to run in CI
pr-run-mode = "skip"
# Whether CI should trigger releases with dispatches instead of tag pushes
dispatch-releases = true
# The stage during which the GitHub Release should be created
github-release = "announce"
# Whether CI should include auto-generated code to build local artifacts
build-local-artifacts = false
# Local artifacts jobs to run in CI
local-artifacts-jobs = ["./build-binaries", "./build-docker"]
# Publish jobs to run in CI
publish-jobs = ["./publish-pypi", "./publish-wasm"]
# Announcement jobs to run in CI
post-announce-jobs = ["./notify-dependents", "./publish-docs", "./publish-playground"]
# Custom permissions for GitHub Jobs
github-custom-job-permissions = { "build-docker" = { packages = "write", contents = "read" }, "publish-wasm" = { contents = "read", id-token = "write", packages = "write" } }
# Whether to install an updater program
install-updater = false

LICENSE: 25 changes

@@ -1371,28 +1371,3 @@ are:
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
- pydoclint, licensed as follows:
"""
MIT License
Copyright (c) 2023 jsh9
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""


@@ -4,12 +4,11 @@
[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)
[![image](https://img.shields.io/pypi/v/ruff.svg)](https://pypi.python.org/pypi/ruff)
[![image](https://img.shields.io/pypi/l/ruff.svg)](https://github.com/astral-sh/ruff/blob/main/LICENSE)
[![image](https://img.shields.io/pypi/l/ruff.svg)](https://pypi.python.org/pypi/ruff)
[![image](https://img.shields.io/pypi/pyversions/ruff.svg)](https://pypi.python.org/pypi/ruff)
[![Actions status](https://github.com/astral-sh/ruff/workflows/CI/badge.svg)](https://github.com/astral-sh/ruff/actions)
[![Discord](https://img.shields.io/badge/Discord-%235865F2.svg?logo=discord&logoColor=white)](https://discord.com/invite/astral-sh)
[**Docs**](https://docs.astral.sh/ruff/) | [**Playground**](https://play.ruff.rs/)
[**Discord**](https://discord.gg/c9MhzV8aU5) | [**Docs**](https://docs.astral.sh/ruff/) | [**Playground**](https://play.ruff.rs/)
An extremely fast Python linter and code formatter, written in Rust.
@@ -28,15 +27,15 @@ An extremely fast Python linter and code formatter, written in Rust.
- ⚡️ 10-100x faster than existing linters (like Flake8) and formatters (like Black)
- 🐍 Installable via `pip`
- 🛠️ `pyproject.toml` support
- 🤝 Python 3.13 compatibility
- ⚖️ Drop-in parity with [Flake8](https://docs.astral.sh/ruff/faq/#how-does-ruffs-linter-compare-to-flake8), isort, and [Black](https://docs.astral.sh/ruff/faq/#how-does-ruffs-formatter-compare-to-black)
- 🤝 Python 3.12 compatibility
- ⚖️ Drop-in parity with [Flake8](https://docs.astral.sh/ruff/faq/#how-does-ruff-compare-to-flake8), isort, and Black
- 📦 Built-in caching, to avoid re-analyzing unchanged files
- 🔧 Fix support, for automatic error correction (e.g., automatically remove unused imports)
- 📏 Over [800 built-in rules](https://docs.astral.sh/ruff/rules/), with native re-implementations
- 📏 Over [700 built-in rules](https://docs.astral.sh/ruff/rules/), with native re-implementations
of popular Flake8 plugins, like flake8-bugbear
- ⌨️ First-party [editor integrations](https://docs.astral.sh/ruff/integrations/) for
[VS Code](https://github.com/astral-sh/ruff-vscode) and [more](https://docs.astral.sh/ruff/editors/setup)
- 🌎 Monorepo-friendly, with [hierarchical and cascading configuration](https://docs.astral.sh/ruff/configuration/#config-file-discovery)
[VS Code](https://github.com/astral-sh/ruff-vscode) and [more](https://github.com/astral-sh/ruff-lsp)
- 🌎 Monorepo-friendly, with [hierarchical and cascading configuration](https://docs.astral.sh/ruff/configuration/#pyprojecttoml-discovery)
Ruff aims to be orders of magnitude faster than alternative tools while integrating more
functionality behind a single, common interface.
@@ -50,7 +49,6 @@ times faster than any individual tool.
Ruff is extremely actively developed and used in major open-source projects like:
- [Apache Airflow](https://github.com/apache/airflow)
- [Apache Superset](https://github.com/apache/superset)
- [FastAPI](https://github.com/tiangolo/fastapi)
- [Hugging Face](https://github.com/huggingface/transformers)
- [Pandas](https://github.com/pandas-dev/pandas)
@@ -119,25 +117,7 @@ For more, see the [documentation](https://docs.astral.sh/ruff/).
Ruff is available as [`ruff`](https://pypi.org/project/ruff/) on PyPI:
```shell
# With pip.
pip install ruff
# With pipx.
pipx install ruff
```
Starting with version `0.5.0`, Ruff can be installed with our standalone installers:
```shell
# On macOS and Linux.
curl -LsSf https://astral.sh/ruff/install.sh | sh
# On Windows.
powershell -c "irm https://astral.sh/ruff/install.ps1 | iex"
# For a specific version.
curl -LsSf https://astral.sh/ruff/0.6.2/install.sh | sh
powershell -c "irm https://astral.sh/ruff/0.6.2/install.ps1 | iex"
```
You can also install Ruff via [Homebrew](https://formulae.brew.sh/formula/ruff), [Conda](https://anaconda.org/conda-forge/ruff),
@@ -148,7 +128,7 @@ and with [a variety of other package managers](https://docs.astral.sh/ruff/insta
To run Ruff as a linter, try any of the following:
```shell
ruff check # Lint all files in the current directory (and any subdirectories).
ruff check . # Lint all files in the current directory (and any subdirectories).
ruff check path/to/code/ # Lint all files in `/path/to/code` (and any subdirectories).
ruff check path/to/code/*.py # Lint all `.py` files in `/path/to/code`.
ruff check path/to/code/to/file.py # Lint `file.py`.
@@ -158,7 +138,7 @@ ruff check @arguments.txt # Lint using an input file, treating its con
Or, to run Ruff as a formatter:
```shell
ruff format # Format all files in the current directory (and any subdirectories).
ruff format . # Format all files in the current directory (and any subdirectories).
ruff format path/to/code/ # Format all files in `/path/to/code` (and any subdirectories).
ruff format path/to/code/*.py # Format all `.py` files in `/path/to/code`.
ruff format path/to/code/to/file.py # Format `file.py`.
@@ -170,7 +150,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com/) hook via [`ruff
```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.6.2
rev: v0.2.1
hooks:
# Run the linter.
- id: ruff
@@ -179,7 +159,8 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com/) hook via [`ruff
- id: ruff-format
```
Ruff can also be used as a [VS Code extension](https://github.com/astral-sh/ruff-vscode) or with [various other editors](https://docs.astral.sh/ruff/editors/setup).
Ruff can also be used as a [VS Code extension](https://github.com/astral-sh/ruff-vscode) or
alongside any other editor through the [Ruff LSP](https://github.com/astral-sh/ruff-lsp).
Ruff can also be used as a [GitHub Action](https://github.com/features/actions) via
[`ruff-action`](https://github.com/chartboost/ruff-action):
@@ -191,7 +172,7 @@ jobs:
ruff:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- uses: chartboost/ruff-action@v1
```
@@ -201,9 +182,10 @@ Ruff can be configured through a `pyproject.toml`, `ruff.toml`, or `.ruff.toml`
[_Configuration_](https://docs.astral.sh/ruff/configuration/), or [_Settings_](https://docs.astral.sh/ruff/settings/)
for a complete list of all configuration options).
If left unspecified, Ruff's default configuration is equivalent to the following `ruff.toml` file:
If left unspecified, Ruff's default configuration is equivalent to:
```toml
[tool.ruff]
# Exclude a variety of commonly ignored directories.
exclude = [
".bzr",
@@ -241,7 +223,7 @@ indent-width = 4
# Assume Python 3.8
target-version = "py38"
[lint]
[tool.ruff.lint]
# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
select = ["E4", "E7", "E9", "F"]
ignore = []
@@ -253,7 +235,7 @@ unfixable = []
# Allow unused variables when underscore-prefixed.
dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
[format]
[tool.ruff.format]
# Like Black, use double quotes for strings.
quote-style = "double"
@@ -267,27 +249,13 @@ skip-magic-trailing-comma = false
line-ending = "auto"
```
Note that, in a `pyproject.toml`, each section header should be prefixed with `tool.ruff`. For
example, `[lint]` should be replaced with `[tool.ruff.lint]`.
Some configuration options can be provided via dedicated command-line arguments, such as those
related to rule enablement and disablement, file discovery, and logging level:
Some configuration options can be provided via the command-line, such as those related to
rule enablement and disablement, file discovery, and logging level:
```shell
ruff check --select F401 --select F403 --quiet
ruff check path/to/code/ --select F401 --select F403 --quiet
```
The remaining configuration options can be provided through a catch-all `--config` argument:
```shell
ruff check --config "lint.per-file-ignores = {'some_file.py' = ['F841']}"
```
To opt in to the latest lint rules, formatter style changes, interface updates, and more, enable
[preview mode](https://docs.astral.sh/ruff/rules/) by setting `preview = true` in your configuration
file or passing `--preview` on the command line. Preview mode enables a collection of unstable
features that may change prior to stabilization.
See `ruff help` for more on Ruff's top-level commands, or `ruff help check` and `ruff help format`
for more on the linting and formatting commands, respectively.
@@ -295,7 +263,7 @@ for more on the linting and formatting commands, respectively.
<!-- Begin section: Rules -->
**Ruff supports over 800 lint rules**, many of which are inspired by popular tools like Flake8,
**Ruff supports over 700 lint rules**, many of which are inspired by popular tools like Flake8,
isort, pyupgrade, and others. Regardless of the rule's origin, Ruff re-implements every rule in
Rust as a first-party feature.
@@ -351,6 +319,7 @@ quality tools, including:
- [flake8-super](https://pypi.org/project/flake8-super/)
- [flake8-tidy-imports](https://pypi.org/project/flake8-tidy-imports/)
- [flake8-todos](https://pypi.org/project/flake8-todos/)
- [flake8-trio](https://pypi.org/project/flake8-trio/)
- [flake8-type-checking](https://pypi.org/project/flake8-type-checking/)
- [flake8-use-pathlib](https://pypi.org/project/flake8-use-pathlib/)
- [flynt](https://pypi.org/project/flynt/) ([#2102](https://github.com/astral-sh/ruff/issues/2102))
@@ -372,14 +341,14 @@ For a complete enumeration of the supported rules, see [_Rules_](https://docs.as
Contributions are welcome and highly appreciated. To get started, check out the
[**contributing guidelines**](https://docs.astral.sh/ruff/contributing/).
You can also join us on [**Discord**](https://discord.com/invite/astral-sh).
You can also join us on [**Discord**](https://discord.gg/c9MhzV8aU5).
## Support
Having trouble? Check out the existing issues on [**GitHub**](https://github.com/astral-sh/ruff/issues),
or feel free to [**open a new one**](https://github.com/astral-sh/ruff/issues/new).
You can also ask for help on [**Discord**](https://discord.com/invite/astral-sh).
You can also ask for help on [**Discord**](https://discord.gg/c9MhzV8aU5).
## Acknowledgements
@@ -409,7 +378,6 @@ Ruff is released under the MIT license.
Ruff is used by a number of major open-source projects and companies, including:
- [Albumentations](https://github.com/albumentations-team/albumentations)
- Amazon ([AWS SAM](https://github.com/aws/serverless-application-model))
- Anthropic ([Python SDK](https://github.com/anthropics/anthropic-sdk-python))
- [Apache Airflow](https://github.com/apache/airflow)
@@ -423,9 +391,7 @@ Ruff is used by a number of major open-source projects and companies, including:
- [Dagger](https://github.com/dagger/dagger)
- [Dagster](https://github.com/dagster-io/dagster)
- Databricks ([MLflow](https://github.com/mlflow/mlflow))
- [Dify](https://github.com/langgenius/dify)
- [FastAPI](https://github.com/tiangolo/fastapi)
- [Godot](https://github.com/godotengine/godot)
- [Gradio](https://github.com/gradio-app/gradio)
- [Great Expectations](https://github.com/great-expectations/great_expectations)
- [HTTPX](https://github.com/encode/httpx)
@@ -434,12 +400,10 @@ Ruff is used by a number of major open-source projects and companies, including:
- Hugging Face ([Transformers](https://github.com/huggingface/transformers),
[Datasets](https://github.com/huggingface/datasets),
[Diffusers](https://github.com/huggingface/diffusers))
- IBM ([Qiskit](https://github.com/Qiskit/qiskit))
- ING Bank ([popmon](https://github.com/ing-bank/popmon), [probatus](https://github.com/ing-bank/probatus))
- [Ibis](https://github.com/ibis-project/ibis)
- [ivy](https://github.com/unifyai/ivy)
- [Jupyter](https://github.com/jupyter-server/jupyter_server)
- [Kraken Tech](https://kraken.tech/)
- [LangChain](https://github.com/hwchase17/langchain)
- [Litestar](https://litestar.dev/)
- [LlamaIndex](https://github.com/jerryjliu/llama_index)
@@ -449,18 +413,15 @@ Ruff is used by a number of major open-source projects and companies, including:
- Microsoft ([Semantic Kernel](https://github.com/microsoft/semantic-kernel),
[ONNX Runtime](https://github.com/microsoft/onnxruntime),
[LightGBM](https://github.com/microsoft/LightGBM))
- Modern Treasury ([Python SDK](https://github.com/Modern-Treasury/modern-treasury-python))
- Modern Treasury ([Python SDK](https://github.com/Modern-Treasury/modern-treasury-python-sdk))
- Mozilla ([Firefox](https://github.com/mozilla/gecko-dev))
- [Mypy](https://github.com/python/mypy)
- [Nautobot](https://github.com/nautobot/nautobot)
- Netflix ([Dispatch](https://github.com/Netflix/dispatch))
- [Neon](https://github.com/neondatabase/neon)
- [Nokia](https://nokia.com/)
- [NoneBot](https://github.com/nonebot/nonebot2)
- [NumPyro](https://github.com/pyro-ppl/numpyro)
- [ONNX](https://github.com/onnx/onnx)
- [OpenBB](https://github.com/OpenBB-finance/OpenBBTerminal)
- [Open Wine Components](https://github.com/Open-Wine-Components/umu-launcher)
- [PDM](https://github.com/pdm-project/pdm)
- [PaddlePaddle](https://github.com/PaddlePaddle/Paddle)
- [Pandas](https://github.com/pandas-dev/pandas)
@@ -488,7 +449,6 @@ Ruff is used by a number of major open-source projects and companies, including:
- [Sphinx](https://github.com/sphinx-doc/sphinx)
- [Stable Baselines3](https://github.com/DLR-RM/stable-baselines3)
- [Starlette](https://github.com/encode/starlette)
- [Streamlit](https://github.com/streamlit/streamlit)
- [The Algorithms](https://github.com/TheAlgorithms/Python)
- [Vega-Altair](https://github.com/altair-viz/altair)
- WordPress ([Openverse](https://github.com/WordPress/openverse))
@@ -526,7 +486,7 @@ If you're using Ruff, consider adding the Ruff badge to your project's `README.m
## License
This repository is licensed under the [MIT License](https://github.com/astral-sh/ruff/blob/main/LICENSE)
MIT
<div align="center">
<a target="_blank" href="https://astral.sh" style="background:none">


@@ -1,21 +1,11 @@
[files]
# https://github.com/crate-ci/typos/issues/868
extend-exclude = ["crates/red_knot_python_semantic/vendor/**/*", "**/resources/**/*", "**/snapshots/**/*"]
extend-exclude = ["**/resources/**/*", "**/snapshots/**/*"]
[default.extend-words]
"arange" = "arange" # e.g. `numpy.arange`
hel = "hel"
whos = "whos"
spawnve = "spawnve"
ned = "ned"
pn = "pn" # `import panel as pn` is a thing
poit = "poit"
BA = "BA" # acronym for "Bad Allowed", used in testing.
jod = "jod" # e.g., `jod-thread`
[default]
extend-ignore-re = [
# Line ignore with trailing "spellchecker:disable-line"
"(?Rm)^.*#\\s*spellchecker:disable-line$",
"LICENSEs",
]


@@ -1,21 +1,7 @@
doc-valid-idents = [
"..",
"CodeQL",
"FastAPI",
"IPython",
"LangChain",
"LibCST",
"McCabe",
"NumPy",
"SCREAMING_SNAKE_CASE",
"SQLAlchemy",
"StackOverflow",
"PyCharm",
]
ignore-interior-mutability = [
# Interned is read-only. The wrapped `Rc` never gets updated.
"ruff_formatter::format_element::Interned",
# The expression is read-only.
"ruff_python_ast::hashable::HashableExpr",
"CodeQL",
"IPython",
"NumPy",
"..",
]


@@ -1,40 +0,0 @@
[package]
name = "red_knot"
version = "0.0.0"
edition.workspace = true
rust-version.workspace = true
homepage.workspace = true
documentation.workspace = true
repository.workspace = true
authors.workspace = true
license.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
red_knot_python_semantic = { workspace = true }
red_knot_workspace = { workspace = true }
red_knot_server = { workspace = true }
ruff_db = { workspace = true, features = ["os", "cache"] }
anyhow = { workspace = true }
chrono = { workspace = true }
clap = { workspace = true, features = ["wrap_help"] }
colored = { workspace = true }
countme = { workspace = true, features = ["enable"] }
crossbeam = { workspace = true }
ctrlc = { version = "3.4.4" }
rayon = { workspace = true }
salsa = { workspace = true }
tracing = { workspace = true, features = ["release_max_level_debug"] }
tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] }
tracing-flame = { workspace = true }
tracing-tree = { workspace = true }
[dev-dependencies]
filetime = { workspace = true }
tempfile = { workspace = true }
[lints]
workspace = true

Binary file not shown (image removed; was 40 KiB).


@@ -1,123 +0,0 @@
# Tracing
Traces are a useful tool to narrow down the location of a bug or, at least, to understand why the compiler is doing a particular thing.
Note that tracing messages with severity `debug` or greater are user-facing. They should be phrased accordingly.
Tracing spans are only shown when using `-vvv`.
## Verbosity levels
The CLI supports different verbosity levels.
- default: Only show errors and warnings.
- `-v` activates `info!`: Shows generally useful information such as paths of configuration files and the detected platform. It isn't a lot of messages; it's the kind of output you'd enable in CI by default (`cargo build`, for example, shows you which packages are fresh at this level).
- `-vv` activates `debug!` and timestamps: This should be enough information to get to the bottom of bug reports. When you're processing many packages or files, you'll get pages and pages of output, but each line is linked to a specific action or state change.
- `-vvv` activates `trace!` (only in debug builds) and shows tracing spans: At this level, you're logging everything. Most of it is wasted and it's really slow; we dump, e.g., the entire resolution graph. It's only useful to developers, and you almost certainly want to use `RED_KNOT_LOG` to filter it down to the area you're investigating.
## `RED_KNOT_LOG`
By default, the CLI shows messages from the `ruff` and `red_knot` crates. Tracing messages from other crates are not shown.
The `RED_KNOT_LOG` environment variable allows you to customize which messages are shown by specifying one
or more [filter directives](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives).
### Examples
#### Show all debug messages
Shows debug messages from all crates.
```bash
RED_KNOT_LOG=debug
```
#### Show salsa query execution messages
Show the salsa `execute: my_query` messages in addition to all red knot messages.
```bash
RED_KNOT_LOG=ruff=trace,red_knot=trace,salsa=info
```
#### Show typing traces
Only show traces for the `red_knot_python_semantic::types` module.
```bash
RED_KNOT_LOG="red_knot_python_semantic::types"
```
Note: Ensure that you use `-vvv` to see tracing spans.
#### Show messages for a single file
Shows all messages that are inside of a span for a specific file.
```bash
RED_KNOT_LOG=red_knot[{file=/home/micha/astral/test/x.py}]=trace
```
**Note**: Tracing still shows all spans because tracing can't know, at the time of entering a span,
whether one of its children has the file `x.py`.
**Note**: Salsa currently logs entire memoized values, in our case the source text and the parsed AST.
This very quickly leads to extremely long outputs.
## Tracing and Salsa
Be mindful about using `tracing` in Salsa queries, especially with `warn` or `error`, because it isn't guaranteed
that the query will re-execute after restoring from a persistent cache, in which case the user won't see the message.
For example, don't use `tracing` to show the user a message when generating a lint violation fails,
because the message would only be shown when linting the file for the first time, but not on subsequent analysis
runs or when restoring from a persistent cache. This can be confusing for users because they
don't understand why a specific lint violation isn't raised. Instead, change your
query to return the failure as part of the query's result or use a Salsa accumulator, as sketched below.
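Below is a minimal sketch of this pattern. The `lint_file` query, the fallible `compute_lints` helper, and the `Diagnostic` type are hypothetical names chosen for illustration; none of them are the actual red-knot API.
```rust
/// Illustrative diagnostic type (not the real red-knot type).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Diagnostic {
    pub message: String,
}

/// Instead of calling `tracing::warn!` inside the query when linting fails,
/// the failure becomes part of the memoized result, so it is reported on
/// every run, including runs restored from a persistent cache.
#[salsa::tracked]
fn lint_file(db: &dyn Db, file: File) -> Vec<Diagnostic> {
    // `compute_lints` is a hypothetical fallible helper.
    match compute_lints(db, file) {
        Ok(diagnostics) => diagnostics,
        Err(error) => vec![Diagnostic {
            message: format!("failed to lint file: {error}"),
        }],
    }
}
```
Because the diagnostics are part of the query result, Salsa memoizes them together with the rest of the output, and the caller decides when and how to surface them.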
## Tracing in tests
You can use `ruff_db::testing::setup_logging` or `ruff_db::testing::setup_logging_with_filter` to set up logging in tests.
```rust
use ruff_db::testing::setup_logging;
#[test]
fn test() {
let _logging = setup_logging();
tracing::info!("This message will be printed to stderr");
}
```
Note: Most test runners capture stderr and only show its output when a test fails.
Note also that `setup_logging` only sets up logging for the current thread because [`set_global_default`](https://docs.rs/tracing/latest/tracing/subscriber/fn.set_global_default.html) can only be
called **once**.
## Release builds
`trace!` events are removed in release builds.
## Profiling
Red Knot generates a folded stack trace to the current directory named `tracing.folded` when setting the environment variable `RED_KNOT_LOG_PROFILE` to `1` or `true`.
```bash
RED_KNOT_LOG_PROFILE=1 red_knot -- --current-directory=../test -vvv
```
You can convert the textual representation into a visual one using `inferno`.
```shell
cargo install inferno
```
```shell
# flamegraph
cat tracing.folded | inferno-flamegraph > tracing-flamegraph.svg
# flamechart
cat tracing.folded | inferno-flamegraph --flamechart > tracing-flamechart.svg
```
![Example flamegraph](./tracing-flamegraph.png)
See [`tracing-flame`](https://crates.io/crates/tracing-flame) for more details.

View File

@@ -1,254 +0,0 @@
//! Sets up logging for Red Knot
use anyhow::Context;
use colored::Colorize;
use std::fmt;
use std::fs::File;
use std::io::BufWriter;
use tracing::{Event, Subscriber};
use tracing_subscriber::filter::LevelFilter;
use tracing_subscriber::fmt::format::Writer;
use tracing_subscriber::fmt::{FmtContext, FormatEvent, FormatFields};
use tracing_subscriber::registry::LookupSpan;
use tracing_subscriber::EnvFilter;
/// Logging flags to `#[command(flatten)]` into your CLI
#[derive(clap::Args, Debug, Clone, Default)]
#[command(about = None, long_about = None)]
pub(crate) struct Verbosity {
#[arg(
long,
short = 'v',
help = "Use verbose output (or `-vv` and `-vvv` for more verbose output)",
action = clap::ArgAction::Count,
global = true,
)]
verbose: u8,
}
impl Verbosity {
/// Returns the verbosity level based on the number of `-v` flags.
///
/// Returns `None` if the user did not specify any verbosity flags.
pub(crate) fn level(&self) -> VerbosityLevel {
match self.verbose {
0 => VerbosityLevel::Default,
1 => VerbosityLevel::Verbose,
2 => VerbosityLevel::ExtraVerbose,
_ => VerbosityLevel::Trace,
}
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub(crate) enum VerbosityLevel {
/// Default output level. Only shows Ruff and Red Knot events up to the [`WARN`](tracing::Level::WARN).
Default,
/// Enables verbose output. Emits Ruff and Red Knot events up to the [`INFO`](tracing::Level::INFO).
/// Corresponds to `-v`.
Verbose,
/// Enables a more verbose tracing format and emits Ruff and Red Knot events up to [`DEBUG`](tracing::Level::DEBUG).
/// Corresponds to `-vv`
ExtraVerbose,
/// Enables all tracing events and uses a tree-like output format. Corresponds to `-vvv`.
Trace,
}
impl VerbosityLevel {
const fn level_filter(self) -> LevelFilter {
match self {
VerbosityLevel::Default => LevelFilter::WARN,
VerbosityLevel::Verbose => LevelFilter::INFO,
VerbosityLevel::ExtraVerbose => LevelFilter::DEBUG,
VerbosityLevel::Trace => LevelFilter::TRACE,
}
}
pub(crate) const fn is_trace(self) -> bool {
matches!(self, VerbosityLevel::Trace)
}
pub(crate) const fn is_extra_verbose(self) -> bool {
matches!(self, VerbosityLevel::ExtraVerbose)
}
}
pub(crate) fn setup_tracing(level: VerbosityLevel) -> anyhow::Result<TracingGuard> {
use tracing_subscriber::prelude::*;
// The `RED_KNOT_LOG` environment variable overrides the default log level.
let filter = if let Ok(log_env_variable) = std::env::var("RED_KNOT_LOG") {
EnvFilter::builder()
.parse(log_env_variable)
.context("Failed to parse directives specified in RED_KNOT_LOG environment variable.")?
} else {
match level {
VerbosityLevel::Default => {
// Show warning traces
EnvFilter::default().add_directive(LevelFilter::WARN.into())
}
level => {
let level_filter = level.level_filter();
// Show info|debug|trace events, but allow `RED_KNOT_LOG` to override
let filter = EnvFilter::default().add_directive(
format!("red_knot={level_filter}")
.parse()
.expect("Hardcoded directive to be valid"),
);
filter.add_directive(
format!("ruff={level_filter}")
.parse()
.expect("Hardcoded directive to be valid"),
)
}
}
};
let (profiling_layer, guard) = setup_profile();
let registry = tracing_subscriber::registry()
.with(filter)
.with(profiling_layer);
if level.is_trace() {
let subscriber = registry.with(
tracing_tree::HierarchicalLayer::default()
.with_indent_lines(true)
.with_indent_amount(2)
.with_bracketed_fields(true)
.with_thread_ids(true)
.with_targets(true)
.with_writer(std::io::stderr)
.with_timer(tracing_tree::time::Uptime::default()),
);
subscriber.init();
} else {
let subscriber = registry.with(
tracing_subscriber::fmt::layer()
.event_format(RedKnotFormat {
display_level: true,
display_timestamp: level.is_extra_verbose(),
show_spans: false,
})
.with_writer(std::io::stderr),
);
subscriber.init();
}
Ok(TracingGuard {
_flame_guard: guard,
})
}
#[allow(clippy::type_complexity)]
fn setup_profile<S>() -> (
Option<tracing_flame::FlameLayer<S, BufWriter<File>>>,
Option<tracing_flame::FlushGuard<BufWriter<File>>>,
)
where
S: Subscriber + for<'span> LookupSpan<'span>,
{
if let Ok("1" | "true") = std::env::var("RED_KNOT_LOG_PROFILE").as_deref() {
let (layer, guard) = tracing_flame::FlameLayer::with_file("tracing.folded")
.expect("Flame layer to be created");
(Some(layer), Some(guard))
} else {
(None, None)
}
}
pub(crate) struct TracingGuard {
_flame_guard: Option<tracing_flame::FlushGuard<BufWriter<File>>>,
}
struct RedKnotFormat {
display_timestamp: bool,
display_level: bool,
show_spans: bool,
}
/// See <https://docs.rs/tracing-subscriber/0.3.18/src/tracing_subscriber/fmt/format/mod.rs.html#1026-1156>
impl<S, N> FormatEvent<S, N> for RedKnotFormat
where
S: Subscriber + for<'a> LookupSpan<'a>,
N: for<'a> FormatFields<'a> + 'static,
{
fn format_event(
&self,
ctx: &FmtContext<'_, S, N>,
mut writer: Writer<'_>,
event: &Event<'_>,
) -> fmt::Result {
let meta = event.metadata();
let ansi = writer.has_ansi_escapes();
if self.display_timestamp {
let timestamp = chrono::Local::now()
.format("%Y-%m-%d %H:%M:%S.%f")
.to_string();
if ansi {
write!(writer, "{} ", timestamp.dimmed())?;
} else {
write!(
writer,
"{} ",
chrono::Local::now().format("%Y-%m-%d %H:%M:%S.%f")
)?;
}
}
if self.display_level {
let level = meta.level();
// Same colors as tracing
if ansi {
let formatted_level = level.to_string();
match *level {
tracing::Level::TRACE => {
write!(writer, "{} ", formatted_level.purple().bold())?;
}
tracing::Level::DEBUG => write!(writer, "{} ", formatted_level.blue().bold())?,
tracing::Level::INFO => write!(writer, "{} ", formatted_level.green().bold())?,
tracing::Level::WARN => write!(writer, "{} ", formatted_level.yellow().bold())?,
tracing::Level::ERROR => write!(writer, "{} ", level.to_string().red().bold())?,
}
} else {
write!(writer, "{level} ")?;
}
}
if self.show_spans {
let span = event.parent();
let mut seen = false;
let span = span
.and_then(|id| ctx.span(id))
.or_else(|| ctx.lookup_current());
let scope = span.into_iter().flat_map(|span| span.scope().from_root());
for span in scope {
seen = true;
if ansi {
write!(writer, "{}:", span.metadata().name().bold())?;
} else {
write!(writer, "{}:", span.metadata().name())?;
}
}
if seen {
writer.write_char(' ')?;
}
}
ctx.field_format().format_fields(writer.by_ref(), event)?;
writeln!(writer)
}
}

View File

@@ -1,384 +0,0 @@
use std::process::{ExitCode, Termination};
use std::sync::Mutex;
use anyhow::{anyhow, Context};
use clap::Parser;
use colored::Colorize;
use crossbeam::channel as crossbeam_channel;
use salsa::plumbing::ZalsaDatabase;
use red_knot_python_semantic::SitePackages;
use red_knot_server::run_server;
use red_knot_workspace::db::RootDatabase;
use red_knot_workspace::watch;
use red_knot_workspace::watch::WorkspaceWatcher;
use red_knot_workspace::workspace::settings::Configuration;
use red_knot_workspace::workspace::WorkspaceMetadata;
use ruff_db::system::{OsSystem, System, SystemPath, SystemPathBuf};
use target_version::TargetVersion;
use crate::logging::{setup_tracing, Verbosity};
mod logging;
mod target_version;
mod verbosity;
#[derive(Debug, Parser)]
#[command(
author,
name = "red-knot",
about = "An extremely fast Python type checker."
)]
#[command(version)]
struct Args {
#[command(subcommand)]
pub(crate) command: Option<Command>,
#[arg(
long,
help = "Changes the current working directory.",
long_help = "Changes the current working directory before any specified operations. This affects the workspace and configuration discovery.",
value_name = "PATH"
)]
current_directory: Option<SystemPathBuf>,
#[arg(
long,
help = "Path to the virtual environment the project uses",
long_help = "\
Path to the virtual environment the project uses. \
If provided, red-knot will use the `site-packages` directory of this virtual environment \
to resolve type information for the project's third-party dependencies.",
value_name = "PATH"
)]
venv_path: Option<SystemPathBuf>,
#[arg(
long,
value_name = "DIRECTORY",
help = "Custom directory to use for stdlib typeshed stubs"
)]
custom_typeshed_dir: Option<SystemPathBuf>,
#[arg(
long,
value_name = "PATH",
help = "Additional path to use as a module-resolution source (can be passed multiple times)"
)]
extra_search_path: Option<Vec<SystemPathBuf>>,
#[arg(
long,
help = "Python version to assume when resolving types",
value_name = "VERSION"
)]
target_version: Option<TargetVersion>,
#[clap(flatten)]
verbosity: Verbosity,
#[arg(
long,
help = "Run in watch mode by re-running whenever files change",
short = 'W'
)]
watch: bool,
}
impl Args {
fn to_configuration(&self, cli_cwd: &SystemPath) -> Configuration {
let mut configuration = Configuration::default();
if let Some(target_version) = self.target_version {
configuration.target_version = Some(target_version.into());
}
if let Some(venv_path) = &self.venv_path {
configuration.search_paths.site_packages = Some(SitePackages::Derived {
venv_path: SystemPath::absolute(venv_path, cli_cwd),
});
}
if let Some(custom_typeshed_dir) = &self.custom_typeshed_dir {
configuration.search_paths.custom_typeshed =
Some(SystemPath::absolute(custom_typeshed_dir, cli_cwd));
}
if let Some(extra_search_paths) = &self.extra_search_path {
configuration.search_paths.extra_paths = extra_search_paths
.iter()
.map(|path| Some(SystemPath::absolute(path, cli_cwd)))
.collect();
}
configuration
}
}
#[derive(Debug, clap::Subcommand)]
pub enum Command {
/// Start the language server
Server,
}
#[allow(clippy::print_stdout, clippy::unnecessary_wraps, clippy::print_stderr)]
pub fn main() -> ExitStatus {
run().unwrap_or_else(|error| {
use std::io::Write;
// Use `writeln` instead of `eprintln` to avoid panicking when the stderr pipe is broken.
let mut stderr = std::io::stderr().lock();
// This communicates that this isn't a linter error but Red Knot itself hard-errored for
// some reason (e.g. failed to resolve the configuration)
writeln!(stderr, "{}", "Red Knot failed".red().bold()).ok();
// Currently we generally only see one error, but e.g. with io errors when resolving
// the configuration it is helpful to chain errors ("resolving configuration failed" ->
// "failed to read file: subdir/pyproject.toml")
for cause in error.chain() {
writeln!(stderr, " {} {cause}", "Cause:".bold()).ok();
}
ExitStatus::Error
})
}
fn run() -> anyhow::Result<ExitStatus> {
let args = Args::parse_from(std::env::args().collect::<Vec<_>>());
if matches!(args.command, Some(Command::Server)) {
return run_server().map(|()| ExitStatus::Success);
}
let verbosity = args.verbosity.level();
countme::enable(verbosity.is_trace());
let _guard = setup_tracing(verbosity)?;
// The base path to which all CLI arguments are relative to.
let cli_base_path = {
let cwd = std::env::current_dir().context("Failed to get the current working directory")?;
SystemPathBuf::from_path_buf(cwd)
.map_err(|path| {
anyhow!(
"The current working directory '{}' contains non-unicode characters. Red Knot only supports unicode paths.",
path.display()
)
})?
};
let cwd = args
.current_directory
.as_ref()
.map(|cwd| {
if cwd.as_std_path().is_dir() {
Ok(SystemPath::absolute(cwd, &cli_base_path))
} else {
Err(anyhow!(
"Provided current-directory path '{cwd}' is not a directory."
))
}
})
.transpose()?
.unwrap_or_else(|| cli_base_path.clone());
let system = OsSystem::new(cwd.clone());
let cli_configuration = args.to_configuration(&cwd);
let workspace_metadata = WorkspaceMetadata::from_path(
system.current_directory(),
&system,
Some(cli_configuration.clone()),
)?;
// TODO: Use the `program_settings` to compute the key for the database's persistent
// cache and load the cache if it exists.
let mut db = RootDatabase::new(workspace_metadata, system)?;
let (main_loop, main_loop_cancellation_token) = MainLoop::new(cli_configuration);
// Listen to Ctrl+C and abort the watch mode.
let main_loop_cancellation_token = Mutex::new(Some(main_loop_cancellation_token));
ctrlc::set_handler(move || {
let mut lock = main_loop_cancellation_token.lock().unwrap();
if let Some(token) = lock.take() {
token.stop();
}
})?;
let exit_status = if args.watch {
main_loop.watch(&mut db)?
} else {
main_loop.run(&mut db)
};
tracing::trace!("Counts for entire CLI run:\n{}", countme::get_all());
std::mem::forget(db);
Ok(exit_status)
}
#[derive(Copy, Clone)]
pub enum ExitStatus {
/// Checking was successful and there were no errors.
Success = 0,
/// Checking was successful but there were errors.
Failure = 1,
/// Checking failed.
Error = 2,
}
impl Termination for ExitStatus {
fn report(self) -> ExitCode {
ExitCode::from(self as u8)
}
}
struct MainLoop {
/// Sender that can be used to send messages to the main loop.
sender: crossbeam_channel::Sender<MainLoopMessage>,
/// Receiver for the messages sent **to** the main loop.
receiver: crossbeam_channel::Receiver<MainLoopMessage>,
/// The file system watcher, if running in watch mode.
watcher: Option<WorkspaceWatcher>,
cli_configuration: Configuration,
}
impl MainLoop {
fn new(cli_configuration: Configuration) -> (Self, MainLoopCancellationToken) {
let (sender, receiver) = crossbeam_channel::bounded(10);
(
Self {
sender: sender.clone(),
receiver,
watcher: None,
cli_configuration,
},
MainLoopCancellationToken { sender },
)
}
fn watch(mut self, db: &mut RootDatabase) -> anyhow::Result<ExitStatus> {
tracing::debug!("Starting watch mode");
let sender = self.sender.clone();
let watcher = watch::directory_watcher(move |event| {
sender.send(MainLoopMessage::ApplyChanges(event)).unwrap();
})?;
self.watcher = Some(WorkspaceWatcher::new(watcher, db));
self.run(db);
Ok(ExitStatus::Success)
}
fn run(mut self, db: &mut RootDatabase) -> ExitStatus {
self.sender.send(MainLoopMessage::CheckWorkspace).unwrap();
let result = self.main_loop(db);
tracing::debug!("Exiting main loop");
result
}
fn main_loop(&mut self, db: &mut RootDatabase) -> ExitStatus {
// Schedule the first check.
tracing::debug!("Starting main loop");
let mut revision = 0u64;
while let Ok(message) = self.receiver.recv() {
match message {
MainLoopMessage::CheckWorkspace => {
let db = db.snapshot();
let sender = self.sender.clone();
// Spawn a new task that checks the workspace. This needs to be done in a separate thread
// to prevent blocking the main loop here.
rayon::spawn(move || {
if let Ok(result) = db.check() {
// Send the result back to the main loop for printing.
sender
.send(MainLoopMessage::CheckCompleted { result, revision })
.unwrap();
}
});
}
MainLoopMessage::CheckCompleted {
result,
revision: check_revision,
} => {
let has_diagnostics = !result.is_empty();
if check_revision == revision {
for diagnostic in result {
tracing::error!("{}", diagnostic);
}
} else {
tracing::debug!(
"Discarding check result for outdated revision: current: {revision}, result revision: {check_revision}"
);
}
if self.watcher.is_none() {
return if has_diagnostics {
ExitStatus::Failure
} else {
ExitStatus::Success
};
}
tracing::trace!("Counts after last check:\n{}", countme::get_all());
}
MainLoopMessage::ApplyChanges(changes) => {
revision += 1;
// Automatically cancels any pending queries and waits for them to complete.
db.apply_changes(changes, Some(&self.cli_configuration));
if let Some(watcher) = self.watcher.as_mut() {
watcher.update(db);
}
self.sender.send(MainLoopMessage::CheckWorkspace).unwrap();
}
MainLoopMessage::Exit => {
// Cancel any pending queries and wait for them to complete.
// TODO: Don't use Salsa internal APIs
// [Zulip-Thread](https://salsa.zulipchat.com/#narrow/stream/333573-salsa-3.2E0/topic/Expose.20an.20API.20to.20cancel.20other.20queries)
let _ = db.zalsa_mut();
return ExitStatus::Success;
}
}
tracing::debug!("Waiting for next main loop message.");
}
ExitStatus::Success
}
}
#[derive(Debug)]
struct MainLoopCancellationToken {
sender: crossbeam_channel::Sender<MainLoopMessage>,
}
impl MainLoopCancellationToken {
fn stop(self) {
self.sender.send(MainLoopMessage::Exit).unwrap();
}
}
/// Message sent from the orchestrator to the main loop.
#[derive(Debug)]
enum MainLoopMessage {
CheckWorkspace,
CheckCompleted { result: Vec<String>, revision: u64 },
ApplyChanges(Vec<watch::ChangeEvent>),
Exit,
}

View File

@@ -1,48 +0,0 @@
/// Enumeration of all supported Python versions
///
/// TODO: unify with the `PythonVersion` enum in the linter/formatter crates?
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq, PartialOrd, Ord, Default, clap::ValueEnum)]
pub enum TargetVersion {
Py37,
#[default]
Py38,
Py39,
Py310,
Py311,
Py312,
Py313,
}
impl TargetVersion {
const fn as_str(self) -> &'static str {
match self {
Self::Py37 => "py37",
Self::Py38 => "py38",
Self::Py39 => "py39",
Self::Py310 => "py310",
Self::Py311 => "py311",
Self::Py312 => "py312",
Self::Py313 => "py313",
}
}
}
impl std::fmt::Display for TargetVersion {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(self.as_str())
}
}
impl From<TargetVersion> for red_knot_python_semantic::PythonVersion {
fn from(value: TargetVersion) -> Self {
match value {
TargetVersion::Py37 => Self::PY37,
TargetVersion::Py38 => Self::PY38,
TargetVersion::Py39 => Self::PY39,
TargetVersion::Py310 => Self::PY310,
TargetVersion::Py311 => Self::PY311,
TargetVersion::Py312 => Self::PY312,
TargetVersion::Py313 => Self::PY313,
}
}
}

View File

@@ -1 +0,0 @@

File diff suppressed because it is too large

View File

@@ -1,53 +0,0 @@
[package]
name = "red_knot_python_semantic"
version = "0.0.0"
publish = false
authors = { workspace = true }
edition = { workspace = true }
rust-version = { workspace = true }
homepage = { workspace = true }
documentation = { workspace = true }
repository = { workspace = true }
license = { workspace = true }
[dependencies]
ruff_db = { workspace = true }
ruff_index = { workspace = true }
ruff_python_ast = { workspace = true }
ruff_python_stdlib = { workspace = true }
ruff_source_file = { workspace = true }
ruff_text_size = { workspace = true }
anyhow = { workspace = true }
bitflags = { workspace = true }
camino = { workspace = true }
compact_str = { workspace = true }
countme = { workspace = true }
once_cell = { workspace = true }
ordermap = { workspace = true }
salsa = { workspace = true }
thiserror = { workspace = true }
tracing = { workspace = true }
rustc-hash = { workspace = true }
hashbrown = { workspace = true }
smallvec = { workspace = true }
static_assertions = { workspace = true }
[build-dependencies]
path-slash = { workspace = true }
walkdir = { workspace = true }
zip = { workspace = true, features = ["zstd", "deflate"] }
[dev-dependencies]
ruff_db = { workspace = true, features = ["os", "testing"] }
ruff_python_parser = { workspace = true }
anyhow = { workspace = true }
insta = { workspace = true }
tempfile = { workspace = true }
walkdir = { workspace = true }
zip = { workspace = true }
[lints]
workspace = true

View File

@@ -1,9 +0,0 @@
# Red Knot
Semantic analysis for the red-knot project.
## Vendored types for the stdlib
This crate vendors [typeshed](https://github.com/python/typeshed)'s stubs for the standard library. The vendored stubs can be found in `crates/red_knot_python_semantic/vendor/typeshed`. The file `crates/red_knot_python_semantic/vendor/typeshed/source_commit.txt` tells you the typeshed commit that our vendored stdlib stubs currently correspond to.
The typeshed stubs are updated every two weeks via an automated PR using the `sync_typeshed.yaml` workflow in the `.github/workflows` directory. This workflow can also be triggered at any time via [workflow dispatch](https://docs.github.com/en/actions/using-workflows/manually-running-a-workflow#running-a-workflow).

View File

@@ -1,87 +0,0 @@
//! Build script to package our vendored typeshed files
//! into a zip archive that can be included in the Ruff binary.
//!
//! This script should be automatically run at build time
//! whenever the script itself changes, or whenever any files
//! in `crates/red_knot_python_semantic/vendor/typeshed` change.
use std::fs::File;
use std::path::Path;
use path_slash::PathExt;
use zip::result::ZipResult;
use zip::write::{FileOptions, ZipWriter};
use zip::CompressionMethod;
const TYPESHED_SOURCE_DIR: &str = "vendor/typeshed";
const TYPESHED_ZIP_LOCATION: &str = "/zipped_typeshed.zip";
/// Recursively zip the contents of an entire directory.
///
/// This routine is adapted from a recipe at
/// <https://github.com/zip-rs/zip-old/blob/5d0f198124946b7be4e5969719a7f29f363118cd/examples/write_dir.rs>
fn zip_dir(directory_path: &str, writer: File) -> ZipResult<File> {
let mut zip = ZipWriter::new(writer);
// Use deflated compression for WASM builds because compiling `zstd-sys` requires clang
// [source](https://github.com/gyscos/zstd-rs/wiki/Compile-for-WASM) which complicates the build
// by a lot. Deflated compression is slower but it shouldn't matter much for the WASM use case
// (WASM itself is already slower than a native build for a specific platform).
// We can't use `#[cfg(...)]` here because the target-arch in a build script is the
// architecture of the system running the build script and not the architecture of the build-target.
// That's why we use the `TARGET` environment variable here.
let method = if std::env::var("TARGET").unwrap().contains("wasm32") {
CompressionMethod::Deflated
} else {
CompressionMethod::Zstd
};
let options = FileOptions::default()
.compression_method(method)
.unix_permissions(0o644);
for entry in walkdir::WalkDir::new(directory_path) {
let dir_entry = entry.unwrap();
let absolute_path = dir_entry.path();
let normalized_relative_path = absolute_path
.strip_prefix(Path::new(directory_path))
.unwrap()
.to_slash()
.expect("Unexpected non-utf8 typeshed path!");
// Write file or directory explicitly
// Some unzip tools unzip files with directory paths correctly, some do not!
if absolute_path.is_file() {
println!("adding file {absolute_path:?} as {normalized_relative_path:?} ...");
zip.start_file(normalized_relative_path, options)?;
let mut f = File::open(absolute_path)?;
std::io::copy(&mut f, &mut zip).unwrap();
} else if !normalized_relative_path.is_empty() {
// Only if not root! Avoids path spec / warning
// and mapname conversion failed error on unzip
println!("adding dir {absolute_path:?} as {normalized_relative_path:?} ...");
zip.add_directory(normalized_relative_path, options)?;
}
}
zip.finish()
}
fn main() {
println!("cargo:rerun-if-changed={TYPESHED_SOURCE_DIR}");
assert!(
Path::new(TYPESHED_SOURCE_DIR).is_dir(),
"Where is typeshed?"
);
let out_dir = std::env::var("OUT_DIR").unwrap();
// N.B. Deliberately using `format!()` instead of `Path::join()` here,
// so that we use `/` as a path separator on all platforms.
// That enables us to load the typeshed zip at compile time in `module.rs`
// (otherwise we'd have to dynamically determine the exact path to the typeshed zip
// based on the default path separator for the specific platform we're on,
// which can't be done at compile time.)
let zipped_typeshed_location = format!("{out_dir}{TYPESHED_ZIP_LOCATION}");
let zipped_typeshed = File::create(zipped_typeshed_location).unwrap();
zip_dir(TYPESHED_SOURCE_DIR, zipped_typeshed).unwrap();
}

View File

@@ -1,163 +0,0 @@
use std::hash::Hash;
use std::ops::Deref;
use ruff_db::parsed::ParsedModule;
/// Ref-counted owned reference to an AST node.
///
/// The type holds an owned reference to the node's ref-counted [`ParsedModule`].
/// Holding on to the node's [`ParsedModule`] guarantees that the reference to the
/// node must still be valid.
///
/// Holding on to any [`AstNodeRef`] prevents the [`ParsedModule`] from being released.
///
/// ## Equality
/// Two `AstNodeRef` are considered equal if their wrapped nodes are equal.
#[derive(Clone)]
pub struct AstNodeRef<T> {
/// Owned reference to the node's [`ParsedModule`].
///
/// The node's reference is guaranteed to remain valid as long as its enclosing
/// [`ParsedModule`] is alive.
_parsed: ParsedModule,
/// Pointer to the referenced node.
node: std::ptr::NonNull<T>,
}
#[allow(unsafe_code)]
impl<T> AstNodeRef<T> {
/// Creates a new `AstNodeRef` that references `node`. The `parsed` is the [`ParsedModule`] to
/// which the `AstNodeRef` belongs.
///
/// ## Safety
/// Dereferencing the `node` can result in undefined behavior if `parsed` isn't the
/// [`ParsedModule`] to which `node` belongs. It's the caller's responsibility to ensure that
/// the invariant `node belongs to parsed` is upheld.
pub(super) unsafe fn new(parsed: ParsedModule, node: &T) -> Self {
Self {
_parsed: parsed,
node: std::ptr::NonNull::from(node),
}
}
/// Returns a reference to the wrapped node.
pub fn node(&self) -> &T {
// SAFETY: Holding on to `parsed` ensures that the AST to which `node` belongs is still
// alive and not moved.
unsafe { self.node.as_ref() }
}
}
impl<T> Deref for AstNodeRef<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.node()
}
}
impl<T> std::fmt::Debug for AstNodeRef<T>
where
T: std::fmt::Debug,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_tuple("AstNodeRef").field(&self.node()).finish()
}
}
impl<T> PartialEq for AstNodeRef<T>
where
T: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
self.node().eq(other.node())
}
}
impl<T> Eq for AstNodeRef<T> where T: Eq {}
impl<T> Hash for AstNodeRef<T>
where
T: Hash,
{
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.node().hash(state);
}
}
#[allow(unsafe_code)]
unsafe impl<T> Send for AstNodeRef<T> where T: Send {}
#[allow(unsafe_code)]
unsafe impl<T> Sync for AstNodeRef<T> where T: Sync {}
#[cfg(test)]
mod tests {
use crate::ast_node_ref::AstNodeRef;
use ruff_db::parsed::ParsedModule;
use ruff_python_ast::PySourceType;
use ruff_python_parser::parse_unchecked_source;
#[test]
#[allow(unsafe_code)]
fn equality() {
let parsed_raw = parse_unchecked_source("1 + 2", PySourceType::Python);
let parsed = ParsedModule::new(parsed_raw.clone());
let stmt = &parsed.syntax().body[0];
let node1 = unsafe { AstNodeRef::new(parsed.clone(), stmt) };
let node2 = unsafe { AstNodeRef::new(parsed.clone(), stmt) };
assert_eq!(node1, node2);
// Compare from different trees
let cloned = ParsedModule::new(parsed_raw);
let stmt_cloned = &cloned.syntax().body[0];
let cloned_node = unsafe { AstNodeRef::new(cloned.clone(), stmt_cloned) };
assert_eq!(node1, cloned_node);
let other_raw = parse_unchecked_source("2 + 2", PySourceType::Python);
let other = ParsedModule::new(other_raw);
let other_stmt = &other.syntax().body[0];
let other_node = unsafe { AstNodeRef::new(other.clone(), other_stmt) };
assert_ne!(node1, other_node);
}
#[allow(unsafe_code)]
#[test]
fn inequality() {
let parsed_raw = parse_unchecked_source("1 + 2", PySourceType::Python);
let parsed = ParsedModule::new(parsed_raw.clone());
let stmt = &parsed.syntax().body[0];
let node = unsafe { AstNodeRef::new(parsed.clone(), stmt) };
let other_raw = parse_unchecked_source("2 + 2", PySourceType::Python);
let other = ParsedModule::new(other_raw);
let other_stmt = &other.syntax().body[0];
let other_node = unsafe { AstNodeRef::new(other.clone(), other_stmt) };
assert_ne!(node, other_node);
}
#[test]
#[allow(unsafe_code)]
fn debug() {
let parsed_raw = parse_unchecked_source("1 + 2", PySourceType::Python);
let parsed = ParsedModule::new(parsed_raw.clone());
let stmt = &parsed.syntax().body[0];
let stmt_node = unsafe { AstNodeRef::new(parsed.clone(), stmt) };
let debug = format!("{stmt_node:?}");
assert_eq!(debug, format!("AstNodeRef({stmt:?})"));
}
}

View File

@@ -1,16 +0,0 @@
use crate::module_name::ModuleName;
use crate::module_resolver::resolve_module;
use crate::semantic_index::global_scope;
use crate::semantic_index::symbol::ScopeId;
use crate::Db;
/// Salsa query to get the builtins scope.
///
/// Can return None if a custom typeshed is used that is missing `builtins.pyi`.
#[salsa::tracked]
pub(crate) fn builtins_scope(db: &dyn Db) -> Option<ScopeId<'_>> {
let builtins_name =
ModuleName::new_static("builtins").expect("Expected 'builtins' to be a valid module name");
let builtins_file = resolve_module(db, builtins_name)?.file();
Some(global_scope(db, builtins_file))
}

View File

@@ -1,112 +0,0 @@
use ruff_db::files::File;
use ruff_db::{Db as SourceDb, Upcast};
/// Database giving access to semantic information about a Python program.
#[salsa::db]
pub trait Db: SourceDb + Upcast<dyn SourceDb> {
fn is_file_open(&self, file: File) -> bool;
}
#[cfg(test)]
pub(crate) mod tests {
use std::sync::Arc;
use crate::module_resolver::vendored_typeshed_stubs;
use ruff_db::files::{File, Files};
use ruff_db::system::{DbWithTestSystem, System, TestSystem};
use ruff_db::vendored::VendoredFileSystem;
use ruff_db::{Db as SourceDb, Upcast};
use super::Db;
#[salsa::db]
pub(crate) struct TestDb {
storage: salsa::Storage<Self>,
files: Files,
system: TestSystem,
vendored: VendoredFileSystem,
events: std::sync::Arc<std::sync::Mutex<Vec<salsa::Event>>>,
}
impl TestDb {
pub(crate) fn new() -> Self {
Self {
storage: salsa::Storage::default(),
system: TestSystem::default(),
vendored: vendored_typeshed_stubs().clone(),
events: std::sync::Arc::default(),
files: Files::default(),
}
}
/// Takes the salsa events.
///
/// ## Panics
/// If there are any pending salsa snapshots.
pub(crate) fn take_salsa_events(&mut self) -> Vec<salsa::Event> {
let inner = Arc::get_mut(&mut self.events).expect("no pending salsa snapshots");
let events = inner.get_mut().unwrap();
std::mem::take(&mut *events)
}
/// Clears the salsa events.
///
/// ## Panics
/// If there are any pending salsa snapshots.
pub(crate) fn clear_salsa_events(&mut self) {
self.take_salsa_events();
}
}
impl DbWithTestSystem for TestDb {
fn test_system(&self) -> &TestSystem {
&self.system
}
fn test_system_mut(&mut self) -> &mut TestSystem {
&mut self.system
}
}
#[salsa::db]
impl SourceDb for TestDb {
fn vendored(&self) -> &VendoredFileSystem {
&self.vendored
}
fn system(&self) -> &dyn System {
&self.system
}
fn files(&self) -> &Files {
&self.files
}
}
impl Upcast<dyn SourceDb> for TestDb {
fn upcast(&self) -> &(dyn SourceDb + 'static) {
self
}
fn upcast_mut(&mut self) -> &mut (dyn SourceDb + 'static) {
self
}
}
#[salsa::db]
impl Db for TestDb {
fn is_file_open(&self, file: File) -> bool {
!file.path(self).is_vendored_path()
}
}
#[salsa::db]
impl salsa::Database for TestDb {
fn salsa_event(&self, event: &dyn Fn() -> salsa::Event) {
let event = event();
tracing::trace!("event: {event:?}");
let mut events = self.events.lock().unwrap();
events.push(event);
}
}
}

View File

@@ -1,25 +0,0 @@
use std::hash::BuildHasherDefault;
use rustc_hash::FxHasher;
pub use db::Db;
pub use module_name::ModuleName;
pub use module_resolver::{resolve_module, system_module_search_paths, vendored_typeshed_stubs};
pub use program::{Program, ProgramSettings, SearchPathSettings, SitePackages};
pub use python_version::PythonVersion;
pub use semantic_model::{HasTy, SemanticModel};
pub mod ast_node_ref;
mod builtins;
mod db;
mod module_name;
mod module_resolver;
mod node_key;
mod program;
mod python_version;
pub mod semantic_index;
mod semantic_model;
pub(crate) mod site_packages;
pub mod types;
type FxOrderSet<V> = ordermap::set::OrderSet<V, BuildHasherDefault<FxHasher>>;

View File

@@ -1,216 +0,0 @@
use std::fmt;
use std::ops::Deref;
use compact_str::{CompactString, ToCompactString};
use ruff_python_stdlib::identifiers::is_identifier;
/// A module name, e.g. `foo.bar`.
///
/// Always normalized to the absolute form (never a relative module name, i.e., never `.foo`).
#[derive(Clone, Debug, Eq, PartialEq, Hash, PartialOrd, Ord)]
pub struct ModuleName(compact_str::CompactString);
impl ModuleName {
/// Creates a new module name for `name`. Returns `Some` if `name` is a valid, absolute
/// module name and `None` otherwise.
///
/// The module name is invalid if:
///
/// * The name is empty
/// * The name is relative
/// * The name ends with a `.`
/// * The name contains a sequence of multiple dots
/// * A component of a name (the part between two dots) isn't a valid python identifier.
#[inline]
#[must_use]
pub fn new(name: &str) -> Option<Self> {
Self::is_valid_name(name).then(|| Self(CompactString::from(name)))
}
/// Creates a new module name for `name` where `name` is a static string.
/// Returns `Some` if `name` is a valid, absolute module name and `None` otherwise.
///
/// The module name is invalid if:
///
/// * The name is empty
/// * The name is relative
/// * The name ends with a `.`
/// * The name contains a sequence of multiple dots
/// * A component of a name (the part between two dots) isn't a valid python identifier.
///
/// ## Examples
///
/// ```
/// use red_knot_python_semantic::ModuleName;
///
/// assert_eq!(ModuleName::new_static("foo.bar").as_deref(), Some("foo.bar"));
/// assert_eq!(ModuleName::new_static(""), None);
/// assert_eq!(ModuleName::new_static("..foo"), None);
/// assert_eq!(ModuleName::new_static(".foo"), None);
/// assert_eq!(ModuleName::new_static("foo."), None);
/// assert_eq!(ModuleName::new_static("foo..bar"), None);
/// assert_eq!(ModuleName::new_static("2000"), None);
/// ```
#[inline]
#[must_use]
pub fn new_static(name: &'static str) -> Option<Self> {
Self::is_valid_name(name).then(|| Self(CompactString::const_new(name)))
}
#[must_use]
fn is_valid_name(name: &str) -> bool {
!name.is_empty() && name.split('.').all(is_identifier)
}
/// An iterator over the components of the module name:
///
/// # Examples
///
/// ```
/// use red_knot_python_semantic::ModuleName;
///
/// assert_eq!(ModuleName::new_static("foo.bar.baz").unwrap().components().collect::<Vec<_>>(), vec!["foo", "bar", "baz"]);
/// ```
#[must_use]
pub fn components(&self) -> impl DoubleEndedIterator<Item = &str> {
self.0.split('.')
}
/// The name of this module's immediate parent, if it has a parent.
///
/// # Examples
///
/// ```
/// use red_knot_python_semantic::ModuleName;
///
/// assert_eq!(ModuleName::new_static("foo.bar").unwrap().parent(), Some(ModuleName::new_static("foo").unwrap()));
/// assert_eq!(ModuleName::new_static("foo.bar.baz").unwrap().parent(), Some(ModuleName::new_static("foo.bar").unwrap()));
/// assert_eq!(ModuleName::new_static("root").unwrap().parent(), None);
/// ```
#[must_use]
pub fn parent(&self) -> Option<ModuleName> {
let (parent, _) = self.0.rsplit_once('.')?;
Some(Self(parent.to_compact_string()))
}
/// Returns `true` if the name starts with `other`.
///
/// This is equivalent to checking if `self` is a sub-module of `other`.
///
/// # Examples
///
/// ```
/// use red_knot_python_semantic::ModuleName;
///
/// assert!(ModuleName::new_static("foo.bar").unwrap().starts_with(&ModuleName::new_static("foo").unwrap()));
///
/// assert!(!ModuleName::new_static("foo.bar").unwrap().starts_with(&ModuleName::new_static("bar").unwrap()));
/// assert!(!ModuleName::new_static("foo_bar").unwrap().starts_with(&ModuleName::new_static("foo").unwrap()));
/// ```
#[must_use]
pub fn starts_with(&self, other: &ModuleName) -> bool {
let mut self_components = self.components();
let other_components = other.components();
for other_component in other_components {
if self_components.next() != Some(other_component) {
return false;
}
}
true
}
#[must_use]
#[inline]
pub fn as_str(&self) -> &str {
&self.0
}
/// Construct a [`ModuleName`] from a sequence of parts.
///
/// # Examples
///
/// ```
/// use red_knot_python_semantic::ModuleName;
///
/// assert_eq!(&*ModuleName::from_components(["a"]).unwrap(), "a");
/// assert_eq!(&*ModuleName::from_components(["a", "b"]).unwrap(), "a.b");
/// assert_eq!(&*ModuleName::from_components(["a", "b", "c"]).unwrap(), "a.b.c");
///
/// assert_eq!(ModuleName::from_components(["a-b"]), None);
/// assert_eq!(ModuleName::from_components(["a", "a-b"]), None);
/// assert_eq!(ModuleName::from_components(["a", "b", "a-b-c"]), None);
/// ```
#[must_use]
pub fn from_components<'a>(components: impl IntoIterator<Item = &'a str>) -> Option<Self> {
let mut components = components.into_iter();
let first_part = components.next()?;
if !is_identifier(first_part) {
return None;
}
let name = if let Some(second_part) = components.next() {
if !is_identifier(second_part) {
return None;
}
let mut name = format!("{first_part}.{second_part}");
for part in components {
if !is_identifier(part) {
return None;
}
name.push('.');
name.push_str(part);
}
CompactString::from(&name)
} else {
CompactString::from(first_part)
};
Some(Self(name))
}
/// Extend `self` with the components of `other`
///
/// # Examples
///
/// ```
/// use red_knot_python_semantic::ModuleName;
///
/// let mut module_name = ModuleName::new_static("foo").unwrap();
/// module_name.extend(&ModuleName::new_static("bar").unwrap());
/// assert_eq!(&module_name, "foo.bar");
/// module_name.extend(&ModuleName::new_static("baz.eggs.ham").unwrap());
/// assert_eq!(&module_name, "foo.bar.baz.eggs.ham");
/// ```
pub fn extend(&mut self, other: &ModuleName) {
self.0.push('.');
self.0.push_str(other);
}
}
impl Deref for ModuleName {
type Target = str;
#[inline]
fn deref(&self) -> &Self::Target {
self.as_str()
}
}
impl PartialEq<str> for ModuleName {
fn eq(&self, other: &str) -> bool {
self.as_str() == other
}
}
impl PartialEq<ModuleName> for str {
fn eq(&self, other: &ModuleName) -> bool {
self == other.as_str()
}
}
impl std::fmt::Display for ModuleName {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.0)
}
}

View File

@@ -1,46 +0,0 @@
use std::iter::FusedIterator;
pub(crate) use module::Module;
pub use resolver::resolve_module;
pub(crate) use resolver::{file_to_module, SearchPaths};
use ruff_db::system::SystemPath;
pub use typeshed::vendored_typeshed_stubs;
use crate::module_resolver::resolver::search_paths;
use crate::Db;
use resolver::SearchPathIterator;
mod module;
mod path;
mod resolver;
mod typeshed;
#[cfg(test)]
mod testing;
/// Returns an iterator over all search paths pointing to a system path
pub fn system_module_search_paths(db: &dyn Db) -> SystemModuleSearchPathsIter {
SystemModuleSearchPathsIter {
inner: search_paths(db),
}
}
pub struct SystemModuleSearchPathsIter<'db> {
inner: SearchPathIterator<'db>,
}
impl<'db> Iterator for SystemModuleSearchPathsIter<'db> {
type Item = &'db SystemPath;
fn next(&mut self) -> Option<Self::Item> {
loop {
let next = self.inner.next()?;
if let Some(system_path) = next.as_system_path() {
return Some(system_path);
}
}
}
}
impl FusedIterator for SystemModuleSearchPathsIter<'_> {}

View File

@@ -1,85 +0,0 @@
use std::fmt::Formatter;
use std::sync::Arc;
use ruff_db::files::File;
use super::path::SearchPath;
use crate::module_name::ModuleName;
/// Representation of a Python module.
#[derive(Clone, PartialEq, Eq)]
pub struct Module {
inner: Arc<ModuleInner>,
}
impl Module {
pub(crate) fn new(
name: ModuleName,
kind: ModuleKind,
search_path: SearchPath,
file: File,
) -> Self {
Self {
inner: Arc::new(ModuleInner {
name,
kind,
search_path,
file,
}),
}
}
/// The absolute name of the module (e.g. `foo.bar`)
pub fn name(&self) -> &ModuleName {
&self.inner.name
}
/// The file containing the source code that defines this module
pub fn file(&self) -> File {
self.inner.file
}
/// The search path from which the module was resolved.
pub(crate) fn search_path(&self) -> &SearchPath {
&self.inner.search_path
}
/// Determine whether this module is a single-file module or a package
pub fn kind(&self) -> ModuleKind {
self.inner.kind
}
}
impl std::fmt::Debug for Module {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Module")
.field("name", &self.name())
.field("kind", &self.kind())
.field("file", &self.file())
.field("search_path", &self.search_path())
.finish()
}
}
#[derive(PartialEq, Eq)]
struct ModuleInner {
name: ModuleName,
kind: ModuleKind,
search_path: SearchPath,
file: File,
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub enum ModuleKind {
/// A single-file module (e.g. `foo.py` or `foo.pyi`)
Module,
/// A python package (`foo/__init__.py` or `foo/__init__.pyi`)
Package,
}
impl ModuleKind {
pub const fn is_package(self) -> bool {
matches!(self, ModuleKind::Package)
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,302 +0,0 @@
use ruff_db::system::{DbWithTestSystem, SystemPath, SystemPathBuf};
use ruff_db::vendored::VendoredPathBuf;
use crate::db::tests::TestDb;
use crate::program::{Program, SearchPathSettings};
use crate::python_version::PythonVersion;
use crate::{ProgramSettings, SitePackages};
/// A test case for the module resolver.
///
/// You generally shouldn't construct instances of this struct directly;
/// instead, use the [`TestCaseBuilder`].
pub(crate) struct TestCase<T> {
pub(crate) db: TestDb,
pub(crate) src: SystemPathBuf,
pub(crate) stdlib: T,
// Most test cases only ever need a single `site-packages` directory,
// so this is a single directory instead of a `Vec` of directories,
// like it is in `ruff_db::Program`.
pub(crate) site_packages: SystemPathBuf,
pub(crate) target_version: PythonVersion,
}
/// A `(file_name, file_contents)` tuple
pub(crate) type FileSpec = (&'static str, &'static str);
/// Specification for a typeshed mock to be created as part of a test
#[derive(Debug, Clone, Copy, Default)]
pub(crate) struct MockedTypeshed {
/// The stdlib files to be created in the typeshed mock
pub(crate) stdlib_files: &'static [FileSpec],
/// The contents of the `stdlib/VERSIONS` file
/// to be created in the typeshed mock
pub(crate) versions: &'static str,
}
#[derive(Debug)]
pub(crate) struct VendoredTypeshed;
#[derive(Debug)]
pub(crate) struct UnspecifiedTypeshed;
/// A builder for a module-resolver test case.
///
/// The builder takes care of creating a [`TestDb`]
/// instance, applying the module resolver settings,
/// and creating mock directories for the stdlib, `site-packages`,
/// first-party code, etc.
///
/// For simple tests that do not involve typeshed,
/// test cases can be created as follows:
///
/// ```rs
/// let test_case = TestCaseBuilder::new()
/// .with_src_files(...)
/// .build();
///
/// let test_case2 = TestCaseBuilder::new()
/// .with_site_packages_files(...)
/// .build();
/// ```
///
/// Any tests can specify the target Python version that should be used
/// in the module resolver settings:
///
/// ```rs
/// let test_case = TestCaseBuilder::new()
/// .with_src_files(...)
/// .with_target_version(...)
/// .build();
/// ```
///
/// For tests checking that standard-library module resolution is working
/// correctly, you should usually create a [`MockedTypeshed`] instance
/// and pass it to the [`TestCaseBuilder::with_custom_typeshed`] method.
/// If you need to check something that involves the vendored typeshed stubs
/// we include as part of the binary, you can instead use the
/// [`TestCaseBuilder::with_vendored_typeshed`] method.
/// For either of these, you should almost always try to be explicit
/// about the Python version you want to be specified in the module-resolver
/// settings for the test:
///
/// ```rs
/// const TYPESHED = MockedTypeshed { ... };
///
/// let test_case = resolver_test_case()
/// .with_custom_typeshed(TYPESHED)
/// .with_target_version(...)
/// .build();
///
/// let test_case2 = resolver_test_case()
/// .with_vendored_typeshed()
/// .with_target_version(...)
/// .build();
/// ```
///
/// If you have not called one of those options, the `stdlib` field
/// on the [`TestCase`] instance created from `.build()` will be set
/// to `()`.
pub(crate) struct TestCaseBuilder<T> {
typeshed_option: T,
target_version: PythonVersion,
first_party_files: Vec<FileSpec>,
site_packages_files: Vec<FileSpec>,
}
impl<T> TestCaseBuilder<T> {
/// Specify files to be created in the `src` mock directory
pub(crate) fn with_src_files(mut self, files: &[FileSpec]) -> Self {
self.first_party_files.extend(files.iter().copied());
self
}
/// Specify files to be created in the `site-packages` mock directory
pub(crate) fn with_site_packages_files(mut self, files: &[FileSpec]) -> Self {
self.site_packages_files.extend(files.iter().copied());
self
}
/// Specify the target Python version the module resolver should assume
pub(crate) fn with_target_version(mut self, target_version: PythonVersion) -> Self {
self.target_version = target_version;
self
}
fn write_mock_directory(
db: &mut TestDb,
location: impl AsRef<SystemPath>,
files: impl IntoIterator<Item = FileSpec>,
) -> SystemPathBuf {
let root = location.as_ref().to_path_buf();
// Make sure to create the directory even if the list of files is empty:
db.memory_file_system().create_directory_all(&root).unwrap();
db.write_files(
files
.into_iter()
.map(|(relative_path, contents)| (root.join(relative_path), contents)),
)
.unwrap();
root
}
}
impl TestCaseBuilder<UnspecifiedTypeshed> {
pub(crate) fn new() -> TestCaseBuilder<UnspecifiedTypeshed> {
Self {
typeshed_option: UnspecifiedTypeshed,
target_version: PythonVersion::default(),
first_party_files: vec![],
site_packages_files: vec![],
}
}
/// Use the vendored stdlib stubs included in the Ruff binary for this test case
pub(crate) fn with_vendored_typeshed(self) -> TestCaseBuilder<VendoredTypeshed> {
let TestCaseBuilder {
typeshed_option: _,
target_version,
first_party_files,
site_packages_files,
} = self;
TestCaseBuilder {
typeshed_option: VendoredTypeshed,
target_version,
first_party_files,
site_packages_files,
}
}
/// Use a mock typeshed directory for this test case
pub(crate) fn with_custom_typeshed(
self,
typeshed: MockedTypeshed,
) -> TestCaseBuilder<MockedTypeshed> {
let TestCaseBuilder {
typeshed_option: _,
target_version,
first_party_files,
site_packages_files,
} = self;
TestCaseBuilder {
typeshed_option: typeshed,
target_version,
first_party_files,
site_packages_files,
}
}
pub(crate) fn build(self) -> TestCase<()> {
let TestCase {
db,
src,
stdlib: _,
site_packages,
target_version,
} = self.with_custom_typeshed(MockedTypeshed::default()).build();
TestCase {
db,
src,
stdlib: (),
site_packages,
target_version,
}
}
}
impl TestCaseBuilder<MockedTypeshed> {
pub(crate) fn build(self) -> TestCase<SystemPathBuf> {
let TestCaseBuilder {
typeshed_option,
target_version,
first_party_files,
site_packages_files,
} = self;
let mut db = TestDb::new();
let site_packages =
Self::write_mock_directory(&mut db, "/site-packages", site_packages_files);
let src = Self::write_mock_directory(&mut db, "/src", first_party_files);
let typeshed = Self::build_typeshed_mock(&mut db, &typeshed_option);
Program::from_settings(
&db,
&ProgramSettings {
target_version,
search_paths: SearchPathSettings {
extra_paths: vec![],
src_root: src.clone(),
custom_typeshed: Some(typeshed.clone()),
site_packages: SitePackages::Known(vec![site_packages.clone()]),
},
},
)
.expect("Valid program settings");
TestCase {
db,
src,
stdlib: typeshed.join("stdlib"),
site_packages,
target_version,
}
}
fn build_typeshed_mock(db: &mut TestDb, typeshed_to_build: &MockedTypeshed) -> SystemPathBuf {
let typeshed = SystemPathBuf::from("/typeshed");
let MockedTypeshed {
stdlib_files,
versions,
} = typeshed_to_build;
Self::write_mock_directory(
db,
typeshed.join("stdlib"),
stdlib_files
.iter()
.copied()
.chain(std::iter::once(("VERSIONS", *versions))),
);
typeshed
}
}
impl TestCaseBuilder<VendoredTypeshed> {
pub(crate) fn build(self) -> TestCase<VendoredPathBuf> {
let TestCaseBuilder {
typeshed_option: VendoredTypeshed,
target_version,
first_party_files,
site_packages_files,
} = self;
let mut db = TestDb::new();
let site_packages =
Self::write_mock_directory(&mut db, "/site-packages", site_packages_files);
let src = Self::write_mock_directory(&mut db, "/src", first_party_files);
Program::from_settings(
&db,
&ProgramSettings {
target_version,
search_paths: SearchPathSettings {
site_packages: SitePackages::Known(vec![site_packages.clone()]),
..SearchPathSettings::new(src.clone())
},
},
)
.expect("Valid search path settings");
TestCase {
db,
src,
stdlib: VendoredPathBuf::from("stdlib"),
site_packages,
target_version,
}
}
}

View File

@@ -1,8 +0,0 @@
pub use self::vendored::vendored_typeshed_stubs;
pub(super) use self::versions::{
typeshed_versions, vendored_typeshed_versions, TypeshedVersions, TypeshedVersionsParseError,
TypeshedVersionsQueryResult,
};
mod vendored;
mod versions;

View File

@@ -1,99 +0,0 @@
use once_cell::sync::Lazy;
use ruff_db::vendored::VendoredFileSystem;
// The file path here is hardcoded in this crate's `build.rs` script.
// Luckily this crate will fail to build if this file isn't available at build time.
static TYPESHED_ZIP_BYTES: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/zipped_typeshed.zip"));
pub fn vendored_typeshed_stubs() -> &'static VendoredFileSystem {
static VENDORED_TYPESHED_STUBS: Lazy<VendoredFileSystem> =
Lazy::new(|| VendoredFileSystem::new_static(TYPESHED_ZIP_BYTES).unwrap());
&VENDORED_TYPESHED_STUBS
}
#[cfg(test)]
mod tests {
use std::io::{self, Read};
use std::path::Path;
use ruff_db::vendored::VendoredPath;
use super::*;
#[test]
fn typeshed_zip_created_at_build_time() {
let mut typeshed_zip_archive =
zip::ZipArchive::new(io::Cursor::new(TYPESHED_ZIP_BYTES)).unwrap();
let mut functools_module_stub = typeshed_zip_archive
.by_name("stdlib/functools.pyi")
.unwrap();
assert!(functools_module_stub.is_file());
let mut functools_module_stub_source = String::new();
functools_module_stub
.read_to_string(&mut functools_module_stub_source)
.unwrap();
assert!(functools_module_stub_source.contains("def update_wrapper("));
}
#[test]
fn typeshed_vfs_consistent_with_vendored_stubs() {
let vendored_typeshed_dir = Path::new("vendor/typeshed").canonicalize().unwrap();
let vendored_typeshed_stubs = vendored_typeshed_stubs();
let mut empty_iterator = true;
for entry in walkdir::WalkDir::new(&vendored_typeshed_dir).min_depth(1) {
empty_iterator = false;
let entry = entry.unwrap();
let absolute_path = entry.path();
let file_type = entry.file_type();
let relative_path = absolute_path
.strip_prefix(&vendored_typeshed_dir)
.unwrap_or_else(|_| {
panic!("Expected {absolute_path:?} to be a child of {vendored_typeshed_dir:?}")
});
let vendored_path = <&VendoredPath>::try_from(relative_path)
.unwrap_or_else(|_| panic!("Expected {relative_path:?} to be valid UTF-8"));
assert!(
vendored_typeshed_stubs.exists(vendored_path),
"Expected {vendored_path:?} to exist in the `VendoredFileSystem`!
Vendored file system:
{vendored_typeshed_stubs:#?}
"
);
let vendored_path_kind = vendored_typeshed_stubs
.metadata(vendored_path)
.unwrap_or_else(|_| {
panic!(
"Expected metadata for {vendored_path:?} to be retrievable from the `VendoredFileSystem!
Vendored file system:
{vendored_typeshed_stubs:#?}
"
)
})
.kind();
assert_eq!(
vendored_path_kind.is_directory(),
file_type.is_dir(),
"{vendored_path:?} had type {vendored_path_kind:?}, inconsistent with fs path {relative_path:?}: {file_type:?}"
);
}
assert!(
!empty_iterator,
"Expected there to be at least one file or directory in the vendored typeshed stubs!"
);
}
}

View File

@@ -1,707 +0,0 @@
use std::collections::BTreeMap;
use std::fmt;
use std::num::{NonZeroU16, NonZeroUsize};
use std::ops::{RangeFrom, RangeInclusive};
use std::str::FromStr;
use once_cell::sync::Lazy;
use rustc_hash::FxHashMap;
use super::vendored::vendored_typeshed_stubs;
use crate::db::Db;
use crate::module_name::ModuleName;
use crate::{Program, PythonVersion};
static VENDORED_VERSIONS: Lazy<TypeshedVersions> = Lazy::new(|| {
TypeshedVersions::from_str(
&vendored_typeshed_stubs()
.read_to_string("stdlib/VERSIONS")
.unwrap(),
)
.unwrap()
});
pub(crate) fn vendored_typeshed_versions() -> &'static TypeshedVersions {
&VENDORED_VERSIONS
}
pub(crate) fn typeshed_versions(db: &dyn Db) -> &TypeshedVersions {
Program::get(db).search_paths(db).typeshed_versions()
}
#[derive(Debug, PartialEq, Eq, Clone)]
pub(crate) struct TypeshedVersionsParseError {
line_number: Option<NonZeroU16>,
reason: TypeshedVersionsParseErrorKind,
}
impl fmt::Display for TypeshedVersionsParseError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let TypeshedVersionsParseError {
line_number,
reason,
} = self;
if let Some(line_number) = line_number {
write!(
f,
"Error while parsing line {line_number} of typeshed's VERSIONS file: {reason}"
)
} else {
write!(f, "Error while parsing typeshed's VERSIONS file: {reason}")
}
}
}
impl std::error::Error for TypeshedVersionsParseError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
if let TypeshedVersionsParseErrorKind::IntegerParsingFailure { err, .. } = &self.reason {
Some(err)
} else {
None
}
}
}
#[derive(Debug, PartialEq, Eq, Clone)]
pub(super) enum TypeshedVersionsParseErrorKind {
TooManyLines(NonZeroUsize),
UnexpectedNumberOfColons,
InvalidModuleName(String),
UnexpectedNumberOfHyphens,
UnexpectedNumberOfPeriods(String),
IntegerParsingFailure {
version: String,
err: std::num::ParseIntError,
},
}
impl fmt::Display for TypeshedVersionsParseErrorKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::TooManyLines(num_lines) => write!(
f,
"File has too many lines ({num_lines}); maximum allowed is {}",
NonZeroU16::MAX
),
Self::UnexpectedNumberOfColons => {
f.write_str("Expected every non-comment line to have exactly one colon")
}
Self::InvalidModuleName(name) => write!(
f,
"Expected all components of '{name}' to be valid Python identifiers"
),
Self::UnexpectedNumberOfHyphens => {
f.write_str("Expected every non-comment line to have exactly one '-' character")
}
Self::UnexpectedNumberOfPeriods(format) => write!(
f,
"Expected all versions to be in the form {{MAJOR}}.{{MINOR}}; got '{format}'"
),
Self::IntegerParsingFailure { version, err } => write!(
f,
"Failed to convert '{version}' to a pair of integers due to {err}",
),
}
}
}
#[derive(Debug, PartialEq, Eq)]
pub(crate) struct TypeshedVersions(FxHashMap<ModuleName, PyVersionRange>);
impl TypeshedVersions {
#[must_use]
fn exact(&self, module_name: &ModuleName) -> Option<&PyVersionRange> {
self.0.get(module_name)
}
#[must_use]
pub(in crate::module_resolver) fn query_module(
&self,
module: &ModuleName,
target_version: PythonVersion,
) -> TypeshedVersionsQueryResult {
if let Some(range) = self.exact(module) {
if range.contains(target_version) {
TypeshedVersionsQueryResult::Exists
} else {
TypeshedVersionsQueryResult::DoesNotExist
}
} else {
let mut module = module.parent();
while let Some(module_to_try) = module {
if let Some(range) = self.exact(&module_to_try) {
return {
if range.contains(target_version) {
TypeshedVersionsQueryResult::MaybeExists
} else {
TypeshedVersionsQueryResult::DoesNotExist
}
};
}
module = module_to_try.parent();
}
TypeshedVersionsQueryResult::DoesNotExist
}
}
}
/// Possible answers [`TypeshedVersions::query_module()`] could give to the question:
/// "Does this module exist in the stdlib at runtime on a certain target version?"
#[derive(Debug, Copy, PartialEq, Eq, Clone, Hash)]
pub(crate) enum TypeshedVersionsQueryResult {
/// The module definitely exists in the stdlib at runtime on the user-specified target version.
///
/// For example:
/// - The target version is Python 3.8
/// - We're querying whether the `asyncio.tasks` module exists in the stdlib
/// - The VERSIONS file contains the line `asyncio.tasks: 3.8-`
Exists,
/// The module definitely does not exist in the stdlib on the user-specified target version.
///
/// For example:
/// - We're querying whether the `foo` module exists in the stdlib
/// - There is no top-level `foo` module in VERSIONS
///
/// OR:
/// - The target version is Python 3.8
/// - We're querying whether the module `importlib.abc` exists in the stdlib
/// - The VERSIONS file contains the line `importlib.abc: 3.10-`,
/// indicating that the module was added in 3.10
///
/// OR:
/// - The target version is Python 3.8
/// - We're querying whether the module `collections.abc` exists in the stdlib
/// - The VERSIONS file does not contain any information about the `collections.abc` submodule,
/// but *does* contain the line `collections: 3.10-`,
/// indicating that the entire `collections` package was added in Python 3.10.
DoesNotExist,
/// The module potentially exists in the stdlib and, if it does,
/// it definitely exists on the user-specified target version.
///
/// This variant is only relevant for submodules,
/// for which the typeshed VERSIONS file does not provide comprehensive information.
/// (The VERSIONS file is guaranteed to provide information about all top-level stdlib modules and packages,
/// but not necessarily about all submodules within each top-level package.)
///
/// For example:
/// - The target version is Python 3.8
/// - We're querying whether the `asyncio.staggered` module exists in the stdlib
    /// - The typeshed VERSIONS file contains the line `asyncio: 3.8-`,
/// indicating that the `asyncio` package was added in Python 3.8,
/// but does not contain any explicit information about the `asyncio.staggered` submodule.
MaybeExists,
}
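// Informal usage sketch (mirrors the tests further down; `asyncio.staggered` is
// assumed not to be listed explicitly, so the parent-package fallback in
// `query_module` applies):
//
//     let versions = TypeshedVersions::from_str("asyncio: 3.8-").unwrap();
//     let module = ModuleName::new_static("asyncio.staggered").unwrap();
//     assert_eq!(
//         versions.query_module(&module, PythonVersion::PY310),
//         TypeshedVersionsQueryResult::MaybeExists
//     );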
impl FromStr for TypeshedVersions {
type Err = TypeshedVersionsParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut map = FxHashMap::default();
for (line_index, line) in s.lines().enumerate() {
// humans expect line numbers to be 1-indexed
let line_number = NonZeroUsize::new(line_index.saturating_add(1)).unwrap();
let Ok(line_number) = NonZeroU16::try_from(line_number) else {
return Err(TypeshedVersionsParseError {
line_number: None,
reason: TypeshedVersionsParseErrorKind::TooManyLines(line_number),
});
};
let Some(content) = line.split('#').map(str::trim).next() else {
continue;
};
if content.is_empty() {
continue;
}
let mut parts = content.split(':').map(str::trim);
let (Some(module_name), Some(rest), None) = (parts.next(), parts.next(), parts.next())
else {
return Err(TypeshedVersionsParseError {
line_number: Some(line_number),
reason: TypeshedVersionsParseErrorKind::UnexpectedNumberOfColons,
});
};
let Some(module_name) = ModuleName::new(module_name) else {
return Err(TypeshedVersionsParseError {
line_number: Some(line_number),
reason: TypeshedVersionsParseErrorKind::InvalidModuleName(
module_name.to_string(),
),
});
};
match PyVersionRange::from_str(rest) {
Ok(version) => map.insert(module_name, version),
Err(reason) => {
return Err(TypeshedVersionsParseError {
line_number: Some(line_number),
reason,
})
}
};
}
Ok(Self(map))
}
}
impl fmt::Display for TypeshedVersions {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let sorted_items: BTreeMap<&ModuleName, &PyVersionRange> = self.0.iter().collect();
for (module_name, range) in sorted_items {
writeln!(f, "{module_name}: {range}")?;
}
Ok(())
}
}
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
enum PyVersionRange {
AvailableFrom(RangeFrom<PythonVersion>),
AvailableWithin(RangeInclusive<PythonVersion>),
}
impl PyVersionRange {
#[must_use]
fn contains(&self, version: PythonVersion) -> bool {
match self {
Self::AvailableFrom(inner) => inner.contains(&version),
Self::AvailableWithin(inner) => inner.contains(&version),
}
}
}
impl FromStr for PyVersionRange {
type Err = TypeshedVersionsParseErrorKind;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut parts = s.split('-').map(str::trim);
match (parts.next(), parts.next(), parts.next()) {
(Some(lower), Some(""), None) => {
let lower = PythonVersion::from_versions_file_string(lower)?;
Ok(Self::AvailableFrom(lower..))
}
(Some(lower), Some(upper), None) => {
let lower = PythonVersion::from_versions_file_string(lower)?;
let upper = PythonVersion::from_versions_file_string(upper)?;
Ok(Self::AvailableWithin(lower..=upper))
}
_ => Err(TypeshedVersionsParseErrorKind::UnexpectedNumberOfHyphens),
}
}
}
impl fmt::Display for PyVersionRange {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::AvailableFrom(range_from) => write!(f, "{}-", range_from.start),
Self::AvailableWithin(range_inclusive) => {
write!(f, "{}-{}", range_inclusive.start(), range_inclusive.end())
}
}
}
}
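// Informal sketch of the two accepted range forms, `{MAJOR}.{MINOR}-` and
// `{MAJOR}.{MINOR}-{MAJOR}.{MINOR}`, round-tripping through `Display`:
//
//     assert_eq!(PyVersionRange::from_str("3.8-").unwrap().to_string(), "3.8-");
//     assert_eq!(PyVersionRange::from_str("2.7-3.10").unwrap().to_string(), "2.7-3.10");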
impl PythonVersion {
fn from_versions_file_string(s: &str) -> Result<Self, TypeshedVersionsParseErrorKind> {
let mut parts = s.split('.').map(str::trim);
let (Some(major), Some(minor), None) = (parts.next(), parts.next(), parts.next()) else {
return Err(TypeshedVersionsParseErrorKind::UnexpectedNumberOfPeriods(
s.to_string(),
));
};
PythonVersion::try_from((major, minor)).map_err(|int_parse_error| {
TypeshedVersionsParseErrorKind::IntegerParsingFailure {
version: s.to_string(),
err: int_parse_error,
}
})
}
}
#[cfg(test)]
mod tests {
use std::num::{IntErrorKind, NonZeroU16};
use std::path::Path;
use insta::assert_snapshot;
use super::*;
const TYPESHED_STDLIB_DIR: &str = "stdlib";
#[allow(unsafe_code)]
const ONE: Option<NonZeroU16> = Some(unsafe { NonZeroU16::new_unchecked(1) });
impl TypeshedVersions {
#[must_use]
fn contains_exact(&self, module: &ModuleName) -> bool {
self.exact(module).is_some()
}
#[must_use]
fn len(&self) -> usize {
self.0.len()
}
}
#[test]
fn can_parse_vendored_versions_file() {
let versions_data = include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/vendor/typeshed/stdlib/VERSIONS"
));
let versions = TypeshedVersions::from_str(versions_data).unwrap();
assert!(versions.len() > 100);
assert!(versions.len() < 1000);
let asyncio = ModuleName::new_static("asyncio").unwrap();
let asyncio_staggered = ModuleName::new_static("asyncio.staggered").unwrap();
let audioop = ModuleName::new_static("audioop").unwrap();
assert!(versions.contains_exact(&asyncio));
assert_eq!(
versions.query_module(&asyncio, PythonVersion::PY310),
TypeshedVersionsQueryResult::Exists
);
assert!(versions.contains_exact(&asyncio_staggered));
assert_eq!(
versions.query_module(&asyncio_staggered, PythonVersion::PY38),
TypeshedVersionsQueryResult::Exists
);
assert_eq!(
versions.query_module(&asyncio_staggered, PythonVersion::PY37),
TypeshedVersionsQueryResult::DoesNotExist
);
assert!(versions.contains_exact(&audioop));
assert_eq!(
versions.query_module(&audioop, PythonVersion::PY312),
TypeshedVersionsQueryResult::Exists
);
assert_eq!(
versions.query_module(&audioop, PythonVersion::PY313),
TypeshedVersionsQueryResult::DoesNotExist
);
}
#[test]
fn typeshed_versions_consistent_with_vendored_stubs() {
const VERSIONS_DATA: &str = include_str!("../../../vendor/typeshed/stdlib/VERSIONS");
let vendored_typeshed_dir = Path::new("vendor/typeshed").canonicalize().unwrap();
let vendored_typeshed_versions = TypeshedVersions::from_str(VERSIONS_DATA).unwrap();
let mut empty_iterator = true;
let stdlib_stubs_path = vendored_typeshed_dir.join(TYPESHED_STDLIB_DIR);
for entry in std::fs::read_dir(&stdlib_stubs_path).unwrap() {
empty_iterator = false;
let entry = entry.unwrap();
let absolute_path = entry.path();
let relative_path = absolute_path
.strip_prefix(&stdlib_stubs_path)
.unwrap_or_else(|_| panic!("Expected path to be a child of {stdlib_stubs_path:?} but found {absolute_path:?}"));
let relative_path_str = relative_path.as_os_str().to_str().unwrap_or_else(|| {
panic!("Expected all typeshed paths to be valid UTF-8; got {relative_path:?}")
});
if relative_path_str == "VERSIONS" {
continue;
}
let top_level_module = if let Some(extension) = relative_path.extension() {
// It was a file; strip off the file extension to get the module name:
let extension = extension
.to_str()
                    .unwrap_or_else(|| panic!("Expected all file extensions to be UTF-8; was not true for {relative_path:?}"));
relative_path_str
.strip_suffix(extension)
.and_then(|string| string.strip_suffix('.')).unwrap_or_else(|| {
panic!("Expected path {relative_path_str:?} to end with computed extension {extension:?}")
})
} else {
// It was a directory; no need to do anything to get the module name
relative_path_str
};
let top_level_module = ModuleName::new(top_level_module)
.unwrap_or_else(|| panic!("{top_level_module:?} was not a valid module name!"));
assert!(vendored_typeshed_versions.contains_exact(&top_level_module));
}
assert!(
!empty_iterator,
"Expected there to be at least one file or directory in the vendored typeshed stubs"
);
}
#[test]
fn can_parse_mock_versions_file() {
const VERSIONS: &str = "\
# a comment
# some more comment
# yet more comment
# and some more comment
bar: 2.7-3.10
# more comment
bar.baz: 3.1-3.9
foo: 3.8- # trailing comment
";
let parsed_versions = TypeshedVersions::from_str(VERSIONS).unwrap();
assert_eq!(parsed_versions.len(), 3);
assert_snapshot!(parsed_versions.to_string(), @r###"
bar: 2.7-3.10
bar.baz: 3.1-3.9
foo: 3.8-
"###
);
}
#[test]
fn version_within_range_parsed_correctly() {
let parsed_versions = TypeshedVersions::from_str("bar: 2.7-3.10").unwrap();
let bar = ModuleName::new_static("bar").unwrap();
assert!(parsed_versions.contains_exact(&bar));
assert_eq!(
parsed_versions.query_module(&bar, PythonVersion::PY37),
TypeshedVersionsQueryResult::Exists
);
assert_eq!(
parsed_versions.query_module(&bar, PythonVersion::PY310),
TypeshedVersionsQueryResult::Exists
);
assert_eq!(
parsed_versions.query_module(&bar, PythonVersion::PY311),
TypeshedVersionsQueryResult::DoesNotExist
);
}
#[test]
fn version_from_range_parsed_correctly() {
let parsed_versions = TypeshedVersions::from_str("foo: 3.8-").unwrap();
let foo = ModuleName::new_static("foo").unwrap();
assert!(parsed_versions.contains_exact(&foo));
assert_eq!(
parsed_versions.query_module(&foo, PythonVersion::PY37),
TypeshedVersionsQueryResult::DoesNotExist
);
assert_eq!(
parsed_versions.query_module(&foo, PythonVersion::PY38),
TypeshedVersionsQueryResult::Exists
);
assert_eq!(
parsed_versions.query_module(&foo, PythonVersion::PY311),
TypeshedVersionsQueryResult::Exists
);
}
#[test]
fn explicit_submodule_parsed_correctly() {
let parsed_versions = TypeshedVersions::from_str("bar.baz: 3.1-3.9").unwrap();
let bar_baz = ModuleName::new_static("bar.baz").unwrap();
assert!(parsed_versions.contains_exact(&bar_baz));
assert_eq!(
parsed_versions.query_module(&bar_baz, PythonVersion::PY37),
TypeshedVersionsQueryResult::Exists
);
assert_eq!(
parsed_versions.query_module(&bar_baz, PythonVersion::PY39),
TypeshedVersionsQueryResult::Exists
);
assert_eq!(
parsed_versions.query_module(&bar_baz, PythonVersion::PY310),
TypeshedVersionsQueryResult::DoesNotExist
);
}
#[test]
fn implicit_submodule_queried_correctly() {
let parsed_versions = TypeshedVersions::from_str("bar: 2.7-3.10").unwrap();
let bar_eggs = ModuleName::new_static("bar.eggs").unwrap();
assert!(!parsed_versions.contains_exact(&bar_eggs));
assert_eq!(
parsed_versions.query_module(&bar_eggs, PythonVersion::PY37),
TypeshedVersionsQueryResult::MaybeExists
);
assert_eq!(
parsed_versions.query_module(&bar_eggs, PythonVersion::PY310),
TypeshedVersionsQueryResult::MaybeExists
);
assert_eq!(
parsed_versions.query_module(&bar_eggs, PythonVersion::PY311),
TypeshedVersionsQueryResult::DoesNotExist
);
}
#[test]
fn nonexistent_module_queried_correctly() {
let parsed_versions = TypeshedVersions::from_str("eggs: 3.8-").unwrap();
let spam = ModuleName::new_static("spam").unwrap();
assert!(!parsed_versions.contains_exact(&spam));
assert_eq!(
parsed_versions.query_module(&spam, PythonVersion::PY37),
TypeshedVersionsQueryResult::DoesNotExist
);
assert_eq!(
parsed_versions.query_module(&spam, PythonVersion::PY313),
TypeshedVersionsQueryResult::DoesNotExist
);
}
#[test]
fn invalid_huge_versions_file() {
let offset = 100;
let too_many = u16::MAX as usize + offset;
let mut massive_versions_file = String::new();
for i in 0..too_many {
massive_versions_file.push_str(&format!("x{i}: 3.8-\n"));
}
assert_eq!(
TypeshedVersions::from_str(&massive_versions_file),
Err(TypeshedVersionsParseError {
line_number: None,
reason: TypeshedVersionsParseErrorKind::TooManyLines(
NonZeroUsize::new(too_many + 1 - offset).unwrap()
)
})
);
}
#[test]
fn invalid_typeshed_versions_bad_colon_number() {
assert_eq!(
TypeshedVersions::from_str("foo 3.7"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::UnexpectedNumberOfColons
})
);
assert_eq!(
TypeshedVersions::from_str("foo:: 3.7"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::UnexpectedNumberOfColons
})
);
}
#[test]
fn invalid_typeshed_versions_non_identifier_modules() {
assert_eq!(
TypeshedVersions::from_str("not!an!identifier!: 3.7"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::InvalidModuleName(
"not!an!identifier!".to_string()
)
})
);
assert_eq!(
TypeshedVersions::from_str("(also_not).(an_identifier): 3.7"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::InvalidModuleName(
"(also_not).(an_identifier)".to_string()
)
})
);
}
#[test]
fn invalid_typeshed_versions_bad_hyphen_number() {
assert_eq!(
TypeshedVersions::from_str("foo: 3.8"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::UnexpectedNumberOfHyphens
})
);
assert_eq!(
TypeshedVersions::from_str("foo: 3.8--"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::UnexpectedNumberOfHyphens
})
);
assert_eq!(
TypeshedVersions::from_str("foo: 3.8--3.9"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::UnexpectedNumberOfHyphens
})
);
}
#[test]
fn invalid_typeshed_versions_bad_period_number() {
assert_eq!(
TypeshedVersions::from_str("foo: 38-"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::UnexpectedNumberOfPeriods("38".to_string())
})
);
assert_eq!(
TypeshedVersions::from_str("foo: 3..8-"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::UnexpectedNumberOfPeriods(
"3..8".to_string()
)
})
);
assert_eq!(
TypeshedVersions::from_str("foo: 3.8-3..11"),
Err(TypeshedVersionsParseError {
line_number: ONE,
reason: TypeshedVersionsParseErrorKind::UnexpectedNumberOfPeriods(
"3..11".to_string()
)
})
);
}
#[test]
fn invalid_typeshed_versions_non_digits() {
let err = TypeshedVersions::from_str("foo: 1.two-").unwrap_err();
assert_eq!(err.line_number, ONE);
let TypeshedVersionsParseErrorKind::IntegerParsingFailure { version, err } = err.reason
else {
panic!()
};
assert_eq!(version, "1.two".to_string());
assert_eq!(*err.kind(), IntErrorKind::InvalidDigit);
let err = TypeshedVersions::from_str("foo: 3.8-four.9").unwrap_err();
assert_eq!(err.line_number, ONE);
let TypeshedVersionsParseErrorKind::IntegerParsingFailure { version, err } = err.reason
else {
panic!()
};
assert_eq!(version, "four.9".to_string());
assert_eq!(*err.kind(), IntErrorKind::InvalidDigit);
}
}

View File

@@ -1,24 +0,0 @@
use ruff_python_ast::{AnyNodeRef, NodeKind};
use ruff_text_size::{Ranged, TextRange};
/// Compact key for a node for use in a hash map.
///
/// Compares two nodes by their kind and text range.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub(super) struct NodeKey {
kind: NodeKind,
range: TextRange,
}
impl NodeKey {
pub(super) fn from_node<'a, N>(node: N) -> Self
where
N: Into<AnyNodeRef<'a>>,
{
let node = node.into();
NodeKey {
kind: node.kind(),
range: node.range(),
}
}
}
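// Informal sketch (`expr` and `lookup_table` are hypothetical): two nodes with the
// same kind and source range produce equal keys, so a `NodeKey` can stand in for a
// node in a hash map without borrowing the AST:
//
//     let key = NodeKey::from_node(&expr);
//     lookup_table.insert(key, value);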

View File

@@ -1,100 +0,0 @@
use crate::python_version::PythonVersion;
use anyhow::Context;
use salsa::Durability;
use salsa::Setter;
use ruff_db::system::{SystemPath, SystemPathBuf};
use crate::module_resolver::SearchPaths;
use crate::Db;
#[salsa::input(singleton)]
pub struct Program {
pub target_version: PythonVersion,
#[return_ref]
pub(crate) search_paths: SearchPaths,
}
impl Program {
pub fn from_settings(db: &dyn Db, settings: &ProgramSettings) -> anyhow::Result<Self> {
let ProgramSettings {
target_version,
search_paths,
} = settings;
tracing::info!("Target version: Python {target_version}");
let search_paths = SearchPaths::from_settings(db, search_paths)
.with_context(|| "Invalid search path settings")?;
Ok(Program::builder(settings.target_version, search_paths)
.durability(Durability::HIGH)
.new(db))
}
pub fn update_search_paths(
self,
db: &mut dyn Db,
search_path_settings: &SearchPathSettings,
) -> anyhow::Result<()> {
let search_paths = SearchPaths::from_settings(db, search_path_settings)?;
if self.search_paths(db) != &search_paths {
tracing::debug!("Update search paths");
self.set_search_paths(db).to(search_paths);
}
Ok(())
}
pub fn custom_stdlib_search_path(self, db: &dyn Db) -> Option<&SystemPath> {
self.search_paths(db).custom_stdlib()
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ProgramSettings {
pub target_version: PythonVersion,
pub search_paths: SearchPathSettings,
}
/// Configures the search paths for module resolution.
#[derive(Eq, PartialEq, Debug, Clone)]
pub struct SearchPathSettings {
/// List of user-provided paths that should take first priority in the module resolution.
/// Examples in other type checkers are mypy's MYPYPATH environment variable,
/// or pyright's stubPath configuration setting.
pub extra_paths: Vec<SystemPathBuf>,
/// The root of the workspace, used for finding first-party modules.
pub src_root: SystemPathBuf,
/// Optional path to a "custom typeshed" directory on disk for us to use for standard-library types.
    /// If this is not provided, we will fall back to our vendored typeshed stubs for the stdlib,
    /// bundled as a zip file in the binary.
pub custom_typeshed: Option<SystemPathBuf>,
/// The path to the user's `site-packages` directory, where third-party packages from ``PyPI`` are installed.
pub site_packages: SitePackages,
}
impl SearchPathSettings {
pub fn new(src_root: SystemPathBuf) -> Self {
Self {
src_root,
extra_paths: vec![],
custom_typeshed: None,
site_packages: SitePackages::Known(vec![]),
}
}
}
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum SitePackages {
Derived {
venv_path: SystemPathBuf,
},
/// Resolved site packages paths
Known(Vec<SystemPathBuf>),
}
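// Informal construction sketch (`db` is an assumed `&dyn Db`; paths are made up
// for illustration):
//
//     let settings = ProgramSettings {
//         target_version: PythonVersion::PY312,
//         search_paths: SearchPathSettings {
//             site_packages: SitePackages::Known(vec![SystemPathBuf::from(
//                 ".venv/lib/python3.12/site-packages",
//             )]),
//             ..SearchPathSettings::new(SystemPathBuf::from("src"))
//         },
//     };
//     let program = Program::from_settings(db, &settings)?;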

View File

@@ -1,62 +0,0 @@
use std::fmt;
/// Representation of a Python version.
///
/// Unlike the `TargetVersion` enums in the CLI crates,
/// this does not necessarily represent a Python version that we actually support.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct PythonVersion {
pub major: u8,
pub minor: u8,
}
impl PythonVersion {
pub const PY37: PythonVersion = PythonVersion { major: 3, minor: 7 };
pub const PY38: PythonVersion = PythonVersion { major: 3, minor: 8 };
pub const PY39: PythonVersion = PythonVersion { major: 3, minor: 9 };
pub const PY310: PythonVersion = PythonVersion {
major: 3,
minor: 10,
};
pub const PY311: PythonVersion = PythonVersion {
major: 3,
minor: 11,
};
pub const PY312: PythonVersion = PythonVersion {
major: 3,
minor: 12,
};
pub const PY313: PythonVersion = PythonVersion {
major: 3,
minor: 13,
};
pub fn free_threaded_build_available(self) -> bool {
self >= PythonVersion::PY313
}
}
impl Default for PythonVersion {
fn default() -> Self {
Self::PY38
}
}
impl TryFrom<(&str, &str)> for PythonVersion {
type Error = std::num::ParseIntError;
fn try_from(value: (&str, &str)) -> Result<Self, Self::Error> {
let (major, minor) = value;
Ok(Self {
major: major.parse()?,
minor: minor.parse()?,
})
}
}
impl fmt::Display for PythonVersion {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let PythonVersion { major, minor } = self;
write!(f, "{major}.{minor}")
}
}
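// Informal sketch: versions parse from the string pairs read out of typeshed's
// VERSIONS file and order like plain (major, minor) tuples:
//
//     let version = PythonVersion::try_from(("3", "11")).unwrap();
//     assert_eq!(version, PythonVersion::PY311);
//     assert!(version > PythonVersion::PY38);
//     assert_eq!(version.to_string(), "3.11");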

File diff suppressed because it is too large

View File

@@ -1,210 +0,0 @@
use rustc_hash::FxHashMap;
use ruff_index::newtype_index;
use ruff_python_ast as ast;
use ruff_python_ast::ExpressionRef;
use crate::semantic_index::ast_ids::node_key::ExpressionNodeKey;
use crate::semantic_index::semantic_index;
use crate::semantic_index::symbol::ScopeId;
use crate::Db;
/// AST ids for a single scope.
///
/// The motivation for building the AST ids per scope isn't about reducing invalidation because
/// the struct changes whenever the parsed AST changes. Instead, it's mainly that we can
/// build the AST ids struct when building the symbol table and also keep the property that
/// IDs of outer scopes are unaffected by changes in inner scopes.
///
/// For example, adding new statements to `foo` shouldn't change the statement id of `x = foo()` in:
///
/// ```python
/// def foo():
///     return 5
///
/// x = foo()
/// ```
#[derive(Debug)]
pub(crate) struct AstIds {
/// Maps expressions to their expression id.
expressions_map: FxHashMap<ExpressionNodeKey, ScopedExpressionId>,
/// Maps expressions which "use" a symbol (that is, [`ast::ExprName`]) to a use id.
uses_map: FxHashMap<ExpressionNodeKey, ScopedUseId>,
}
impl AstIds {
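    /// Returns the expression ID for `key`.
    ///
    /// Panics if the expression was not recorded for this scope.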
fn expression_id(&self, key: impl Into<ExpressionNodeKey>) -> ScopedExpressionId {
self.expressions_map[&key.into()]
}
fn use_id(&self, key: impl Into<ExpressionNodeKey>) -> ScopedUseId {
self.uses_map[&key.into()]
}
}
fn ast_ids<'db>(db: &'db dyn Db, scope: ScopeId) -> &'db AstIds {
semantic_index(db, scope.file(db)).ast_ids(scope.file_scope_id(db))
}
pub trait HasScopedUseId {
/// The type of the ID uniquely identifying the use.
type Id: Copy;
/// Returns the ID that uniquely identifies the use in `scope`.
fn scoped_use_id(&self, db: &dyn Db, scope: ScopeId) -> Self::Id;
}
/// Uniquely identifies a use of a name in a [`crate::semantic_index::symbol::FileScopeId`].
#[newtype_index]
pub struct ScopedUseId;
impl HasScopedUseId for ast::ExprName {
type Id = ScopedUseId;
fn scoped_use_id(&self, db: &dyn Db, scope: ScopeId) -> Self::Id {
let expression_ref = ExpressionRef::from(self);
expression_ref.scoped_use_id(db, scope)
}
}
impl HasScopedUseId for ast::ExpressionRef<'_> {
type Id = ScopedUseId;
fn scoped_use_id(&self, db: &dyn Db, scope: ScopeId) -> Self::Id {
let ast_ids = ast_ids(db, scope);
ast_ids.use_id(*self)
}
}
pub trait HasScopedAstId {
/// The type of the ID uniquely identifying the node.
type Id: Copy;
/// Returns the ID that uniquely identifies the node in `scope`.
fn scoped_ast_id(&self, db: &dyn Db, scope: ScopeId) -> Self::Id;
}
/// Uniquely identifies an [`ast::Expr`] in a [`crate::semantic_index::symbol::FileScopeId`].
#[newtype_index]
pub struct ScopedExpressionId;
macro_rules! impl_has_scoped_expression_id {
($ty: ty) => {
impl HasScopedAstId for $ty {
type Id = ScopedExpressionId;
fn scoped_ast_id(&self, db: &dyn Db, scope: ScopeId) -> Self::Id {
let expression_ref = ExpressionRef::from(self);
expression_ref.scoped_ast_id(db, scope)
}
}
};
}
impl_has_scoped_expression_id!(ast::ExprBoolOp);
impl_has_scoped_expression_id!(ast::ExprName);
impl_has_scoped_expression_id!(ast::ExprBinOp);
impl_has_scoped_expression_id!(ast::ExprUnaryOp);
impl_has_scoped_expression_id!(ast::ExprLambda);
impl_has_scoped_expression_id!(ast::ExprIf);
impl_has_scoped_expression_id!(ast::ExprDict);
impl_has_scoped_expression_id!(ast::ExprSet);
impl_has_scoped_expression_id!(ast::ExprListComp);
impl_has_scoped_expression_id!(ast::ExprSetComp);
impl_has_scoped_expression_id!(ast::ExprDictComp);
impl_has_scoped_expression_id!(ast::ExprGenerator);
impl_has_scoped_expression_id!(ast::ExprAwait);
impl_has_scoped_expression_id!(ast::ExprYield);
impl_has_scoped_expression_id!(ast::ExprYieldFrom);
impl_has_scoped_expression_id!(ast::ExprCompare);
impl_has_scoped_expression_id!(ast::ExprCall);
impl_has_scoped_expression_id!(ast::ExprFString);
impl_has_scoped_expression_id!(ast::ExprStringLiteral);
impl_has_scoped_expression_id!(ast::ExprBytesLiteral);
impl_has_scoped_expression_id!(ast::ExprNumberLiteral);
impl_has_scoped_expression_id!(ast::ExprBooleanLiteral);
impl_has_scoped_expression_id!(ast::ExprNoneLiteral);
impl_has_scoped_expression_id!(ast::ExprEllipsisLiteral);
impl_has_scoped_expression_id!(ast::ExprAttribute);
impl_has_scoped_expression_id!(ast::ExprSubscript);
impl_has_scoped_expression_id!(ast::ExprStarred);
impl_has_scoped_expression_id!(ast::ExprNamed);
impl_has_scoped_expression_id!(ast::ExprList);
impl_has_scoped_expression_id!(ast::ExprTuple);
impl_has_scoped_expression_id!(ast::ExprSlice);
impl_has_scoped_expression_id!(ast::ExprIpyEscapeCommand);
impl_has_scoped_expression_id!(ast::Expr);
impl HasScopedAstId for ast::ExpressionRef<'_> {
type Id = ScopedExpressionId;
fn scoped_ast_id(&self, db: &dyn Db, scope: ScopeId) -> Self::Id {
let ast_ids = ast_ids(db, scope);
ast_ids.expression_id(*self)
}
}
#[derive(Debug)]
pub(super) struct AstIdsBuilder {
expressions_map: FxHashMap<ExpressionNodeKey, ScopedExpressionId>,
uses_map: FxHashMap<ExpressionNodeKey, ScopedUseId>,
}
impl AstIdsBuilder {
pub(super) fn new() -> Self {
Self {
expressions_map: FxHashMap::default(),
uses_map: FxHashMap::default(),
}
}
/// Adds `expr` to the expression ids map and returns its id.
pub(super) fn record_expression(&mut self, expr: &ast::Expr) -> ScopedExpressionId {
let expression_id = self.expressions_map.len().into();
self.expressions_map.insert(expr.into(), expression_id);
expression_id
}
/// Adds `expr` to the use ids map and returns its id.
pub(super) fn record_use(&mut self, expr: &ast::Expr) -> ScopedUseId {
let use_id = self.uses_map.len().into();
self.uses_map.insert(expr.into(), use_id);
use_id
}
pub(super) fn finish(mut self) -> AstIds {
self.expressions_map.shrink_to_fit();
self.uses_map.shrink_to_fit();
AstIds {
expressions_map: self.expressions_map,
uses_map: self.uses_map,
}
}
}
/// Node key that can only be constructed for expressions.
pub(crate) mod node_key {
use ruff_python_ast as ast;
use crate::node_key::NodeKey;
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
pub(crate) struct ExpressionNodeKey(NodeKey);
impl From<ast::ExpressionRef<'_>> for ExpressionNodeKey {
fn from(value: ast::ExpressionRef<'_>) -> Self {
Self(NodeKey::from_node(value))
}
}
impl From<&ast::Expr> for ExpressionNodeKey {
fn from(value: &ast::Expr) -> Self {
Self(NodeKey::from_node(value))
}
}
}

View File

@@ -1,835 +0,0 @@
use std::sync::Arc;
use rustc_hash::FxHashMap;
use ruff_db::files::File;
use ruff_db::parsed::ParsedModule;
use ruff_index::IndexVec;
use ruff_python_ast as ast;
use ruff_python_ast::name::Name;
use ruff_python_ast::visitor::{walk_expr, walk_pattern, walk_stmt, Visitor};
use ruff_python_ast::AnyParameterRef;
use crate::ast_node_ref::AstNodeRef;
use crate::semantic_index::ast_ids::node_key::ExpressionNodeKey;
use crate::semantic_index::ast_ids::AstIdsBuilder;
use crate::semantic_index::definition::{
AssignmentDefinitionNodeRef, ComprehensionDefinitionNodeRef, Definition, DefinitionNodeKey,
DefinitionNodeRef, ImportFromDefinitionNodeRef,
};
use crate::semantic_index::expression::Expression;
use crate::semantic_index::symbol::{
FileScopeId, NodeWithScopeKey, NodeWithScopeRef, Scope, ScopeId, ScopedSymbolId, SymbolFlags,
SymbolTableBuilder,
};
use crate::semantic_index::use_def::{FlowSnapshot, UseDefMapBuilder};
use crate::semantic_index::SemanticIndex;
use crate::Db;
use super::definition::WithItemDefinitionNodeRef;
pub(super) struct SemanticIndexBuilder<'db> {
// Builder state
db: &'db dyn Db,
file: File,
module: &'db ParsedModule,
scope_stack: Vec<FileScopeId>,
/// The assignment we're currently visiting.
current_assignment: Option<CurrentAssignment<'db>>,
/// Flow states at each `break` in the current loop.
loop_break_states: Vec<FlowSnapshot>,
// Semantic Index fields
scopes: IndexVec<FileScopeId, Scope>,
scope_ids_by_scope: IndexVec<FileScopeId, ScopeId<'db>>,
symbol_tables: IndexVec<FileScopeId, SymbolTableBuilder>,
ast_ids: IndexVec<FileScopeId, AstIdsBuilder>,
use_def_maps: IndexVec<FileScopeId, UseDefMapBuilder<'db>>,
scopes_by_node: FxHashMap<NodeWithScopeKey, FileScopeId>,
scopes_by_expression: FxHashMap<ExpressionNodeKey, FileScopeId>,
definitions_by_node: FxHashMap<DefinitionNodeKey, Definition<'db>>,
expressions_by_node: FxHashMap<ExpressionNodeKey, Expression<'db>>,
}
impl<'db> SemanticIndexBuilder<'db> {
pub(super) fn new(db: &'db dyn Db, file: File, parsed: &'db ParsedModule) -> Self {
let mut builder = Self {
db,
file,
module: parsed,
scope_stack: Vec::new(),
current_assignment: None,
loop_break_states: vec![],
scopes: IndexVec::new(),
symbol_tables: IndexVec::new(),
ast_ids: IndexVec::new(),
scope_ids_by_scope: IndexVec::new(),
use_def_maps: IndexVec::new(),
scopes_by_expression: FxHashMap::default(),
scopes_by_node: FxHashMap::default(),
definitions_by_node: FxHashMap::default(),
expressions_by_node: FxHashMap::default(),
};
builder.push_scope_with_parent(NodeWithScopeRef::Module, None);
builder
}
fn current_scope(&self) -> FileScopeId {
*self
.scope_stack
.last()
.expect("Always to have a root scope")
}
fn push_scope(&mut self, node: NodeWithScopeRef) {
let parent = self.current_scope();
self.push_scope_with_parent(node, Some(parent));
}
fn push_scope_with_parent(&mut self, node: NodeWithScopeRef, parent: Option<FileScopeId>) {
let children_start = self.scopes.next_index() + 1;
let scope = Scope {
parent,
kind: node.scope_kind(),
descendents: children_start..children_start,
};
let file_scope_id = self.scopes.push(scope);
self.symbol_tables.push(SymbolTableBuilder::new());
self.use_def_maps.push(UseDefMapBuilder::new());
let ast_id_scope = self.ast_ids.push(AstIdsBuilder::new());
#[allow(unsafe_code)]
// SAFETY: `node` is guaranteed to be a child of `self.module`
let scope_id = ScopeId::new(
self.db,
self.file,
file_scope_id,
unsafe { node.to_kind(self.module.clone()) },
countme::Count::default(),
);
self.scope_ids_by_scope.push(scope_id);
self.scopes_by_node.insert(node.node_key(), file_scope_id);
debug_assert_eq!(ast_id_scope, file_scope_id);
self.scope_stack.push(file_scope_id);
}
fn pop_scope(&mut self) -> FileScopeId {
let id = self.scope_stack.pop().expect("Root scope to be present");
let children_end = self.scopes.next_index();
let scope = &mut self.scopes[id];
scope.descendents = scope.descendents.start..children_end;
id
}
fn current_symbol_table(&mut self) -> &mut SymbolTableBuilder {
let scope_id = self.current_scope();
&mut self.symbol_tables[scope_id]
}
fn current_use_def_map_mut(&mut self) -> &mut UseDefMapBuilder<'db> {
let scope_id = self.current_scope();
&mut self.use_def_maps[scope_id]
}
fn current_use_def_map(&self) -> &UseDefMapBuilder<'db> {
let scope_id = self.current_scope();
&self.use_def_maps[scope_id]
}
fn current_ast_ids(&mut self) -> &mut AstIdsBuilder {
let scope_id = self.current_scope();
&mut self.ast_ids[scope_id]
}
fn flow_snapshot(&self) -> FlowSnapshot {
self.current_use_def_map().snapshot()
}
fn flow_restore(&mut self, state: FlowSnapshot) {
self.current_use_def_map_mut().restore(state);
}
fn flow_merge(&mut self, state: FlowSnapshot) {
self.current_use_def_map_mut().merge(state);
}
fn add_or_update_symbol(&mut self, name: Name, flags: SymbolFlags) -> ScopedSymbolId {
let symbol_table = self.current_symbol_table();
let (symbol_id, added) = symbol_table.add_or_update_symbol(name, flags);
if added {
let use_def_map = self.current_use_def_map_mut();
use_def_map.add_symbol(symbol_id);
}
symbol_id
}
fn add_definition<'a>(
&mut self,
symbol: ScopedSymbolId,
definition_node: impl Into<DefinitionNodeRef<'a>>,
) -> Definition<'db> {
let definition_node: DefinitionNodeRef<'_> = definition_node.into();
let definition = Definition::new(
self.db,
self.file,
self.current_scope(),
symbol,
#[allow(unsafe_code)]
unsafe {
definition_node.into_owned(self.module.clone())
},
countme::Count::default(),
);
self.definitions_by_node
.insert(definition_node.key(), definition);
self.current_use_def_map_mut()
.record_definition(symbol, definition);
definition
}
fn add_constraint(&mut self, constraint_node: &ast::Expr) -> Expression<'db> {
let expression = self.add_standalone_expression(constraint_node);
self.current_use_def_map_mut().record_constraint(expression);
expression
}
/// Record an expression that needs to be a Salsa ingredient, because we need to infer its type
/// standalone (type narrowing tests, RHS of an assignment.)
fn add_standalone_expression(&mut self, expression_node: &ast::Expr) -> Expression<'db> {
let expression = Expression::new(
self.db,
self.file,
self.current_scope(),
#[allow(unsafe_code)]
unsafe {
AstNodeRef::new(self.module.clone(), expression_node)
},
countme::Count::default(),
);
self.expressions_by_node
.insert(expression_node.into(), expression);
expression
}
fn with_type_params(
&mut self,
with_scope: NodeWithScopeRef,
type_params: Option<&'db ast::TypeParams>,
nested: impl FnOnce(&mut Self) -> FileScopeId,
) -> FileScopeId {
if let Some(type_params) = type_params {
self.push_scope(with_scope);
for type_param in &type_params.type_params {
let (name, bound, default) = match type_param {
ast::TypeParam::TypeVar(ast::TypeParamTypeVar {
range: _,
name,
bound,
default,
}) => (name, bound, default),
ast::TypeParam::ParamSpec(ast::TypeParamParamSpec {
name, default, ..
}) => (name, &None, default),
ast::TypeParam::TypeVarTuple(ast::TypeParamTypeVarTuple {
name,
default,
..
}) => (name, &None, default),
};
// TODO create Definition for typevars
self.add_or_update_symbol(name.id.clone(), SymbolFlags::IS_DEFINED);
if let Some(bound) = bound {
self.visit_expr(bound);
}
if let Some(default) = default {
self.visit_expr(default);
}
}
}
let nested_scope = nested(self);
if type_params.is_some() {
self.pop_scope();
}
nested_scope
}
/// Visit a list of [`Comprehension`] nodes, assumed to be the "generators" that compose a
/// comprehension (that is, the `for x in y` and `for y in z` parts of `x for x in y for y in z`.)
///
/// [`Comprehension`]: ast::Comprehension
fn visit_generators(&mut self, scope: NodeWithScopeRef, generators: &'db [ast::Comprehension]) {
let mut generators_iter = generators.iter();
let Some(generator) = generators_iter.next() else {
unreachable!("Expression must contain at least one generator");
};
// The `iter` of the first generator is evaluated in the outer scope, while all subsequent
// nodes are evaluated in the inner scope.
self.visit_expr(&generator.iter);
self.push_scope(scope);
self.current_assignment = Some(CurrentAssignment::Comprehension {
node: generator,
first: true,
});
self.visit_expr(&generator.target);
self.current_assignment = None;
for expr in &generator.ifs {
self.visit_expr(expr);
}
for generator in generators_iter {
self.visit_expr(&generator.iter);
self.current_assignment = Some(CurrentAssignment::Comprehension {
node: generator,
first: false,
});
self.visit_expr(&generator.target);
self.current_assignment = None;
for expr in &generator.ifs {
self.visit_expr(expr);
}
}
}
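    // Informal scoping illustration for `[elt for x in a for y in b(x)]`: only `a`
    // (the first generator's `iter`) is visited in the enclosing scope; `x`, `b(x)`,
    // `y`, any `if` conditions, and `elt` are all visited inside the comprehension
    // scope pushed above (the caller visits `elt` after this method returns).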
fn declare_parameter(&mut self, parameter: AnyParameterRef) {
let symbol =
self.add_or_update_symbol(parameter.name().id().clone(), SymbolFlags::IS_DEFINED);
let definition = self.add_definition(symbol, parameter);
if let AnyParameterRef::NonVariadic(with_default) = parameter {
// Insert a mapping from the parameter to the same definition.
// This ensures that calling `HasTy::ty` on the inner parameter returns
// a valid type (and doesn't panic)
self.definitions_by_node.insert(
DefinitionNodeRef::from(AnyParameterRef::Variadic(&with_default.parameter)).key(),
definition,
);
}
}
pub(super) fn build(mut self) -> SemanticIndex<'db> {
let module = self.module;
self.visit_body(module.suite());
// Pop the root scope
self.pop_scope();
assert!(self.scope_stack.is_empty());
assert!(self.current_assignment.is_none());
let mut symbol_tables: IndexVec<_, _> = self
.symbol_tables
.into_iter()
.map(|builder| Arc::new(builder.finish()))
.collect();
let mut use_def_maps: IndexVec<_, _> = self
.use_def_maps
.into_iter()
.map(|builder| Arc::new(builder.finish()))
.collect();
let mut ast_ids: IndexVec<_, _> = self
.ast_ids
.into_iter()
.map(super::ast_ids::AstIdsBuilder::finish)
.collect();
self.scopes.shrink_to_fit();
symbol_tables.shrink_to_fit();
use_def_maps.shrink_to_fit();
ast_ids.shrink_to_fit();
self.scopes_by_expression.shrink_to_fit();
self.definitions_by_node.shrink_to_fit();
self.scope_ids_by_scope.shrink_to_fit();
self.scopes_by_node.shrink_to_fit();
SemanticIndex {
symbol_tables,
scopes: self.scopes,
definitions_by_node: self.definitions_by_node,
expressions_by_node: self.expressions_by_node,
scope_ids_by_scope: self.scope_ids_by_scope,
ast_ids,
scopes_by_expression: self.scopes_by_expression,
scopes_by_node: self.scopes_by_node,
use_def_maps,
}
}
}
impl<'db, 'ast> Visitor<'ast> for SemanticIndexBuilder<'db>
where
'ast: 'db,
{
fn visit_stmt(&mut self, stmt: &'ast ast::Stmt) {
match stmt {
ast::Stmt::FunctionDef(function_def) => {
for decorator in &function_def.decorator_list {
self.visit_decorator(decorator);
}
let symbol = self
.add_or_update_symbol(function_def.name.id.clone(), SymbolFlags::IS_DEFINED);
self.add_definition(symbol, function_def);
                // The default values of the parameters need to be evaluated in the
                // enclosing scope.
for default in function_def
.parameters
.iter_non_variadic_params()
.filter_map(|param| param.default.as_deref())
{
self.visit_expr(default);
}
self.with_type_params(
NodeWithScopeRef::FunctionTypeParameters(function_def),
function_def.type_params.as_deref(),
|builder| {
builder.visit_parameters(&function_def.parameters);
if let Some(expr) = &function_def.returns {
builder.visit_annotation(expr);
}
builder.push_scope(NodeWithScopeRef::Function(function_def));
// Add symbols and definitions for the parameters to the function scope.
for parameter in &*function_def.parameters {
builder.declare_parameter(parameter);
}
builder.visit_body(&function_def.body);
builder.pop_scope()
},
);
}
ast::Stmt::ClassDef(class) => {
for decorator in &class.decorator_list {
self.visit_decorator(decorator);
}
let symbol =
self.add_or_update_symbol(class.name.id.clone(), SymbolFlags::IS_DEFINED);
self.add_definition(symbol, class);
self.with_type_params(
NodeWithScopeRef::ClassTypeParameters(class),
class.type_params.as_deref(),
|builder| {
if let Some(arguments) = &class.arguments {
builder.visit_arguments(arguments);
}
builder.push_scope(NodeWithScopeRef::Class(class));
builder.visit_body(&class.body);
builder.pop_scope()
},
);
}
ast::Stmt::Import(node) => {
for alias in &node.names {
let symbol_name = if let Some(asname) = &alias.asname {
asname.id.clone()
} else {
Name::new(alias.name.id.split('.').next().unwrap())
};
let symbol = self.add_or_update_symbol(symbol_name, SymbolFlags::IS_DEFINED);
self.add_definition(symbol, alias);
}
}
ast::Stmt::ImportFrom(node) => {
for (alias_index, alias) in node.names.iter().enumerate() {
let symbol_name = if let Some(asname) = &alias.asname {
&asname.id
} else {
&alias.name.id
};
let symbol =
self.add_or_update_symbol(symbol_name.clone(), SymbolFlags::IS_DEFINED);
self.add_definition(symbol, ImportFromDefinitionNodeRef { node, alias_index });
}
}
ast::Stmt::Assign(node) => {
debug_assert!(self.current_assignment.is_none());
self.visit_expr(&node.value);
self.add_standalone_expression(&node.value);
self.current_assignment = Some(node.into());
for target in &node.targets {
self.visit_expr(target);
}
self.current_assignment = None;
}
ast::Stmt::AnnAssign(node) => {
debug_assert!(self.current_assignment.is_none());
// TODO deferred annotation visiting
self.visit_expr(&node.annotation);
if let Some(value) = &node.value {
self.visit_expr(value);
}
self.current_assignment = Some(node.into());
self.visit_expr(&node.target);
self.current_assignment = None;
}
ast::Stmt::AugAssign(
aug_assign @ ast::StmtAugAssign {
range: _,
target,
op: _,
value,
},
) => {
debug_assert!(self.current_assignment.is_none());
self.visit_expr(value);
self.current_assignment = Some(aug_assign.into());
self.visit_expr(target);
self.current_assignment = None;
}
ast::Stmt::If(node) => {
self.visit_expr(&node.test);
let pre_if = self.flow_snapshot();
self.add_constraint(&node.test);
self.visit_body(&node.body);
let mut post_clauses: Vec<FlowSnapshot> = vec![];
for clause in &node.elif_else_clauses {
// snapshot after every block except the last; the last one will just become
// the state that we merge the other snapshots into
post_clauses.push(self.flow_snapshot());
// we can only take an elif/else branch if none of the previous ones were
// taken, so the block entry state is always `pre_if`
self.flow_restore(pre_if.clone());
self.visit_elif_else_clause(clause);
}
for post_clause_state in post_clauses {
self.flow_merge(post_clause_state);
}
let has_else = node
.elif_else_clauses
.last()
.is_some_and(|clause| clause.test.is_none());
if !has_else {
// if there's no else clause, then it's possible we took none of the branches,
// and the pre_if state can reach here
self.flow_merge(pre_if);
}
}
ast::Stmt::While(node) => {
self.visit_expr(&node.test);
let pre_loop = self.flow_snapshot();
// Save aside any break states from an outer loop
let saved_break_states = std::mem::take(&mut self.loop_break_states);
self.visit_body(&node.body);
// Get the break states from the body of this loop, and restore the saved outer
// ones.
let break_states =
std::mem::replace(&mut self.loop_break_states, saved_break_states);
// We may execute the `else` clause without ever executing the body, so merge in
// the pre-loop state before visiting `else`.
self.flow_merge(pre_loop);
self.visit_body(&node.orelse);
// Breaking out of a while loop bypasses the `else` clause, so merge in the break
// states after visiting `else`.
for break_state in break_states {
self.flow_merge(break_state);
}
}
ast::Stmt::With(ast::StmtWith { items, body, .. }) => {
for item in items {
self.visit_expr(&item.context_expr);
if let Some(optional_vars) = item.optional_vars.as_deref() {
self.add_standalone_expression(&item.context_expr);
self.current_assignment = Some(item.into());
self.visit_expr(optional_vars);
self.current_assignment = None;
}
}
self.visit_body(body);
}
ast::Stmt::Break(_) => {
self.loop_break_states.push(self.flow_snapshot());
}
_ => {
walk_stmt(self, stmt);
}
}
}
fn visit_expr(&mut self, expr: &'ast ast::Expr) {
self.scopes_by_expression
.insert(expr.into(), self.current_scope());
self.current_ast_ids().record_expression(expr);
match expr {
ast::Expr::Name(name_node @ ast::ExprName { id, ctx, .. }) => {
let mut flags = match ctx {
ast::ExprContext::Load => SymbolFlags::IS_USED,
ast::ExprContext::Store => SymbolFlags::IS_DEFINED,
ast::ExprContext::Del => SymbolFlags::IS_DEFINED,
ast::ExprContext::Invalid => SymbolFlags::empty(),
};
if matches!(
self.current_assignment,
Some(CurrentAssignment::AugAssign(_))
) && !ctx.is_invalid()
{
// For augmented assignment, the target expression is also used, so we should
// record that as a use.
flags |= SymbolFlags::IS_USED;
}
let symbol = self.add_or_update_symbol(id.clone(), flags);
if flags.contains(SymbolFlags::IS_DEFINED) {
match self.current_assignment {
Some(CurrentAssignment::Assign(assignment)) => {
self.add_definition(
symbol,
AssignmentDefinitionNodeRef {
assignment,
target: name_node,
},
);
}
Some(CurrentAssignment::AnnAssign(ann_assign)) => {
self.add_definition(symbol, ann_assign);
}
Some(CurrentAssignment::AugAssign(aug_assign)) => {
self.add_definition(symbol, aug_assign);
}
Some(CurrentAssignment::Named(named)) => {
// TODO(dhruvmanila): If the current scope is a comprehension, then the
// named expression is implicitly nonlocal. This is yet to be
// implemented.
self.add_definition(symbol, named);
}
Some(CurrentAssignment::Comprehension { node, first }) => {
self.add_definition(
symbol,
ComprehensionDefinitionNodeRef { node, first },
);
}
Some(CurrentAssignment::WithItem(with_item)) => {
self.add_definition(
symbol,
WithItemDefinitionNodeRef {
node: with_item,
target: name_node,
},
);
}
None => {}
}
}
if flags.contains(SymbolFlags::IS_USED) {
let use_id = self.current_ast_ids().record_use(expr);
self.current_use_def_map_mut().record_use(symbol, use_id);
}
walk_expr(self, expr);
}
ast::Expr::Named(node) => {
debug_assert!(self.current_assignment.is_none());
self.current_assignment = Some(node.into());
// TODO walrus in comprehensions is implicitly nonlocal
self.visit_expr(&node.target);
self.current_assignment = None;
self.visit_expr(&node.value);
}
ast::Expr::Lambda(lambda) => {
if let Some(parameters) = &lambda.parameters {
                    // The default values of the parameters need to be evaluated in the
                    // enclosing scope.
for default in parameters
.iter_non_variadic_params()
.filter_map(|param| param.default.as_deref())
{
self.visit_expr(default);
}
self.visit_parameters(parameters);
}
self.push_scope(NodeWithScopeRef::Lambda(lambda));
// Add symbols and definitions for the parameters to the lambda scope.
if let Some(parameters) = &lambda.parameters {
for parameter in &**parameters {
self.declare_parameter(parameter);
}
}
self.visit_expr(lambda.body.as_ref());
}
ast::Expr::If(ast::ExprIf {
body, test, orelse, ..
}) => {
// TODO detect statically known truthy or falsy test (via type inference, not naive
// AST inspection, so we can't simplify here, need to record test expression for
// later checking)
self.visit_expr(test);
let pre_if = self.flow_snapshot();
self.visit_expr(body);
let post_body = self.flow_snapshot();
self.flow_restore(pre_if);
self.visit_expr(orelse);
self.flow_merge(post_body);
}
ast::Expr::ListComp(
list_comprehension @ ast::ExprListComp {
elt, generators, ..
},
) => {
self.visit_generators(
NodeWithScopeRef::ListComprehension(list_comprehension),
generators,
);
self.visit_expr(elt);
}
ast::Expr::SetComp(
set_comprehension @ ast::ExprSetComp {
elt, generators, ..
},
) => {
self.visit_generators(
NodeWithScopeRef::SetComprehension(set_comprehension),
generators,
);
self.visit_expr(elt);
}
ast::Expr::Generator(
generator @ ast::ExprGenerator {
elt, generators, ..
},
) => {
self.visit_generators(NodeWithScopeRef::GeneratorExpression(generator), generators);
self.visit_expr(elt);
}
ast::Expr::DictComp(
dict_comprehension @ ast::ExprDictComp {
key,
value,
generators,
..
},
) => {
self.visit_generators(
NodeWithScopeRef::DictComprehension(dict_comprehension),
generators,
);
self.visit_expr(key);
self.visit_expr(value);
}
_ => {
walk_expr(self, expr);
}
}
if matches!(
expr,
ast::Expr::Lambda(_)
| ast::Expr::ListComp(_)
| ast::Expr::SetComp(_)
| ast::Expr::Generator(_)
| ast::Expr::DictComp(_)
) {
self.pop_scope();
}
}
fn visit_parameters(&mut self, parameters: &'ast ruff_python_ast::Parameters) {
// Intentionally avoid walking default expressions, as we handle them in the enclosing
// scope.
for parameter in parameters.iter().map(ast::AnyParameterRef::as_parameter) {
self.visit_parameter(parameter);
}
}
fn visit_pattern(&mut self, pattern: &'ast ast::Pattern) {
if let ast::Pattern::MatchAs(ast::PatternMatchAs {
name: Some(name), ..
})
| ast::Pattern::MatchStar(ast::PatternMatchStar {
name: Some(name),
range: _,
})
| ast::Pattern::MatchMapping(ast::PatternMatchMapping {
rest: Some(name), ..
}) = pattern
{
// TODO(dhruvmanila): Add definition
self.add_or_update_symbol(name.id.clone(), SymbolFlags::IS_DEFINED);
}
walk_pattern(self, pattern);
}
}
#[derive(Copy, Clone, Debug)]
enum CurrentAssignment<'a> {
Assign(&'a ast::StmtAssign),
AnnAssign(&'a ast::StmtAnnAssign),
AugAssign(&'a ast::StmtAugAssign),
Named(&'a ast::ExprNamed),
Comprehension {
node: &'a ast::Comprehension,
first: bool,
},
WithItem(&'a ast::WithItem),
}
impl<'a> From<&'a ast::StmtAssign> for CurrentAssignment<'a> {
fn from(value: &'a ast::StmtAssign) -> Self {
Self::Assign(value)
}
}
impl<'a> From<&'a ast::StmtAnnAssign> for CurrentAssignment<'a> {
fn from(value: &'a ast::StmtAnnAssign) -> Self {
Self::AnnAssign(value)
}
}
impl<'a> From<&'a ast::StmtAugAssign> for CurrentAssignment<'a> {
fn from(value: &'a ast::StmtAugAssign) -> Self {
Self::AugAssign(value)
}
}
impl<'a> From<&'a ast::ExprNamed> for CurrentAssignment<'a> {
fn from(value: &'a ast::ExprNamed) -> Self {
Self::Named(value)
}
}
impl<'a> From<&'a ast::WithItem> for CurrentAssignment<'a> {
fn from(value: &'a ast::WithItem) -> Self {
Self::WithItem(value)
}
}

View File

@@ -1,366 +0,0 @@
use ruff_db::files::File;
use ruff_db::parsed::ParsedModule;
use ruff_python_ast as ast;
use crate::ast_node_ref::AstNodeRef;
use crate::node_key::NodeKey;
use crate::semantic_index::symbol::{FileScopeId, ScopeId, ScopedSymbolId};
use crate::Db;
#[salsa::tracked]
pub struct Definition<'db> {
/// The file in which the definition occurs.
#[id]
pub(crate) file: File,
/// The scope in which the definition occurs.
#[id]
pub(crate) file_scope: FileScopeId,
/// The symbol defined.
#[id]
pub(crate) symbol: ScopedSymbolId,
#[no_eq]
#[return_ref]
pub(crate) node: DefinitionKind,
#[no_eq]
count: countme::Count<Definition<'static>>,
}
impl<'db> Definition<'db> {
pub(crate) fn scope(self, db: &'db dyn Db) -> ScopeId<'db> {
self.file_scope(db).to_scope_id(db, self.file(db))
}
}
#[derive(Copy, Clone, Debug)]
pub(crate) enum DefinitionNodeRef<'a> {
Import(&'a ast::Alias),
ImportFrom(ImportFromDefinitionNodeRef<'a>),
Function(&'a ast::StmtFunctionDef),
Class(&'a ast::StmtClassDef),
NamedExpression(&'a ast::ExprNamed),
Assignment(AssignmentDefinitionNodeRef<'a>),
AnnotatedAssignment(&'a ast::StmtAnnAssign),
AugmentedAssignment(&'a ast::StmtAugAssign),
Comprehension(ComprehensionDefinitionNodeRef<'a>),
Parameter(ast::AnyParameterRef<'a>),
WithItem(WithItemDefinitionNodeRef<'a>),
}
impl<'a> From<&'a ast::StmtFunctionDef> for DefinitionNodeRef<'a> {
fn from(node: &'a ast::StmtFunctionDef) -> Self {
Self::Function(node)
}
}
impl<'a> From<&'a ast::StmtClassDef> for DefinitionNodeRef<'a> {
fn from(node: &'a ast::StmtClassDef) -> Self {
Self::Class(node)
}
}
impl<'a> From<&'a ast::ExprNamed> for DefinitionNodeRef<'a> {
fn from(node: &'a ast::ExprNamed) -> Self {
Self::NamedExpression(node)
}
}
impl<'a> From<&'a ast::StmtAnnAssign> for DefinitionNodeRef<'a> {
fn from(node: &'a ast::StmtAnnAssign) -> Self {
Self::AnnotatedAssignment(node)
}
}
impl<'a> From<&'a ast::StmtAugAssign> for DefinitionNodeRef<'a> {
fn from(node: &'a ast::StmtAugAssign) -> Self {
Self::AugmentedAssignment(node)
}
}
impl<'a> From<&'a ast::Alias> for DefinitionNodeRef<'a> {
fn from(node_ref: &'a ast::Alias) -> Self {
Self::Import(node_ref)
}
}
impl<'a> From<ImportFromDefinitionNodeRef<'a>> for DefinitionNodeRef<'a> {
fn from(node_ref: ImportFromDefinitionNodeRef<'a>) -> Self {
Self::ImportFrom(node_ref)
}
}
impl<'a> From<AssignmentDefinitionNodeRef<'a>> for DefinitionNodeRef<'a> {
fn from(node_ref: AssignmentDefinitionNodeRef<'a>) -> Self {
Self::Assignment(node_ref)
}
}
impl<'a> From<WithItemDefinitionNodeRef<'a>> for DefinitionNodeRef<'a> {
fn from(node_ref: WithItemDefinitionNodeRef<'a>) -> Self {
Self::WithItem(node_ref)
}
}
impl<'a> From<ComprehensionDefinitionNodeRef<'a>> for DefinitionNodeRef<'a> {
fn from(node: ComprehensionDefinitionNodeRef<'a>) -> Self {
Self::Comprehension(node)
}
}
impl<'a> From<ast::AnyParameterRef<'a>> for DefinitionNodeRef<'a> {
fn from(node: ast::AnyParameterRef<'a>) -> Self {
Self::Parameter(node)
}
}
#[derive(Copy, Clone, Debug)]
pub(crate) struct ImportFromDefinitionNodeRef<'a> {
pub(crate) node: &'a ast::StmtImportFrom,
pub(crate) alias_index: usize,
}
#[derive(Copy, Clone, Debug)]
pub(crate) struct AssignmentDefinitionNodeRef<'a> {
pub(crate) assignment: &'a ast::StmtAssign,
pub(crate) target: &'a ast::ExprName,
}
#[derive(Copy, Clone, Debug)]
pub(crate) struct WithItemDefinitionNodeRef<'a> {
pub(crate) node: &'a ast::WithItem,
pub(crate) target: &'a ast::ExprName,
}
#[derive(Copy, Clone, Debug)]
pub(crate) struct ComprehensionDefinitionNodeRef<'a> {
pub(crate) node: &'a ast::Comprehension,
pub(crate) first: bool,
}
impl DefinitionNodeRef<'_> {
#[allow(unsafe_code)]
pub(super) unsafe fn into_owned(self, parsed: ParsedModule) -> DefinitionKind {
match self {
DefinitionNodeRef::Import(alias) => {
DefinitionKind::Import(AstNodeRef::new(parsed, alias))
}
DefinitionNodeRef::ImportFrom(ImportFromDefinitionNodeRef { node, alias_index }) => {
DefinitionKind::ImportFrom(ImportFromDefinitionKind {
node: AstNodeRef::new(parsed, node),
alias_index,
})
}
DefinitionNodeRef::Function(function) => {
DefinitionKind::Function(AstNodeRef::new(parsed, function))
}
DefinitionNodeRef::Class(class) => {
DefinitionKind::Class(AstNodeRef::new(parsed, class))
}
DefinitionNodeRef::NamedExpression(named) => {
DefinitionKind::NamedExpression(AstNodeRef::new(parsed, named))
}
DefinitionNodeRef::Assignment(AssignmentDefinitionNodeRef { assignment, target }) => {
DefinitionKind::Assignment(AssignmentDefinitionKind {
assignment: AstNodeRef::new(parsed.clone(), assignment),
target: AstNodeRef::new(parsed, target),
})
}
DefinitionNodeRef::AnnotatedAssignment(assign) => {
DefinitionKind::AnnotatedAssignment(AstNodeRef::new(parsed, assign))
}
DefinitionNodeRef::AugmentedAssignment(augmented_assignment) => {
DefinitionKind::AugmentedAssignment(AstNodeRef::new(parsed, augmented_assignment))
}
DefinitionNodeRef::Comprehension(ComprehensionDefinitionNodeRef { node, first }) => {
DefinitionKind::Comprehension(ComprehensionDefinitionKind {
node: AstNodeRef::new(parsed, node),
first,
})
}
DefinitionNodeRef::Parameter(parameter) => match parameter {
ast::AnyParameterRef::Variadic(parameter) => {
DefinitionKind::Parameter(AstNodeRef::new(parsed, parameter))
}
ast::AnyParameterRef::NonVariadic(parameter) => {
DefinitionKind::ParameterWithDefault(AstNodeRef::new(parsed, parameter))
}
},
DefinitionNodeRef::WithItem(WithItemDefinitionNodeRef { node, target }) => {
DefinitionKind::WithItem(WithItemDefinitionKind {
node: AstNodeRef::new(parsed.clone(), node),
target: AstNodeRef::new(parsed, target),
})
}
}
}
pub(super) fn key(self) -> DefinitionNodeKey {
match self {
Self::Import(node) => node.into(),
Self::ImportFrom(ImportFromDefinitionNodeRef { node, alias_index }) => {
(&node.names[alias_index]).into()
}
Self::Function(node) => node.into(),
Self::Class(node) => node.into(),
Self::NamedExpression(node) => node.into(),
Self::Assignment(AssignmentDefinitionNodeRef {
assignment: _,
target,
}) => target.into(),
Self::AnnotatedAssignment(node) => node.into(),
Self::AugmentedAssignment(node) => node.into(),
Self::Comprehension(ComprehensionDefinitionNodeRef { node, first: _ }) => node.into(),
Self::Parameter(node) => match node {
ast::AnyParameterRef::Variadic(parameter) => parameter.into(),
ast::AnyParameterRef::NonVariadic(parameter) => parameter.into(),
},
Self::WithItem(WithItemDefinitionNodeRef { node: _, target }) => target.into(),
}
}
}
#[derive(Clone, Debug)]
pub enum DefinitionKind {
Import(AstNodeRef<ast::Alias>),
ImportFrom(ImportFromDefinitionKind),
Function(AstNodeRef<ast::StmtFunctionDef>),
Class(AstNodeRef<ast::StmtClassDef>),
NamedExpression(AstNodeRef<ast::ExprNamed>),
Assignment(AssignmentDefinitionKind),
AnnotatedAssignment(AstNodeRef<ast::StmtAnnAssign>),
AugmentedAssignment(AstNodeRef<ast::StmtAugAssign>),
Comprehension(ComprehensionDefinitionKind),
Parameter(AstNodeRef<ast::Parameter>),
ParameterWithDefault(AstNodeRef<ast::ParameterWithDefault>),
WithItem(WithItemDefinitionKind),
}
#[derive(Clone, Debug)]
pub struct ComprehensionDefinitionKind {
node: AstNodeRef<ast::Comprehension>,
first: bool,
}
impl ComprehensionDefinitionKind {
pub(crate) fn node(&self) -> &ast::Comprehension {
self.node.node()
}
pub(crate) fn is_first(&self) -> bool {
self.first
}
}
#[derive(Clone, Debug)]
pub struct ImportFromDefinitionKind {
node: AstNodeRef<ast::StmtImportFrom>,
alias_index: usize,
}
impl ImportFromDefinitionKind {
pub(crate) fn import(&self) -> &ast::StmtImportFrom {
self.node.node()
}
pub(crate) fn alias(&self) -> &ast::Alias {
&self.node.node().names[self.alias_index]
}
}
#[derive(Clone, Debug)]
pub struct AssignmentDefinitionKind {
assignment: AstNodeRef<ast::StmtAssign>,
target: AstNodeRef<ast::ExprName>,
}
impl AssignmentDefinitionKind {
pub(crate) fn assignment(&self) -> &ast::StmtAssign {
self.assignment.node()
}
pub(crate) fn target(&self) -> &ast::ExprName {
self.target.node()
}
}
#[derive(Clone, Debug)]
pub struct WithItemDefinitionKind {
node: AstNodeRef<ast::WithItem>,
target: AstNodeRef<ast::ExprName>,
}
impl WithItemDefinitionKind {
pub(crate) fn node(&self) -> &ast::WithItem {
self.node.node()
}
pub(crate) fn target(&self) -> &ast::ExprName {
self.target.node()
}
}
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
pub(crate) struct DefinitionNodeKey(NodeKey);
impl From<&ast::Alias> for DefinitionNodeKey {
fn from(node: &ast::Alias) -> Self {
Self(NodeKey::from_node(node))
}
}
impl From<&ast::StmtFunctionDef> for DefinitionNodeKey {
fn from(node: &ast::StmtFunctionDef) -> Self {
Self(NodeKey::from_node(node))
}
}
impl From<&ast::StmtClassDef> for DefinitionNodeKey {
fn from(node: &ast::StmtClassDef) -> Self {
Self(NodeKey::from_node(node))
}
}
impl From<&ast::ExprName> for DefinitionNodeKey {
fn from(node: &ast::ExprName) -> Self {
Self(NodeKey::from_node(node))
}
}
impl From<&ast::ExprNamed> for DefinitionNodeKey {
fn from(node: &ast::ExprNamed) -> Self {
Self(NodeKey::from_node(node))
}
}
impl From<&ast::StmtAnnAssign> for DefinitionNodeKey {
fn from(node: &ast::StmtAnnAssign) -> Self {
Self(NodeKey::from_node(node))
}
}
impl From<&ast::StmtAugAssign> for DefinitionNodeKey {
fn from(node: &ast::StmtAugAssign) -> Self {
Self(NodeKey::from_node(node))
}
}
impl From<&ast::Comprehension> for DefinitionNodeKey {
fn from(node: &ast::Comprehension) -> Self {
Self(NodeKey::from_node(node))
}
}
impl From<&ast::Parameter> for DefinitionNodeKey {
fn from(node: &ast::Parameter) -> Self {
Self(NodeKey::from_node(node))
}
}
impl From<&ast::ParameterWithDefault> for DefinitionNodeKey {
fn from(node: &ast::ParameterWithDefault) -> Self {
Self(NodeKey::from_node(node))
}
}

View File

@@ -1,34 +0,0 @@
use crate::ast_node_ref::AstNodeRef;
use crate::db::Db;
use crate::semantic_index::symbol::{FileScopeId, ScopeId};
use ruff_db::files::File;
use ruff_python_ast as ast;
use salsa;
/// An independently type-inferable expression.
///
/// Includes constraint expressions (e.g. if tests) and the RHS of an unpacking assignment.
#[salsa::tracked]
pub(crate) struct Expression<'db> {
/// The file in which the expression occurs.
#[id]
pub(crate) file: File,
/// The scope in which the expression occurs.
#[id]
pub(crate) file_scope: FileScopeId,
/// The expression node.
#[no_eq]
#[return_ref]
pub(crate) node_ref: AstNodeRef<ast::Expr>,
#[no_eq]
count: countme::Count<Expression<'static>>,
}
impl<'db> Expression<'db> {
pub(crate) fn scope(self, db: &'db dyn Db) -> ScopeId<'db> {
self.file_scope(db).to_scope_id(db, self.file(db))
}
}

View File

@@ -1,437 +0,0 @@
use std::hash::{Hash, Hasher};
use std::ops::Range;
use bitflags::bitflags;
use hashbrown::hash_map::RawEntryMut;
use ruff_db::files::File;
use ruff_db::parsed::ParsedModule;
use ruff_index::{newtype_index, IndexVec};
use ruff_python_ast::name::Name;
use ruff_python_ast::{self as ast};
use rustc_hash::FxHasher;
use crate::ast_node_ref::AstNodeRef;
use crate::node_key::NodeKey;
use crate::semantic_index::{semantic_index, SymbolMap};
use crate::Db;
#[derive(Eq, PartialEq, Debug)]
pub struct Symbol {
name: Name,
flags: SymbolFlags,
}
impl Symbol {
fn new(name: Name) -> Self {
Self {
name,
flags: SymbolFlags::empty(),
}
}
fn insert_flags(&mut self, flags: SymbolFlags) {
self.flags.insert(flags);
}
/// The symbol's name.
pub fn name(&self) -> &Name {
&self.name
}
/// Is the symbol used in its containing scope?
pub fn is_used(&self) -> bool {
self.flags.contains(SymbolFlags::IS_USED)
}
/// Is the symbol defined in its containing scope?
pub fn is_defined(&self) -> bool {
self.flags.contains(SymbolFlags::IS_DEFINED)
}
}
bitflags! {
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub(super) struct SymbolFlags: u8 {
const IS_USED = 1 << 0;
const IS_DEFINED = 1 << 1;
/// TODO: This flag is not yet set by anything
const MARKED_GLOBAL = 1 << 2;
/// TODO: This flag is not yet set by anything
const MARKED_NONLOCAL = 1 << 3;
}
}
/// ID that uniquely identifies a symbol in a file.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct FileSymbolId {
scope: FileScopeId,
scoped_symbol_id: ScopedSymbolId,
}
impl FileSymbolId {
pub fn scope(self) -> FileScopeId {
self.scope
}
pub(crate) fn scoped_symbol_id(self) -> ScopedSymbolId {
self.scoped_symbol_id
}
}
impl From<FileSymbolId> for ScopedSymbolId {
fn from(val: FileSymbolId) -> Self {
val.scoped_symbol_id()
}
}
/// Symbol ID that uniquely identifies a symbol inside a [`Scope`].
#[newtype_index]
pub struct ScopedSymbolId;
/// A cross-module identifier of a scope that can be used as a salsa query parameter.
#[salsa::tracked]
pub struct ScopeId<'db> {
#[id]
pub file: File,
#[id]
pub file_scope_id: FileScopeId,
/// The node that introduces this scope.
#[no_eq]
#[return_ref]
pub node: NodeWithScopeKind,
#[no_eq]
count: countme::Count<ScopeId<'static>>,
}
impl<'db> ScopeId<'db> {
pub(crate) fn is_function_like(self, db: &'db dyn Db) -> bool {
        // Type parameter scopes behave like function scopes in terms of name resolution; the
        // CPython symbol table also uses the term "function-like" for these scopes.
matches!(
self.node(db),
NodeWithScopeKind::ClassTypeParameters(_)
| NodeWithScopeKind::FunctionTypeParameters(_)
| NodeWithScopeKind::Function(_)
| NodeWithScopeKind::ListComprehension(_)
| NodeWithScopeKind::SetComprehension(_)
| NodeWithScopeKind::DictComprehension(_)
| NodeWithScopeKind::GeneratorExpression(_)
)
}
#[cfg(test)]
pub(crate) fn name(self, db: &'db dyn Db) -> &'db str {
match self.node(db) {
NodeWithScopeKind::Module => "<module>",
NodeWithScopeKind::Class(class) | NodeWithScopeKind::ClassTypeParameters(class) => {
class.name.as_str()
}
NodeWithScopeKind::Function(function)
| NodeWithScopeKind::FunctionTypeParameters(function) => function.name.as_str(),
NodeWithScopeKind::Lambda(_) => "<lambda>",
NodeWithScopeKind::ListComprehension(_) => "<listcomp>",
NodeWithScopeKind::SetComprehension(_) => "<setcomp>",
NodeWithScopeKind::DictComprehension(_) => "<dictcomp>",
NodeWithScopeKind::GeneratorExpression(_) => "<generator>",
}
}
}
/// ID that uniquely identifies a scope inside of a module.
#[newtype_index]
pub struct FileScopeId;
impl FileScopeId {
/// Returns the scope id of the module-global scope.
pub fn global() -> Self {
FileScopeId::from_u32(0)
}
pub fn to_scope_id(self, db: &dyn Db, file: File) -> ScopeId<'_> {
let index = semantic_index(db, file);
index.scope_ids_by_scope[self]
}
}
#[derive(Debug, Eq, PartialEq)]
pub struct Scope {
pub(super) parent: Option<FileScopeId>,
pub(super) kind: ScopeKind,
pub(super) descendents: Range<FileScopeId>,
}
impl Scope {
pub fn parent(self) -> Option<FileScopeId> {
self.parent
}
pub fn kind(&self) -> ScopeKind {
self.kind
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ScopeKind {
Module,
Annotation,
Class,
Function,
Comprehension,
}
impl ScopeKind {
pub const fn is_comprehension(self) -> bool {
matches!(self, ScopeKind::Comprehension)
}
}
/// Symbol table for a specific [`Scope`].
#[derive(Debug)]
pub struct SymbolTable {
/// The symbols in this scope.
symbols: IndexVec<ScopedSymbolId, Symbol>,
/// The symbols indexed by name.
symbols_by_name: SymbolMap,
}
impl SymbolTable {
fn new() -> Self {
Self {
symbols: IndexVec::new(),
symbols_by_name: SymbolMap::default(),
}
}
fn shrink_to_fit(&mut self) {
self.symbols.shrink_to_fit();
}
pub(crate) fn symbol(&self, symbol_id: impl Into<ScopedSymbolId>) -> &Symbol {
&self.symbols[symbol_id.into()]
}
#[allow(unused)]
pub(crate) fn symbol_ids(&self) -> impl Iterator<Item = ScopedSymbolId> {
self.symbols.indices()
}
pub fn symbols(&self) -> impl Iterator<Item = &Symbol> {
self.symbols.iter()
}
/// Returns the symbol named `name`.
pub(crate) fn symbol_by_name(&self, name: &str) -> Option<&Symbol> {
let id = self.symbol_id_by_name(name)?;
Some(self.symbol(id))
}
/// Returns the [`ScopedSymbolId`] of the symbol named `name`.
pub(crate) fn symbol_id_by_name(&self, name: &str) -> Option<ScopedSymbolId> {
let (id, ()) = self
.symbols_by_name
.raw_entry()
.from_hash(Self::hash_name(name), |id| {
self.symbol(*id).name().as_str() == name
})?;
Some(*id)
}
fn hash_name(name: &str) -> u64 {
let mut hasher = FxHasher::default();
name.hash(&mut hasher);
hasher.finish()
}
}
impl PartialEq for SymbolTable {
fn eq(&self, other: &Self) -> bool {
// We don't need to compare the symbols_by_name because the name is already captured in `Symbol`.
self.symbols == other.symbols
}
}
impl Eq for SymbolTable {}
#[derive(Debug)]
pub(super) struct SymbolTableBuilder {
table: SymbolTable,
}
impl SymbolTableBuilder {
pub(super) fn new() -> Self {
Self {
table: SymbolTable::new(),
}
}
pub(super) fn add_or_update_symbol(
&mut self,
name: Name,
flags: SymbolFlags,
) -> (ScopedSymbolId, bool) {
let hash = SymbolTable::hash_name(&name);
let entry = self
.table
.symbols_by_name
.raw_entry_mut()
.from_hash(hash, |id| self.table.symbols[*id].name() == &name);
match entry {
RawEntryMut::Occupied(entry) => {
let symbol = &mut self.table.symbols[*entry.key()];
symbol.insert_flags(flags);
(*entry.key(), false)
}
RawEntryMut::Vacant(entry) => {
let mut symbol = Symbol::new(name);
symbol.insert_flags(flags);
let id = self.table.symbols.push(symbol);
entry.insert_with_hasher(hash, id, (), |id| {
SymbolTable::hash_name(self.table.symbols[*id].name().as_str())
});
(id, true)
}
}
}
pub(super) fn finish(mut self) -> SymbolTable {
self.table.shrink_to_fit();
self.table
}
}
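The name-keyed lookups above store each symbol name only once: the hashbrown map holds bare `ScopedSymbolId` keys, and both lookup and insertion hash the queried name with `FxHasher` and compare it against the `Symbol` stored in the `IndexVec`. Below is a minimal, self-contained sketch of the same interning pattern (illustrative only; it assumes the hashbrown raw-entry API and `rustc-hash` crates used above, and the `NameInterner` type is hypothetical, not part of this codebase):

```rust
use std::hash::{BuildHasherDefault, Hash, Hasher};

use hashbrown::hash_map::RawEntryMut;
use rustc_hash::FxHasher;

#[derive(Default)]
struct NameInterner {
    names: Vec<String>,
    /// Keys are indices into `names`; the name itself is never duplicated as a map key.
    ids_by_name: hashbrown::HashMap<usize, (), BuildHasherDefault<FxHasher>>,
}

impl NameInterner {
    fn hash_name(name: &str) -> u64 {
        let mut hasher = FxHasher::default();
        name.hash(&mut hasher);
        hasher.finish()
    }

    /// Returns the index of `name`, inserting it if it has not been interned yet.
    fn intern(&mut self, name: &str) -> usize {
        let hash = Self::hash_name(name);
        match self
            .ids_by_name
            .raw_entry_mut()
            .from_hash(hash, |id| self.names[*id] == name)
        {
            RawEntryMut::Occupied(entry) => *entry.key(),
            RawEntryMut::Vacant(entry) => {
                let id = self.names.len();
                self.names.push(name.to_string());
                entry.insert_with_hasher(hash, id, (), |id| Self::hash_name(&self.names[*id]));
                id
            }
        }
    }
}
```

Interning the same string twice returns the same index, which is the property `add_or_update_symbol` relies on when it re-encounters a name in the scope.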
/// Reference to a node that introduces a new scope.
#[derive(Copy, Clone, Debug)]
pub(crate) enum NodeWithScopeRef<'a> {
Module,
Class(&'a ast::StmtClassDef),
Function(&'a ast::StmtFunctionDef),
Lambda(&'a ast::ExprLambda),
FunctionTypeParameters(&'a ast::StmtFunctionDef),
ClassTypeParameters(&'a ast::StmtClassDef),
ListComprehension(&'a ast::ExprListComp),
SetComprehension(&'a ast::ExprSetComp),
DictComprehension(&'a ast::ExprDictComp),
GeneratorExpression(&'a ast::ExprGenerator),
}
impl NodeWithScopeRef<'_> {
/// Converts the unowned reference to an owned [`NodeWithScopeKind`].
///
/// # Safety
/// The node wrapped by `self` must be a child of `module`.
#[allow(unsafe_code)]
pub(super) unsafe fn to_kind(self, module: ParsedModule) -> NodeWithScopeKind {
match self {
NodeWithScopeRef::Module => NodeWithScopeKind::Module,
NodeWithScopeRef::Class(class) => {
NodeWithScopeKind::Class(AstNodeRef::new(module, class))
}
NodeWithScopeRef::Function(function) => {
NodeWithScopeKind::Function(AstNodeRef::new(module, function))
}
NodeWithScopeRef::Lambda(lambda) => {
NodeWithScopeKind::Lambda(AstNodeRef::new(module, lambda))
}
NodeWithScopeRef::FunctionTypeParameters(function) => {
NodeWithScopeKind::FunctionTypeParameters(AstNodeRef::new(module, function))
}
NodeWithScopeRef::ClassTypeParameters(class) => {
NodeWithScopeKind::ClassTypeParameters(AstNodeRef::new(module, class))
}
NodeWithScopeRef::ListComprehension(comprehension) => {
NodeWithScopeKind::ListComprehension(AstNodeRef::new(module, comprehension))
}
NodeWithScopeRef::SetComprehension(comprehension) => {
NodeWithScopeKind::SetComprehension(AstNodeRef::new(module, comprehension))
}
NodeWithScopeRef::DictComprehension(comprehension) => {
NodeWithScopeKind::DictComprehension(AstNodeRef::new(module, comprehension))
}
NodeWithScopeRef::GeneratorExpression(generator) => {
NodeWithScopeKind::GeneratorExpression(AstNodeRef::new(module, generator))
}
}
}
pub(super) fn scope_kind(self) -> ScopeKind {
match self {
NodeWithScopeRef::Module => ScopeKind::Module,
NodeWithScopeRef::Class(_) => ScopeKind::Class,
NodeWithScopeRef::Function(_) => ScopeKind::Function,
NodeWithScopeRef::Lambda(_) => ScopeKind::Function,
NodeWithScopeRef::FunctionTypeParameters(_)
| NodeWithScopeRef::ClassTypeParameters(_) => ScopeKind::Annotation,
NodeWithScopeRef::ListComprehension(_)
| NodeWithScopeRef::SetComprehension(_)
| NodeWithScopeRef::DictComprehension(_)
| NodeWithScopeRef::GeneratorExpression(_) => ScopeKind::Comprehension,
}
}
pub(crate) fn node_key(self) -> NodeWithScopeKey {
match self {
NodeWithScopeRef::Module => NodeWithScopeKey::Module,
NodeWithScopeRef::Class(class) => NodeWithScopeKey::Class(NodeKey::from_node(class)),
NodeWithScopeRef::Function(function) => {
NodeWithScopeKey::Function(NodeKey::from_node(function))
}
NodeWithScopeRef::Lambda(lambda) => {
NodeWithScopeKey::Lambda(NodeKey::from_node(lambda))
}
NodeWithScopeRef::FunctionTypeParameters(function) => {
NodeWithScopeKey::FunctionTypeParameters(NodeKey::from_node(function))
}
NodeWithScopeRef::ClassTypeParameters(class) => {
NodeWithScopeKey::ClassTypeParameters(NodeKey::from_node(class))
}
NodeWithScopeRef::ListComprehension(comprehension) => {
NodeWithScopeKey::ListComprehension(NodeKey::from_node(comprehension))
}
NodeWithScopeRef::SetComprehension(comprehension) => {
NodeWithScopeKey::SetComprehension(NodeKey::from_node(comprehension))
}
NodeWithScopeRef::DictComprehension(comprehension) => {
NodeWithScopeKey::DictComprehension(NodeKey::from_node(comprehension))
}
NodeWithScopeRef::GeneratorExpression(generator) => {
NodeWithScopeKey::GeneratorExpression(NodeKey::from_node(generator))
}
}
}
}
/// Node that introduces a new scope.
#[derive(Clone, Debug)]
pub enum NodeWithScopeKind {
Module,
Class(AstNodeRef<ast::StmtClassDef>),
ClassTypeParameters(AstNodeRef<ast::StmtClassDef>),
Function(AstNodeRef<ast::StmtFunctionDef>),
FunctionTypeParameters(AstNodeRef<ast::StmtFunctionDef>),
Lambda(AstNodeRef<ast::ExprLambda>),
ListComprehension(AstNodeRef<ast::ExprListComp>),
SetComprehension(AstNodeRef<ast::ExprSetComp>),
DictComprehension(AstNodeRef<ast::ExprDictComp>),
GeneratorExpression(AstNodeRef<ast::ExprGenerator>),
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub(crate) enum NodeWithScopeKey {
Module,
Class(NodeKey),
ClassTypeParameters(NodeKey),
Function(NodeKey),
FunctionTypeParameters(NodeKey),
Lambda(NodeKey),
ListComprehension(NodeKey),
SetComprehension(NodeKey),
DictComprehension(NodeKey),
GeneratorExpression(NodeKey),
}

View File

@@ -1,375 +0,0 @@
//! Build a map from each use of a symbol to the definitions visible from that use, and the
//! type-narrowing constraints that apply to each definition.
//!
//! Let's take this code sample:
//!
//! ```python
//! x = 1
//! x = 2
//! y = x
//! if y is not None:
//! x = 3
//! else:
//! x = 4
//! z = x
//! ```
//!
//! In this snippet, we have four definitions of `x` (the statements assigning `1`, `2`, `3`,
//! and `4` to it), and two uses of `x` (the `y = x` and `z = x` assignments). The first
//! [`Definition`] of `x` is never visible to any use, because it's immediately replaced by the
//! second definition, before any use happens. (A linter could thus flag the statement `x = 1`
//! as likely superfluous.)
//!
//! The first use of `x` has one definition visible to it: the assignment `x = 2`.
//!
//! Things get a bit more complex when we have branches. We will definitely take either the `if` or
//! the `else` branch. Thus, the second use of `x` has two definitions visible to it: `x = 3` and
//! `x = 4`. The `x = 2` definition is no longer visible, because it must be replaced by either `x
//! = 3` or `x = 4`, no matter which branch was taken. We don't know which branch was taken, so we
//! must consider both definitions as visible, which means eventually we would (in type inference)
//! look at these two definitions and infer a type of `Literal[3, 4]` -- the union of `Literal[3]`
//! and `Literal[4]` -- for the second use of `x`.
//!
//! So that's one question our use-def map needs to answer: given a specific use of a symbol, which
//! definition(s) is/are visible from that use. In
//! [`AstIds`](crate::semantic_index::ast_ids::AstIds) we number all uses (that means a `Name` node
//! with `Load` context) so we have a `ScopedUseId` to efficiently represent each use.
//!
//! Another case we need to handle is when a symbol is referenced from a different scope (the most
//! obvious example of this is an import). We call this "public" use of a symbol. So the other
//! question we need to be able to answer is, what are the publicly-visible definitions of each
//! symbol?
//!
//! Technically, public use of a symbol could also occur from any point in control flow of the
//! scope where the symbol is defined (via inline imports and import cycles, in the case of an
//! import, or via a function call partway through the local scope that ends up using a symbol from
//! the scope via a global or nonlocal reference.) But modeling this fully accurately requires
//! whole-program analysis that isn't tractable for an efficient incremental compiler, since it
//! means a given symbol could have a different type every place it's referenced throughout the
//! program, depending on the shape of arbitrarily-sized call/import graphs. So we follow other
//! Python type-checkers in making the simplifying assumption that usually the scope will finish
//! execution before its symbols are made visible to other scopes; for instance, most imports will
//! import from a complete module, not a partially-executed module. (We may want to get a little
//! smarter than this in the future, in particular for closures, but for now this is where we
//! start.)
//!
//! So this means that the publicly-visible definitions of a symbol are the definitions still
//! visible at the end of the scope; effectively we have an implicit "use" of every symbol at the
//! end of the scope.
//!
//! We also need to know, for a given definition of a symbol, what type-narrowing constraints apply
//! to it. For instance, in this code sample:
//!
//! ```python
//! x = 1 if flag else None
//! if x is not None:
//! y = x
//! ```
//!
//! At the use of `x` in `y = x`, the visible definition of `x` is `1 if flag else None`, which
//! would infer as the type `Literal[1] | None`. But the constraint `x is not None` dominates this
//! use, which means we can rule out the possibility that `x` is `None` here, which should give us
//! the type `Literal[1]` for this use.
//!
//! The data structure we build to answer these questions is the `UseDefMap`. It has a
//! `definitions_by_use` vector indexed by [`ScopedUseId`] and a `public_definitions` vector
//! indexed by [`ScopedSymbolId`]. The values in each of these vectors are (in principle) a list of
//! visible definitions at that use, or at the end of the scope for that symbol, with a list of the
//! dominating constraints for each of those definitions.
//!
//! In order to avoid vectors-of-vectors-of-vectors and all the allocations that would entail, we
//! don't actually store these "list of visible definitions" as a vector of [`Definition`].
//! Instead, the values in `definitions_by_use` and `public_definitions` are a [`SymbolState`]
//! struct which uses bit-sets to track definitions and constraints in terms of
//! [`ScopedDefinitionId`] and [`ScopedConstraintId`], which are indices into the `all_definitions`
//! and `all_constraints` indexvecs in the [`UseDefMap`].
//!
//! There is another special kind of possible "definition" for a symbol: there might be a path from
//! the scope entry to a given use in which the symbol is never bound.
//!
//! The simplest way to model "unbound" would be as an actual [`Definition`] itself: the initial
//! visible [`Definition`] for each symbol in a scope. But actually modeling it this way would
//! unnecessarily increase the number of [`Definition`] that Salsa must track. Since "unbound" is a
//! special definition in that all symbols share it, and it doesn't have any additional per-symbol
//! state, and constraints are irrelevant to it, we can represent it more efficiently: we use the
//! `may_be_unbound` boolean on the [`SymbolState`] struct. If this flag is `true`, it means the
//! symbol/use really has one additional visible "definition", which is the unbound state. If this
//! flag is `false`, it means we've eliminated the possibility of unbound: every path we've
//! followed includes a definition for this symbol.
//!
//! To build a [`UseDefMap`], the [`UseDefMapBuilder`] is notified of each new use, definition, and
//! constraint as they are encountered by the
//! [`SemanticIndexBuilder`](crate::semantic_index::builder::SemanticIndexBuilder) AST visit. For
//! each symbol, the builder tracks the `SymbolState` for that symbol. When we hit a use of a
//! symbol, it records the current state for that symbol for that use. When we reach the end of the
//! scope, it records the state for each symbol as the public definitions of that symbol.
//!
//! Let's walk through the above example. Initially we record for `x` that it has no visible
//! definitions, and may be unbound. When we see `x = 1`, we record that as the sole visible
//! definition of `x`, and flip `may_be_unbound` to `false`. Then we see `x = 2`, and it replaces
//! `x = 1` as the sole visible definition of `x`. When we get to `y = x`, we record that the
//! visible definitions for that use of `x` are just the `x = 2` definition.
//!
//! Then we hit the `if` branch. We visit the `test` node (`flag` in this case), since that will
//! happen regardless. Then we take a pre-branch snapshot of the currently visible definitions for
//! all symbols, which we'll need later. Then we record `flag` as a possible constraint on the
//! currently visible definition (`x = 2`), and go ahead and visit the `if` body. When we see `x =
//! 3`, it replaces `x = 2` (constrained by `flag`) as the sole visible definition of `x`. At the
//! end of the `if` body, we take another snapshot of the currently-visible definitions; we'll call
//! this the post-if-body snapshot.
//!
//! Now we need to visit the `else` clause. The conditions when entering the `else` clause should
//! be the pre-if conditions; if we are entering the `else` clause, we know that the `if` test
//! failed and we didn't execute the `if` body. So we first reset the builder to the pre-if state,
//! using the snapshot we took previously (meaning we now have `x = 2` as the sole visible
//! definition for `x` again), then visit the `else` clause, where `x = 4` replaces `x = 2` as the
//! sole visible definition of `x`.
//!
//! Now we reach the end of the if/else, and want to visit the following code. The state here needs
//! to reflect that we might have gone through the `if` branch, or we might have gone through the
//! `else` branch, and we don't know which. So we need to "merge" our current builder state
//! (reflecting the end-of-else state, with `x = 4` as the only visible definition) with our
//! post-if-body snapshot (which has `x = 3` as the only visible definition). The result of this
//! merge is that we now have two visible definitions of `x`: `x = 3` and `x = 4`.
//!
//! The [`UseDefMapBuilder`] itself just exposes methods for taking a snapshot, resetting to a
//! snapshot, and merging a snapshot into the current state. The logic using these methods lives in
//! [`SemanticIndexBuilder`](crate::semantic_index::builder::SemanticIndexBuilder), e.g. where it
//! visits a `StmtIf` node.
//!
//! (In the future we may have some other questions we want to answer as well, such as "is this
//! definition used?", which will require tracking a bit more info in our map, e.g. a "used" bit
//! for each [`Definition`] which is flipped to true when we record that definition for a use.)
use self::symbol_state::{
ConstraintIdIterator, DefinitionIdWithConstraintsIterator, ScopedConstraintId,
ScopedDefinitionId, SymbolState,
};
use crate::semantic_index::ast_ids::ScopedUseId;
use crate::semantic_index::definition::Definition;
use crate::semantic_index::expression::Expression;
use crate::semantic_index::symbol::ScopedSymbolId;
use ruff_index::IndexVec;
mod bitset;
mod symbol_state;
/// Applicable definitions and constraints for every use of a name.
#[derive(Debug, PartialEq, Eq)]
pub(crate) struct UseDefMap<'db> {
/// Array of [`Definition`] in this scope.
all_definitions: IndexVec<ScopedDefinitionId, Definition<'db>>,
/// Array of constraints (as [`Expression`]) in this scope.
all_constraints: IndexVec<ScopedConstraintId, Expression<'db>>,
/// [`SymbolState`] visible at a [`ScopedUseId`].
definitions_by_use: IndexVec<ScopedUseId, SymbolState>,
/// [`SymbolState`] visible at end of scope for each symbol.
public_definitions: IndexVec<ScopedSymbolId, SymbolState>,
}
impl<'db> UseDefMap<'db> {
pub(crate) fn use_definitions(
&self,
use_id: ScopedUseId,
) -> DefinitionWithConstraintsIterator<'_, 'db> {
DefinitionWithConstraintsIterator {
all_definitions: &self.all_definitions,
all_constraints: &self.all_constraints,
inner: self.definitions_by_use[use_id].visible_definitions(),
}
}
pub(crate) fn use_may_be_unbound(&self, use_id: ScopedUseId) -> bool {
self.definitions_by_use[use_id].may_be_unbound()
}
pub(crate) fn public_definitions(
&self,
symbol: ScopedSymbolId,
) -> DefinitionWithConstraintsIterator<'_, 'db> {
DefinitionWithConstraintsIterator {
all_definitions: &self.all_definitions,
all_constraints: &self.all_constraints,
inner: self.public_definitions[symbol].visible_definitions(),
}
}
pub(crate) fn public_may_be_unbound(&self, symbol: ScopedSymbolId) -> bool {
self.public_definitions[symbol].may_be_unbound()
}
}
#[derive(Debug)]
pub(crate) struct DefinitionWithConstraintsIterator<'map, 'db> {
all_definitions: &'map IndexVec<ScopedDefinitionId, Definition<'db>>,
all_constraints: &'map IndexVec<ScopedConstraintId, Expression<'db>>,
inner: DefinitionIdWithConstraintsIterator<'map>,
}
impl<'map, 'db> Iterator for DefinitionWithConstraintsIterator<'map, 'db> {
type Item = DefinitionWithConstraints<'map, 'db>;
fn next(&mut self) -> Option<Self::Item> {
self.inner
.next()
.map(|def_id_with_constraints| DefinitionWithConstraints {
definition: self.all_definitions[def_id_with_constraints.definition],
constraints: ConstraintsIterator {
all_constraints: self.all_constraints,
constraint_ids: def_id_with_constraints.constraint_ids,
},
})
}
}
impl std::iter::FusedIterator for DefinitionWithConstraintsIterator<'_, '_> {}
pub(crate) struct DefinitionWithConstraints<'map, 'db> {
pub(crate) definition: Definition<'db>,
pub(crate) constraints: ConstraintsIterator<'map, 'db>,
}
pub(crate) struct ConstraintsIterator<'map, 'db> {
all_constraints: &'map IndexVec<ScopedConstraintId, Expression<'db>>,
constraint_ids: ConstraintIdIterator<'map>,
}
impl<'map, 'db> Iterator for ConstraintsIterator<'map, 'db> {
type Item = Expression<'db>;
fn next(&mut self) -> Option<Self::Item> {
self.constraint_ids
.next()
.map(|constraint_id| self.all_constraints[constraint_id])
}
}
impl std::iter::FusedIterator for ConstraintsIterator<'_, '_> {}
/// A snapshot of the definitions and constraints state at a particular point in control flow.
#[derive(Clone, Debug)]
pub(super) struct FlowSnapshot {
definitions_by_symbol: IndexVec<ScopedSymbolId, SymbolState>,
}
#[derive(Debug, Default)]
pub(super) struct UseDefMapBuilder<'db> {
    /// Append-only array of [`Definition`] in this scope.
all_definitions: IndexVec<ScopedDefinitionId, Definition<'db>>,
/// Append-only array of constraints (as [`Expression`]).
all_constraints: IndexVec<ScopedConstraintId, Expression<'db>>,
/// Visible definitions at each so-far-recorded use.
definitions_by_use: IndexVec<ScopedUseId, SymbolState>,
/// Currently visible definitions for each symbol.
definitions_by_symbol: IndexVec<ScopedSymbolId, SymbolState>,
}
impl<'db> UseDefMapBuilder<'db> {
pub(super) fn new() -> Self {
Self::default()
}
pub(super) fn add_symbol(&mut self, symbol: ScopedSymbolId) {
let new_symbol = self.definitions_by_symbol.push(SymbolState::unbound());
debug_assert_eq!(symbol, new_symbol);
}
pub(super) fn record_definition(
&mut self,
symbol: ScopedSymbolId,
definition: Definition<'db>,
) {
// We have a new definition of a symbol; this replaces any previous definitions in this
// path.
let def_id = self.all_definitions.push(definition);
self.definitions_by_symbol[symbol] = SymbolState::with(def_id);
}
pub(super) fn record_constraint(&mut self, constraint: Expression<'db>) {
let constraint_id = self.all_constraints.push(constraint);
for definitions in &mut self.definitions_by_symbol {
definitions.add_constraint(constraint_id);
}
}
pub(super) fn record_use(&mut self, symbol: ScopedSymbolId, use_id: ScopedUseId) {
// We have a use of a symbol; clone the currently visible definitions for that symbol, and
// record them as the visible definitions for this use.
let new_use = self
.definitions_by_use
.push(self.definitions_by_symbol[symbol].clone());
debug_assert_eq!(use_id, new_use);
}
/// Take a snapshot of the current visible-symbols state.
pub(super) fn snapshot(&self) -> FlowSnapshot {
FlowSnapshot {
definitions_by_symbol: self.definitions_by_symbol.clone(),
}
}
/// Restore the current builder visible-definitions state to the given snapshot.
pub(super) fn restore(&mut self, snapshot: FlowSnapshot) {
// We never remove symbols from `definitions_by_symbol` (it's an IndexVec, and the symbol
// IDs must line up), so the current number of known symbols must always be equal to or
// greater than the number of known symbols in a previously-taken snapshot.
let num_symbols = self.definitions_by_symbol.len();
debug_assert!(num_symbols >= snapshot.definitions_by_symbol.len());
// Restore the current visible-definitions state to the given snapshot.
self.definitions_by_symbol = snapshot.definitions_by_symbol;
// If the snapshot we are restoring is missing some symbols we've recorded since, we need
// to fill them in so the symbol IDs continue to line up. Since they don't exist in the
// snapshot, the correct state to fill them in with is "unbound".
self.definitions_by_symbol
.resize(num_symbols, SymbolState::unbound());
}
/// Merge the given snapshot into the current state, reflecting that we might have taken either
/// path to get here. The new visible-definitions state for each symbol should include
/// definitions from both the prior state and the snapshot.
pub(super) fn merge(&mut self, snapshot: FlowSnapshot) {
// The tricky thing about merging two Ranges pointing into `all_definitions` is that if the
// two Ranges aren't already adjacent in `all_definitions`, we will have to copy at least
// one or the other of the ranges to the end of `all_definitions` so as to make them
// adjacent. We can't ever move things around in `all_definitions` because previously
// recorded uses may still have ranges pointing to any part of it; all we can do is append.
// It's possible we may end up with some old entries in `all_definitions` that nobody is
// pointing to, but that's OK.
// We never remove symbols from `definitions_by_symbol` (it's an IndexVec, and the symbol
// IDs must line up), so the current number of known symbols must always be equal to or
// greater than the number of known symbols in a previously-taken snapshot.
debug_assert!(self.definitions_by_symbol.len() >= snapshot.definitions_by_symbol.len());
let mut snapshot_definitions_iter = snapshot.definitions_by_symbol.into_iter();
for current in &mut self.definitions_by_symbol {
if let Some(snapshot) = snapshot_definitions_iter.next() {
current.merge(snapshot);
} else {
// Symbol not present in snapshot, so it's unbound from that path.
current.add_unbound();
}
}
}
pub(super) fn finish(mut self) -> UseDefMap<'db> {
self.all_definitions.shrink_to_fit();
self.all_constraints.shrink_to_fit();
self.definitions_by_symbol.shrink_to_fit();
self.definitions_by_use.shrink_to_fit();
UseDefMap {
all_definitions: self.all_definitions,
all_constraints: self.all_constraints,
definitions_by_use: self.definitions_by_use,
public_definitions: self.definitions_by_symbol,
}
}
}
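The snapshot/restore/merge sequence walked through in the module documentation is driven from `SemanticIndexBuilder` while it visits the AST; the following is only an illustrative sketch of that sequence for a simple `if`/`else` (not the actual visitor code), and it assumes the symbol, definition, constraint, and use IDs passed in were allocated in exactly this order on a fresh builder:

```rust
// x = 2
// if flag:
//     x = 3
// else:
//     x = 4
// y = x
fn if_else_sketch<'db>(
    mut builder: UseDefMapBuilder<'db>,
    x: ScopedSymbolId,
    x_eq_2: Definition<'db>,
    x_eq_3: Definition<'db>,
    x_eq_4: Definition<'db>,
    flag: Expression<'db>,
    use_of_x: ScopedUseId,
) -> UseDefMap<'db> {
    builder.add_symbol(x);
    builder.record_definition(x, x_eq_2); // `x = 2` is now the sole visible definition

    let pre_if = builder.snapshot(); // state before the branch
    builder.record_constraint(flag); // `flag` constrains the currently visible `x = 2`
    builder.record_definition(x, x_eq_3); // `x = 3` replaces it in the `if` body
    let post_if_body = builder.snapshot();

    builder.restore(pre_if); // enter `else` as if the `if` body never ran
    builder.record_definition(x, x_eq_4); // `x = 4`
    builder.merge(post_if_body); // after the if/else, either branch may have executed

    builder.record_use(x, use_of_x); // `y = x` now sees both `x = 3` and `x = 4`
    builder.finish()
}
```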

View File

@@ -1,228 +0,0 @@
/// Ordered set of `u32`.
///
/// Uses an inline bit-set for small values (up to 64 * B), falls back to heap allocated vector of
/// blocks for larger values.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(super) enum BitSet<const B: usize> {
/// Bit-set (in 64-bit blocks) for the first 64 * B entries.
Inline([u64; B]),
/// Overflow beyond 64 * B.
Heap(Vec<u64>),
}
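// For example (illustrative, not part of the original file): with `B = 1`, values 0..=63
// fit in the single inline block, and inserting 64 or anything larger converts the set to
// the heap-backed representation.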
impl<const B: usize> Default for BitSet<B> {
fn default() -> Self {
// B * 64 must fit in a u32, or else we have unusable bits; this assertion makes the
// truncating casts to u32 below safe. This would be better as a const assertion, but
// that's not possible on stable with const generic params. (B should never really be
// anywhere close to this large.)
assert!(B * 64 < (u32::MAX as usize));
// This implementation requires usize >= 32 bits.
static_assertions::const_assert!(usize::BITS >= 32);
Self::Inline([0; B])
}
}
impl<const B: usize> BitSet<B> {
/// Create and return a new [`BitSet`] with a single `value` inserted.
pub(super) fn with(value: u32) -> Self {
let mut bitset = Self::default();
bitset.insert(value);
bitset
}
/// Convert from Inline to Heap, if needed, and resize the Heap vector, if needed.
fn resize(&mut self, value: u32) {
let num_blocks_needed = (value / 64) + 1;
match self {
Self::Inline(blocks) => {
let mut vec = blocks.to_vec();
vec.resize(num_blocks_needed as usize, 0);
*self = Self::Heap(vec);
}
Self::Heap(vec) => {
vec.resize(num_blocks_needed as usize, 0);
}
}
}
fn blocks_mut(&mut self) -> &mut [u64] {
match self {
Self::Inline(blocks) => blocks.as_mut_slice(),
Self::Heap(blocks) => blocks.as_mut_slice(),
}
}
fn blocks(&self) -> &[u64] {
match self {
Self::Inline(blocks) => blocks.as_slice(),
Self::Heap(blocks) => blocks.as_slice(),
}
}
/// Insert a value into the [`BitSet`].
///
/// Return true if the value was newly inserted, false if already present.
pub(super) fn insert(&mut self, value: u32) -> bool {
let value_usize = value as usize;
let (block, index) = (value_usize / 64, value_usize % 64);
if block >= self.blocks().len() {
self.resize(value);
}
let blocks = self.blocks_mut();
let missing = blocks[block] & (1 << index) == 0;
blocks[block] |= 1 << index;
missing
}
/// Intersect in-place with another [`BitSet`].
pub(super) fn intersect(&mut self, other: &BitSet<B>) {
let my_blocks = self.blocks_mut();
let other_blocks = other.blocks();
let min_len = my_blocks.len().min(other_blocks.len());
for i in 0..min_len {
my_blocks[i] &= other_blocks[i];
}
for block in my_blocks.iter_mut().skip(min_len) {
*block = 0;
}
}
/// Return an iterator over the values (in ascending order) in this [`BitSet`].
pub(super) fn iter(&self) -> BitSetIterator<'_, B> {
let blocks = self.blocks();
BitSetIterator {
blocks,
current_block_index: 0,
current_block: blocks[0],
}
}
}
/// Iterator over values in a [`BitSet`].
#[derive(Debug)]
pub(super) struct BitSetIterator<'a, const B: usize> {
/// The blocks we are iterating over.
blocks: &'a [u64],
/// The index of the block we are currently iterating through.
current_block_index: usize,
/// The block we are currently iterating through (and zeroing as we go.)
current_block: u64,
}
impl<const B: usize> Iterator for BitSetIterator<'_, B> {
type Item = u32;
fn next(&mut self) -> Option<Self::Item> {
while self.current_block == 0 {
if self.current_block_index + 1 >= self.blocks.len() {
return None;
}
self.current_block_index += 1;
self.current_block = self.blocks[self.current_block_index];
}
let lowest_bit_set = self.current_block.trailing_zeros();
// reset the lowest set bit, without a data dependency on `lowest_bit_set`
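        // (Worked example, added for illustration: for `current_block = 0b0110_1000`,
        // `trailing_zeros()` is 3, and `0b0110_1000 & 0b0110_0111 == 0b0110_0000`,
        // i.e. only the lowest set bit is cleared.)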
self.current_block &= self.current_block.wrapping_sub(1);
// SAFETY: `lowest_bit_set` cannot be more than 64, `current_block_index` cannot be more
// than `B - 1`, and we check above that `B * 64 < u32::MAX`. So both `64 *
// current_block_index` and the final value here must fit in u32.
#[allow(clippy::cast_possible_truncation)]
Some(lowest_bit_set + (64 * self.current_block_index) as u32)
}
}
impl<const B: usize> std::iter::FusedIterator for BitSetIterator<'_, B> {}
#[cfg(test)]
mod tests {
use super::BitSet;
fn assert_bitset<const B: usize>(bitset: &BitSet<B>, contents: &[u32]) {
assert_eq!(bitset.iter().collect::<Vec<_>>(), contents);
}
#[test]
fn iter() {
let mut b = BitSet::<1>::with(3);
b.insert(27);
b.insert(6);
assert!(matches!(b, BitSet::Inline(_)));
assert_bitset(&b, &[3, 6, 27]);
}
#[test]
fn iter_overflow() {
let mut b = BitSet::<1>::with(140);
b.insert(100);
b.insert(129);
assert!(matches!(b, BitSet::Heap(_)));
assert_bitset(&b, &[100, 129, 140]);
}
#[test]
fn intersect() {
let mut b1 = BitSet::<1>::with(4);
let mut b2 = BitSet::<1>::with(4);
b1.insert(23);
b2.insert(5);
b1.intersect(&b2);
assert_bitset(&b1, &[4]);
}
#[test]
fn intersect_mixed_1() {
let mut b1 = BitSet::<1>::with(4);
let mut b2 = BitSet::<1>::with(4);
b1.insert(89);
b2.insert(5);
b1.intersect(&b2);
assert_bitset(&b1, &[4]);
}
#[test]
fn intersect_mixed_2() {
let mut b1 = BitSet::<1>::with(4);
let mut b2 = BitSet::<1>::with(4);
b1.insert(23);
b2.insert(89);
b1.intersect(&b2);
assert_bitset(&b1, &[4]);
}
#[test]
fn intersect_heap() {
let mut b1 = BitSet::<1>::with(4);
let mut b2 = BitSet::<1>::with(4);
b1.insert(89);
b2.insert(90);
b1.intersect(&b2);
assert_bitset(&b1, &[4]);
}
#[test]
fn intersect_heap_2() {
let mut b1 = BitSet::<1>::with(89);
let mut b2 = BitSet::<1>::with(89);
b1.insert(91);
b2.insert(90);
b1.intersect(&b2);
assert_bitset(&b1, &[89]);
}
#[test]
fn multiple_blocks() {
let mut b = BitSet::<2>::with(120);
b.insert(45);
assert!(matches!(b, BitSet::Inline(_)));
assert_bitset(&b, &[45, 120]);
}
}

View File

@@ -1,374 +0,0 @@
//! Track visible definitions of a symbol, and applicable constraints per definition.
//!
//! These data structures operate entirely on scope-local newtype-indices for definitions and
//! constraints, referring to their location in the `all_definitions` and `all_constraints`
//! indexvecs in [`super::UseDefMapBuilder`].
//!
//! We need to track arbitrary associations between definitions and constraints, not just a single
//! set of currently dominating constraints (where "dominating" means "control flow must have
//! passed through it to reach this point"), because we can have dominating constraints that apply
//! to some definitions but not others, as in this code:
//!
//! ```python
//! x = 1 if flag else None
//! if x is not None:
//! if flag2:
//! x = 2 if flag else None
//! x
//! ```
//!
//! The `x is not None` constraint dominates the final use of `x`, but it applies only to the first
//! definition of `x`, not the second, so `None` is a possible value for `x`.
//!
//! And we can't just track, for each definition, an index into a list of dominating constraints,
//! either, because we can have definitions which are still visible, but subject to constraints
//! that are no longer dominating, as in this code:
//!
//! ```python
//! x = 0
//! if flag1:
//! x = 1 if flag2 else None
//! assert x is not None
//! x
//! ```
//!
//! From the point of view of the final use of `x`, the `x is not None` constraint no longer
//! dominates, but it does dominate the `x = 1 if flag2 else None` definition, so we have to keep
//! track of that.
//!
//! The data structures used here ([`BitSet`] and [`smallvec::SmallVec`]) optimize for keeping all
//! data inline (avoiding lots of scattered allocations) in small-to-medium cases, and falling back
//! to heap allocation to be able to scale to arbitrary numbers of definitions and constraints when
//! needed.
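// As an illustration (added here, not part of the original file): at the final use of `x` in
// the second snippet above, the state for `x` holds two visible definitions, `x = 0` with no
// applicable narrowing constraints and `x = 1 if flag2 else None` with the `x is not None`
// constraint attached, and `may_be_unbound` is `false` because every path binds `x`. The tests
// at the bottom of this file render such states in a compact `definition<constraints>` notation.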
use super::bitset::{BitSet, BitSetIterator};
use ruff_index::newtype_index;
use smallvec::SmallVec;
/// A newtype-index for a definition in a particular scope.
#[newtype_index]
pub(super) struct ScopedDefinitionId;
/// A newtype-index for a constraint expression in a particular scope.
#[newtype_index]
pub(super) struct ScopedConstraintId;
/// Can reference up to 64 * this many total definitions inline; more will fall back to the heap.
const INLINE_DEFINITION_BLOCKS: usize = 3;
/// A [`BitSet`] of [`ScopedDefinitionId`], representing visible definitions of a symbol in a scope.
type Definitions = BitSet<INLINE_DEFINITION_BLOCKS>;
type DefinitionsIterator<'a> = BitSetIterator<'a, INLINE_DEFINITION_BLOCKS>;
/// Can reference up to 64 * this many total constraints inline; more will fall back to the heap.
const INLINE_CONSTRAINT_BLOCKS: usize = 2;
/// Can keep inline this many visible definitions per symbol at a given time; more will go to heap.
const INLINE_VISIBLE_DEFINITIONS_PER_SYMBOL: usize = 4;
/// One [`BitSet`] of applicable [`ScopedConstraintId`] per visible definition.
type InlineConstraintArray =
[BitSet<INLINE_CONSTRAINT_BLOCKS>; INLINE_VISIBLE_DEFINITIONS_PER_SYMBOL];
type Constraints = SmallVec<InlineConstraintArray>;
type ConstraintsIterator<'a> = std::slice::Iter<'a, BitSet<INLINE_CONSTRAINT_BLOCKS>>;
type ConstraintsIntoIterator = smallvec::IntoIter<InlineConstraintArray>;
/// Visible definitions and narrowing constraints for a single symbol at some point in control flow.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(super) struct SymbolState {
/// [`BitSet`]: which [`ScopedDefinitionId`] are visible for this symbol?
visible_definitions: Definitions,
/// For each definition, which [`ScopedConstraintId`] apply?
///
/// This is a [`smallvec::SmallVec`] which should always have one [`BitSet`] of constraints per
/// definition in `visible_definitions`.
constraints: Constraints,
/// Could the symbol be unbound at this point?
may_be_unbound: bool,
}
/// A single [`ScopedDefinitionId`] with an iterator of its applicable [`ScopedConstraintId`].
#[derive(Debug)]
pub(super) struct DefinitionIdWithConstraints<'a> {
pub(super) definition: ScopedDefinitionId,
pub(super) constraint_ids: ConstraintIdIterator<'a>,
}
impl SymbolState {
/// Return a new [`SymbolState`] representing an unbound symbol.
pub(super) fn unbound() -> Self {
Self {
visible_definitions: Definitions::default(),
constraints: Constraints::default(),
may_be_unbound: true,
}
}
/// Return a new [`SymbolState`] representing a symbol with a single visible definition.
pub(super) fn with(definition_id: ScopedDefinitionId) -> Self {
let mut constraints = Constraints::with_capacity(1);
constraints.push(BitSet::default());
Self {
visible_definitions: Definitions::with(definition_id.into()),
constraints,
may_be_unbound: false,
}
}
/// Add Unbound as a possibility for this symbol.
pub(super) fn add_unbound(&mut self) {
self.may_be_unbound = true;
}
/// Add given constraint to all currently-visible definitions.
pub(super) fn add_constraint(&mut self, constraint_id: ScopedConstraintId) {
for bitset in &mut self.constraints {
bitset.insert(constraint_id.into());
}
}
/// Merge another [`SymbolState`] into this one.
pub(super) fn merge(&mut self, b: SymbolState) {
let mut a = Self {
visible_definitions: Definitions::default(),
constraints: Constraints::default(),
may_be_unbound: self.may_be_unbound || b.may_be_unbound,
};
std::mem::swap(&mut a, self);
let mut a_defs_iter = a.visible_definitions.iter();
let mut b_defs_iter = b.visible_definitions.iter();
let mut a_constraints_iter = a.constraints.into_iter();
let mut b_constraints_iter = b.constraints.into_iter();
let mut opt_a_def: Option<u32> = a_defs_iter.next();
let mut opt_b_def: Option<u32> = b_defs_iter.next();
// Iterate through the definitions from `a` and `b`, always processing the lower definition
// ID first, and pushing each definition onto the merged `SymbolState` with its
// constraints. If a definition is found in both `a` and `b`, push it with the intersection
// of the constraints from the two paths; a constraint that applies from only one possible
// path is irrelevant.
// Helper to push `def`, with constraints in `constraints_iter`, onto `self`.
let push = |def, constraints_iter: &mut ConstraintsIntoIterator, merged: &mut Self| {
merged.visible_definitions.insert(def);
// SAFETY: we only ever create SymbolState with either no definitions and no constraint
// bitsets (`::unbound`) or one definition and one constraint bitset (`::with`), and
// `::merge` always pushes one definition and one constraint bitset together (just
// below), so the number of definitions and the number of constraint bitsets can never
// get out of sync.
let constraints = constraints_iter
.next()
.expect("definitions and constraints length mismatch");
merged.constraints.push(constraints);
};
loop {
match (opt_a_def, opt_b_def) {
(Some(a_def), Some(b_def)) => match a_def.cmp(&b_def) {
std::cmp::Ordering::Less => {
// Next definition ID is only in `a`, push it to `self` and advance `a`.
push(a_def, &mut a_constraints_iter, self);
opt_a_def = a_defs_iter.next();
}
std::cmp::Ordering::Greater => {
// Next definition ID is only in `b`, push it to `self` and advance `b`.
push(b_def, &mut b_constraints_iter, self);
opt_b_def = b_defs_iter.next();
}
std::cmp::Ordering::Equal => {
// Next definition is in both; push to `self` and intersect constraints.
push(a_def, &mut b_constraints_iter, self);
// SAFETY: we only ever create SymbolState with either no definitions and
// no constraint bitsets (`::unbound`) or one definition and one constraint
// bitset (`::with`), and `::merge` always pushes one definition and one
// constraint bitset together (just below), so the number of definitions
// and the number of constraint bitsets can never get out of sync.
let a_constraints = a_constraints_iter
.next()
.expect("definitions and constraints length mismatch");
// If the same definition is visible through both paths, any constraint
// that applies on only one path is irrelevant to the resulting type from
// unioning the two paths, so we intersect the constraints.
self.constraints
.last_mut()
.unwrap()
.intersect(&a_constraints);
opt_a_def = a_defs_iter.next();
opt_b_def = b_defs_iter.next();
}
},
(Some(a_def), None) => {
// We've exhausted `b`, just push the def from `a` and move on to the next.
push(a_def, &mut a_constraints_iter, self);
opt_a_def = a_defs_iter.next();
}
(None, Some(b_def)) => {
// We've exhausted `a`, just push the def from `b` and move on to the next.
push(b_def, &mut b_constraints_iter, self);
opt_b_def = b_defs_iter.next();
}
(None, None) => break,
}
}
}
/// Get iterator over visible definitions with constraints.
pub(super) fn visible_definitions(&self) -> DefinitionIdWithConstraintsIterator {
DefinitionIdWithConstraintsIterator {
definitions: self.visible_definitions.iter(),
constraints: self.constraints.iter(),
}
}
/// Could the symbol be unbound?
pub(super) fn may_be_unbound(&self) -> bool {
self.may_be_unbound
}
}
/// The default state of a symbol (if we've seen no definitions of it) is unbound.
impl Default for SymbolState {
fn default() -> Self {
SymbolState::unbound()
}
}
#[derive(Debug)]
pub(super) struct DefinitionIdWithConstraintsIterator<'a> {
definitions: DefinitionsIterator<'a>,
constraints: ConstraintsIterator<'a>,
}
impl<'a> Iterator for DefinitionIdWithConstraintsIterator<'a> {
type Item = DefinitionIdWithConstraints<'a>;
fn next(&mut self) -> Option<Self::Item> {
match (self.definitions.next(), self.constraints.next()) {
(None, None) => None,
(Some(def), Some(constraints)) => Some(DefinitionIdWithConstraints {
definition: ScopedDefinitionId::from_u32(def),
constraint_ids: ConstraintIdIterator {
wrapped: constraints.iter(),
},
}),
// SAFETY: see above.
_ => unreachable!("definitions and constraints length mismatch"),
}
}
}
impl std::iter::FusedIterator for DefinitionIdWithConstraintsIterator<'_> {}
#[derive(Debug)]
pub(super) struct ConstraintIdIterator<'a> {
wrapped: BitSetIterator<'a, INLINE_CONSTRAINT_BLOCKS>,
}
impl Iterator for ConstraintIdIterator<'_> {
type Item = ScopedConstraintId;
fn next(&mut self) -> Option<Self::Item> {
self.wrapped.next().map(ScopedConstraintId::from_u32)
}
}
impl std::iter::FusedIterator for ConstraintIdIterator<'_> {}
#[cfg(test)]
mod tests {
use super::{ScopedConstraintId, ScopedDefinitionId, SymbolState};
impl SymbolState {
pub(crate) fn assert(&self, may_be_unbound: bool, expected: &[&str]) {
assert_eq!(self.may_be_unbound(), may_be_unbound);
let actual = self
.visible_definitions()
.map(|def_id_with_constraints| {
format!(
"{}<{}>",
def_id_with_constraints.definition.as_u32(),
def_id_with_constraints
.constraint_ids
.map(ScopedConstraintId::as_u32)
.map(|idx| idx.to_string())
.collect::<Vec<_>>()
.join(", ")
)
})
.collect::<Vec<_>>();
assert_eq!(actual, expected);
}
}
#[test]
fn unbound() {
let cd = SymbolState::unbound();
cd.assert(true, &[]);
}
#[test]
fn with() {
let cd = SymbolState::with(ScopedDefinitionId::from_u32(0));
cd.assert(false, &["0<>"]);
}
#[test]
fn add_unbound() {
let mut cd = SymbolState::with(ScopedDefinitionId::from_u32(0));
cd.add_unbound();
cd.assert(true, &["0<>"]);
}
#[test]
fn add_constraint() {
let mut cd = SymbolState::with(ScopedDefinitionId::from_u32(0));
cd.add_constraint(ScopedConstraintId::from_u32(0));
cd.assert(false, &["0<0>"]);
}
#[test]
fn merge() {
// merging the same definition with the same constraint keeps the constraint
let mut cd0a = SymbolState::with(ScopedDefinitionId::from_u32(0));
cd0a.add_constraint(ScopedConstraintId::from_u32(0));
let mut cd0b = SymbolState::with(ScopedDefinitionId::from_u32(0));
cd0b.add_constraint(ScopedConstraintId::from_u32(0));
cd0a.merge(cd0b);
let mut cd0 = cd0a;
cd0.assert(false, &["0<0>"]);
// merging the same definition with differing constraints drops all constraints
let mut cd1a = SymbolState::with(ScopedDefinitionId::from_u32(1));
cd1a.add_constraint(ScopedConstraintId::from_u32(1));
let mut cd1b = SymbolState::with(ScopedDefinitionId::from_u32(1));
cd1b.add_constraint(ScopedConstraintId::from_u32(2));
cd1a.merge(cd1b);
let cd1 = cd1a;
cd1.assert(false, &["1<>"]);
// merging a constrained definition with unbound keeps both
let mut cd2a = SymbolState::with(ScopedDefinitionId::from_u32(2));
cd2a.add_constraint(ScopedConstraintId::from_u32(3));
let cd2b = SymbolState::unbound();
cd2a.merge(cd2b);
let cd2 = cd2a;
cd2.assert(true, &["2<3>"]);
// merging different definitions keeps them each with their existing constraints
cd0.merge(cd2);
let cd = cd0;
cd.assert(true, &["0<0>", "2<3>"]);
}
}

View File

@@ -1,250 +0,0 @@
use ruff_db::files::{File, FilePath};
use ruff_db::source::line_index;
use ruff_python_ast as ast;
use ruff_python_ast::{Expr, ExpressionRef};
use ruff_source_file::LineIndex;
use crate::module_name::ModuleName;
use crate::module_resolver::{resolve_module, Module};
use crate::semantic_index::ast_ids::HasScopedAstId;
use crate::semantic_index::semantic_index;
use crate::types::{definition_ty, global_symbol_ty_by_name, infer_scope_types, Type};
use crate::Db;
pub struct SemanticModel<'db> {
db: &'db dyn Db,
file: File,
}
impl<'db> SemanticModel<'db> {
pub fn new(db: &'db dyn Db, file: File) -> Self {
Self { db, file }
}
// TODO we don't actually want to expose the Db directly to lint rules, but we need to find a
// solution for exposing information from types
pub fn db(&self) -> &dyn Db {
self.db
}
pub fn file_path(&self) -> &FilePath {
self.file.path(self.db)
}
pub fn line_index(&self) -> LineIndex {
line_index(self.db.upcast(), self.file)
}
pub fn resolve_module(&self, module_name: ModuleName) -> Option<Module> {
resolve_module(self.db, module_name)
}
pub fn global_symbol_ty(&self, module: &Module, symbol_name: &str) -> Type<'db> {
global_symbol_ty_by_name(self.db, module.file(), symbol_name)
}
}
pub trait HasTy {
/// Returns the inferred type of `self`.
///
/// ## Panics
    /// May panic if `self` is from a different file than `model`.
fn ty<'db>(&self, model: &SemanticModel<'db>) -> Type<'db>;
}
impl HasTy for ast::ExpressionRef<'_> {
fn ty<'db>(&self, model: &SemanticModel<'db>) -> Type<'db> {
let index = semantic_index(model.db, model.file);
let file_scope = index.expression_scope_id(*self);
let scope = file_scope.to_scope_id(model.db, model.file);
let expression_id = self.scoped_ast_id(model.db, scope);
infer_scope_types(model.db, scope).expression_ty(expression_id)
}
}
macro_rules! impl_expression_has_ty {
($ty: ty) => {
impl HasTy for $ty {
#[inline]
fn ty<'db>(&self, model: &SemanticModel<'db>) -> Type<'db> {
let expression_ref = ExpressionRef::from(self);
expression_ref.ty(model)
}
}
};
}
impl_expression_has_ty!(ast::ExprBoolOp);
impl_expression_has_ty!(ast::ExprNamed);
impl_expression_has_ty!(ast::ExprBinOp);
impl_expression_has_ty!(ast::ExprUnaryOp);
impl_expression_has_ty!(ast::ExprLambda);
impl_expression_has_ty!(ast::ExprIf);
impl_expression_has_ty!(ast::ExprDict);
impl_expression_has_ty!(ast::ExprSet);
impl_expression_has_ty!(ast::ExprListComp);
impl_expression_has_ty!(ast::ExprSetComp);
impl_expression_has_ty!(ast::ExprDictComp);
impl_expression_has_ty!(ast::ExprGenerator);
impl_expression_has_ty!(ast::ExprAwait);
impl_expression_has_ty!(ast::ExprYield);
impl_expression_has_ty!(ast::ExprYieldFrom);
impl_expression_has_ty!(ast::ExprCompare);
impl_expression_has_ty!(ast::ExprCall);
impl_expression_has_ty!(ast::ExprFString);
impl_expression_has_ty!(ast::ExprStringLiteral);
impl_expression_has_ty!(ast::ExprBytesLiteral);
impl_expression_has_ty!(ast::ExprNumberLiteral);
impl_expression_has_ty!(ast::ExprBooleanLiteral);
impl_expression_has_ty!(ast::ExprNoneLiteral);
impl_expression_has_ty!(ast::ExprEllipsisLiteral);
impl_expression_has_ty!(ast::ExprAttribute);
impl_expression_has_ty!(ast::ExprSubscript);
impl_expression_has_ty!(ast::ExprStarred);
impl_expression_has_ty!(ast::ExprName);
impl_expression_has_ty!(ast::ExprList);
impl_expression_has_ty!(ast::ExprTuple);
impl_expression_has_ty!(ast::ExprSlice);
impl_expression_has_ty!(ast::ExprIpyEscapeCommand);
impl HasTy for ast::Expr {
fn ty<'db>(&self, model: &SemanticModel<'db>) -> Type<'db> {
match self {
Expr::BoolOp(inner) => inner.ty(model),
Expr::Named(inner) => inner.ty(model),
Expr::BinOp(inner) => inner.ty(model),
Expr::UnaryOp(inner) => inner.ty(model),
Expr::Lambda(inner) => inner.ty(model),
Expr::If(inner) => inner.ty(model),
Expr::Dict(inner) => inner.ty(model),
Expr::Set(inner) => inner.ty(model),
Expr::ListComp(inner) => inner.ty(model),
Expr::SetComp(inner) => inner.ty(model),
Expr::DictComp(inner) => inner.ty(model),
Expr::Generator(inner) => inner.ty(model),
Expr::Await(inner) => inner.ty(model),
Expr::Yield(inner) => inner.ty(model),
Expr::YieldFrom(inner) => inner.ty(model),
Expr::Compare(inner) => inner.ty(model),
Expr::Call(inner) => inner.ty(model),
Expr::FString(inner) => inner.ty(model),
Expr::StringLiteral(inner) => inner.ty(model),
Expr::BytesLiteral(inner) => inner.ty(model),
Expr::NumberLiteral(inner) => inner.ty(model),
Expr::BooleanLiteral(inner) => inner.ty(model),
Expr::NoneLiteral(inner) => inner.ty(model),
Expr::EllipsisLiteral(inner) => inner.ty(model),
Expr::Attribute(inner) => inner.ty(model),
Expr::Subscript(inner) => inner.ty(model),
Expr::Starred(inner) => inner.ty(model),
Expr::Name(inner) => inner.ty(model),
Expr::List(inner) => inner.ty(model),
Expr::Tuple(inner) => inner.ty(model),
Expr::Slice(inner) => inner.ty(model),
Expr::IpyEscapeCommand(inner) => inner.ty(model),
}
}
}
macro_rules! impl_definition_has_ty {
($ty: ty) => {
impl HasTy for $ty {
#[inline]
fn ty<'db>(&self, model: &SemanticModel<'db>) -> Type<'db> {
let index = semantic_index(model.db, model.file);
let definition = index.definition(self);
definition_ty(model.db, definition)
}
}
};
}
impl_definition_has_ty!(ast::StmtFunctionDef);
impl_definition_has_ty!(ast::StmtClassDef);
impl_definition_has_ty!(ast::Alias);
impl_definition_has_ty!(ast::Parameter);
impl_definition_has_ty!(ast::ParameterWithDefault);
#[cfg(test)]
mod tests {
use ruff_db::files::system_path_to_file;
use ruff_db::parsed::parsed_module;
use ruff_db::system::{DbWithTestSystem, SystemPathBuf};
use crate::db::tests::TestDb;
use crate::program::{Program, SearchPathSettings};
use crate::python_version::PythonVersion;
use crate::types::Type;
use crate::{HasTy, ProgramSettings, SemanticModel};
fn setup_db<'a>(files: impl IntoIterator<Item = (&'a str, &'a str)>) -> anyhow::Result<TestDb> {
let mut db = TestDb::new();
db.write_files(files)?;
Program::from_settings(
&db,
&ProgramSettings {
target_version: PythonVersion::default(),
search_paths: SearchPathSettings::new(SystemPathBuf::from("/src")),
},
)?;
Ok(db)
}
#[test]
fn function_ty() -> anyhow::Result<()> {
let db = setup_db([("/src/foo.py", "def test(): pass")])?;
let foo = system_path_to_file(&db, "/src/foo.py").unwrap();
let ast = parsed_module(&db, foo);
let function = ast.suite()[0].as_function_def_stmt().unwrap();
let model = SemanticModel::new(&db, foo);
let ty = function.ty(&model);
assert!(matches!(ty, Type::Function(_)));
Ok(())
}
#[test]
fn class_ty() -> anyhow::Result<()> {
let db = setup_db([("/src/foo.py", "class Test: pass")])?;
let foo = system_path_to_file(&db, "/src/foo.py").unwrap();
let ast = parsed_module(&db, foo);
let class = ast.suite()[0].as_class_def_stmt().unwrap();
let model = SemanticModel::new(&db, foo);
let ty = class.ty(&model);
assert!(matches!(ty, Type::Class(_)));
Ok(())
}
#[test]
fn alias_ty() -> anyhow::Result<()> {
let db = setup_db([
("/src/foo.py", "class Test: pass"),
("/src/bar.py", "from foo import Test"),
])?;
let bar = system_path_to_file(&db, "/src/bar.py").unwrap();
let ast = parsed_module(&db, bar);
let import = ast.suite()[0].as_import_from_stmt().unwrap();
let alias = &import.names[0];
let model = SemanticModel::new(&db, bar);
let ty = alias.ty(&model);
assert!(matches!(ty, Type::Class(_)));
Ok(())
}
}

View File

@@ -1,842 +0,0 @@
//! Utilities for finding the `site-packages` directory,
//! into which third-party packages are installed.
//!
//! The routines exposed by this module have different behaviour depending
//! on the platform of the *host machine*, which may be
//! different from the *target platform for type checking*. (A user
//! might be running red-knot on a Windows machine, but might
//! reasonably ask us to type-check code assuming that the code runs
//! on Linux.)
use std::fmt;
use std::io;
use std::num::NonZeroUsize;
use std::ops::Deref;
use ruff_db::system::{System, SystemPath, SystemPathBuf};
use crate::PythonVersion;
type SitePackagesDiscoveryResult<T> = Result<T, SitePackagesDiscoveryError>;
/// Abstraction for a Python virtual environment.
///
/// Most of this information is derived from the virtual environment's `pyvenv.cfg` file.
/// The format of this file is not defined anywhere, and exactly which keys are present
/// depends on the tool that was used to create the virtual environment.
#[derive(Debug)]
pub(crate) struct VirtualEnvironment {
venv_path: SysPrefixPath,
base_executable_home_path: PythonHomePath,
include_system_site_packages: bool,
/// The version of the Python executable that was used to create this virtual environment.
///
/// The Python version is encoded under different keys and in different formats
/// by different virtual-environment creation tools,
/// and the key is never read by the standard-library `site.py` module,
/// so it's possible that we might not be able to find this information
/// in an acceptable format under any of the keys we expect.
/// This field will be `None` if so.
version: Option<PythonVersion>,
}
impl VirtualEnvironment {
pub(crate) fn new(
path: impl AsRef<SystemPath>,
system: &dyn System,
) -> SitePackagesDiscoveryResult<Self> {
Self::new_impl(path.as_ref(), system)
}
fn new_impl(path: &SystemPath, system: &dyn System) -> SitePackagesDiscoveryResult<Self> {
fn pyvenv_cfg_line_number(index: usize) -> NonZeroUsize {
index.checked_add(1).and_then(NonZeroUsize::new).unwrap()
}
let venv_path = SysPrefixPath::new(path, system)?;
let pyvenv_cfg_path = venv_path.join("pyvenv.cfg");
tracing::debug!("Attempting to parse virtual environment metadata at '{pyvenv_cfg_path}'");
let pyvenv_cfg = system
.read_to_string(&pyvenv_cfg_path)
.map_err(SitePackagesDiscoveryError::NoPyvenvCfgFile)?;
let mut include_system_site_packages = false;
let mut base_executable_home_path = None;
let mut version_info_string = None;
// A `pyvenv.cfg` file *looks* like a `.ini` file, but actually isn't valid `.ini` syntax!
// The Python standard-library's `site` module parses these files by splitting each line on
// '=' characters, so that's what we should do as well.
//
// See also: https://snarky.ca/how-virtual-environments-work/
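//
// A typical `pyvenv.cfg` (the exact keys vary by tool) looks something like:
//
//     home = /usr/local/bin
//     include-system-site-packages = false
//     version = 3.12.4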
for (index, line) in pyvenv_cfg.lines().enumerate() {
if let Some((key, value)) = line.split_once('=') {
let key = key.trim();
if key.is_empty() {
return Err(SitePackagesDiscoveryError::PyvenvCfgParseError(
pyvenv_cfg_path,
PyvenvCfgParseErrorKind::MalformedKeyValuePair {
line_number: pyvenv_cfg_line_number(index),
},
));
}
let value = value.trim();
if value.is_empty() {
return Err(SitePackagesDiscoveryError::PyvenvCfgParseError(
pyvenv_cfg_path,
PyvenvCfgParseErrorKind::MalformedKeyValuePair {
line_number: pyvenv_cfg_line_number(index),
},
));
}
if value.contains('=') {
return Err(SitePackagesDiscoveryError::PyvenvCfgParseError(
pyvenv_cfg_path,
PyvenvCfgParseErrorKind::TooManyEquals {
line_number: pyvenv_cfg_line_number(index),
},
));
}
match key {
"include-system-site-packages" => {
include_system_site_packages = value.eq_ignore_ascii_case("true");
}
"home" => base_executable_home_path = Some(value),
// `virtualenv` and `uv` call this key `version_info`,
// but the stdlib venv module calls it `version`
"version" | "version_info" => version_info_string = Some(value),
_ => continue,
}
}
}
// The `home` key is read by the standard library's `site.py` module,
// so if it's missing from the `pyvenv.cfg` file
// (or the provided value is invalid),
// it's reasonable to consider the virtual environment irredeemably broken.
let Some(base_executable_home_path) = base_executable_home_path else {
return Err(SitePackagesDiscoveryError::PyvenvCfgParseError(
pyvenv_cfg_path,
PyvenvCfgParseErrorKind::NoHomeKey,
));
};
let base_executable_home_path = PythonHomePath::new(base_executable_home_path, system)
.map_err(|io_err| {
SitePackagesDiscoveryError::PyvenvCfgParseError(
pyvenv_cfg_path,
PyvenvCfgParseErrorKind::InvalidHomeValue(io_err),
)
})?;
// The `version`/`version_info` key, by contrast, is not read by the standard library,
// and is provided under different keys depending on which virtual-environment creation tool
// created the `pyvenv.cfg` file. Lenient parsing is appropriate here:
// the file isn't really *invalid* if it doesn't have this key,
// or if the value doesn't parse according to our expectations.
let version = version_info_string.and_then(|version_string| {
let mut version_info_parts = version_string.split('.');
let (major, minor) = (version_info_parts.next()?, version_info_parts.next()?);
PythonVersion::try_from((major, minor)).ok()
});
let metadata = Self {
venv_path,
base_executable_home_path,
include_system_site_packages,
version,
};
tracing::trace!("Resolved metadata for virtual environment: {metadata:?}");
Ok(metadata)
}
/// Return a list of `site-packages` directories that are available from this virtual environment
///
/// See the documentation for `site_packages_dir_from_sys_prefix` for more details.
pub(crate) fn site_packages_directories(
&self,
system: &dyn System,
) -> SitePackagesDiscoveryResult<Vec<SystemPathBuf>> {
let VirtualEnvironment {
venv_path,
base_executable_home_path,
include_system_site_packages,
version,
} = self;
let mut site_packages_directories = vec![site_packages_directory_from_sys_prefix(
venv_path, *version, system,
)?];
if *include_system_site_packages {
let system_sys_prefix =
SysPrefixPath::from_executable_home_path(base_executable_home_path);
// If we fail to resolve the `sys.prefix` path from the base executable home path,
// or if we fail to resolve the `site-packages` from the `sys.prefix` path,
// we should probably print a warning but *not* abort type checking
if let Some(sys_prefix_path) = system_sys_prefix {
match site_packages_directory_from_sys_prefix(&sys_prefix_path, *version, system) {
Ok(site_packages_directory) => {
site_packages_directories.push(site_packages_directory);
}
Err(error) => tracing::warn!(
"{error}. System site-packages will not be used for module resolution."
),
}
} else {
tracing::warn!(
"Failed to resolve `sys.prefix` of the system Python installation \
from the `home` value in the `pyvenv.cfg` file at '{}'. \
System site-packages will not be used for module resolution.",
venv_path.join("pyvenv.cfg")
);
}
}
tracing::debug!("Resolved site-packages directories for this virtual environment are: {site_packages_directories:?}");
Ok(site_packages_directories)
}
}
#[derive(Debug, thiserror::Error)]
pub(crate) enum SitePackagesDiscoveryError {
#[error("Invalid --venv-path argument: {0} could not be canonicalized")]
VenvDirCanonicalizationError(SystemPathBuf, #[source] io::Error),
#[error("Invalid --venv-path argument: {0} does not point to a directory on disk")]
VenvDirIsNotADirectory(SystemPathBuf),
#[error("--venv-path points to a broken venv with no pyvenv.cfg file")]
NoPyvenvCfgFile(#[source] io::Error),
#[error("Failed to parse the pyvenv.cfg file at {0} because {1}")]
PyvenvCfgParseError(SystemPathBuf, PyvenvCfgParseErrorKind),
#[error("Failed to search the `lib` directory of the Python installation at {1} for `site-packages`")]
CouldNotReadLibDirectory(#[source] io::Error, SysPrefixPath),
#[error("Could not find the `site-packages` directory for the Python installation at {0}")]
NoSitePackagesDirFound(SysPrefixPath),
}
/// The various ways in which parsing a `pyvenv.cfg` file could fail
#[derive(Debug)]
pub(crate) enum PyvenvCfgParseErrorKind {
TooManyEquals { line_number: NonZeroUsize },
MalformedKeyValuePair { line_number: NonZeroUsize },
NoHomeKey,
InvalidHomeValue(io::Error),
}
impl fmt::Display for PyvenvCfgParseErrorKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::TooManyEquals { line_number } => {
write!(f, "line {line_number} has too many '=' characters")
}
Self::MalformedKeyValuePair { line_number } => write!(
f,
"line {line_number} has a malformed `<key> = <value>` pair"
),
Self::NoHomeKey => f.write_str("the file does not have a `home` key"),
Self::InvalidHomeValue(io_err) => {
write!(
f,
"the following error was encountered \
when trying to resolve the `home` value to a directory on disk: {io_err}"
)
}
}
}
}
/// Attempt to retrieve the `site-packages` directory
/// associated with a given Python installation.
///
/// The location of the `site-packages` directory can vary according to the
/// Python version that this installation represents. The Python version may
/// or may not be known at this point, which is why the `python_version`
/// parameter is an `Option`.
fn site_packages_directory_from_sys_prefix(
sys_prefix_path: &SysPrefixPath,
python_version: Option<PythonVersion>,
system: &dyn System,
) -> SitePackagesDiscoveryResult<SystemPathBuf> {
tracing::debug!("Searching for site-packages directory in {sys_prefix_path}");
if cfg!(target_os = "windows") {
let site_packages = sys_prefix_path.join(r"Lib\site-packages");
return system
.is_directory(&site_packages)
.then_some(site_packages)
.ok_or(SitePackagesDiscoveryError::NoSitePackagesDirFound(
sys_prefix_path.to_owned(),
));
}
// In the Python standard library's `site.py` module (used for finding `site-packages`
// at runtime), we can find this in [the non-Windows branch]:
//
// ```py
// libdirs = [sys.platlibdir]
// if sys.platlibdir != "lib":
// libdirs.append("lib")
// ```
//
// Pyright therefore searches for both a `lib/python3.X/site-packages` directory
// and a `lib64/python3.X/site-packages` directory on non-MacOS Unix systems,
// since `sys.platlibdir` can sometimes be equal to `"lib64"`.
//
// However, we only care about the `site-packages` directory insofar as it allows
// us to discover Python source code that can be used for inferring type
// information regarding third-party dependencies. That means that we don't need
// to care about any possible `lib64/site-packages` directories, since
// [the `sys`-module documentation] states that `sys.platlibdir` is *only* ever
// used for C extensions, never for pure-Python modules.
//
// [the non-Windows branch]: https://github.com/python/cpython/blob/a8be8fc6c4682089be45a87bd5ee1f686040116c/Lib/site.py#L401-L410
// [the `sys`-module documentation]: https://docs.python.org/3/library/sys.html#sys.platlibdir
// If we were able to figure out what Python version this installation is,
// we should be able to avoid iterating through all items in the `lib/` directory:
if let Some(version) = python_version {
let expected_path = sys_prefix_path.join(format!("lib/python{version}/site-packages"));
if system.is_directory(&expected_path) {
return Ok(expected_path);
}
if version.free_threaded_build_available() {
// Nearly the same as `expected_path`, but with an additional `t` after {version}:
let alternative_path =
sys_prefix_path.join(format!("lib/python{version}t/site-packages"));
if system.is_directory(&alternative_path) {
return Ok(alternative_path);
}
}
}
// Either we couldn't figure out the version before calling this function
// (e.g., from a `pyvenv.cfg` file if this was a venv),
// or we couldn't find a `site-packages` folder at the expected location given
// the parsed version
//
// Note: the `python3.x` part of the `site-packages` path can't be computed from
// the `--target-version` the user has passed, as they might be running Python 3.12 locally
// even if they've requested that we type check their code "as if" they're running 3.8.
for entry_result in system
.read_directory(&sys_prefix_path.join("lib"))
.map_err(|io_err| {
SitePackagesDiscoveryError::CouldNotReadLibDirectory(io_err, sys_prefix_path.to_owned())
})?
{
let Ok(entry) = entry_result else {
continue;
};
if !entry.file_type().is_directory() {
continue;
}
let mut path = entry.into_path();
let name = path
.file_name()
.expect("File name to be non-null because path is guaranteed to be a child of `lib`");
if !name.starts_with("python3.") {
continue;
}
path.push("site-packages");
if system.is_directory(&path) {
return Ok(path);
}
}
Err(SitePackagesDiscoveryError::NoSitePackagesDirFound(
sys_prefix_path.to_owned(),
))
}
/// A path that represents the value of [`sys.prefix`] at runtime in Python
/// for a given Python executable.
///
/// For the case of a virtual environment, where a
/// Python binary is at `/.venv/bin/python`, `sys.prefix` is the path to
/// the virtual environment the Python binary lies inside, i.e. `/.venv`,
/// and `site-packages` will be at `.venv/lib/python3.X/site-packages`.
/// System Python installations generally work the same way: if a system
/// Python installation lies at `/opt/homebrew/bin/python`, `sys.prefix`
/// will be `/opt/homebrew`, and `site-packages` will be at
/// `/opt/homebrew/lib/python3.X/site-packages`.
///
/// [`sys.prefix`]: https://docs.python.org/3/library/sys.html#sys.prefix
#[derive(Debug, PartialEq, Eq, Clone)]
pub(crate) struct SysPrefixPath(SystemPathBuf);
impl SysPrefixPath {
fn new(
unvalidated_path: impl AsRef<SystemPath>,
system: &dyn System,
) -> SitePackagesDiscoveryResult<Self> {
Self::new_impl(unvalidated_path.as_ref(), system)
}
fn new_impl(
unvalidated_path: &SystemPath,
system: &dyn System,
) -> SitePackagesDiscoveryResult<Self> {
// It's important to resolve symlinks here rather than simply making the path absolute,
// since system Python installations often only put symlinks in the "expected"
// locations for `home` and `site-packages`
let canonicalized = system
.canonicalize_path(unvalidated_path)
.map_err(|io_err| {
SitePackagesDiscoveryError::VenvDirCanonicalizationError(
unvalidated_path.to_path_buf(),
io_err,
)
})?;
system
.is_directory(&canonicalized)
.then_some(Self(canonicalized))
.ok_or_else(|| {
SitePackagesDiscoveryError::VenvDirIsNotADirectory(unvalidated_path.to_path_buf())
})
}
fn from_executable_home_path(path: &PythonHomePath) -> Option<Self> {
// No need to check whether `path.parent()` is a directory:
// the parent of a canonicalised path that is known to exist
// is guaranteed to be a directory.
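// On Unix, the `home` value points at `{sys.prefix}/bin`, so `sys.prefix` is its
// parent; on Windows, the Python executable lives directly inside `sys.prefix`.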
if cfg!(target_os = "windows") {
Some(Self(path.to_path_buf()))
} else {
path.parent().map(|path| Self(path.to_path_buf()))
}
}
}
impl Deref for SysPrefixPath {
type Target = SystemPath;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl fmt::Display for SysPrefixPath {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "`sys.prefix` path '{}'", self.0)
}
}
/// The value given by the `home` key in `pyvenv.cfg` files.
///
/// This is equivalent to `{sys_prefix_path}/bin`, and points
/// to a directory in which a Python executable can be found.
/// Confusingly, it is *not* the same as the [`PYTHONHOME`]
/// environment variable that Python provides! However, it's
/// consistent among all mainstream creators of Python virtual
/// environments (the stdlib Python `venv` module, the third-party
/// `virtualenv` library, and `uv`), was specified by
/// [the original PEP adding the `venv` module],
/// and it's one of the few fields that's read by the Python
/// standard library's `site.py` module.
///
/// Although it doesn't appear to be specified anywhere,
/// all existing virtual environment tools always use an absolute path
/// for the `home` value, and the Python standard library also assumes
/// that the `home` value will be an absolute path.
///
/// Other values, such as the path to the Python executable or the
/// base-executable `sys.prefix` value, are either only provided in
/// `pyvenv.cfg` files by some virtual-environment creators,
/// or are included under different keys depending on which
/// virtual-environment creation tool you've used.
///
/// [`PYTHONHOME`]: https://docs.python.org/3/using/cmdline.html#envvar-PYTHONHOME
/// [the original PEP adding the `venv` module]: https://peps.python.org/pep-0405/
#[derive(Debug, PartialEq, Eq)]
struct PythonHomePath(SystemPathBuf);
impl PythonHomePath {
fn new(path: impl AsRef<SystemPath>, system: &dyn System) -> io::Result<Self> {
let path = path.as_ref();
// It's important to resolve symlinks here rather than simply making the path absolute,
// since system Python installations often only put symlinks in the "expected"
// locations for `home` and `site-packages`
let canonicalized = system.canonicalize_path(path)?;
system
.is_directory(&canonicalized)
.then_some(Self(canonicalized))
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "not a directory"))
}
}
impl Deref for PythonHomePath {
type Target = SystemPath;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl fmt::Display for PythonHomePath {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "`home` location '{}'", self.0)
}
}
impl PartialEq<SystemPath> for PythonHomePath {
fn eq(&self, other: &SystemPath) -> bool {
&*self.0 == other
}
}
impl PartialEq<SystemPathBuf> for PythonHomePath {
fn eq(&self, other: &SystemPathBuf) -> bool {
self == &**other
}
}
#[cfg(test)]
mod tests {
use ruff_db::system::TestSystem;
use super::*;
struct VirtualEnvironmentTester {
system: TestSystem,
minor_version: u8,
free_threaded: bool,
system_site_packages: bool,
pyvenv_cfg_version_field: Option<&'static str>,
}
impl VirtualEnvironmentTester {
/// Builds a mock virtual environment, and returns the path to the venv
fn build_mock_venv(&self) -> SystemPathBuf {
let VirtualEnvironmentTester {
system,
minor_version,
system_site_packages,
free_threaded,
pyvenv_cfg_version_field,
} = self;
let memory_fs = system.memory_file_system();
let unix_site_packages = if *free_threaded {
format!("lib/python3.{minor_version}t/site-packages")
} else {
format!("lib/python3.{minor_version}/site-packages")
};
let system_install_sys_prefix =
SystemPathBuf::from(&*format!("/Python3.{minor_version}"));
let (system_home_path, system_exe_path, system_site_packages_path) =
if cfg!(target_os = "windows") {
let system_home_path = system_install_sys_prefix.clone();
let system_exe_path = system_home_path.join("python.exe");
let system_site_packages_path =
system_install_sys_prefix.join(r"Lib\site-packages");
(system_home_path, system_exe_path, system_site_packages_path)
} else {
let system_home_path = system_install_sys_prefix.join("bin");
let system_exe_path = system_home_path.join("python");
let system_site_packages_path =
system_install_sys_prefix.join(&unix_site_packages);
(system_home_path, system_exe_path, system_site_packages_path)
};
memory_fs.write_file(system_exe_path, "").unwrap();
memory_fs
.create_directory_all(&system_site_packages_path)
.unwrap();
let venv_sys_prefix = SystemPathBuf::from("/.venv");
let (venv_exe, site_packages_path) = if cfg!(target_os = "windows") {
(
venv_sys_prefix.join(r"Scripts\python.exe"),
venv_sys_prefix.join(r"Lib\site-packages"),
)
} else {
(
venv_sys_prefix.join("bin/python"),
venv_sys_prefix.join(&unix_site_packages),
)
};
memory_fs.write_file(&venv_exe, "").unwrap();
memory_fs.create_directory_all(&site_packages_path).unwrap();
let pyvenv_cfg_path = venv_sys_prefix.join("pyvenv.cfg");
let mut pyvenv_cfg_contents = format!("home = {system_home_path}\n");
if let Some(version_field) = pyvenv_cfg_version_field {
pyvenv_cfg_contents.push_str(version_field);
pyvenv_cfg_contents.push('\n');
}
// Deliberately using weird casing here to test that our pyvenv.cfg parsing is case-insensitive:
if *system_site_packages {
pyvenv_cfg_contents.push_str("include-system-site-packages = TRuE\n");
}
memory_fs
.write_file(pyvenv_cfg_path, &pyvenv_cfg_contents)
.unwrap();
venv_sys_prefix
}
fn test(self) {
let venv_path = self.build_mock_venv();
let venv = VirtualEnvironment::new(venv_path.clone(), &self.system).unwrap();
assert_eq!(
venv.venv_path,
SysPrefixPath(self.system.canonicalize_path(&venv_path).unwrap())
);
assert_eq!(venv.include_system_site_packages, self.system_site_packages);
if self.pyvenv_cfg_version_field.is_some() {
assert_eq!(
venv.version,
Some(PythonVersion {
major: 3,
minor: self.minor_version
})
);
} else {
assert_eq!(venv.version, None);
}
let expected_home = if cfg!(target_os = "windows") {
SystemPathBuf::from(&*format!(r"\Python3.{}", self.minor_version))
} else {
SystemPathBuf::from(&*format!("/Python3.{}/bin", self.minor_version))
};
assert_eq!(venv.base_executable_home_path, expected_home);
let site_packages_directories = venv.site_packages_directories(&self.system).unwrap();
let expected_venv_site_packages = if cfg!(target_os = "windows") {
SystemPathBuf::from(r"\.venv\Lib\site-packages")
} else if self.free_threaded {
SystemPathBuf::from(&*format!(
"/.venv/lib/python3.{}t/site-packages",
self.minor_version
))
} else {
SystemPathBuf::from(&*format!(
"/.venv/lib/python3.{}/site-packages",
self.minor_version
))
};
let expected_system_site_packages = if cfg!(target_os = "windows") {
SystemPathBuf::from(&*format!(
r"\Python3.{}\Lib\site-packages",
self.minor_version
))
} else if self.free_threaded {
SystemPathBuf::from(&*format!(
"/Python3.{minor_version}/lib/python3.{minor_version}t/site-packages",
minor_version = self.minor_version
))
} else {
SystemPathBuf::from(&*format!(
"/Python3.{minor_version}/lib/python3.{minor_version}/site-packages",
minor_version = self.minor_version
))
};
if self.system_site_packages {
assert_eq!(
&site_packages_directories,
&[expected_venv_site_packages, expected_system_site_packages]
);
} else {
assert_eq!(&site_packages_directories, &[expected_venv_site_packages]);
}
}
}
#[test]
fn can_find_site_packages_directory_no_version_field_in_pyvenv_cfg() {
let tester = VirtualEnvironmentTester {
system: TestSystem::default(),
minor_version: 12,
free_threaded: false,
system_site_packages: false,
pyvenv_cfg_version_field: None,
};
tester.test();
}
#[test]
fn can_find_site_packages_directory_venv_style_version_field_in_pyvenv_cfg() {
let tester = VirtualEnvironmentTester {
system: TestSystem::default(),
minor_version: 12,
free_threaded: false,
system_site_packages: false,
pyvenv_cfg_version_field: Some("version = 3.12"),
};
tester.test();
}
#[test]
fn can_find_site_packages_directory_uv_style_version_field_in_pyvenv_cfg() {
let tester = VirtualEnvironmentTester {
system: TestSystem::default(),
minor_version: 12,
free_threaded: false,
system_site_packages: false,
pyvenv_cfg_version_field: Some("version_info = 3.12"),
};
tester.test();
}
#[test]
fn can_find_site_packages_directory_virtualenv_style_version_field_in_pyvenv_cfg() {
let tester = VirtualEnvironmentTester {
system: TestSystem::default(),
minor_version: 12,
free_threaded: false,
system_site_packages: false,
pyvenv_cfg_version_field: Some("version_info = 3.12.0rc2"),
};
tester.test();
}
#[test]
fn can_find_site_packages_directory_freethreaded_build() {
let tester = VirtualEnvironmentTester {
system: TestSystem::default(),
minor_version: 13,
free_threaded: true,
system_site_packages: false,
pyvenv_cfg_version_field: Some("version_info = 3.13"),
};
tester.test();
}
#[test]
fn finds_system_site_packages() {
let tester = VirtualEnvironmentTester {
system: TestSystem::default(),
minor_version: 13,
free_threaded: true,
system_site_packages: true,
pyvenv_cfg_version_field: Some("version_info = 3.13"),
};
tester.test();
}
#[test]
fn reject_venv_that_does_not_exist() {
let system = TestSystem::default();
assert!(matches!(
VirtualEnvironment::new("/.venv", &system),
Err(SitePackagesDiscoveryError::VenvDirIsNotADirectory(_))
));
}
#[test]
fn reject_venv_with_no_pyvenv_cfg_file() {
let system = TestSystem::default();
system
.memory_file_system()
.create_directory_all("/.venv")
.unwrap();
assert!(matches!(
VirtualEnvironment::new("/.venv", &system),
Err(SitePackagesDiscoveryError::NoPyvenvCfgFile(_))
));
}
#[test]
fn parsing_pyvenv_cfg_with_too_many_equals() {
let system = TestSystem::default();
let memory_fs = system.memory_file_system();
let pyvenv_cfg_path = SystemPathBuf::from("/.venv/pyvenv.cfg");
memory_fs
.write_file(&pyvenv_cfg_path, "home = bar = /.venv/bin")
.unwrap();
let venv_result = VirtualEnvironment::new("/.venv", &system);
assert!(matches!(
venv_result,
Err(SitePackagesDiscoveryError::PyvenvCfgParseError(
path,
PyvenvCfgParseErrorKind::TooManyEquals { line_number }
))
if path == pyvenv_cfg_path && Some(line_number) == NonZeroUsize::new(1)
));
}
#[test]
fn parsing_pyvenv_cfg_with_key_but_no_value_fails() {
let system = TestSystem::default();
let memory_fs = system.memory_file_system();
let pyvenv_cfg_path = SystemPathBuf::from("/.venv/pyvenv.cfg");
memory_fs.write_file(&pyvenv_cfg_path, "home =").unwrap();
let venv_result = VirtualEnvironment::new("/.venv", &system);
assert!(matches!(
venv_result,
Err(SitePackagesDiscoveryError::PyvenvCfgParseError(
path,
PyvenvCfgParseErrorKind::MalformedKeyValuePair { line_number }
))
if path == pyvenv_cfg_path && Some(line_number) == NonZeroUsize::new(1)
));
}
#[test]
fn parsing_pyvenv_cfg_with_value_but_no_key_fails() {
let system = TestSystem::default();
let memory_fs = system.memory_file_system();
let pyvenv_cfg_path = SystemPathBuf::from("/.venv/pyvenv.cfg");
memory_fs
.write_file(&pyvenv_cfg_path, "= whatever")
.unwrap();
let venv_result = VirtualEnvironment::new("/.venv", &system);
assert!(matches!(
venv_result,
Err(SitePackagesDiscoveryError::PyvenvCfgParseError(
path,
PyvenvCfgParseErrorKind::MalformedKeyValuePair { line_number }
))
if path == pyvenv_cfg_path && Some(line_number) == NonZeroUsize::new(1)
));
}
#[test]
fn parsing_pyvenv_cfg_with_no_home_key_fails() {
let system = TestSystem::default();
let memory_fs = system.memory_file_system();
let pyvenv_cfg_path = SystemPathBuf::from("/.venv/pyvenv.cfg");
memory_fs.write_file(&pyvenv_cfg_path, "").unwrap();
let venv_result = VirtualEnvironment::new("/.venv", &system);
assert!(matches!(
venv_result,
Err(SitePackagesDiscoveryError::PyvenvCfgParseError(
path,
PyvenvCfgParseErrorKind::NoHomeKey
))
if path == pyvenv_cfg_path
));
}
#[test]
fn parsing_pyvenv_cfg_with_invalid_home_key_fails() {
let system = TestSystem::default();
let memory_fs = system.memory_file_system();
let pyvenv_cfg_path = SystemPathBuf::from("/.venv/pyvenv.cfg");
memory_fs
.write_file(&pyvenv_cfg_path, "home = foo")
.unwrap();
let venv_result = VirtualEnvironment::new("/.venv", &system);
assert!(matches!(
venv_result,
Err(SitePackagesDiscoveryError::PyvenvCfgParseError(
path,
PyvenvCfgParseErrorKind::InvalidHomeValue(_)
))
if path == pyvenv_cfg_path
));
}
}

@@ -1,480 +0,0 @@
use ruff_db::files::File;
use ruff_python_ast::name::Name;
use crate::builtins::builtins_scope;
use crate::semantic_index::definition::Definition;
use crate::semantic_index::symbol::{ScopeId, ScopedSymbolId};
use crate::semantic_index::{
global_scope, semantic_index, symbol_table, use_def_map, DefinitionWithConstraints,
DefinitionWithConstraintsIterator,
};
use crate::types::narrow::narrowing_constraint;
use crate::{Db, FxOrderSet};
pub(crate) use self::builder::{IntersectionBuilder, UnionBuilder};
pub(crate) use self::diagnostic::TypeCheckDiagnostics;
pub(crate) use self::infer::{
infer_definition_types, infer_expression_types, infer_scope_types, TypeInference,
};
mod builder;
mod diagnostic;
mod display;
mod infer;
mod narrow;
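/// Type-check a file by inferring types for every scope in its semantic index and
/// collecting the resulting diagnostics.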
pub fn check_types(db: &dyn Db, file: File) -> TypeCheckDiagnostics {
let _span = tracing::trace_span!("check_types", file=?file.path(db)).entered();
let index = semantic_index(db, file);
let mut diagnostics = TypeCheckDiagnostics::new();
for scope_id in index.scope_ids() {
let result = infer_scope_types(db, scope_id);
diagnostics.extend(result.diagnostics());
}
diagnostics
}
/// Infer the public type of a symbol (its type as seen from outside its scope).
pub(crate) fn symbol_ty<'db>(
db: &'db dyn Db,
scope: ScopeId<'db>,
symbol: ScopedSymbolId,
) -> Type<'db> {
let _span = tracing::trace_span!("symbol_ty", ?symbol).entered();
let use_def = use_def_map(db, scope);
definitions_ty(
db,
use_def.public_definitions(symbol),
use_def
.public_may_be_unbound(symbol)
.then_some(Type::Unbound),
)
}
/// Shorthand for `symbol_ty` that takes a symbol name instead of an ID.
pub(crate) fn symbol_ty_by_name<'db>(
db: &'db dyn Db,
scope: ScopeId<'db>,
name: &str,
) -> Type<'db> {
let table = symbol_table(db, scope);
table
.symbol_id_by_name(name)
.map(|symbol| symbol_ty(db, scope, symbol))
.unwrap_or(Type::Unbound)
}
/// Shorthand for `symbol_ty` that looks up a module-global symbol by name in a file.
pub(crate) fn global_symbol_ty_by_name<'db>(db: &'db dyn Db, file: File, name: &str) -> Type<'db> {
symbol_ty_by_name(db, global_scope(db, file), name)
}
/// Shorthand for `symbol_ty` that looks up a symbol in the builtins.
///
/// Returns `Unbound` if the builtins module isn't available for some reason.
pub(crate) fn builtins_symbol_ty_by_name<'db>(db: &'db dyn Db, name: &str) -> Type<'db> {
builtins_scope(db)
.map(|builtins| symbol_ty_by_name(db, builtins, name))
.unwrap_or(Type::Unbound)
}
/// Infer the type of a [`Definition`].
pub(crate) fn definition_ty<'db>(db: &'db dyn Db, definition: Definition<'db>) -> Type<'db> {
let inference = infer_definition_types(db, definition);
inference.definition_ty(definition)
}
/// Infer the combined type of an array of [`Definition`]s, plus one optional "unbound type".
///
/// Will return a union if there is more than one definition, or at least one plus an unbound
/// type.
///
/// The "unbound type" represents the type in case control flow may not have passed through any
/// definitions in this scope. If this isn't possible, then it will be `None`. If it is possible,
/// and the result in that case should be Unbound (e.g. an unbound function local), then it will be
/// `Some(Type::Unbound)`. If it is possible and the result should be something else (e.g. an
/// implicit global lookup), then `unbound_type` will be `Some(the_global_symbol_type)`.
///
/// # Panics
/// Will panic if called with zero definitions and no `unbound_ty`. This is a logic error,
/// as any symbol with zero visible definitions clearly may be unbound, and the caller should
/// provide an `unbound_ty`.
pub(crate) fn definitions_ty<'db>(
db: &'db dyn Db,
definitions_with_constraints: DefinitionWithConstraintsIterator<'_, 'db>,
unbound_ty: Option<Type<'db>>,
) -> Type<'db> {
let def_types = definitions_with_constraints.map(
|DefinitionWithConstraints {
definition,
constraints,
}| {
let mut constraint_tys =
constraints.filter_map(|test| narrowing_constraint(db, test, definition));
let definition_ty = definition_ty(db, definition);
if let Some(first_constraint_ty) = constraint_tys.next() {
let mut builder = IntersectionBuilder::new(db);
builder = builder
.add_positive(definition_ty)
.add_positive(first_constraint_ty);
for constraint_ty in constraint_tys {
builder = builder.add_positive(constraint_ty);
}
builder.build()
} else {
definition_ty
}
},
);
let mut all_types = unbound_ty.into_iter().chain(def_types);
let Some(first) = all_types.next() else {
panic!("definitions_ty should never be called with zero definitions and no unbound_ty.")
};
if let Some(second) = all_types.next() {
let mut builder = UnionBuilder::new(db);
builder = builder.add(first).add(second);
for variant in all_types {
builder = builder.add(variant);
}
builder.build()
} else {
first
}
}
/// Unique ID for a type.
#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
pub enum Type<'db> {
/// the dynamic type: a statically-unknown set of values
Any,
/// the empty set of values
Never,
/// unknown type (either no annotation, or some kind of type error)
/// equivalent to Any, or possibly to object in strict mode
Unknown,
/// name does not exist or is not bound to any value (this represents an error, but with some
/// leniency options it could be silently resolved to Unknown in some cases)
Unbound,
/// the None object -- TODO remove this in favor of Instance(types.NoneType)
None,
/// a specific function object
Function(FunctionType<'db>),
/// a specific module object
Module(File),
/// a specific class object
Class(ClassType<'db>),
/// the set of Python objects with the given class in their __class__'s method resolution order
Instance(ClassType<'db>),
/// the set of objects in any of the types in the union
Union(UnionType<'db>),
/// the set of objects in all of the types in the intersection
Intersection(IntersectionType<'db>),
/// An integer literal
IntLiteral(i64),
/// A boolean literal, either `True` or `False`.
BooleanLiteral(bool),
// TODO protocols, callable types, overloads, generics, type vars
}
impl<'db> Type<'db> {
pub const fn is_unbound(&self) -> bool {
matches!(self, Type::Unbound)
}
pub const fn is_unknown(&self) -> bool {
matches!(self, Type::Unknown)
}
pub const fn is_never(&self) -> bool {
matches!(self, Type::Never)
}
pub fn may_be_unbound(&self, db: &'db dyn Db) -> bool {
match self {
Type::Unbound => true,
Type::Union(union) => union.contains(db, Type::Unbound),
// Unbound can't appear in an intersection, because an intersection with Unbound
// simplifies to just Unbound.
_ => false,
}
}
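/// Return this type with any occurrence of `Unbound` replaced by `replacement`,
/// recursing into union elements.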
#[must_use]
pub fn replace_unbound_with(&self, db: &'db dyn Db, replacement: Type<'db>) -> Type<'db> {
match self {
Type::Unbound => replacement,
Type::Union(union) => union
.elements(db)
.into_iter()
.fold(UnionBuilder::new(db), |builder, ty| {
builder.add(ty.replace_unbound_with(db, replacement))
})
.build(),
ty => *ty,
}
}
/// Resolve a member access of a type.
///
/// For example, if `foo` is `Type::Instance(<Bar>)`,
/// `foo.member(&db, "baz")` returns the type of `baz` attributes
/// as accessed from instances of the `Bar` class.
///
/// TODO: use of this method currently requires manually checking
/// whether the returned type is `Unknown`/`Unbound`
/// (or a union with `Unknown`/`Unbound`) in many places.
/// Ideally we'd use a more type-safe pattern, such as returning
/// an `Option` or a `Result` from this method, which would force
/// us to explicitly consider whether to handle an error or propagate
/// it up the call stack.
#[must_use]
pub fn member(&self, db: &'db dyn Db, name: &Name) -> Type<'db> {
match self {
Type::Any => Type::Any,
Type::Never => {
// TODO: attribute lookup on Never type
Type::Unknown
}
Type::Unknown => Type::Unknown,
Type::Unbound => Type::Unbound,
Type::None => {
// TODO: attribute lookup on None type
Type::Unknown
}
Type::Function(_) => {
// TODO: attribute lookup on function type
Type::Unknown
}
Type::Module(file) => global_symbol_ty_by_name(db, *file, name),
Type::Class(class) => class.class_member(db, name),
Type::Instance(_) => {
// TODO MRO? get_own_instance_member, get_instance_member
Type::Unknown
}
Type::Union(union) => union
.elements(db)
.iter()
.fold(UnionBuilder::new(db), |builder, element_ty| {
builder.add(element_ty.member(db, name))
})
.build(),
Type::Intersection(_) => {
// TODO perform the get_member on each type in the intersection
// TODO return the intersection of those results
Type::Unknown
}
Type::IntLiteral(_) => {
// TODO raise error
Type::Unknown
}
Type::BooleanLiteral(_) => Type::Unknown,
}
}
#[must_use]
pub fn instance(&self) -> Type<'db> {
match self {
Type::Any => Type::Any,
Type::Unknown => Type::Unknown,
Type::Class(class) => Type::Instance(*class),
_ => Type::Unknown, // TODO type errors
}
}
}
#[salsa::interned]
pub struct FunctionType<'db> {
/// name of the function at definition
pub name: Name,
/// types of all decorators on this function
decorators: Vec<Type<'db>>,
}
impl<'db> FunctionType<'db> {
pub fn has_decorator(self, db: &dyn Db, decorator: Type<'_>) -> bool {
self.decorators(db).contains(&decorator)
}
}
#[salsa::interned]
pub struct ClassType<'db> {
/// Name of the class at definition
pub name: Name,
/// Types of all class bases
bases: Vec<Type<'db>>,
body_scope: ScopeId<'db>,
}
impl<'db> ClassType<'db> {
/// Returns the class member of this class named `name`.
///
/// The member resolves to a member of the class itself or any of its bases.
pub fn class_member(self, db: &'db dyn Db, name: &Name) -> Type<'db> {
let member = self.own_class_member(db, name);
if !member.is_unbound() {
return member;
}
self.inherited_class_member(db, name)
}
/// Returns the inferred type of the class member named `name`.
pub fn own_class_member(self, db: &'db dyn Db, name: &Name) -> Type<'db> {
let scope = self.body_scope(db);
symbol_ty_by_name(db, scope, name)
}
pub fn inherited_class_member(self, db: &'db dyn Db, name: &Name) -> Type<'db> {
for base in self.bases(db) {
let member = base.member(db, name);
if !member.is_unbound() {
return member;
}
}
Type::Unbound
}
}
#[salsa::interned]
pub struct UnionType<'db> {
/// The union type includes values in any of these types.
elements: FxOrderSet<Type<'db>>,
}
impl<'db> UnionType<'db> {
pub fn contains(&self, db: &'db dyn Db, ty: Type<'db>) -> bool {
self.elements(db).contains(&ty)
}
}
#[salsa::interned]
pub struct IntersectionType<'db> {
/// The intersection type includes only values in all of these types.
positive: FxOrderSet<Type<'db>>,
/// The intersection type does not include any value in any of these types.
///
/// Negation types aren't expressible in annotations, and are most likely to arise from type
/// narrowing along with intersections (e.g. `if not isinstance(...)`), so we represent them
/// directly in intersections rather than as a separate type.
negative: FxOrderSet<Type<'db>>,
}
#[cfg(test)]
mod tests {
use anyhow::Context;
use ruff_db::files::system_path_to_file;
use ruff_db::system::{DbWithTestSystem, SystemPathBuf};
use crate::db::tests::TestDb;
use crate::{Program, ProgramSettings, PythonVersion, SearchPathSettings};
use super::TypeCheckDiagnostics;
fn setup_db() -> TestDb {
let db = TestDb::new();
db.memory_file_system()
.create_directory_all("/src")
.unwrap();
Program::from_settings(
&db,
&ProgramSettings {
target_version: PythonVersion::default(),
search_paths: SearchPathSettings::new(SystemPathBuf::from("/src")),
},
)
.expect("Valid search path settings");
db
}
fn assert_diagnostic_messages(diagnostics: &TypeCheckDiagnostics, expected: &[&str]) {
let messages: Vec<&str> = diagnostics
.iter()
.map(|diagnostic| diagnostic.message())
.collect();
assert_eq!(&messages, expected);
}
#[test]
fn unresolved_import_statement() -> anyhow::Result<()> {
let mut db = setup_db();
db.write_file("src/foo.py", "import bar\n")
.context("Failed to write foo.py")?;
let foo = system_path_to_file(&db, "src/foo.py").context("Failed to resolve foo.py")?;
let diagnostics = super::check_types(&db, foo);
assert_diagnostic_messages(&diagnostics, &["Import 'bar' could not be resolved."]);
Ok(())
}
#[test]
fn unresolved_import_from_statement() {
let mut db = setup_db();
db.write_file("src/foo.py", "from bar import baz\n")
.unwrap();
let foo = system_path_to_file(&db, "src/foo.py").unwrap();
let diagnostics = super::check_types(&db, foo);
assert_diagnostic_messages(&diagnostics, &["Import 'bar' could not be resolved."]);
}
#[test]
fn unresolved_import_from_resolved_module() {
let mut db = setup_db();
db.write_files([("/src/a.py", ""), ("/src/b.py", "from a import thing")])
.unwrap();
let b_file = system_path_to_file(&db, "/src/b.py").unwrap();
let b_file_diagnostics = super::check_types(&db, b_file);
assert_diagnostic_messages(
&b_file_diagnostics,
&["Could not resolve import of 'thing' from 'a'"],
);
}
#[ignore = "\
A spurious second 'Unresolved import' diagnostic message is emitted on `b.py`, \
despite the symbol existing in the symbol table for `a.py`"]
#[test]
fn resolved_import_of_symbol_from_unresolved_import() {
let mut db = setup_db();
db.write_files([
("/src/a.py", "import foo as foo"),
("/src/b.py", "from a import foo"),
])
.unwrap();
let a_file = system_path_to_file(&db, "/src/a.py").unwrap();
let a_file_diagnostics = super::check_types(&db, a_file);
assert_diagnostic_messages(
&a_file_diagnostics,
&["Import 'foo' could not be resolved."],
);
// Importing the unresolved import into a second first-party file should not trigger
// an additional "unresolved import" violation
let b_file = system_path_to_file(&db, "/src/b.py").unwrap();
let b_file_diagnostics = super::check_types(&db, b_file);
assert_eq!(&*b_file_diagnostics, &[]);
}
}

@@ -1,471 +0,0 @@
//! Smart builders for union and intersection types.
//!
//! Invariants we maintain here:
//! * No single-element union types (should just be the contained type instead.)
//! * No single-positive-element intersection types. Single-negative-element are OK, we don't
//! have a standalone negation type so there's no other representation for this.
//! * The same type should never appear more than once in a union or intersection. (This should
//! be expanded to cover subtyping -- see below -- but for now we only implement it for type
//! identity.)
//! * Disjunctive normal form (DNF): the tree of unions and intersections can never be deeper
//! than a union-of-intersections. Unions cannot contain other unions (the inner union just
//! flattens into the outer one), intersections cannot contain other intersections (also
//! flattens), and intersections cannot contain unions (the intersection distributes over the
//! union, inverting it into a union-of-intersections).
//!
//! The implication of these invariants is that a [`UnionBuilder`] does not necessarily build a
//! [`Type::Union`]. For example, if only one type is added to the [`UnionBuilder`], `build()` will
//! just return that type directly. The same is true for [`IntersectionBuilder`]; for example, if a
//! union type is added to the intersection, it will distribute and [`IntersectionBuilder::build`]
//! may end up returning a [`Type::Union`] of intersections.
//!
//! In the future we should have these additional invariants, but they aren't implemented yet:
//! * No type in a union can be a subtype of any other type in the union (just eliminate the
//! subtype from the union).
//! * No type in an intersection can be a supertype of any other type in the intersection (just
//! eliminate the supertype from the intersection).
//! * An intersection containing two non-overlapping types should simplify to [`Type::Never`].
use crate::types::{IntersectionType, Type, UnionType};
use crate::{Db, FxOrderSet};
pub(crate) struct UnionBuilder<'db> {
elements: FxOrderSet<Type<'db>>,
db: &'db dyn Db,
}
impl<'db> UnionBuilder<'db> {
pub(crate) fn new(db: &'db dyn Db) -> Self {
Self {
db,
elements: FxOrderSet::default(),
}
}
/// Adds a type to this union.
pub(crate) fn add(mut self, ty: Type<'db>) -> Self {
match ty {
Type::Union(union) => {
self.elements.extend(&union.elements(self.db));
}
Type::Never => {}
_ => {
self.elements.insert(ty);
}
}
self
}
pub(crate) fn build(self) -> Type<'db> {
match self.elements.len() {
0 => Type::Never,
1 => self.elements[0],
_ => Type::Union(UnionType::new(self.db, self.elements)),
}
}
}
#[derive(Clone)]
pub(crate) struct IntersectionBuilder<'db> {
// Really this builds a union-of-intersections, because we always keep our set-theoretic types
// in disjunctive normal form (DNF), a union of intersections. In the simplest case there's
// just a single intersection in this vector, and we are building a single intersection type,
// but if a union is added to the intersection, we'll distribute ourselves over that union and
// create a union of intersections.
intersections: Vec<InnerIntersectionBuilder<'db>>,
db: &'db dyn Db,
}
impl<'db> IntersectionBuilder<'db> {
pub(crate) fn new(db: &'db dyn Db) -> Self {
Self {
db,
intersections: vec![InnerIntersectionBuilder::new()],
}
}
fn empty(db: &'db dyn Db) -> Self {
Self {
db,
intersections: vec![],
}
}
pub(crate) fn add_positive(mut self, ty: Type<'db>) -> Self {
if let Type::Union(union) = ty {
// Distribute ourself over this union: for each union element, clone ourself and
// intersect with that union element, then create a new union-of-intersections with all
// of those sub-intersections in it. E.g. if `self` is a simple intersection `T1 & T2`
// and we add `T3 | T4` to the intersection, we don't get `T1 & T2 & (T3 | T4)` (that's
// not in DNF), we distribute the union and get `(T1 & T3) | (T2 & T3) | (T1 & T4) |
// (T2 & T4)`. If `self` is already a union-of-intersections `(T1 & T2) | (T3 & T4)`
// and we add `T5 | T6` to it, that flattens all the way out to `(T1 & T2 & T5) | (T1 &
// T2 & T6) | (T3 & T4 & T5) ...` -- you get the idea.
union
.elements(self.db)
.iter()
.map(|elem| self.clone().add_positive(*elem))
.fold(IntersectionBuilder::empty(self.db), |mut builder, sub| {
builder.intersections.extend(sub.intersections);
builder
})
} else {
// If we are already a union-of-intersections, distribute the new intersected element
// across all of those intersections.
for inner in &mut self.intersections {
inner.add_positive(self.db, ty);
}
self
}
}
pub(crate) fn add_negative(mut self, ty: Type<'db>) -> Self {
// See comments above in `add_positive`; this is just the negated version.
if let Type::Union(union) = ty {
union
.elements(self.db)
.iter()
.map(|elem| self.clone().add_negative(*elem))
.fold(IntersectionBuilder::empty(self.db), |mut builder, sub| {
builder.intersections.extend(sub.intersections);
builder
})
} else {
for inner in &mut self.intersections {
inner.add_negative(self.db, ty);
}
self
}
}
pub(crate) fn build(mut self) -> Type<'db> {
// Avoid allocating the UnionBuilder unnecessarily if we have just one intersection:
if self.intersections.len() == 1 {
self.intersections.pop().unwrap().build(self.db)
} else {
let mut builder = UnionBuilder::new(self.db);
for inner in self.intersections {
builder = builder.add(inner.build(self.db));
}
builder.build()
}
}
}
#[derive(Debug, Clone, Default)]
struct InnerIntersectionBuilder<'db> {
positive: FxOrderSet<Type<'db>>,
negative: FxOrderSet<Type<'db>>,
}
impl<'db> InnerIntersectionBuilder<'db> {
fn new() -> Self {
Self::default()
}
/// Adds a positive type to this intersection.
fn add_positive(&mut self, db: &'db dyn Db, ty: Type<'db>) {
match ty {
Type::Intersection(inter) => {
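// Flatten a nested intersection into this one, cancelling out any element
// that already appears on the opposite (positive vs. negative) side.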
let pos = inter.positive(db);
let neg = inter.negative(db);
self.positive.extend(pos.difference(&self.negative));
self.negative.extend(neg.difference(&self.positive));
self.positive.retain(|elem| !neg.contains(elem));
self.negative.retain(|elem| !pos.contains(elem));
}
_ => {
if !self.negative.remove(&ty) {
self.positive.insert(ty);
};
}
}
}
/// Adds a negative type to this intersection.
fn add_negative(&mut self, db: &'db dyn Db, ty: Type<'db>) {
// TODO Any/Unknown actually should not self-cancel
match ty {
Type::Intersection(intersection) => {
let pos = intersection.negative(db);
let neg = intersection.positive(db);
self.positive.extend(pos.difference(&self.negative));
self.negative.extend(neg.difference(&self.positive));
self.positive.retain(|elem| !neg.contains(elem));
self.negative.retain(|elem| !pos.contains(elem));
}
Type::Never => {}
Type::Unbound => {}
_ => {
if !self.positive.remove(&ty) {
self.negative.insert(ty);
};
}
}
}
fn simplify(&mut self) {
// TODO this should be generalized based on subtyping, for now we just handle a few cases
// Never is a subtype of all types
if self.positive.contains(&Type::Never) {
self.positive.retain(Type::is_never);
self.negative.clear();
}
if self.positive.contains(&Type::Unbound) {
self.positive.retain(Type::is_unbound);
self.negative.clear();
}
// None intersects only with object
for pos in &self.positive {
if let Type::Instance(_) = pos {
// could be `object` type
} else {
self.negative.remove(&Type::None);
break;
}
}
}
fn build(mut self, db: &'db dyn Db) -> Type<'db> {
self.simplify();
match (self.positive.len(), self.negative.len()) {
(0, 0) => Type::Never,
(1, 0) => self.positive[0],
_ => {
self.positive.shrink_to_fit();
self.negative.shrink_to_fit();
Type::Intersection(IntersectionType::new(db, self.positive, self.negative))
}
}
}
}
#[cfg(test)]
mod tests {
use super::{IntersectionBuilder, IntersectionType, Type, UnionBuilder, UnionType};
use crate::db::tests::TestDb;
fn setup_db() -> TestDb {
TestDb::new()
}
impl<'db> UnionType<'db> {
fn elements_vec(self, db: &'db TestDb) -> Vec<Type<'db>> {
self.elements(db).into_iter().collect()
}
}
#[test]
fn build_union() {
let db = setup_db();
let t0 = Type::IntLiteral(0);
let t1 = Type::IntLiteral(1);
let Type::Union(union) = UnionBuilder::new(&db).add(t0).add(t1).build() else {
panic!("expected a union");
};
assert_eq!(union.elements_vec(&db), &[t0, t1]);
}
#[test]
fn build_union_single() {
let db = setup_db();
let t0 = Type::IntLiteral(0);
let ty = UnionBuilder::new(&db).add(t0).build();
assert_eq!(ty, t0);
}
#[test]
fn build_union_empty() {
let db = setup_db();
let ty = UnionBuilder::new(&db).build();
assert_eq!(ty, Type::Never);
}
#[test]
fn build_union_never() {
let db = setup_db();
let t0 = Type::IntLiteral(0);
let ty = UnionBuilder::new(&db).add(t0).add(Type::Never).build();
assert_eq!(ty, t0);
}
#[test]
fn build_union_flatten() {
let db = setup_db();
let t0 = Type::IntLiteral(0);
let t1 = Type::IntLiteral(1);
let t2 = Type::IntLiteral(2);
let u1 = UnionBuilder::new(&db).add(t0).add(t1).build();
let Type::Union(union) = UnionBuilder::new(&db).add(u1).add(t2).build() else {
panic!("expected a union");
};
assert_eq!(union.elements_vec(&db), &[t0, t1, t2]);
}
impl<'db> IntersectionType<'db> {
fn pos_vec(self, db: &'db TestDb) -> Vec<Type<'db>> {
self.positive(db).into_iter().collect()
}
fn neg_vec(self, db: &'db TestDb) -> Vec<Type<'db>> {
self.negative(db).into_iter().collect()
}
}
#[test]
fn build_intersection() {
let db = setup_db();
let t0 = Type::IntLiteral(0);
let ta = Type::Any;
let Type::Intersection(inter) = IntersectionBuilder::new(&db)
.add_positive(ta)
.add_negative(t0)
.build()
else {
panic!("expected to be an intersection");
};
assert_eq!(inter.pos_vec(&db), &[ta]);
assert_eq!(inter.neg_vec(&db), &[t0]);
}
#[test]
fn build_intersection_flatten_positive() {
let db = setup_db();
let ta = Type::Any;
let t1 = Type::IntLiteral(1);
let t2 = Type::IntLiteral(2);
let i0 = IntersectionBuilder::new(&db)
.add_positive(ta)
.add_negative(t1)
.build();
let Type::Intersection(inter) = IntersectionBuilder::new(&db)
.add_positive(t2)
.add_positive(i0)
.build()
else {
panic!("expected to be an intersection");
};
assert_eq!(inter.pos_vec(&db), &[t2, ta]);
assert_eq!(inter.neg_vec(&db), &[t1]);
}
#[test]
fn build_intersection_flatten_negative() {
let db = setup_db();
let ta = Type::Any;
let t1 = Type::IntLiteral(1);
let t2 = Type::IntLiteral(2);
let i0 = IntersectionBuilder::new(&db)
.add_positive(ta)
.add_negative(t1)
.build();
let Type::Intersection(inter) = IntersectionBuilder::new(&db)
.add_positive(t2)
.add_negative(i0)
.build()
else {
panic!("expected to be an intersection");
};
assert_eq!(inter.pos_vec(&db), &[t2, t1]);
assert_eq!(inter.neg_vec(&db), &[ta]);
}
#[test]
fn intersection_distributes_over_union() {
let db = setup_db();
let t0 = Type::IntLiteral(0);
let t1 = Type::IntLiteral(1);
let ta = Type::Any;
let u0 = UnionBuilder::new(&db).add(t0).add(t1).build();
let Type::Union(union) = IntersectionBuilder::new(&db)
.add_positive(ta)
.add_positive(u0)
.build()
else {
panic!("expected a union");
};
let [Type::Intersection(i0), Type::Intersection(i1)] = union.elements_vec(&db)[..] else {
panic!("expected a union of two intersections");
};
assert_eq!(i0.pos_vec(&db), &[ta, t0]);
assert_eq!(i1.pos_vec(&db), &[ta, t1]);
}
#[test]
fn build_intersection_self_negation() {
let db = setup_db();
let ty = IntersectionBuilder::new(&db)
.add_positive(Type::None)
.add_negative(Type::None)
.build();
assert_eq!(ty, Type::Never);
}
#[test]
fn build_intersection_simplify_negative_never() {
let db = setup_db();
let ty = IntersectionBuilder::new(&db)
.add_positive(Type::None)
.add_negative(Type::Never)
.build();
assert_eq!(ty, Type::None);
}
#[test]
fn build_intersection_simplify_positive_never() {
let db = setup_db();
let ty = IntersectionBuilder::new(&db)
.add_positive(Type::None)
.add_positive(Type::Never)
.build();
assert_eq!(ty, Type::Never);
}
#[test]
fn build_intersection_simplify_positive_unbound() {
let db = setup_db();
let ty = IntersectionBuilder::new(&db)
.add_positive(Type::Unbound)
.add_positive(Type::IntLiteral(1))
.build();
assert_eq!(ty, Type::Unbound);
}
#[test]
fn build_intersection_simplify_negative_unbound() {
let db = setup_db();
let ty = IntersectionBuilder::new(&db)
.add_negative(Type::Unbound)
.add_positive(Type::IntLiteral(1))
.build();
assert_eq!(ty, Type::IntLiteral(1));
}
#[test]
fn build_intersection_simplify_negative_none() {
let db = setup_db();
let ty = IntersectionBuilder::new(&db)
.add_negative(Type::None)
.add_positive(Type::IntLiteral(1))
.build();
assert_eq!(ty, Type::IntLiteral(1));
}
}

@@ -1,111 +0,0 @@
use ruff_db::files::File;
use ruff_text_size::{Ranged, TextRange};
use std::fmt::Formatter;
use std::ops::Deref;
use std::sync::Arc;
#[derive(Debug, Eq, PartialEq)]
pub struct TypeCheckDiagnostic {
// TODO: Don't use string keys for rules
pub(super) rule: String,
pub(super) message: String,
pub(super) range: TextRange,
pub(super) file: File,
}
impl TypeCheckDiagnostic {
pub fn rule(&self) -> &str {
&self.rule
}
pub fn message(&self) -> &str {
&self.message
}
pub fn file(&self) -> File {
self.file
}
}
impl Ranged for TypeCheckDiagnostic {
fn range(&self) -> TextRange {
self.range
}
}
/// A collection of type check diagnostics.
///
/// The diagnostics are wrapped in an `Arc` because they need to be cloned multiple times
/// when going from `infer_expression` to `check_file`. We could consider
/// making [`TypeCheckDiagnostic`] a Salsa struct to have them Arena-allocated (once the Tables refactor is done).
/// Using a Salsa struct does have the downside that it leaks the Salsa dependency into diagnostics, and
/// each Salsa struct comes with some overhead.
#[derive(Default, Eq, PartialEq)]
pub struct TypeCheckDiagnostics {
inner: Vec<std::sync::Arc<TypeCheckDiagnostic>>,
}
impl TypeCheckDiagnostics {
pub fn new() -> Self {
Self { inner: Vec::new() }
}
pub(super) fn push(&mut self, diagnostic: TypeCheckDiagnostic) {
self.inner.push(Arc::new(diagnostic));
}
pub(crate) fn shrink_to_fit(&mut self) {
self.inner.shrink_to_fit();
}
}
impl Extend<TypeCheckDiagnostic> for TypeCheckDiagnostics {
fn extend<T: IntoIterator<Item = TypeCheckDiagnostic>>(&mut self, iter: T) {
self.inner.extend(iter.into_iter().map(std::sync::Arc::new));
}
}
impl Extend<std::sync::Arc<TypeCheckDiagnostic>> for TypeCheckDiagnostics {
fn extend<T: IntoIterator<Item = Arc<TypeCheckDiagnostic>>>(&mut self, iter: T) {
self.inner.extend(iter);
}
}
impl<'a> Extend<&'a std::sync::Arc<TypeCheckDiagnostic>> for TypeCheckDiagnostics {
fn extend<T: IntoIterator<Item = &'a Arc<TypeCheckDiagnostic>>>(&mut self, iter: T) {
self.inner
.extend(iter.into_iter().map(std::sync::Arc::clone));
}
}
impl std::fmt::Debug for TypeCheckDiagnostics {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
self.inner.fmt(f)
}
}
impl Deref for TypeCheckDiagnostics {
type Target = [std::sync::Arc<TypeCheckDiagnostic>];
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl IntoIterator for TypeCheckDiagnostics {
type Item = Arc<TypeCheckDiagnostic>;
type IntoIter = std::vec::IntoIter<std::sync::Arc<TypeCheckDiagnostic>>;
fn into_iter(self) -> Self::IntoIter {
self.inner.into_iter()
}
}
impl<'a> IntoIterator for &'a TypeCheckDiagnostics {
type Item = &'a Arc<TypeCheckDiagnostic>;
type IntoIter = std::slice::Iter<'a, std::sync::Arc<TypeCheckDiagnostic>>;
fn into_iter(self) -> Self::IntoIter {
self.inner.iter()
}
}

@@ -1,152 +0,0 @@
//! Display implementations for types.
use std::fmt::{Display, Formatter};
use crate::types::{IntersectionType, Type, UnionType};
use crate::Db;
impl<'db> Type<'db> {
pub fn display(&'db self, db: &'db dyn Db) -> DisplayType<'db> {
DisplayType { ty: self, db }
}
}
#[derive(Copy, Clone)]
pub struct DisplayType<'db> {
ty: &'db Type<'db>,
db: &'db dyn Db,
}
impl Display for DisplayType<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self.ty {
Type::Any => f.write_str("Any"),
Type::Never => f.write_str("Never"),
Type::Unknown => f.write_str("Unknown"),
Type::Unbound => f.write_str("Unbound"),
Type::None => f.write_str("None"),
Type::Module(file) => {
write!(f, "<module '{:?}'>", file.path(self.db))
}
// TODO functions and classes should display using a fully qualified name
Type::Class(class) => write!(f, "Literal[{}]", class.name(self.db)),
Type::Instance(class) => f.write_str(&class.name(self.db)),
Type::Function(function) => write!(f, "Literal[{}]", function.name(self.db)),
Type::Union(union) => union.display(self.db).fmt(f),
Type::Intersection(intersection) => intersection.display(self.db).fmt(f),
Type::IntLiteral(n) => write!(f, "Literal[{n}]"),
Type::BooleanLiteral(boolean) => {
write!(f, "Literal[{}]", if *boolean { "True" } else { "False" })
}
}
}
}
impl std::fmt::Debug for DisplayType<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl<'db> UnionType<'db> {
fn display(&'db self, db: &'db dyn Db) -> DisplayUnionType<'db> {
DisplayUnionType { db, ty: self }
}
}
struct DisplayUnionType<'db> {
ty: &'db UnionType<'db>,
db: &'db dyn Db,
}
impl Display for DisplayUnionType<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let union = self.ty;
let (int_literals, other_types): (Vec<Type>, Vec<Type>) = union
.elements(self.db)
.iter()
.copied()
.partition(|ty| matches!(ty, Type::IntLiteral(_)));
let mut first = true;
if !int_literals.is_empty() {
f.write_str("Literal[")?;
let mut nums: Vec<_> = int_literals
.into_iter()
.filter_map(|ty| {
if let Type::IntLiteral(n) = ty {
Some(n)
} else {
None
}
})
.collect();
nums.sort_unstable();
for num in nums {
if !first {
f.write_str(", ")?;
}
write!(f, "{num}")?;
first = false;
}
f.write_str("]")?;
}
for ty in other_types {
if !first {
f.write_str(" | ")?;
};
first = false;
write!(f, "{}", ty.display(self.db))?;
}
Ok(())
}
}
impl std::fmt::Debug for DisplayUnionType<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl<'db> IntersectionType<'db> {
fn display(&'db self, db: &'db dyn Db) -> DisplayIntersectionType<'db> {
DisplayIntersectionType { db, ty: self }
}
}
struct DisplayIntersectionType<'db> {
ty: &'db IntersectionType<'db>,
db: &'db dyn Db,
}
impl Display for DisplayIntersectionType<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let mut first = true;
for (neg, ty) in self
.ty
.positive(self.db)
.iter()
.map(|ty| (false, ty))
.chain(self.ty.negative(self.db).iter().map(|ty| (true, ty)))
{
if !first {
f.write_str(" & ")?;
};
first = false;
if neg {
f.write_str("~")?;
};
write!(f, "{}", ty.display(self.db))?;
}
Ok(())
}
}
impl std::fmt::Debug for DisplayIntersectionType<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
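
As a readability aid, the grouping behaviour of `DisplayUnionType::fmt` above can be sketched outside the crate; the following is a hypothetical Python mirror of that logic, not part of the diff:

```python
# Hypothetical mirror of DisplayUnionType::fmt (illustrative only): int literals
# are collected, sorted, and folded into one Literal[...]; the remaining members
# are then joined with " | ".
def display_union(members: list[int | str]) -> str:
    literals = sorted(m for m in members if isinstance(m, int))
    others = [m for m in members if not isinstance(m, int)]
    parts: list[str] = []
    if literals:
        parts.append(f"Literal[{', '.join(map(str, literals))}]")
    parts.extend(others)
    return " | ".join(parts)

# A union of IntLiteral(2), IntLiteral(1) and None would render as:
assert display_union([2, 1, "None"]) == "Literal[1, 2] | None"
```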

File diff suppressed because it is too large

View File

@@ -1,115 +0,0 @@
use crate::semantic_index::ast_ids::HasScopedAstId;
use crate::semantic_index::definition::Definition;
use crate::semantic_index::expression::Expression;
use crate::semantic_index::symbol::{ScopeId, ScopedSymbolId, SymbolTable};
use crate::semantic_index::symbol_table;
use crate::types::{infer_expression_types, IntersectionBuilder, Type, TypeInference};
use crate::Db;
use ruff_python_ast as ast;
use rustc_hash::FxHashMap;
use std::sync::Arc;
/// Return the type constraint that `test` (if true) would place on `definition`, if any.
///
/// For example, if we have this code:
///
/// ```python
/// y = 1 if flag else None
/// x = 1 if flag else None
/// if x is not None:
/// ...
/// ```
///
/// The `test` expression `x is not None` places the constraint "not None" on the definition of
/// `x`, so in that case we'd return `Some(Type::Intersection(negative=[Type::None]))`.
///
/// If we called this with the same `test` expression but the `definition` of `y`, however,
/// no constraint is applied to that definition, so we'd just return `None`.
pub(crate) fn narrowing_constraint<'db>(
db: &'db dyn Db,
test: Expression<'db>,
definition: Definition<'db>,
) -> Option<Type<'db>> {
all_narrowing_constraints(db, test)
.get(&definition.symbol(db))
.copied()
}
#[salsa::tracked(return_ref)]
fn all_narrowing_constraints<'db>(
db: &'db dyn Db,
test: Expression<'db>,
) -> NarrowingConstraints<'db> {
NarrowingConstraintsBuilder::new(db, test).finish()
}
type NarrowingConstraints<'db> = FxHashMap<ScopedSymbolId, Type<'db>>;
struct NarrowingConstraintsBuilder<'db> {
db: &'db dyn Db,
expression: Expression<'db>,
constraints: NarrowingConstraints<'db>,
}
impl<'db> NarrowingConstraintsBuilder<'db> {
fn new(db: &'db dyn Db, expression: Expression<'db>) -> Self {
Self {
db,
expression,
constraints: NarrowingConstraints::default(),
}
}
fn finish(mut self) -> NarrowingConstraints<'db> {
if let ast::Expr::Compare(expr_compare) = self.expression.node_ref(self.db).node() {
self.add_expr_compare(expr_compare);
}
// TODO other test expression kinds
self.constraints.shrink_to_fit();
self.constraints
}
fn symbols(&self) -> Arc<SymbolTable> {
symbol_table(self.db, self.scope())
}
fn scope(&self) -> ScopeId<'db> {
self.expression.scope(self.db)
}
fn inference(&self) -> &'db TypeInference<'db> {
infer_expression_types(self.db, self.expression)
}
fn add_expr_compare(&mut self, expr_compare: &ast::ExprCompare) {
let ast::ExprCompare {
range: _,
left,
ops,
comparators,
} = expr_compare;
if let ast::Expr::Name(ast::ExprName {
range: _,
id,
ctx: _,
}) = left.as_ref()
{
// SAFETY: we should always have a symbol for every Name node.
let symbol = self.symbols().symbol_id_by_name(id).unwrap();
let scope = self.scope();
let inference = self.inference();
for (op, comparator) in std::iter::zip(&**ops, &**comparators) {
let comp_ty = inference.expression_ty(comparator.scoped_ast_id(self.db, scope));
if matches!(op, ast::CmpOp::IsNot) {
let ty = IntersectionBuilder::new(self.db)
.add_negative(comp_ty)
.build();
self.constraints.insert(symbol, ty);
};
// TODO other comparison types
}
}
}
}
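
A hedged illustration of what the `is not None` branch above is meant to achieve (expected narrowing, not actual tool output):

```python
# Hypothetical illustration: the constraint derived above targets only the
# definition named in the comparison, so `x` is narrowed but `y` is not.
def f(flag: bool) -> None:
    y = 1 if flag else None   # y: Literal[1] | None  (no constraint from the test below)
    x = 1 if flag else None   # x: Literal[1] | None
    if x is not None:
        # x's type is intersected with ~None, leaving Literal[1];
        # y keeps Literal[1] | None because no constraint applies to its definition.
        print(x + 1)
```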

View File

@@ -1,237 +0,0 @@
The "typeshed" project is licensed under the terms of the Apache license, as
reproduced below.
= = = = =
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= = = = =
Parts of typeshed are licensed under different licenses (like the MIT
license), reproduced below.
= = = = =
The MIT License
Copyright (c) 2015 Jukka Lehtosalo and contributors
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
= = = = =

View File

@@ -1,124 +0,0 @@
# typeshed
[![Tests](https://github.com/python/typeshed/actions/workflows/tests.yml/badge.svg)](https://github.com/python/typeshed/actions/workflows/tests.yml)
[![Chat at https://gitter.im/python/typing](https://badges.gitter.im/python/typing.svg)](https://gitter.im/python/typing?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Pull Requests Welcome](https://img.shields.io/badge/pull%20requests-welcome-brightgreen.svg)](https://github.com/python/typeshed/blob/main/CONTRIBUTING.md)
## About
Typeshed contains external type annotations for the Python standard library
and Python builtins, as well as third party packages as contributed by
people external to those projects.
This data can e.g. be used for static analysis, type checking, type inference,
and autocompletion.
For information on how to use typeshed, read below. Information for
contributors can be found in [CONTRIBUTING.md](CONTRIBUTING.md). **Please read
it before submitting pull requests; do not report issues with annotations to
the project the stubs are for, but instead report them here to typeshed.**
Further documentation on stub files, typeshed, and Python's typing system in
general, can also be found at https://typing.readthedocs.io/en/latest/.
Typeshed supports Python versions 3.8 and up.
## Using
If you're just using a type checker ([mypy](https://github.com/python/mypy/),
[pyright](https://github.com/microsoft/pyright),
[pytype](https://github.com/google/pytype/), PyCharm, ...), as opposed to
developing it, you don't need to interact with the typeshed repo at
all: a copy of the standard library part of typeshed is bundled with type checkers.
And type stubs for third party packages and modules you are using can
be installed from PyPI. For example, if you are using `html5lib` and `requests`,
you can install the type stubs using
```bash
$ pip install types-html5lib types-requests
```
These PyPI packages follow [PEP 561](http://www.python.org/dev/peps/pep-0561/)
and are automatically released (up to once a day) by
[typeshed internal machinery](https://github.com/typeshed-internal/stub_uploader).
Type checkers should be able to use these stub packages when installed. For more
details, see the documentation for your type checker.
### Package versioning for third-party stubs
Version numbers of third-party stub packages consist of at least four parts.
All parts of the stub version, except for the last part, correspond to the
version of the runtime package being stubbed. For example, if the `types-foo`
package has version `1.2.0.20240309`, this guarantees that the `types-foo` package
contains stubs targeted against `foo==1.2.*` and tested against the latest
version of `foo` matching that specifier. In this example, the final element
of the version number (20240309) indicates that the stub package was pushed on
March 9, 2024.
At typeshed, we try to keep breaking changes to a minimum. However, due to the
nature of stubs, any version bump can introduce changes that might make your
code fail to type check.
There are several strategies available for specifying the version of a stubs
package you're using, each with its own tradeoffs:
1. Use the same bounds that you use for the package being stubbed. For example,
if you use `requests>=2.30.0,<2.32`, you can use
`types-requests>=2.30.0,<2.32`. This ensures that the stubs are compatible
with the package you are using, but it carries a small risk of breaking
type checking due to changes in the stubs.
Another risk of this strategy is that stubs often lag behind
the package being stubbed. You might want to force the package being stubbed
to a certain minimum version because it fixes a critical bug, but if
correspondingly updated stubs have not been released, your type
checking results may not be fully accurate.
2. Pin the stubs to a known good version and update the pin from time to time
(either manually, or using a tool such as dependabot or renovate).
For example, if you use `types-requests==2.31.0.1`, you can have confidence
that upgrading dependencies will not break type checking. However, you will
miss out on improvements in the stubs that could potentially improve type
checking until you update the pin. This strategy also has the risk that the
stubs you are using might become incompatible with the package being stubbed.
3. Don't pin the stubs. This is the option that demands the least work from
you when it comes to updating version pins, and has the advantage that you
will automatically benefit from improved stubs whenever a new version of the
stubs package is released. However, it carries the risk that the stubs
become incompatible with the package being stubbed.
For example, if a new major version of the package is released, there's a
chance the stubs might be updated to reflect the new version of the runtime
package before you update the package being stubbed.
You can also switch between the different strategies as needed. For example,
you could default to strategy (1), but fall back to strategy (2) when
a problem arises that can't easily be fixed.
### The `_typeshed` package
typeshed includes a package `_typeshed` as part of the standard library.
This package and its submodules contain utility types, but are not
available at runtime. For more information about how to use this package,
[see the `stdlib/_typeshed` directory](https://github.com/python/typeshed/tree/main/stdlib/_typeshed).
## Discussion
If you've run into behavior in the type checker that suggests the type
stubs for a given library are incorrect or incomplete,
we want to hear from you!
Our main forum for discussion is the project's [GitHub issue
tracker](https://github.com/python/typeshed/issues). This is the right
place to start a discussion of any of the above or most any other
topic concerning the project.
If you have general questions about typing with Python, or you need
a review of your type annotations or stubs outside of typeshed, head over to
[our discussion forum](https://github.com/python/typing/discussions).
For less formal discussion, try the typing chat room on
[gitter.im](https://gitter.im/python/typing). Some typeshed maintainers
are almost always present; feel free to find us there and we're happy
to chat. Substantive technical discussion will be directed to the
issue tracker.

View File

@@ -1 +0,0 @@
1ace5718deaf3041f8e3d1dc9c9e8a8e830e517f

View File

@@ -1,315 +0,0 @@
# The structure of this file is as follows:
# - Blank lines and comments starting with `#` are ignored.
# - Lines contain the name of a module, followed by a colon,
# a space, and a version range (for example: `symbol: 3.0-3.9`).
#
# Version ranges may be of the form "X.Y-A.B" or "X.Y-". The
# first form means that a module was introduced in version X.Y and last
# available in version A.B. The second form means that the module was
# introduced in version X.Y and is still available in the latest
# version of Python.
#
# If a submodule is not listed separately, it has the same lifetime as
# its parent module.
#
# Python versions before 3.0 are ignored, so any module that was already
# present in 3.0 will have "3.0" as its minimum version. Version ranges
# for unsupported versions of Python 3 are generally accurate but we do
# not guarantee their correctness.
__future__: 3.0-
__main__: 3.0-
_ast: 3.0-
_bisect: 3.0-
_bootlocale: 3.4-3.9
_codecs: 3.0-
_collections_abc: 3.3-
_compat_pickle: 3.1-
_compression: 3.5-
_csv: 3.0-
_ctypes: 3.0-
_curses: 3.0-
_decimal: 3.3-
_dummy_thread: 3.0-3.8
_dummy_threading: 3.0-3.8
_heapq: 3.0-
_imp: 3.0-
_interpchannels: 3.13-
_interpqueues: 3.13-
_interpreters: 3.13-
_json: 3.0-
_locale: 3.0-
_lsprof: 3.0-
_markupbase: 3.0-
_msi: 3.0-
_operator: 3.4-
_osx_support: 3.0-
_posixsubprocess: 3.2-
_py_abc: 3.7-
_pydecimal: 3.5-
_random: 3.0-
_sitebuiltins: 3.4-
_socket: 3.0- # present in 3.0 at runtime, but not in typeshed
_stat: 3.4-
_thread: 3.0-
_threading_local: 3.0-
_tkinter: 3.0-
_tracemalloc: 3.4-
_typeshed: 3.0- # not present at runtime, only for type checking
_warnings: 3.0-
_weakref: 3.0-
_weakrefset: 3.0-
_winapi: 3.3-
abc: 3.0-
aifc: 3.0-3.12
antigravity: 3.0-
argparse: 3.0-
array: 3.0-
ast: 3.0-
asynchat: 3.0-3.11
asyncio: 3.4-
asyncio.exceptions: 3.8-
asyncio.format_helpers: 3.7-
asyncio.mixins: 3.10-
asyncio.runners: 3.7-
asyncio.staggered: 3.8-
asyncio.taskgroups: 3.11-
asyncio.threads: 3.9-
asyncio.timeouts: 3.11-
asyncio.trsock: 3.8-
asyncore: 3.0-3.11
atexit: 3.0-
audioop: 3.0-3.12
base64: 3.0-
bdb: 3.0-
binascii: 3.0-
binhex: 3.0-3.10
bisect: 3.0-
builtins: 3.0-
bz2: 3.0-
cProfile: 3.0-
calendar: 3.0-
cgi: 3.0-3.12
cgitb: 3.0-3.12
chunk: 3.0-3.12
cmath: 3.0-
cmd: 3.0-
code: 3.0-
codecs: 3.0-
codeop: 3.0-
collections: 3.0-
collections.abc: 3.3-
colorsys: 3.0-
compileall: 3.0-
concurrent: 3.2-
configparser: 3.0-
contextlib: 3.0-
contextvars: 3.7-
copy: 3.0-
copyreg: 3.0-
crypt: 3.0-3.12
csv: 3.0-
ctypes: 3.0-
curses: 3.0-
dataclasses: 3.7-
datetime: 3.0-
dbm: 3.0-
dbm.sqlite3: 3.13-
decimal: 3.0-
difflib: 3.0-
dis: 3.0-
distutils: 3.0-3.11
distutils.command.bdist_msi: 3.0-3.10
distutils.command.bdist_wininst: 3.0-3.9
doctest: 3.0-
dummy_threading: 3.0-3.8
email: 3.0-
encodings: 3.0-
ensurepip: 3.0-
enum: 3.4-
errno: 3.0-
faulthandler: 3.3-
fcntl: 3.0-
filecmp: 3.0-
fileinput: 3.0-
fnmatch: 3.0-
formatter: 3.0-3.9
fractions: 3.0-
ftplib: 3.0-
functools: 3.0-
gc: 3.0-
genericpath: 3.0-
getopt: 3.0-
getpass: 3.0-
gettext: 3.0-
glob: 3.0-
graphlib: 3.9-
grp: 3.0-
gzip: 3.0-
hashlib: 3.0-
heapq: 3.0-
hmac: 3.0-
html: 3.0-
http: 3.0-
imaplib: 3.0-
imghdr: 3.0-3.12
imp: 3.0-3.11
importlib: 3.0-
importlib._abc: 3.10-
importlib.metadata: 3.8-
importlib.metadata._meta: 3.10-
importlib.metadata.diagnose: 3.13-
importlib.readers: 3.10-
importlib.resources: 3.7-
importlib.resources.abc: 3.11-
importlib.resources.readers: 3.11-
importlib.resources.simple: 3.11-
importlib.simple: 3.11-
inspect: 3.0-
io: 3.0-
ipaddress: 3.3-
itertools: 3.0-
json: 3.0-
keyword: 3.0-
lib2to3: 3.0-3.12
linecache: 3.0-
locale: 3.0-
logging: 3.0-
lzma: 3.3-
mailbox: 3.0-
mailcap: 3.0-3.12
marshal: 3.0-
math: 3.0-
mimetypes: 3.0-
mmap: 3.0-
modulefinder: 3.0-
msilib: 3.0-3.12
msvcrt: 3.0-
multiprocessing: 3.0-
multiprocessing.resource_tracker: 3.8-
multiprocessing.shared_memory: 3.8-
netrc: 3.0-
nis: 3.0-3.12
nntplib: 3.0-3.12
nt: 3.0-
ntpath: 3.0-
nturl2path: 3.0-
numbers: 3.0-
opcode: 3.0-
operator: 3.0-
optparse: 3.0-
os: 3.0-
ossaudiodev: 3.0-3.12
parser: 3.0-3.9
pathlib: 3.4-
pdb: 3.0-
pickle: 3.0-
pickletools: 3.0-
pipes: 3.0-3.12
pkgutil: 3.0-
platform: 3.0-
plistlib: 3.0-
poplib: 3.0-
posix: 3.0-
posixpath: 3.0-
pprint: 3.0-
profile: 3.0-
pstats: 3.0-
pty: 3.0-
pwd: 3.0-
py_compile: 3.0-
pyclbr: 3.0-
pydoc: 3.0-
pydoc_data: 3.0-
pyexpat: 3.0-
queue: 3.0-
quopri: 3.0-
random: 3.0-
re: 3.0-
readline: 3.0-
reprlib: 3.0-
resource: 3.0-
rlcompleter: 3.0-
runpy: 3.0-
sched: 3.0-
secrets: 3.6-
select: 3.0-
selectors: 3.4-
shelve: 3.0-
shlex: 3.0-
shutil: 3.0-
signal: 3.0-
site: 3.0-
smtpd: 3.0-3.11
smtplib: 3.0-
sndhdr: 3.0-3.12
socket: 3.0-
socketserver: 3.0-
spwd: 3.0-3.12
sqlite3: 3.0-
sre_compile: 3.0-
sre_constants: 3.0-
sre_parse: 3.0-
ssl: 3.0-
stat: 3.0-
statistics: 3.4-
string: 3.0-
stringprep: 3.0-
struct: 3.0-
subprocess: 3.0-
sunau: 3.0-3.12
symbol: 3.0-3.9
symtable: 3.0-
sys: 3.0-
sys._monitoring: 3.12- # Doesn't actually exist. See comments in the stub.
sysconfig: 3.0-
syslog: 3.0-
tabnanny: 3.0-
tarfile: 3.0-
telnetlib: 3.0-3.12
tempfile: 3.0-
termios: 3.0-
textwrap: 3.0-
this: 3.0-
threading: 3.0-
time: 3.0-
timeit: 3.0-
tkinter: 3.0-
tkinter.tix: 3.0-3.12
token: 3.0-
tokenize: 3.0-
tomllib: 3.11-
trace: 3.0-
traceback: 3.0-
tracemalloc: 3.4-
tty: 3.0-
turtle: 3.0-
types: 3.0-
typing: 3.5-
typing_extensions: 3.0-
unicodedata: 3.0-
unittest: 3.0-
unittest._log: 3.9-
unittest.async_case: 3.8-
urllib: 3.0-
uu: 3.0-3.12
uuid: 3.0-
venv: 3.3-
warnings: 3.0-
wave: 3.0-
weakref: 3.0-
webbrowser: 3.0-
winreg: 3.0-
winsound: 3.0-
wsgiref: 3.0-
wsgiref.types: 3.11-
xdrlib: 3.0-3.12
xml: 3.0-
xmlrpc: 3.0-
xxlimited: 3.2-
zipapp: 3.5-
zipfile: 3.0-
zipfile._path: 3.12-
zipimport: 3.0-
zlib: 3.0-
zoneinfo: 3.9-
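
The header of this file fully specifies the line format; below is a minimal, hypothetical parser sketch (not part of typeshed or ruff) for that format:

```python
# Hypothetical sketch: parse one VERSIONS line such as "symbol: 3.0-3.9" or
# "asyncio: 3.4-". Comments after `#` and blank lines are ignored; a missing
# upper bound means the module is still available in the latest Python version.
def parse_versions_line(line: str) -> tuple[str, tuple[int, int], tuple[int, int] | None] | None:
    line = line.split("#")[0].strip()
    if not line:
        return None
    module, _, version_range = line.partition(": ")
    introduced, _, removed = version_range.partition("-")

    def parse(version: str) -> tuple[int, int]:
        major, minor = version.split(".")
        return int(major), int(minor)

    return module, parse(introduced), parse(removed) if removed else None

assert parse_versions_line("symbol: 3.0-3.9") == ("symbol", (3, 0), (3, 9))
assert parse_versions_line("asyncio: 3.4-") == ("asyncio", (3, 4), None)
```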

View File

@@ -1,36 +0,0 @@
from typing_extensions import TypeAlias
_VersionInfo: TypeAlias = tuple[int, int, int, str, int]
class _Feature:
def __init__(self, optionalRelease: _VersionInfo, mandatoryRelease: _VersionInfo | None, compiler_flag: int) -> None: ...
def getOptionalRelease(self) -> _VersionInfo: ...
def getMandatoryRelease(self) -> _VersionInfo | None: ...
compiler_flag: int
absolute_import: _Feature
division: _Feature
generators: _Feature
nested_scopes: _Feature
print_function: _Feature
unicode_literals: _Feature
with_statement: _Feature
barry_as_FLUFL: _Feature
generator_stop: _Feature
annotations: _Feature
all_feature_names: list[str] # undocumented
__all__ = [
"all_feature_names",
"absolute_import",
"division",
"generators",
"nested_scopes",
"print_function",
"unicode_literals",
"with_statement",
"barry_as_FLUFL",
"generator_stop",
"annotations",
]

View File

@@ -1,3 +0,0 @@
from typing import Any
def __getattr__(name: str) -> Any: ...

File diff suppressed because it is too large

View File

@@ -1,84 +0,0 @@
import sys
from _typeshed import SupportsLenAndGetItem, SupportsRichComparisonT
from collections.abc import Callable, MutableSequence
from typing import TypeVar, overload
_T = TypeVar("_T")
if sys.version_info >= (3, 10):
@overload
def bisect_left(
a: SupportsLenAndGetItem[SupportsRichComparisonT],
x: SupportsRichComparisonT,
lo: int = 0,
hi: int | None = None,
*,
key: None = None,
) -> int: ...
@overload
def bisect_left(
a: SupportsLenAndGetItem[_T],
x: SupportsRichComparisonT,
lo: int = 0,
hi: int | None = None,
*,
key: Callable[[_T], SupportsRichComparisonT],
) -> int: ...
@overload
def bisect_right(
a: SupportsLenAndGetItem[SupportsRichComparisonT],
x: SupportsRichComparisonT,
lo: int = 0,
hi: int | None = None,
*,
key: None = None,
) -> int: ...
@overload
def bisect_right(
a: SupportsLenAndGetItem[_T],
x: SupportsRichComparisonT,
lo: int = 0,
hi: int | None = None,
*,
key: Callable[[_T], SupportsRichComparisonT],
) -> int: ...
@overload
def insort_left(
a: MutableSequence[SupportsRichComparisonT],
x: SupportsRichComparisonT,
lo: int = 0,
hi: int | None = None,
*,
key: None = None,
) -> None: ...
@overload
def insort_left(
a: MutableSequence[_T], x: _T, lo: int = 0, hi: int | None = None, *, key: Callable[[_T], SupportsRichComparisonT]
) -> None: ...
@overload
def insort_right(
a: MutableSequence[SupportsRichComparisonT],
x: SupportsRichComparisonT,
lo: int = 0,
hi: int | None = None,
*,
key: None = None,
) -> None: ...
@overload
def insort_right(
a: MutableSequence[_T], x: _T, lo: int = 0, hi: int | None = None, *, key: Callable[[_T], SupportsRichComparisonT]
) -> None: ...
else:
def bisect_left(
a: SupportsLenAndGetItem[SupportsRichComparisonT], x: SupportsRichComparisonT, lo: int = 0, hi: int | None = None
) -> int: ...
def bisect_right(
a: SupportsLenAndGetItem[SupportsRichComparisonT], x: SupportsRichComparisonT, lo: int = 0, hi: int | None = None
) -> int: ...
def insort_left(
a: MutableSequence[SupportsRichComparisonT], x: SupportsRichComparisonT, lo: int = 0, hi: int | None = None
) -> None: ...
def insort_right(
a: MutableSequence[SupportsRichComparisonT], x: SupportsRichComparisonT, lo: int = 0, hi: int | None = None
) -> None: ...

View File

@@ -1 +0,0 @@
def getpreferredencoding(do_setlocale: bool = True) -> str: ...

View File

@@ -1,133 +0,0 @@
import codecs
import sys
from _typeshed import ReadableBuffer
from collections.abc import Callable
from typing import Literal, overload
from typing_extensions import TypeAlias
# This type is not exposed; it is defined in unicodeobject.c
class _EncodingMap:
def size(self) -> int: ...
_CharMap: TypeAlias = dict[int, int] | _EncodingMap
_Handler: TypeAlias = Callable[[UnicodeError], tuple[str | bytes, int]]
_SearchFunction: TypeAlias = Callable[[str], codecs.CodecInfo | None]
def register(search_function: _SearchFunction, /) -> None: ...
if sys.version_info >= (3, 10):
def unregister(search_function: _SearchFunction, /) -> None: ...
def register_error(errors: str, handler: _Handler, /) -> None: ...
def lookup_error(name: str, /) -> _Handler: ...
# The type ignore on `encode` and `decode` is to avoid issues with overlapping overloads; for more details, see #300
# https://docs.python.org/3/library/codecs.html#binary-transforms
_BytesToBytesEncoding: TypeAlias = Literal[
"base64",
"base_64",
"base64_codec",
"bz2",
"bz2_codec",
"hex",
"hex_codec",
"quopri",
"quotedprintable",
"quoted_printable",
"quopri_codec",
"uu",
"uu_codec",
"zip",
"zlib",
"zlib_codec",
]
# https://docs.python.org/3/library/codecs.html#text-transforms
_StrToStrEncoding: TypeAlias = Literal["rot13", "rot_13"]
@overload
def encode(obj: ReadableBuffer, encoding: _BytesToBytesEncoding, errors: str = "strict") -> bytes: ...
@overload
def encode(obj: str, encoding: _StrToStrEncoding, errors: str = "strict") -> str: ... # type: ignore[overload-overlap]
@overload
def encode(obj: str, encoding: str = "utf-8", errors: str = "strict") -> bytes: ...
@overload
def decode(obj: ReadableBuffer, encoding: _BytesToBytesEncoding, errors: str = "strict") -> bytes: ... # type: ignore[overload-overlap]
@overload
def decode(obj: str, encoding: _StrToStrEncoding, errors: str = "strict") -> str: ...
# these are documented as text encodings but in practice they also accept str as input
@overload
def decode(
obj: str,
encoding: Literal["unicode_escape", "unicode-escape", "raw_unicode_escape", "raw-unicode-escape"],
errors: str = "strict",
) -> str: ...
# hex is officially documented as a bytes to bytes encoding, but it appears to also work with str
@overload
def decode(obj: str, encoding: Literal["hex", "hex_codec"], errors: str = "strict") -> bytes: ...
@overload
def decode(obj: ReadableBuffer, encoding: str = "utf-8", errors: str = "strict") -> str: ...
def lookup(encoding: str, /) -> codecs.CodecInfo: ...
def charmap_build(map: str, /) -> _CharMap: ...
def ascii_decode(data: ReadableBuffer, errors: str | None = None, /) -> tuple[str, int]: ...
def ascii_encode(str: str, errors: str | None = None, /) -> tuple[bytes, int]: ...
def charmap_decode(data: ReadableBuffer, errors: str | None = None, mapping: _CharMap | None = None, /) -> tuple[str, int]: ...
def charmap_encode(str: str, errors: str | None = None, mapping: _CharMap | None = None, /) -> tuple[bytes, int]: ...
def escape_decode(data: str | ReadableBuffer, errors: str | None = None, /) -> tuple[str, int]: ...
def escape_encode(data: bytes, errors: str | None = None, /) -> tuple[bytes, int]: ...
def latin_1_decode(data: ReadableBuffer, errors: str | None = None, /) -> tuple[str, int]: ...
def latin_1_encode(str: str, errors: str | None = None, /) -> tuple[bytes, int]: ...
if sys.version_info >= (3, 9):
def raw_unicode_escape_decode(
data: str | ReadableBuffer, errors: str | None = None, final: bool = True, /
) -> tuple[str, int]: ...
else:
def raw_unicode_escape_decode(data: str | ReadableBuffer, errors: str | None = None, /) -> tuple[str, int]: ...
def raw_unicode_escape_encode(str: str, errors: str | None = None, /) -> tuple[bytes, int]: ...
def readbuffer_encode(data: str | ReadableBuffer, errors: str | None = None, /) -> tuple[bytes, int]: ...
if sys.version_info >= (3, 9):
def unicode_escape_decode(
data: str | ReadableBuffer, errors: str | None = None, final: bool = True, /
) -> tuple[str, int]: ...
else:
def unicode_escape_decode(data: str | ReadableBuffer, errors: str | None = None, /) -> tuple[str, int]: ...
def unicode_escape_encode(str: str, errors: str | None = None, /) -> tuple[bytes, int]: ...
def utf_16_be_decode(data: ReadableBuffer, errors: str | None = None, final: bool = False, /) -> tuple[str, int]: ...
def utf_16_be_encode(str: str, errors: str | None = None, /) -> tuple[bytes, int]: ...
def utf_16_decode(data: ReadableBuffer, errors: str | None = None, final: bool = False, /) -> tuple[str, int]: ...
def utf_16_encode(str: str, errors: str | None = None, byteorder: int = 0, /) -> tuple[bytes, int]: ...
def utf_16_ex_decode(
data: ReadableBuffer, errors: str | None = None, byteorder: int = 0, final: bool = False, /
) -> tuple[str, int, int]: ...
def utf_16_le_decode(data: ReadableBuffer, errors: str | None = None, final: bool = False, /) -> tuple[str, int]: ...
def utf_16_le_encode(str: str, errors: str | None = None, /) -> tuple[bytes, int]: ...
def utf_32_be_decode(data: ReadableBuffer, errors: str | None = None, final: bool = False, /) -> tuple[str, int]: ...
def utf_32_be_encode(str: str, errors: str | None = None, /) -> tuple[bytes, int]: ...
def utf_32_decode(data: ReadableBuffer, errors: str | None = None, final: bool = False, /) -> tuple[str, int]: ...
def utf_32_encode(str: str, errors: str | None = None, byteorder: int = 0, /) -> tuple[bytes, int]: ...
def utf_32_ex_decode(
data: ReadableBuffer, errors: str | None = None, byteorder: int = 0, final: bool = False, /
) -> tuple[str, int, int]: ...
def utf_32_le_decode(data: ReadableBuffer, errors: str | None = None, final: bool = False, /) -> tuple[str, int]: ...
def utf_32_le_encode(str: str, errors: str | None = None, /) -> tuple[bytes, int]: ...
def utf_7_decode(data: ReadableBuffer, errors: str | None = None, final: bool = False, /) -> tuple[str, int]: ...
def utf_7_encode(str: str, errors: str | None = None, /) -> tuple[bytes, int]: ...
def utf_8_decode(data: ReadableBuffer, errors: str | None = None, final: bool = False, /) -> tuple[str, int]: ...
def utf_8_encode(str: str, errors: str | None = None, /) -> tuple[bytes, int]: ...
if sys.platform == "win32":
def mbcs_decode(data: ReadableBuffer, errors: str | None = None, final: bool = False, /) -> tuple[str, int]: ...
def mbcs_encode(str: str, errors: str | None = None, /) -> tuple[bytes, int]: ...
def code_page_decode(
codepage: int, data: ReadableBuffer, errors: str | None = None, final: bool = False, /
) -> tuple[str, int]: ...
def code_page_encode(code_page: int, str: str, errors: str | None = None, /) -> tuple[bytes, int]: ...
def oem_decode(data: ReadableBuffer, errors: str | None = None, final: bool = False, /) -> tuple[str, int]: ...
def oem_encode(str: str, errors: str | None = None, /) -> tuple[bytes, int]: ...
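
The comments in this stub note that some "binary transform" codecs also accept `str`; a small hedged check of that behaviour on CPython (standard `codecs` usage, not part of the stub):

```python
# Hypothetical illustration of the comments above: "hex" is documented as a
# bytes-to-bytes transform, but decoding also accepts str input at runtime.
import codecs

assert codecs.encode(b"hello", "hex") == b"68656c6c6f"
assert codecs.decode(b"68656c6c6f", "hex") == b"hello"
assert codecs.decode("68656c6c6f", "hex") == b"hello"  # str input also works
```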

View File

@@ -1,101 +0,0 @@
import sys
from abc import abstractmethod
from types import MappingProxyType
from typing import ( # noqa: Y022,Y038
AbstractSet as Set,
AsyncGenerator as AsyncGenerator,
AsyncIterable as AsyncIterable,
AsyncIterator as AsyncIterator,
Awaitable as Awaitable,
Callable as Callable,
Collection as Collection,
Container as Container,
Coroutine as Coroutine,
Generator as Generator,
Generic,
Hashable as Hashable,
ItemsView as ItemsView,
Iterable as Iterable,
Iterator as Iterator,
KeysView as KeysView,
Mapping as Mapping,
MappingView as MappingView,
MutableMapping as MutableMapping,
MutableSequence as MutableSequence,
MutableSet as MutableSet,
Protocol,
Reversible as Reversible,
Sequence as Sequence,
Sized as Sized,
TypeVar,
ValuesView as ValuesView,
final,
runtime_checkable,
)
__all__ = [
"Awaitable",
"Coroutine",
"AsyncIterable",
"AsyncIterator",
"AsyncGenerator",
"Hashable",
"Iterable",
"Iterator",
"Generator",
"Reversible",
"Sized",
"Container",
"Callable",
"Collection",
"Set",
"MutableSet",
"Mapping",
"MutableMapping",
"MappingView",
"KeysView",
"ItemsView",
"ValuesView",
"Sequence",
"MutableSequence",
]
if sys.version_info < (3, 14):
from typing import ByteString as ByteString # noqa: Y057
__all__ += ["ByteString"]
if sys.version_info >= (3, 12):
__all__ += ["Buffer"]
_KT_co = TypeVar("_KT_co", covariant=True) # Key type covariant containers.
_VT_co = TypeVar("_VT_co", covariant=True) # Value type covariant containers.
@final
class dict_keys(KeysView[_KT_co], Generic[_KT_co, _VT_co]): # undocumented
def __eq__(self, value: object, /) -> bool: ...
if sys.version_info >= (3, 13):
def isdisjoint(self, other: Iterable[_KT_co], /) -> bool: ...
if sys.version_info >= (3, 10):
@property
def mapping(self) -> MappingProxyType[_KT_co, _VT_co]: ...
@final
class dict_values(ValuesView[_VT_co], Generic[_KT_co, _VT_co]): # undocumented
if sys.version_info >= (3, 10):
@property
def mapping(self) -> MappingProxyType[_KT_co, _VT_co]: ...
@final
class dict_items(ItemsView[_KT_co, _VT_co]): # undocumented
def __eq__(self, value: object, /) -> bool: ...
if sys.version_info >= (3, 13):
def isdisjoint(self, other: Iterable[tuple[_KT_co, _VT_co]], /) -> bool: ...
if sys.version_info >= (3, 10):
@property
def mapping(self) -> MappingProxyType[_KT_co, _VT_co]: ...
if sys.version_info >= (3, 12):
@runtime_checkable
class Buffer(Protocol):
@abstractmethod
def __buffer__(self, flags: int, /) -> memoryview: ...

View File

@@ -1,8 +0,0 @@
IMPORT_MAPPING: dict[str, str]
NAME_MAPPING: dict[tuple[str, str], tuple[str, str]]
PYTHON2_EXCEPTIONS: tuple[str, ...]
MULTIPROCESSING_EXCEPTIONS: tuple[str, ...]
REVERSE_IMPORT_MAPPING: dict[str, str]
REVERSE_NAME_MAPPING: dict[tuple[str, str], tuple[str, str]]
PYTHON3_OSERROR_EXCEPTIONS: tuple[str, ...]
PYTHON3_IMPORTERROR_EXCEPTIONS: tuple[str, ...]

View File

@@ -1,25 +0,0 @@
from _typeshed import WriteableBuffer
from collections.abc import Callable
from io import DEFAULT_BUFFER_SIZE, BufferedIOBase, RawIOBase
from typing import Any, Protocol
BUFFER_SIZE = DEFAULT_BUFFER_SIZE
class _Reader(Protocol):
def read(self, n: int, /) -> bytes: ...
def seekable(self) -> bool: ...
def seek(self, n: int, /) -> Any: ...
class BaseStream(BufferedIOBase): ...
class DecompressReader(RawIOBase):
def __init__(
self,
fp: _Reader,
decomp_factory: Callable[..., object],
trailing_error: type[Exception] | tuple[type[Exception], ...] = (),
**decomp_args: Any,
) -> None: ...
def readinto(self, b: WriteableBuffer) -> int: ...
def read(self, size: int = -1) -> bytes: ...
def seek(self, offset: int, whence: int = 0) -> int: ...

View File

@@ -1,90 +0,0 @@
import sys
from _typeshed import SupportsWrite
from collections.abc import Iterable, Iterator
from typing import Any, Final
from typing_extensions import TypeAlias
__version__: Final[str]
QUOTE_ALL: Final = 1
QUOTE_MINIMAL: Final = 0
QUOTE_NONE: Final = 3
QUOTE_NONNUMERIC: Final = 2
if sys.version_info >= (3, 12):
QUOTE_STRINGS: Final = 4
QUOTE_NOTNULL: Final = 5
# Ideally this would be `QUOTE_ALL | QUOTE_MINIMAL | QUOTE_NONE | QUOTE_NONNUMERIC`
# However, using literals in situations like these can cause false-positives (see #7258)
_QuotingType: TypeAlias = int
class Error(Exception): ...
class Dialect:
delimiter: str
quotechar: str | None
escapechar: str | None
doublequote: bool
skipinitialspace: bool
lineterminator: str
quoting: _QuotingType
strict: bool
def __init__(self) -> None: ...
_DialectLike: TypeAlias = str | Dialect | type[Dialect]
class _reader(Iterator[list[str]]):
@property
def dialect(self) -> Dialect: ...
line_num: int
def __next__(self) -> list[str]: ...
class _writer:
@property
def dialect(self) -> Dialect: ...
def writerow(self, row: Iterable[Any]) -> Any: ...
def writerows(self, rows: Iterable[Iterable[Any]]) -> None: ...
def writer(
csvfile: SupportsWrite[str],
dialect: _DialectLike = "excel",
*,
delimiter: str = ",",
quotechar: str | None = '"',
escapechar: str | None = None,
doublequote: bool = True,
skipinitialspace: bool = False,
lineterminator: str = "\r\n",
quoting: _QuotingType = 0,
strict: bool = False,
) -> _writer: ...
def reader(
csvfile: Iterable[str],
dialect: _DialectLike = "excel",
*,
delimiter: str = ",",
quotechar: str | None = '"',
escapechar: str | None = None,
doublequote: bool = True,
skipinitialspace: bool = False,
lineterminator: str = "\r\n",
quoting: _QuotingType = 0,
strict: bool = False,
) -> _reader: ...
def register_dialect(
name: str,
dialect: type[Dialect] = ...,
*,
delimiter: str = ",",
quotechar: str | None = '"',
escapechar: str | None = None,
doublequote: bool = True,
skipinitialspace: bool = False,
lineterminator: str = "\r\n",
quoting: _QuotingType = 0,
strict: bool = False,
) -> None: ...
def unregister_dialect(name: str) -> None: ...
def get_dialect(name: str) -> Dialect: ...
def list_dialects() -> list[str]: ...
def field_size_limit(new_limit: int = ...) -> int: ...

View File

@@ -1,210 +0,0 @@
import sys
from _typeshed import ReadableBuffer, WriteableBuffer
from abc import abstractmethod
from collections.abc import Callable, Iterable, Iterator, Mapping, Sequence
from ctypes import CDLL, ArgumentError as ArgumentError
from typing import Any, ClassVar, Generic, TypeVar, overload
from typing_extensions import Self, TypeAlias
if sys.version_info >= (3, 9):
from types import GenericAlias
_T = TypeVar("_T")
_CT = TypeVar("_CT", bound=_CData)
FUNCFLAG_CDECL: int
FUNCFLAG_PYTHONAPI: int
FUNCFLAG_USE_ERRNO: int
FUNCFLAG_USE_LASTERROR: int
RTLD_GLOBAL: int
RTLD_LOCAL: int
if sys.version_info >= (3, 11):
CTYPES_MAX_ARGCOUNT: int
if sys.version_info >= (3, 12):
SIZEOF_TIME_T: int
if sys.platform == "win32":
# Description, Source, HelpFile, HelpContext, scode
_COMError_Details: TypeAlias = tuple[str | None, str | None, str | None, int | None, int | None]
class COMError(Exception):
hresult: int
text: str | None
details: _COMError_Details
def __init__(self, hresult: int, text: str | None, details: _COMError_Details) -> None: ...
def CopyComPointer(src: _PointerLike, dst: _PointerLike | _CArgObject) -> int: ...
FUNCFLAG_HRESULT: int
FUNCFLAG_STDCALL: int
def FormatError(code: int = ...) -> str: ...
def get_last_error() -> int: ...
def set_last_error(value: int) -> int: ...
def LoadLibrary(name: str, load_flags: int = 0, /) -> int: ...
def FreeLibrary(handle: int, /) -> None: ...
class _CDataMeta(type):
# By default mypy complains about the following two methods, because strictly speaking cls
# might not be a Type[_CT]. However this can never actually happen, because the only class that
# uses _CDataMeta as its metaclass is _CData. So it's safe to ignore the errors here.
def __mul__(cls: type[_CT], other: int) -> type[Array[_CT]]: ... # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues]
def __rmul__(cls: type[_CT], other: int) -> type[Array[_CT]]: ... # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues]
class _CData(metaclass=_CDataMeta):
_b_base_: int
_b_needsfree_: bool
_objects: Mapping[Any, int] | None
# At runtime the following classmethods are available only on classes, not
# on instances. This can't be reflected properly in the type system:
#
# Structure.from_buffer(...) # valid at runtime
# Structure(...).from_buffer(...) # invalid at runtime
#
@classmethod
def from_buffer(cls, source: WriteableBuffer, offset: int = ...) -> Self: ...
@classmethod
def from_buffer_copy(cls, source: ReadableBuffer, offset: int = ...) -> Self: ...
@classmethod
def from_address(cls, address: int) -> Self: ...
@classmethod
def from_param(cls, value: Any, /) -> Self | _CArgObject: ...
@classmethod
def in_dll(cls, library: CDLL, name: str) -> Self: ...
def __buffer__(self, flags: int, /) -> memoryview: ...
def __release_buffer__(self, buffer: memoryview, /) -> None: ...
class _SimpleCData(_CData, Generic[_T]):
value: _T
# The TypeVar can be unsolved here,
# but we can't use overloads without creating many, many mypy false-positive errors
def __init__(self, value: _T = ...) -> None: ... # pyright: ignore[reportInvalidTypeVarUse]
class _CanCastTo(_CData): ...
class _PointerLike(_CanCastTo): ...
class _Pointer(_PointerLike, _CData, Generic[_CT]):
_type_: type[_CT]
contents: _CT
@overload
def __init__(self) -> None: ...
@overload
def __init__(self, arg: _CT) -> None: ...
@overload
def __getitem__(self, key: int, /) -> Any: ...
@overload
def __getitem__(self, key: slice, /) -> list[Any]: ...
def __setitem__(self, key: int, value: Any, /) -> None: ...
def POINTER(type: type[_CT], /) -> type[_Pointer[_CT]]: ...
def pointer(obj: _CT, /) -> _Pointer[_CT]: ...
class _CArgObject: ...
def byref(obj: _CData, offset: int = ...) -> _CArgObject: ...
_ECT: TypeAlias = Callable[[_CData | None, CFuncPtr, tuple[_CData, ...]], _CData]
_PF: TypeAlias = tuple[int] | tuple[int, str | None] | tuple[int, str | None, Any]
class CFuncPtr(_PointerLike, _CData):
restype: type[_CData] | Callable[[int], Any] | None
argtypes: Sequence[type[_CData]]
errcheck: _ECT
# Abstract attribute that must be defined on subclasses
_flags_: ClassVar[int]
@overload
def __init__(self) -> None: ...
@overload
def __init__(self, address: int, /) -> None: ...
@overload
def __init__(self, callable: Callable[..., Any], /) -> None: ...
@overload
def __init__(self, func_spec: tuple[str | int, CDLL], paramflags: tuple[_PF, ...] | None = ..., /) -> None: ...
if sys.platform == "win32":
@overload
def __init__(
self, vtbl_index: int, name: str, paramflags: tuple[_PF, ...] | None = ..., iid: _CData | None = ..., /
) -> None: ...
def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
_GetT = TypeVar("_GetT")
_SetT = TypeVar("_SetT")
class _CField(Generic[_CT, _GetT, _SetT]):
offset: int
size: int
@overload
def __get__(self, instance: None, owner: type[Any] | None, /) -> Self: ...
@overload
def __get__(self, instance: Any, owner: type[Any] | None, /) -> _GetT: ...
def __set__(self, instance: Any, value: _SetT, /) -> None: ...
class _StructUnionMeta(_CDataMeta):
_fields_: Sequence[tuple[str, type[_CData]] | tuple[str, type[_CData], int]]
_pack_: int
_anonymous_: Sequence[str]
def __getattr__(self, name: str) -> _CField[Any, Any, Any]: ...
class _StructUnionBase(_CData, metaclass=_StructUnionMeta):
def __init__(self, *args: Any, **kw: Any) -> None: ...
def __getattr__(self, name: str) -> Any: ...
def __setattr__(self, name: str, value: Any) -> None: ...
class Union(_StructUnionBase): ...
class Structure(_StructUnionBase): ...
class Array(_CData, Generic[_CT]):
@property
@abstractmethod
def _length_(self) -> int: ...
@_length_.setter
def _length_(self, value: int) -> None: ...
@property
@abstractmethod
def _type_(self) -> type[_CT]: ...
@_type_.setter
def _type_(self, value: type[_CT]) -> None: ...
# Note: only available if _CT == c_char
@property
def raw(self) -> bytes: ...
@raw.setter
def raw(self, value: ReadableBuffer) -> None: ...
value: Any # Note: bytes if _CT == c_char, str if _CT == c_wchar, unavailable otherwise
# TODO These methods cannot be annotated correctly at the moment.
# All of these "Any"s stand for the array's element type, but it's not possible to use _CT
# here, because of a special feature of ctypes.
# By default, when accessing an element of an Array[_CT], the returned object has type _CT.
# However, when _CT is a "simple type" like c_int, ctypes automatically "unboxes" the object
# and converts it to the corresponding Python primitive. For example, when accessing an element
# of an Array[c_int], a Python int object is returned, not a c_int.
# This behavior does *not* apply to subclasses of "simple types".
# If MyInt is a subclass of c_int, then accessing an element of an Array[MyInt] returns
# a MyInt, not an int.
# This special behavior is not easy to model in a stub, so for now all places where
# the array element type would belong are annotated with Any instead.
def __init__(self, *args: Any) -> None: ...
@overload
def __getitem__(self, key: int, /) -> Any: ...
@overload
def __getitem__(self, key: slice, /) -> list[Any]: ...
@overload
def __setitem__(self, key: int, value: Any, /) -> None: ...
@overload
def __setitem__(self, key: slice, value: Iterable[Any], /) -> None: ...
def __iter__(self) -> Iterator[Any]: ...
# Can't inherit from Sized because the metaclass conflict between
# Sized and _CData prevents using _CDataMeta.
def __len__(self) -> int: ...
if sys.version_info >= (3, 9):
def __class_getitem__(cls, item: Any, /) -> GenericAlias: ...
def addressof(obj: _CData, /) -> int: ...
def alignment(obj_or_type: _CData | type[_CData], /) -> int: ...
def get_errno() -> int: ...
def resize(obj: _CData, size: int, /) -> None: ...
def set_errno(value: int, /) -> int: ...
def sizeof(obj_or_type: _CData | type[_CData], /) -> int: ...
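
The long comment on `Array` above describes ctypes' automatic "unboxing" of simple types; a short hedged illustration of that runtime behaviour (standard ctypes semantics, not part of the stub):

```python
# Hypothetical illustration: elements of an Array[c_int] come back as plain
# Python ints, while elements of an Array of a c_int *subclass* keep the
# subclass type.
from ctypes import c_int

class MyInt(c_int):
    pass

plain = (c_int * 3)(1, 2, 3)
subclassed = (MyInt * 3)(1, 2, 3)

assert isinstance(plain[0], int) and not isinstance(plain[0], c_int)
assert isinstance(subclassed[0], MyInt)
```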

View File

@@ -1,558 +0,0 @@
import sys
from _typeshed import ReadOnlyBuffer, SupportsRead
from typing import IO, Any, NamedTuple, final, overload
from typing_extensions import TypeAlias
# NOTE: This module is ordinarily only available on Unix, but the windows-curses
# package makes it available on Windows as well with the same contents.
# Handled by PyCurses_ConvertToChtype in _cursesmodule.c.
_ChType: TypeAlias = str | bytes | int
# ACS codes are only initialized after initscr is called
ACS_BBSS: int
ACS_BLOCK: int
ACS_BOARD: int
ACS_BSBS: int
ACS_BSSB: int
ACS_BSSS: int
ACS_BTEE: int
ACS_BULLET: int
ACS_CKBOARD: int
ACS_DARROW: int
ACS_DEGREE: int
ACS_DIAMOND: int
ACS_GEQUAL: int
ACS_HLINE: int
ACS_LANTERN: int
ACS_LARROW: int
ACS_LEQUAL: int
ACS_LLCORNER: int
ACS_LRCORNER: int
ACS_LTEE: int
ACS_NEQUAL: int
ACS_PI: int
ACS_PLMINUS: int
ACS_PLUS: int
ACS_RARROW: int
ACS_RTEE: int
ACS_S1: int
ACS_S3: int
ACS_S7: int
ACS_S9: int
ACS_SBBS: int
ACS_SBSB: int
ACS_SBSS: int
ACS_SSBB: int
ACS_SSBS: int
ACS_SSSB: int
ACS_SSSS: int
ACS_STERLING: int
ACS_TTEE: int
ACS_UARROW: int
ACS_ULCORNER: int
ACS_URCORNER: int
ACS_VLINE: int
ALL_MOUSE_EVENTS: int
A_ALTCHARSET: int
A_ATTRIBUTES: int
A_BLINK: int
A_BOLD: int
A_CHARTEXT: int
A_COLOR: int
A_DIM: int
A_HORIZONTAL: int
A_INVIS: int
A_ITALIC: int
A_LEFT: int
A_LOW: int
A_NORMAL: int
A_PROTECT: int
A_REVERSE: int
A_RIGHT: int
A_STANDOUT: int
A_TOP: int
A_UNDERLINE: int
A_VERTICAL: int
BUTTON1_CLICKED: int
BUTTON1_DOUBLE_CLICKED: int
BUTTON1_PRESSED: int
BUTTON1_RELEASED: int
BUTTON1_TRIPLE_CLICKED: int
BUTTON2_CLICKED: int
BUTTON2_DOUBLE_CLICKED: int
BUTTON2_PRESSED: int
BUTTON2_RELEASED: int
BUTTON2_TRIPLE_CLICKED: int
BUTTON3_CLICKED: int
BUTTON3_DOUBLE_CLICKED: int
BUTTON3_PRESSED: int
BUTTON3_RELEASED: int
BUTTON3_TRIPLE_CLICKED: int
BUTTON4_CLICKED: int
BUTTON4_DOUBLE_CLICKED: int
BUTTON4_PRESSED: int
BUTTON4_RELEASED: int
BUTTON4_TRIPLE_CLICKED: int
# Darwin ncurses doesn't provide BUTTON5_* constants
if sys.version_info >= (3, 10) and sys.platform != "darwin":
BUTTON5_PRESSED: int
BUTTON5_RELEASED: int
BUTTON5_CLICKED: int
BUTTON5_DOUBLE_CLICKED: int
BUTTON5_TRIPLE_CLICKED: int
BUTTON_ALT: int
BUTTON_CTRL: int
BUTTON_SHIFT: int
COLOR_BLACK: int
COLOR_BLUE: int
COLOR_CYAN: int
COLOR_GREEN: int
COLOR_MAGENTA: int
COLOR_RED: int
COLOR_WHITE: int
COLOR_YELLOW: int
ERR: int
KEY_A1: int
KEY_A3: int
KEY_B2: int
KEY_BACKSPACE: int
KEY_BEG: int
KEY_BREAK: int
KEY_BTAB: int
KEY_C1: int
KEY_C3: int
KEY_CANCEL: int
KEY_CATAB: int
KEY_CLEAR: int
KEY_CLOSE: int
KEY_COMMAND: int
KEY_COPY: int
KEY_CREATE: int
KEY_CTAB: int
KEY_DC: int
KEY_DL: int
KEY_DOWN: int
KEY_EIC: int
KEY_END: int
KEY_ENTER: int
KEY_EOL: int
KEY_EOS: int
KEY_EXIT: int
KEY_F0: int
KEY_F1: int
KEY_F10: int
KEY_F11: int
KEY_F12: int
KEY_F13: int
KEY_F14: int
KEY_F15: int
KEY_F16: int
KEY_F17: int
KEY_F18: int
KEY_F19: int
KEY_F2: int
KEY_F20: int
KEY_F21: int
KEY_F22: int
KEY_F23: int
KEY_F24: int
KEY_F25: int
KEY_F26: int
KEY_F27: int
KEY_F28: int
KEY_F29: int
KEY_F3: int
KEY_F30: int
KEY_F31: int
KEY_F32: int
KEY_F33: int
KEY_F34: int
KEY_F35: int
KEY_F36: int
KEY_F37: int
KEY_F38: int
KEY_F39: int
KEY_F4: int
KEY_F40: int
KEY_F41: int
KEY_F42: int
KEY_F43: int
KEY_F44: int
KEY_F45: int
KEY_F46: int
KEY_F47: int
KEY_F48: int
KEY_F49: int
KEY_F5: int
KEY_F50: int
KEY_F51: int
KEY_F52: int
KEY_F53: int
KEY_F54: int
KEY_F55: int
KEY_F56: int
KEY_F57: int
KEY_F58: int
KEY_F59: int
KEY_F6: int
KEY_F60: int
KEY_F61: int
KEY_F62: int
KEY_F63: int
KEY_F7: int
KEY_F8: int
KEY_F9: int
KEY_FIND: int
KEY_HELP: int
KEY_HOME: int
KEY_IC: int
KEY_IL: int
KEY_LEFT: int
KEY_LL: int
KEY_MARK: int
KEY_MAX: int
KEY_MESSAGE: int
KEY_MIN: int
KEY_MOUSE: int
KEY_MOVE: int
KEY_NEXT: int
KEY_NPAGE: int
KEY_OPEN: int
KEY_OPTIONS: int
KEY_PPAGE: int
KEY_PREVIOUS: int
KEY_PRINT: int
KEY_REDO: int
KEY_REFERENCE: int
KEY_REFRESH: int
KEY_REPLACE: int
KEY_RESET: int
KEY_RESIZE: int
KEY_RESTART: int
KEY_RESUME: int
KEY_RIGHT: int
KEY_SAVE: int
KEY_SBEG: int
KEY_SCANCEL: int
KEY_SCOMMAND: int
KEY_SCOPY: int
KEY_SCREATE: int
KEY_SDC: int
KEY_SDL: int
KEY_SELECT: int
KEY_SEND: int
KEY_SEOL: int
KEY_SEXIT: int
KEY_SF: int
KEY_SFIND: int
KEY_SHELP: int
KEY_SHOME: int
KEY_SIC: int
KEY_SLEFT: int
KEY_SMESSAGE: int
KEY_SMOVE: int
KEY_SNEXT: int
KEY_SOPTIONS: int
KEY_SPREVIOUS: int
KEY_SPRINT: int
KEY_SR: int
KEY_SREDO: int
KEY_SREPLACE: int
KEY_SRESET: int
KEY_SRIGHT: int
KEY_SRSUME: int
KEY_SSAVE: int
KEY_SSUSPEND: int
KEY_STAB: int
KEY_SUNDO: int
KEY_SUSPEND: int
KEY_UNDO: int
KEY_UP: int
OK: int
REPORT_MOUSE_POSITION: int
_C_API: Any
version: bytes
def baudrate() -> int: ...
def beep() -> None: ...
def can_change_color() -> bool: ...
def cbreak(flag: bool = True, /) -> None: ...
def color_content(color_number: int, /) -> tuple[int, int, int]: ...
def color_pair(pair_number: int, /) -> int: ...
def curs_set(visibility: int, /) -> int: ...
def def_prog_mode() -> None: ...
def def_shell_mode() -> None: ...
def delay_output(ms: int, /) -> None: ...
def doupdate() -> None: ...
def echo(flag: bool = True, /) -> None: ...
def endwin() -> None: ...
def erasechar() -> bytes: ...
def filter() -> None: ...
def flash() -> None: ...
def flushinp() -> None: ...
if sys.version_info >= (3, 9):
def get_escdelay() -> int: ...
def get_tabsize() -> int: ...
def getmouse() -> tuple[int, int, int, int, int]: ...
def getsyx() -> tuple[int, int]: ...
def getwin(file: SupportsRead[bytes], /) -> _CursesWindow: ...
def halfdelay(tenths: int, /) -> None: ...
def has_colors() -> bool: ...
if sys.version_info >= (3, 10):
def has_extended_color_support() -> bool: ...
def has_ic() -> bool: ...
def has_il() -> bool: ...
def has_key(key: int, /) -> bool: ...
def init_color(color_number: int, r: int, g: int, b: int, /) -> None: ...
def init_pair(pair_number: int, fg: int, bg: int, /) -> None: ...
def initscr() -> _CursesWindow: ...
def intrflush(flag: bool, /) -> None: ...
def is_term_resized(nlines: int, ncols: int, /) -> bool: ...
def isendwin() -> bool: ...
def keyname(key: int, /) -> bytes: ...
def killchar() -> bytes: ...
def longname() -> bytes: ...
def meta(yes: bool, /) -> None: ...
def mouseinterval(interval: int, /) -> None: ...
def mousemask(newmask: int, /) -> tuple[int, int]: ...
def napms(ms: int, /) -> int: ...
def newpad(nlines: int, ncols: int, /) -> _CursesWindow: ...
def newwin(nlines: int, ncols: int, begin_y: int = ..., begin_x: int = ..., /) -> _CursesWindow: ...
def nl(flag: bool = True, /) -> None: ...
def nocbreak() -> None: ...
def noecho() -> None: ...
def nonl() -> None: ...
def noqiflush() -> None: ...
def noraw() -> None: ...
def pair_content(pair_number: int, /) -> tuple[int, int]: ...
def pair_number(attr: int, /) -> int: ...
def putp(string: ReadOnlyBuffer, /) -> None: ...
def qiflush(flag: bool = True, /) -> None: ...
def raw(flag: bool = True, /) -> None: ...
def reset_prog_mode() -> None: ...
def reset_shell_mode() -> None: ...
def resetty() -> None: ...
def resize_term(nlines: int, ncols: int, /) -> None: ...
def resizeterm(nlines: int, ncols: int, /) -> None: ...
def savetty() -> None: ...
if sys.version_info >= (3, 9):
def set_escdelay(ms: int, /) -> None: ...
def set_tabsize(size: int, /) -> None: ...
def setsyx(y: int, x: int, /) -> None: ...
def setupterm(term: str | None = None, fd: int = -1) -> None: ...
def start_color() -> None: ...
def termattrs() -> int: ...
def termname() -> bytes: ...
def tigetflag(capname: str, /) -> int: ...
def tigetnum(capname: str, /) -> int: ...
def tigetstr(capname: str, /) -> bytes | None: ...
def tparm(
str: ReadOnlyBuffer,
i1: int = 0,
i2: int = 0,
i3: int = 0,
i4: int = 0,
i5: int = 0,
i6: int = 0,
i7: int = 0,
i8: int = 0,
i9: int = 0,
/,
) -> bytes: ...
def typeahead(fd: int, /) -> None: ...
def unctrl(ch: _ChType, /) -> bytes: ...
def unget_wch(ch: int | str, /) -> None: ...
def ungetch(ch: _ChType, /) -> None: ...
def ungetmouse(id: int, x: int, y: int, z: int, bstate: int, /) -> None: ...
def update_lines_cols() -> None: ...
def use_default_colors() -> None: ...
def use_env(flag: bool, /) -> None: ...
class error(Exception): ...
@final
class _CursesWindow:
encoding: str
@overload
def addch(self, ch: _ChType, attr: int = ...) -> None: ...
@overload
def addch(self, y: int, x: int, ch: _ChType, attr: int = ...) -> None: ...
@overload
def addnstr(self, str: str, n: int, attr: int = ...) -> None: ...
@overload
def addnstr(self, y: int, x: int, str: str, n: int, attr: int = ...) -> None: ...
@overload
def addstr(self, str: str, attr: int = ...) -> None: ...
@overload
def addstr(self, y: int, x: int, str: str, attr: int = ...) -> None: ...
def attroff(self, attr: int, /) -> None: ...
def attron(self, attr: int, /) -> None: ...
def attrset(self, attr: int, /) -> None: ...
def bkgd(self, ch: _ChType, attr: int = ..., /) -> None: ...
def bkgdset(self, ch: _ChType, attr: int = ..., /) -> None: ...
def border(
self,
ls: _ChType = ...,
rs: _ChType = ...,
ts: _ChType = ...,
bs: _ChType = ...,
tl: _ChType = ...,
tr: _ChType = ...,
bl: _ChType = ...,
br: _ChType = ...,
) -> None: ...
@overload
def box(self) -> None: ...
@overload
def box(self, vertch: _ChType = ..., horch: _ChType = ...) -> None: ...
@overload
def chgat(self, attr: int) -> None: ...
@overload
def chgat(self, num: int, attr: int) -> None: ...
@overload
def chgat(self, y: int, x: int, attr: int) -> None: ...
@overload
def chgat(self, y: int, x: int, num: int, attr: int) -> None: ...
def clear(self) -> None: ...
def clearok(self, yes: int) -> None: ...
def clrtobot(self) -> None: ...
def clrtoeol(self) -> None: ...
def cursyncup(self) -> None: ...
@overload
def delch(self) -> None: ...
@overload
def delch(self, y: int, x: int) -> None: ...
def deleteln(self) -> None: ...
@overload
def derwin(self, begin_y: int, begin_x: int) -> _CursesWindow: ...
@overload
def derwin(self, nlines: int, ncols: int, begin_y: int, begin_x: int) -> _CursesWindow: ...
def echochar(self, ch: _ChType, attr: int = ..., /) -> None: ...
def enclose(self, y: int, x: int, /) -> bool: ...
def erase(self) -> None: ...
def getbegyx(self) -> tuple[int, int]: ...
def getbkgd(self) -> tuple[int, int]: ...
@overload
def getch(self) -> int: ...
@overload
def getch(self, y: int, x: int) -> int: ...
@overload
def get_wch(self) -> int | str: ...
@overload
def get_wch(self, y: int, x: int) -> int | str: ...
@overload
def getkey(self) -> str: ...
@overload
def getkey(self, y: int, x: int) -> str: ...
def getmaxyx(self) -> tuple[int, int]: ...
def getparyx(self) -> tuple[int, int]: ...
@overload
def getstr(self) -> bytes: ...
@overload
def getstr(self, n: int) -> bytes: ...
@overload
def getstr(self, y: int, x: int) -> bytes: ...
@overload
def getstr(self, y: int, x: int, n: int) -> bytes: ...
def getyx(self) -> tuple[int, int]: ...
@overload
def hline(self, ch: _ChType, n: int) -> None: ...
@overload
def hline(self, y: int, x: int, ch: _ChType, n: int) -> None: ...
def idcok(self, flag: bool) -> None: ...
def idlok(self, yes: bool) -> None: ...
def immedok(self, flag: bool) -> None: ...
@overload
def inch(self) -> int: ...
@overload
def inch(self, y: int, x: int) -> int: ...
@overload
def insch(self, ch: _ChType, attr: int = ...) -> None: ...
@overload
def insch(self, y: int, x: int, ch: _ChType, attr: int = ...) -> None: ...
def insdelln(self, nlines: int) -> None: ...
def insertln(self) -> None: ...
@overload
def insnstr(self, str: str, n: int, attr: int = ...) -> None: ...
@overload
def insnstr(self, y: int, x: int, str: str, n: int, attr: int = ...) -> None: ...
@overload
def insstr(self, str: str, attr: int = ...) -> None: ...
@overload
def insstr(self, y: int, x: int, str: str, attr: int = ...) -> None: ...
@overload
def instr(self, n: int = ...) -> bytes: ...
@overload
def instr(self, y: int, x: int, n: int = ...) -> bytes: ...
def is_linetouched(self, line: int, /) -> bool: ...
def is_wintouched(self) -> bool: ...
def keypad(self, yes: bool) -> None: ...
def leaveok(self, yes: bool) -> None: ...
def move(self, new_y: int, new_x: int) -> None: ...
def mvderwin(self, y: int, x: int) -> None: ...
def mvwin(self, new_y: int, new_x: int) -> None: ...
def nodelay(self, yes: bool) -> None: ...
def notimeout(self, yes: bool) -> None: ...
@overload
def noutrefresh(self) -> None: ...
@overload
def noutrefresh(self, pminrow: int, pmincol: int, sminrow: int, smincol: int, smaxrow: int, smaxcol: int) -> None: ...
@overload
def overlay(self, destwin: _CursesWindow) -> None: ...
@overload
def overlay(
self, destwin: _CursesWindow, sminrow: int, smincol: int, dminrow: int, dmincol: int, dmaxrow: int, dmaxcol: int
) -> None: ...
@overload
def overwrite(self, destwin: _CursesWindow) -> None: ...
@overload
def overwrite(
self, destwin: _CursesWindow, sminrow: int, smincol: int, dminrow: int, dmincol: int, dmaxrow: int, dmaxcol: int
) -> None: ...
def putwin(self, file: IO[Any], /) -> None: ...
def redrawln(self, beg: int, num: int, /) -> None: ...
def redrawwin(self) -> None: ...
@overload
def refresh(self) -> None: ...
@overload
def refresh(self, pminrow: int, pmincol: int, sminrow: int, smincol: int, smaxrow: int, smaxcol: int) -> None: ...
def resize(self, nlines: int, ncols: int) -> None: ...
def scroll(self, lines: int = ...) -> None: ...
def scrollok(self, flag: bool) -> None: ...
def setscrreg(self, top: int, bottom: int, /) -> None: ...
def standend(self) -> None: ...
def standout(self) -> None: ...
@overload
def subpad(self, begin_y: int, begin_x: int) -> _CursesWindow: ...
@overload
def subpad(self, nlines: int, ncols: int, begin_y: int, begin_x: int) -> _CursesWindow: ...
@overload
def subwin(self, begin_y: int, begin_x: int) -> _CursesWindow: ...
@overload
def subwin(self, nlines: int, ncols: int, begin_y: int, begin_x: int) -> _CursesWindow: ...
def syncdown(self) -> None: ...
def syncok(self, flag: bool) -> None: ...
def syncup(self) -> None: ...
def timeout(self, delay: int) -> None: ...
def touchline(self, start: int, count: int, changed: bool = ...) -> None: ...
def touchwin(self) -> None: ...
def untouchwin(self) -> None: ...
@overload
def vline(self, ch: _ChType, n: int) -> None: ...
@overload
def vline(self, y: int, x: int, ch: _ChType, n: int) -> None: ...
class _ncurses_version(NamedTuple):
major: int
minor: int
patch: int
ncurses_version: _ncurses_version
window = _CursesWindow # undocumented
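For orientation, a minimal sketch of how the API declared above is typically driven. It is not part of the removed file; it only calls names that appear in the stub and assumes a POSIX terminal:

```python
# Minimal curses session: set up the terminal, draw a line, wait for a key,
# then restore the terminal state.  Every name used here is declared in the stub.
import curses

def main() -> None:
    stdscr = curses.initscr()
    try:
        curses.cbreak()
        curses.noecho()
        stdscr.keypad(True)
        stdscr.addstr(0, 0, "press any key to quit", curses.A_BOLD)
        stdscr.refresh()
        stdscr.getch()
    finally:
        stdscr.keypad(False)
        curses.echo()
        curses.nocbreak()
        curses.endwin()

if __name__ == "__main__":
    main()
```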

View File

@@ -1,281 +0,0 @@
import numbers
import sys
from collections.abc import Container, Sequence
from types import TracebackType
from typing import Any, ClassVar, Final, Literal, NamedTuple, overload
from typing_extensions import Self, TypeAlias
_Decimal: TypeAlias = Decimal | int
_DecimalNew: TypeAlias = Decimal | float | str | tuple[int, Sequence[int], int]
_ComparableNum: TypeAlias = Decimal | float | numbers.Rational
__version__: Final[str]
__libmpdec_version__: Final[str]
class DecimalTuple(NamedTuple):
sign: int
digits: tuple[int, ...]
exponent: int | Literal["n", "N", "F"]
ROUND_DOWN: Final[str]
ROUND_HALF_UP: Final[str]
ROUND_HALF_EVEN: Final[str]
ROUND_CEILING: Final[str]
ROUND_FLOOR: Final[str]
ROUND_UP: Final[str]
ROUND_HALF_DOWN: Final[str]
ROUND_05UP: Final[str]
HAVE_CONTEXTVAR: Final[bool]
HAVE_THREADS: Final[bool]
MAX_EMAX: Final[int]
MAX_PREC: Final[int]
MIN_EMIN: Final[int]
MIN_ETINY: Final[int]
class DecimalException(ArithmeticError): ...
class Clamped(DecimalException): ...
class InvalidOperation(DecimalException): ...
class ConversionSyntax(InvalidOperation): ...
class DivisionByZero(DecimalException, ZeroDivisionError): ...
class DivisionImpossible(InvalidOperation): ...
class DivisionUndefined(InvalidOperation, ZeroDivisionError): ...
class Inexact(DecimalException): ...
class InvalidContext(InvalidOperation): ...
class Rounded(DecimalException): ...
class Subnormal(DecimalException): ...
class Overflow(Inexact, Rounded): ...
class Underflow(Inexact, Rounded, Subnormal): ...
class FloatOperation(DecimalException, TypeError): ...
def setcontext(context: Context, /) -> None: ...
def getcontext() -> Context: ...
if sys.version_info >= (3, 11):
def localcontext(
ctx: Context | None = None,
*,
prec: int | None = ...,
rounding: str | None = ...,
Emin: int | None = ...,
Emax: int | None = ...,
capitals: int | None = ...,
clamp: int | None = ...,
traps: dict[_TrapType, bool] | None = ...,
flags: dict[_TrapType, bool] | None = ...,
) -> _ContextManager: ...
else:
def localcontext(ctx: Context | None = None) -> _ContextManager: ...
class Decimal:
def __new__(cls, value: _DecimalNew = ..., context: Context | None = ...) -> Self: ...
@classmethod
def from_float(cls, f: float, /) -> Self: ...
def __bool__(self) -> bool: ...
def compare(self, other: _Decimal, context: Context | None = None) -> Decimal: ...
def __hash__(self) -> int: ...
def as_tuple(self) -> DecimalTuple: ...
def as_integer_ratio(self) -> tuple[int, int]: ...
def to_eng_string(self, context: Context | None = None) -> str: ...
def __abs__(self) -> Decimal: ...
def __add__(self, value: _Decimal, /) -> Decimal: ...
def __divmod__(self, value: _Decimal, /) -> tuple[Decimal, Decimal]: ...
def __eq__(self, value: object, /) -> bool: ...
def __floordiv__(self, value: _Decimal, /) -> Decimal: ...
def __ge__(self, value: _ComparableNum, /) -> bool: ...
def __gt__(self, value: _ComparableNum, /) -> bool: ...
def __le__(self, value: _ComparableNum, /) -> bool: ...
def __lt__(self, value: _ComparableNum, /) -> bool: ...
def __mod__(self, value: _Decimal, /) -> Decimal: ...
def __mul__(self, value: _Decimal, /) -> Decimal: ...
def __neg__(self) -> Decimal: ...
def __pos__(self) -> Decimal: ...
def __pow__(self, value: _Decimal, mod: _Decimal | None = None, /) -> Decimal: ...
def __radd__(self, value: _Decimal, /) -> Decimal: ...
def __rdivmod__(self, value: _Decimal, /) -> tuple[Decimal, Decimal]: ...
def __rfloordiv__(self, value: _Decimal, /) -> Decimal: ...
def __rmod__(self, value: _Decimal, /) -> Decimal: ...
def __rmul__(self, value: _Decimal, /) -> Decimal: ...
def __rsub__(self, value: _Decimal, /) -> Decimal: ...
def __rtruediv__(self, value: _Decimal, /) -> Decimal: ...
def __sub__(self, value: _Decimal, /) -> Decimal: ...
def __truediv__(self, value: _Decimal, /) -> Decimal: ...
def remainder_near(self, other: _Decimal, context: Context | None = None) -> Decimal: ...
def __float__(self) -> float: ...
def __int__(self) -> int: ...
def __trunc__(self) -> int: ...
@property
def real(self) -> Decimal: ...
@property
def imag(self) -> Decimal: ...
def conjugate(self) -> Decimal: ...
def __complex__(self) -> complex: ...
@overload
def __round__(self) -> int: ...
@overload
def __round__(self, ndigits: int, /) -> Decimal: ...
def __floor__(self) -> int: ...
def __ceil__(self) -> int: ...
def fma(self, other: _Decimal, third: _Decimal, context: Context | None = None) -> Decimal: ...
def __rpow__(self, value: _Decimal, mod: Context | None = None, /) -> Decimal: ...
def normalize(self, context: Context | None = None) -> Decimal: ...
def quantize(self, exp: _Decimal, rounding: str | None = None, context: Context | None = None) -> Decimal: ...
def same_quantum(self, other: _Decimal, context: Context | None = None) -> bool: ...
def to_integral_exact(self, rounding: str | None = None, context: Context | None = None) -> Decimal: ...
def to_integral_value(self, rounding: str | None = None, context: Context | None = None) -> Decimal: ...
def to_integral(self, rounding: str | None = None, context: Context | None = None) -> Decimal: ...
def sqrt(self, context: Context | None = None) -> Decimal: ...
def max(self, other: _Decimal, context: Context | None = None) -> Decimal: ...
def min(self, other: _Decimal, context: Context | None = None) -> Decimal: ...
def adjusted(self) -> int: ...
def canonical(self) -> Decimal: ...
def compare_signal(self, other: _Decimal, context: Context | None = None) -> Decimal: ...
def compare_total(self, other: _Decimal, context: Context | None = None) -> Decimal: ...
def compare_total_mag(self, other: _Decimal, context: Context | None = None) -> Decimal: ...
def copy_abs(self) -> Decimal: ...
def copy_negate(self) -> Decimal: ...
def copy_sign(self, other: _Decimal, context: Context | None = None) -> Decimal: ...
def exp(self, context: Context | None = None) -> Decimal: ...
def is_canonical(self) -> bool: ...
def is_finite(self) -> bool: ...
def is_infinite(self) -> bool: ...
def is_nan(self) -> bool: ...
def is_normal(self, context: Context | None = None) -> bool: ...
def is_qnan(self) -> bool: ...
def is_signed(self) -> bool: ...
def is_snan(self) -> bool: ...
def is_subnormal(self, context: Context | None = None) -> bool: ...
def is_zero(self) -> bool: ...
def ln(self, context: Context | None = None) -> Decimal: ...
def log10(self, context: Context | None = None) -> Decimal: ...
def logb(self, context: Context | None = None) -> Decimal: ...
def logical_and(self, other: _Decimal, context: Context | None = None) -> Decimal: ...
def logical_invert(self, context: Context | None = None) -> Decimal: ...
def logical_or(self, other: _Decimal, context: Context | None = None) -> Decimal: ...
def logical_xor(self, other: _Decimal, context: Context | None = None) -> Decimal: ...
def max_mag(self, other: _Decimal, context: Context | None = None) -> Decimal: ...
def min_mag(self, other: _Decimal, context: Context | None = None) -> Decimal: ...
def next_minus(self, context: Context | None = None) -> Decimal: ...
def next_plus(self, context: Context | None = None) -> Decimal: ...
def next_toward(self, other: _Decimal, context: Context | None = None) -> Decimal: ...
def number_class(self, context: Context | None = None) -> str: ...
def radix(self) -> Decimal: ...
def rotate(self, other: _Decimal, context: Context | None = None) -> Decimal: ...
def scaleb(self, other: _Decimal, context: Context | None = None) -> Decimal: ...
def shift(self, other: _Decimal, context: Context | None = None) -> Decimal: ...
def __reduce__(self) -> tuple[type[Self], tuple[str]]: ...
def __copy__(self) -> Self: ...
def __deepcopy__(self, memo: Any, /) -> Self: ...
def __format__(self, specifier: str, context: Context | None = ..., /) -> str: ...
class _ContextManager:
new_context: Context
saved_context: Context
def __init__(self, new_context: Context) -> None: ...
def __enter__(self) -> Context: ...
def __exit__(self, t: type[BaseException] | None, v: BaseException | None, tb: TracebackType | None) -> None: ...
_TrapType: TypeAlias = type[DecimalException]
class Context:
# TODO: Context doesn't allow you to delete *any* attributes from instances of the class at runtime,
# even settable attributes like `prec` and `rounding`,
# but that's inexpressible in the stub.
# Type checkers either ignore it or misinterpret it
# if you add a `def __delattr__(self, name: str, /) -> NoReturn` method to the stub
prec: int
rounding: str
Emin: int
Emax: int
capitals: int
clamp: int
traps: dict[_TrapType, bool]
flags: dict[_TrapType, bool]
def __init__(
self,
prec: int | None = ...,
rounding: str | None = ...,
Emin: int | None = ...,
Emax: int | None = ...,
capitals: int | None = ...,
clamp: int | None = ...,
flags: None | dict[_TrapType, bool] | Container[_TrapType] = ...,
traps: None | dict[_TrapType, bool] | Container[_TrapType] = ...,
_ignored_flags: list[_TrapType] | None = ...,
) -> None: ...
def __reduce__(self) -> tuple[type[Self], tuple[Any, ...]]: ...
def clear_flags(self) -> None: ...
def clear_traps(self) -> None: ...
def copy(self) -> Context: ...
def __copy__(self) -> Context: ...
# see https://github.com/python/cpython/issues/94107
__hash__: ClassVar[None] # type: ignore[assignment]
def Etiny(self) -> int: ...
def Etop(self) -> int: ...
def create_decimal(self, num: _DecimalNew = "0", /) -> Decimal: ...
def create_decimal_from_float(self, f: float, /) -> Decimal: ...
def abs(self, x: _Decimal, /) -> Decimal: ...
def add(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def canonical(self, x: Decimal, /) -> Decimal: ...
def compare(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def compare_signal(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def compare_total(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def compare_total_mag(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def copy_abs(self, x: _Decimal, /) -> Decimal: ...
def copy_decimal(self, x: _Decimal, /) -> Decimal: ...
def copy_negate(self, x: _Decimal, /) -> Decimal: ...
def copy_sign(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def divide(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def divide_int(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def divmod(self, x: _Decimal, y: _Decimal, /) -> tuple[Decimal, Decimal]: ...
def exp(self, x: _Decimal, /) -> Decimal: ...
def fma(self, x: _Decimal, y: _Decimal, z: _Decimal, /) -> Decimal: ...
def is_canonical(self, x: _Decimal, /) -> bool: ...
def is_finite(self, x: _Decimal, /) -> bool: ...
def is_infinite(self, x: _Decimal, /) -> bool: ...
def is_nan(self, x: _Decimal, /) -> bool: ...
def is_normal(self, x: _Decimal, /) -> bool: ...
def is_qnan(self, x: _Decimal, /) -> bool: ...
def is_signed(self, x: _Decimal, /) -> bool: ...
def is_snan(self, x: _Decimal, /) -> bool: ...
def is_subnormal(self, x: _Decimal, /) -> bool: ...
def is_zero(self, x: _Decimal, /) -> bool: ...
def ln(self, x: _Decimal, /) -> Decimal: ...
def log10(self, x: _Decimal, /) -> Decimal: ...
def logb(self, x: _Decimal, /) -> Decimal: ...
def logical_and(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def logical_invert(self, x: _Decimal, /) -> Decimal: ...
def logical_or(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def logical_xor(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def max(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def max_mag(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def min(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def min_mag(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def minus(self, x: _Decimal, /) -> Decimal: ...
def multiply(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def next_minus(self, x: _Decimal, /) -> Decimal: ...
def next_plus(self, x: _Decimal, /) -> Decimal: ...
def next_toward(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def normalize(self, x: _Decimal, /) -> Decimal: ...
def number_class(self, x: _Decimal, /) -> str: ...
def plus(self, x: _Decimal, /) -> Decimal: ...
def power(self, a: _Decimal, b: _Decimal, modulo: _Decimal | None = None) -> Decimal: ...
def quantize(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def radix(self) -> Decimal: ...
def remainder(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def remainder_near(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def rotate(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def same_quantum(self, x: _Decimal, y: _Decimal, /) -> bool: ...
def scaleb(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def shift(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def sqrt(self, x: _Decimal, /) -> Decimal: ...
def subtract(self, x: _Decimal, y: _Decimal, /) -> Decimal: ...
def to_eng_string(self, x: _Decimal, /) -> str: ...
def to_sci_string(self, x: _Decimal, /) -> str: ...
def to_integral_exact(self, x: _Decimal, /) -> Decimal: ...
def to_integral_value(self, x: _Decimal, /) -> Decimal: ...
def to_integral(self, x: _Decimal, /) -> Decimal: ...
DefaultContext: Context
BasicContext: Context
ExtendedContext: Context
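The `Context`/`localcontext` machinery declared above is easiest to see in use. A short sketch, not taken from the diff, assuming CPython 3.11+ so that `localcontext` accepts the keyword overrides shown in the stub:

```python
# Temporarily override precision and rounding, then fall back to the
# thread-local default context.  The keyword form of localcontext()
# requires 3.11+; older versions would mutate ctx inside the block instead.
from decimal import ROUND_HALF_EVEN, Decimal, getcontext, localcontext

getcontext().prec = 28  # default thread-local context

with localcontext(prec=6, rounding=ROUND_HALF_EVEN) as ctx:
    print(Decimal(1) / Decimal(7))   # 0.142857  (6 significant digits)
    print(ctx.sqrt(Decimal(2)))      # 1.41421

print(Decimal(1) / Decimal(7))       # back to 28 digits
```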

View File

@@ -1,33 +0,0 @@
from collections.abc import Callable
from types import TracebackType
from typing import Any, NoReturn, overload
from typing_extensions import TypeVarTuple, Unpack
__all__ = ["error", "start_new_thread", "exit", "get_ident", "allocate_lock", "interrupt_main", "LockType", "RLock"]
_Ts = TypeVarTuple("_Ts")
TIMEOUT_MAX: int
error = RuntimeError
@overload
def start_new_thread(function: Callable[[Unpack[_Ts]], object], args: tuple[Unpack[_Ts]]) -> None: ...
@overload
def start_new_thread(function: Callable[..., object], args: tuple[Any, ...], kwargs: dict[str, Any]) -> None: ...
def exit() -> NoReturn: ...
def get_ident() -> int: ...
def allocate_lock() -> LockType: ...
def stack_size(size: int | None = None) -> int: ...
class LockType:
locked_status: bool
def acquire(self, waitflag: bool | None = None, timeout: int = -1) -> bool: ...
def __enter__(self, waitflag: bool | None = None, timeout: int = -1) -> bool: ...
def __exit__(self, typ: type[BaseException] | None, val: BaseException | None, tb: TracebackType | None) -> None: ...
def release(self) -> bool: ...
def locked(self) -> bool: ...
class RLock(LockType):
def release(self) -> None: ... # type: ignore[override]
def interrupt_main() -> None: ...
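The file name is not visible in this excerpt; the sketch below assumes the names resolve to the standard low-level `_thread` interface, which exposes the same `allocate_lock`/`start_new_thread`/`get_ident` signatures. Treat the module name as an assumption:

```python
# Spawn a raw thread with the low-level API and hand-roll the "join" with a lock.
import _thread

done = _thread.allocate_lock()
done.acquire()                     # held until the worker releases it

def worker(n: int) -> None:
    print(f"worker in thread {_thread.get_ident()}, n={n}")
    done.release()

_thread.start_new_thread(worker, (3,))
done.acquire()                     # blocks until worker() has run
print("main thread", _thread.get_ident(), "done")
```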

View File

@@ -1,164 +0,0 @@
import sys
from _thread import _excepthook, _ExceptHookArgs
from _typeshed import ProfileFunction, TraceFunction
from collections.abc import Callable, Iterable, Mapping
from types import TracebackType
from typing import Any, TypeVar
_T = TypeVar("_T")
__all__ = [
"get_ident",
"active_count",
"Condition",
"current_thread",
"enumerate",
"main_thread",
"TIMEOUT_MAX",
"Event",
"Lock",
"RLock",
"Semaphore",
"BoundedSemaphore",
"Thread",
"Barrier",
"BrokenBarrierError",
"Timer",
"ThreadError",
"setprofile",
"settrace",
"local",
"stack_size",
"ExceptHookArgs",
"excepthook",
]
def active_count() -> int: ...
def current_thread() -> Thread: ...
def currentThread() -> Thread: ...
def get_ident() -> int: ...
def enumerate() -> list[Thread]: ...
def main_thread() -> Thread: ...
def settrace(func: TraceFunction) -> None: ...
def setprofile(func: ProfileFunction | None) -> None: ...
def stack_size(size: int | None = None) -> int: ...
TIMEOUT_MAX: float
class ThreadError(Exception): ...
class local:
def __getattribute__(self, name: str) -> Any: ...
def __setattr__(self, name: str, value: Any) -> None: ...
def __delattr__(self, name: str) -> None: ...
class Thread:
name: str
daemon: bool
@property
def ident(self) -> int | None: ...
def __init__(
self,
group: None = None,
target: Callable[..., object] | None = None,
name: str | None = None,
args: Iterable[Any] = (),
kwargs: Mapping[str, Any] | None = None,
*,
daemon: bool | None = None,
) -> None: ...
def start(self) -> None: ...
def run(self) -> None: ...
def join(self, timeout: float | None = None) -> None: ...
def getName(self) -> str: ...
def setName(self, name: str) -> None: ...
@property
def native_id(self) -> int | None: ... # only available on some platforms
def is_alive(self) -> bool: ...
if sys.version_info < (3, 9):
def isAlive(self) -> bool: ...
def isDaemon(self) -> bool: ...
def setDaemon(self, daemonic: bool) -> None: ...
class _DummyThread(Thread): ...
class Lock:
def __enter__(self) -> bool: ...
def __exit__(
self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
) -> bool | None: ...
def acquire(self, blocking: bool = ..., timeout: float = ...) -> bool: ...
def release(self) -> None: ...
def locked(self) -> bool: ...
class _RLock:
def __enter__(self) -> bool: ...
def __exit__(
self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
) -> bool | None: ...
def acquire(self, blocking: bool = True, timeout: float = -1) -> bool: ...
def release(self) -> None: ...
RLock = _RLock
class Condition:
def __init__(self, lock: Lock | _RLock | None = None) -> None: ...
def __enter__(self) -> bool: ...
def __exit__(
self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
) -> bool | None: ...
def acquire(self, blocking: bool = ..., timeout: float = ...) -> bool: ...
def release(self) -> None: ...
def wait(self, timeout: float | None = None) -> bool: ...
def wait_for(self, predicate: Callable[[], _T], timeout: float | None = None) -> _T: ...
def notify(self, n: int = 1) -> None: ...
def notify_all(self) -> None: ...
def notifyAll(self) -> None: ...
class Semaphore:
def __init__(self, value: int = 1) -> None: ...
def __exit__(
self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
) -> bool | None: ...
def acquire(self, blocking: bool = True, timeout: float | None = None) -> bool: ...
def __enter__(self, blocking: bool = True, timeout: float | None = None) -> bool: ...
if sys.version_info >= (3, 9):
def release(self, n: int = ...) -> None: ...
else:
def release(self) -> None: ...
class BoundedSemaphore(Semaphore): ...
class Event:
def is_set(self) -> bool: ...
def set(self) -> None: ...
def clear(self) -> None: ...
def wait(self, timeout: float | None = None) -> bool: ...
excepthook = _excepthook
ExceptHookArgs = _ExceptHookArgs
class Timer(Thread):
def __init__(
self,
interval: float,
function: Callable[..., object],
args: Iterable[Any] | None = None,
kwargs: Mapping[str, Any] | None = None,
) -> None: ...
def cancel(self) -> None: ...
class Barrier:
@property
def parties(self) -> int: ...
@property
def n_waiting(self) -> int: ...
@property
def broken(self) -> bool: ...
def __init__(self, parties: int, action: Callable[[], None] | None = None, timeout: float | None = None) -> None: ...
def wait(self, timeout: float | None = None) -> int: ...
def reset(self) -> None: ...
def abort(self) -> None: ...
class BrokenBarrierError(RuntimeError): ...
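A compact sketch exercising the higher-level primitives declared above (`Thread`, `Lock`, `Event`); it is not from the diff and uses nothing beyond the public `threading` API:

```python
# Increment a shared counter from a worker thread, guarded by a Lock,
# and signal completion through an Event.
import threading

counter = 0
counter_lock = threading.Lock()
finished = threading.Event()

def worker(iterations: int) -> None:
    global counter
    for _ in range(iterations):
        with counter_lock:          # Lock.__enter__/__exit__ as declared in the stub
            counter += 1
    finished.set()

t = threading.Thread(target=worker, args=(10_000,), daemon=True)
t.start()
finished.wait(timeout=5.0)
t.join()
print("counter =", counter)         # 10000
```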

View File

@@ -1,11 +0,0 @@
from typing import Any, Final, TypeVar
_T = TypeVar("_T")
__about__: Final[str]
def heapify(heap: list[Any], /) -> None: ...
def heappop(heap: list[_T], /) -> _T: ...
def heappush(heap: list[_T], item: _T, /) -> None: ...
def heappushpop(heap: list[_T], item: _T, /) -> _T: ...
def heapreplace(heap: list[_T], item: _T, /) -> _T: ...
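The five functions above match the C accelerator that the public `heapq` module re-exports (the file name is not shown here, so that correspondence is an assumption). A sketch of the intended call pattern, written against `heapq`:

```python
# Build a min-heap, pop the smallest item, then replace the new minimum in one step.
import heapq

heap: list[int] = []
for value in [7, 2, 9, 4, 1, 8]:
    heapq.heappush(heap, value)

smallest = heapq.heappop(heap)       # 1
heapq.heapreplace(heap, 5)           # pops the current minimum (2), pushes 5
print(smallest, sorted(heap))        # 1 [4, 5, 7, 8, 9]
```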

View File

@@ -1,28 +0,0 @@
import sys
import types
from _typeshed import ReadableBuffer
from importlib.machinery import ModuleSpec
from typing import Any
check_hash_based_pycs: str
def source_hash(key: int, source: ReadableBuffer) -> bytes: ...
def create_builtin(spec: ModuleSpec, /) -> types.ModuleType: ...
def create_dynamic(spec: ModuleSpec, file: Any = None, /) -> types.ModuleType: ...
def acquire_lock() -> None: ...
def exec_builtin(mod: types.ModuleType, /) -> int: ...
def exec_dynamic(mod: types.ModuleType, /) -> int: ...
def extension_suffixes() -> list[str]: ...
def init_frozen(name: str, /) -> types.ModuleType: ...
def is_builtin(name: str, /) -> int: ...
def is_frozen(name: str, /) -> bool: ...
def is_frozen_package(name: str, /) -> bool: ...
def lock_held() -> bool: ...
def release_lock() -> None: ...
if sys.version_info >= (3, 11):
def find_frozen(name: str, /, *, withdata: bool = False) -> tuple[memoryview | None, bool, str | None] | None: ...
def get_frozen_object(name: str, data: ReadableBuffer | None = None, /) -> types.CodeType: ...
else:
def get_frozen_object(name: str, /) -> types.CodeType: ...
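`_imp` is the import-system bootstrap module; user code rarely calls it directly, but a few of the functions declared above are safe to poke at interactively. A small sketch (concrete output values are platform- and version-dependent):

```python
# Introspect the import machinery through the private _imp module.
import _imp

print(_imp.is_builtin("sys"))        # non-zero: sys is compiled into the interpreter
print(_imp.is_frozen("zipimport"))   # whether the module ships as frozen bytecode
print(_imp.extension_suffixes())     # e.g. ['.cpython-312-x86_64-linux-gnu.so', ...]
print(_imp.lock_held())              # is the global import lock currently held?
```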

View File

@@ -1,86 +0,0 @@
from _typeshed import structseq
from typing import Any, Final, Literal, SupportsIndex, final
from typing_extensions import Buffer, Self
class ChannelError(RuntimeError): ...
class ChannelClosedError(ChannelError): ...
class ChannelEmptyError(ChannelError): ...
class ChannelNotEmptyError(ChannelError): ...
class ChannelNotFoundError(ChannelError): ...
# Mark as final, since instantiating ChannelID is not supported.
@final
class ChannelID:
@property
def end(self) -> Literal["send", "recv", "both"]: ...
@property
def send(self) -> Self: ...
@property
def recv(self) -> Self: ...
def __eq__(self, other: object) -> bool: ...
def __ge__(self, other: ChannelID) -> bool: ...
def __gt__(self, other: ChannelID) -> bool: ...
def __hash__(self) -> int: ...
def __index__(self) -> int: ...
def __int__(self) -> int: ...
def __le__(self, other: ChannelID) -> bool: ...
def __lt__(self, other: ChannelID) -> bool: ...
def __ne__(self, other: object) -> bool: ...
@final
class ChannelInfo(structseq[int], tuple[bool, bool, bool, int, int, int, int, int]):
__match_args__: Final = (
"open",
"closing",
"closed",
"count",
"num_interp_send",
"num_interp_send_released",
"num_interp_recv",
"num_interp_recv_released",
)
@property
def open(self) -> bool: ...
@property
def closing(self) -> bool: ...
@property
def closed(self) -> bool: ...
@property
def count(self) -> int: ... # type: ignore[override]
@property
def num_interp_send(self) -> int: ...
@property
def num_interp_send_released(self) -> int: ...
@property
def num_interp_recv(self) -> int: ...
@property
def num_interp_recv_released(self) -> int: ...
@property
def num_interp_both(self) -> int: ...
@property
def num_interp_both_recv_released(self) -> int: ...
@property
def num_interp_both_send_released(self) -> int: ...
@property
def num_interp_both_released(self) -> int: ...
@property
def recv_associated(self) -> bool: ...
@property
def recv_released(self) -> bool: ...
@property
def send_associated(self) -> bool: ...
@property
def send_released(self) -> bool: ...
def create(unboundop: Literal[1, 2, 3]) -> ChannelID: ...
def destroy(cid: SupportsIndex) -> None: ...
def list_all() -> list[ChannelID]: ...
def list_interpreters(cid: SupportsIndex, *, send: bool) -> list[int]: ...
def send(cid: SupportsIndex, obj: object, *, blocking: bool = True, timeout: float | None = None) -> None: ...
def send_buffer(cid: SupportsIndex, obj: Buffer, *, blocking: bool = True, timeout: float | None = None) -> None: ...
def recv(cid: SupportsIndex, default: object = ...) -> tuple[Any, Literal[1, 2, 3]]: ...
def close(cid: SupportsIndex, *, send: bool = False, recv: bool = False) -> None: ...
def get_count(cid: SupportsIndex) -> int: ...
def get_info(cid: SupportsIndex) -> ChannelInfo: ...
def get_channel_defaults(cid: SupportsIndex) -> Literal[1, 2, 3]: ...
def release(cid: SupportsIndex, *, send: bool = False, recv: bool = False, force: bool = False) -> None: ...
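This last file stubs one of CPython's internal cross-interpreter channel modules. It is not public API; the sketch below only mirrors the signatures declared above and assumes the module is importable under a private name such as `_interpchannels` (CPython 3.13). Both the module name and the meaning of `unboundop` are assumptions:

```python
# Round-trip a value through an interpreter channel, following the stub's signatures.
# CPython-internal and version-specific; do not rely on this in real code.
import _interpchannels as channels

cid = channels.create(1)                     # unboundop is typed Literal[1, 2, 3]
channels.send(cid, "hello", blocking=True)
obj, unboundop = channels.recv(cid)          # stub: tuple[Any, Literal[1, 2, 3]]
print(obj, unboundop)
print(channels.get_info(cid).count)          # queued items remaining: 0
channels.close(cid, send=True, recv=True)
```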

Some files were not shown because too many files have changed in this diff Show More