Compare commits

..

3 Commits

Author        SHA1        Message                                Date
Micha Reiser  f6b2544993  Add rule registry and rule selection   2024-12-09 14:10:40 +01:00
Micha Reiser  fe78d50560  Add declare_lint                       2024-12-09 14:08:19 +01:00
Micha Reiser  b39def2915  Introduce DiagnosticId                 2024-12-09 14:05:09 +01:00
3094 changed files with 49580 additions and 124250 deletions


@@ -8,7 +8,3 @@ benchmark = "bench -p ruff_benchmark --bench linter --bench formatter --"
# See: https://github.com/astral-sh/ruff/issues/11503
[target.'cfg(all(target_env="msvc", target_os = "windows"))']
rustflags = ["-C", "target-feature=+crt-static"]
[target.'wasm32-unknown-unknown']
# See https://docs.rs/getrandom/latest/getrandom/#webassembly-support
rustflags = ["--cfg", 'getrandom_backend="wasm_js"']


@@ -6,10 +6,3 @@ failure-output = "immediate-final"
fail-fast = false
status-level = "skip"
# Mark tests that take longer than 1s as slow.
# Terminate after 60s as a stop-gap measure to terminate on deadlock.
slow-timeout = { period = "1s", terminate-after = 60 }
# Show slow jobs in the final summary
final-status-level = "slow"

.gitattributes (4 changed lines)

@@ -14,7 +14,5 @@ crates/ruff_python_parser/resources/invalid/re_lex_logical_token_mac_eol.py text
crates/ruff_python_parser/resources/inline linguist-generated=true
ruff.schema.json -diff linguist-generated=true text=auto eol=lf
crates/ruff_python_ast/src/generated.rs -diff linguist-generated=true text=auto eol=lf
crates/ruff_python_formatter/src/generated.rs -diff linguist-generated=true text=auto eol=lf
ruff.schema.json linguist-generated=true text=auto eol=lf
*.md.snap linguist-language=Markdown

.github/CODEOWNERS (1 changed line)

@@ -9,7 +9,6 @@
/crates/ruff_formatter/ @MichaReiser
/crates/ruff_python_formatter/ @MichaReiser
/crates/ruff_python_parser/ @MichaReiser @dhruvmanila
/crates/ruff_annotate_snippets/ @BurntSushi
# flake8-pyi
/crates/ruff_linter/src/rules/flake8_pyi/ @AlexWaygood

.github/ISSUE_TEMPLATE.md (new file, 12 lines)

@@ -0,0 +1,12 @@
<!--
Thank you for taking the time to report an issue! We're glad to have you involved with Ruff.
If you're filing a bug report, please consider including the following information:
* List of keywords you searched for before creating this issue. Write them down here so that others can find this issue more easily and help provide feedback.
e.g. "RUF001", "unused variable", "Jupyter notebook"
* A minimal code snippet that reproduces the bug.
* The command you invoked (e.g., `ruff /path/to/file.py --fix`), ideally including the `--isolated` flag.
* The current Ruff settings (any relevant sections from your `pyproject.toml`).
* The current Ruff version (`ruff --version`).
-->


@@ -1,2 +0,0 @@
# This file cannot use the extension `.yaml`.
blank_issues_enabled: false


@@ -1,22 +0,0 @@
name: New issue
description: A generic issue
body:
- type: markdown
attributes:
value: |
Thank you for taking the time to report an issue! We're glad to have you involved with Ruff.
If you're filing a bug report, please consider including the following information:
* List of keywords you searched for before creating this issue. Write them down here so that others can find this issue more easily and help provide feedback.
e.g. "RUF001", "unused variable", "Jupyter notebook"
* A minimal code snippet that reproduces the bug.
* The command you invoked (e.g., `ruff /path/to/file.py --fix`), ideally including the `--isolated` flag.
* The current Ruff settings (any relevant sections from your `pyproject.toml`).
* The current Ruff version (`ruff --version`).
- type: textarea
attributes:
label: Description
description: A description of the issue


@@ -1,10 +0,0 @@
# Configuration for the actionlint tool, which we run via pre-commit
# to verify the correctness of the syntax in our GitHub Actions workflows.
self-hosted-runner:
# Various runners we use that aren't recognized out-of-the-box by actionlint:
labels:
- depot-ubuntu-latest-8
- depot-ubuntu-22.04-16
- github-windows-2025-x86_64-8
- github-windows-2025-x86_64-16
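
The `self-hosted-runner.labels` list above tells actionlint which non-standard `runs-on` values are legitimate so it does not flag them as unknown runners. A minimal sketch of a job that relies on one of those labels (the job body is illustrative, not taken verbatim from this repository):

    # Without the actionlint.yaml entry above, actionlint would warn that
    # "depot-ubuntu-22.04-16" is not a recognized GitHub-hosted runner label.
    jobs:
      cargo-test-linux:
        runs-on: depot-ubuntu-22.04-16
        steps:
          - uses: actions/checkout@v4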


@@ -45,7 +45,7 @@
groupName: "Artifact GitHub Actions dependencies",
matchManagers: ["github-actions"],
matchDatasources: ["gitea-tags", "github-tags"],
matchPackageNames: ["actions/.*-artifact"],
matchPackagePatterns: ["actions/.*-artifact"],
description: "Weekly update of artifact-related GitHub Actions dependencies",
},
{
@@ -58,16 +58,10 @@
description: "Disable PRs updating GitHub runners (e.g. 'runs-on: macos-14')",
enabled: false,
},
{
// TODO: Remove this once the codebase is upgrade to v4 (https://github.com/astral-sh/ruff/pull/16069)
matchPackageNames: ["tailwindcss"],
matchManagers: ["npm"],
enabled: false,
},
{
// Disable updates of `zip-rs`; intentionally pinned for now due to ownership change
// See: https://github.com/astral-sh/uv/issues/3642
matchPackageNames: ["zip"],
matchPackagePatterns: ["zip"],
matchManagers: ["cargo"],
enabled: false,
},
@@ -76,7 +70,7 @@
// with `mkdocs-material-insider`.
// See: https://squidfunk.github.io/mkdocs-material/insiders/upgrade/
matchManagers: ["pip_requirements"],
matchPackageNames: ["mkdocs-material"],
matchPackagePatterns: ["mkdocs-material"],
enabled: false,
},
{
@@ -93,13 +87,13 @@
{
groupName: "Monaco",
matchManagers: ["npm"],
matchPackageNames: ["monaco"],
matchPackagePatterns: ["monaco"],
description: "Weekly update of the Monaco editor",
},
{
groupName: "strum",
matchManagers: ["cargo"],
matchPackageNames: ["strum"],
matchPackagePatterns: ["strum"],
description: "Weekly update of strum dependencies",
},
{


@@ -23,8 +23,6 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions: {}
env:
PACKAGE_NAME: ruff
MODULE_NAME: ruff
@@ -55,9 +53,9 @@ jobs:
args: --out dist
- name: "Test sdist"
run: |
pip install dist/"${PACKAGE_NAME}"-*.tar.gz --force-reinstall
"${MODULE_NAME}" --help
python -m "${MODULE_NAME}" --help
pip install dist/${{ env.PACKAGE_NAME }}-*.tar.gz --force-reinstall
${{ env.MODULE_NAME }} --help
python -m ${{ env.MODULE_NAME }} --help
- name: "Upload sdist"
uses: actions/upload-artifact@v4
with:
@@ -127,7 +125,7 @@ jobs:
args: --release --locked --out dist
- name: "Test wheel - aarch64"
run: |
pip install dist/"${PACKAGE_NAME}"-*.whl --force-reinstall
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
ruff --help
python -m ruff --help
- name: "Upload wheels"
@@ -188,9 +186,9 @@ jobs:
if: ${{ !startsWith(matrix.platform.target, 'aarch64') }}
shell: bash
run: |
python -m pip install dist/"${PACKAGE_NAME}"-*.whl --force-reinstall
"${MODULE_NAME}" --help
python -m "${MODULE_NAME}" --help
python -m pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
${{ env.MODULE_NAME }} --help
python -m ${{ env.MODULE_NAME }} --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
with:
@@ -238,9 +236,9 @@ jobs:
- name: "Test wheel"
if: ${{ startsWith(matrix.target, 'x86_64') }}
run: |
pip install dist/"${PACKAGE_NAME}"-*.whl --force-reinstall
"${MODULE_NAME}" --help
python -m "${MODULE_NAME}" --help
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
${{ env.MODULE_NAME }} --help
python -m ${{ env.MODULE_NAME }} --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
with:
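
Several hunks in this workflow (and in the workflows below) contrast two ways of getting a `${{ ... }}` expression into a `run:` script: splicing the expression directly into the script text, versus exposing it through `env:` and reading it back as a quoted shell variable, which keeps untrusted or space-containing values out of the script source. A minimal sketch of both shapes, using the `PACKAGE_NAME`/`MODULE_NAME` variables defined at the top of this workflow (the step body is illustrative):

    # Variant A: the expression is expanded directly into the script text.
    - name: "Test sdist"
      run: |
        pip install dist/${{ env.PACKAGE_NAME }}-*.tar.gz --force-reinstall
        ${{ env.MODULE_NAME }} --help

    # Variant B: the value reaches the shell only as an environment variable,
    # so it can be quoted like any other shell variable.
    - name: "Test sdist"
      run: |
        pip install dist/"${PACKAGE_NAME}"-*.tar.gz --force-reinstall
        "${MODULE_NAME}" --help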


@@ -48,13 +48,11 @@ jobs:
- name: Check tag consistency
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
env:
TAG: ${{ inputs.plan != '' && fromJson(inputs.plan).announcement_tag || 'dry-run' }}
run: |
version=$(grep -m 1 "^version = " pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g')
if [ "${TAG}" != "${version}" ]; then
version=$(grep "version = " pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g')
if [ "${{ fromJson(inputs.plan).announcement_tag }}" != "${version}" ]; then
echo "The input tag does not match the version from pyproject.toml:" >&2
echo "${TAG}" >&2
echo "${{ fromJson(inputs.plan).announcement_tag }}" >&2
echo "${version}" >&2
exit 1
else
@@ -74,7 +72,7 @@ jobs:
- name: Normalize Platform Pair (replace / with -)
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_TUPLE=${platform//\//-}" >> "$GITHUB_ENV"
echo "PLATFORM_TUPLE=${platform//\//-}" >> $GITHUB_ENV
# Adapted from https://docs.docker.com/build/ci/github-actions/multi-platform/
- name: Build and push by digest
@@ -89,10 +87,9 @@ jobs:
outputs: type=image,name=${{ env.RUFF_BASE_IMG }},push-by-digest=true,name-canonical=true,push=${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
- name: Export digests
env:
digest: ${{ steps.build.outputs.digest }}
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
- name: Upload digests
@@ -144,10 +141,9 @@ jobs:
# The printf will expand the base image with the `<RUFF_BASE_IMG>@sha256:<sha256> ...` for each sha256 in the directory
# The final command becomes `docker buildx imagetools create -t tag1 -t tag2 ... <RUFF_BASE_IMG>@sha256:<sha256_1> <RUFF_BASE_IMG>@sha256:<sha256_2> ...`
run: |
# shellcheck disable=SC2046
docker buildx imagetools create \
$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf "${RUFF_BASE_IMG}@sha256:%s " *)
$(printf '${{ env.RUFF_BASE_IMG }}@sha256:%s ' *)
docker-publish-extra:
name: Publish additional Docker image based on ${{ matrix.image-mapping }}
@@ -177,8 +173,6 @@ jobs:
- name: Generate Dynamic Dockerfile Tags
shell: bash
env:
TAG_VALUE: ${{ fromJson(inputs.plan).announcement_tag }}
run: |
set -euo pipefail
@@ -188,7 +182,7 @@ jobs:
# Generate Dockerfile content
cat <<EOF > Dockerfile
FROM ${BASE_IMAGE}
COPY --from=${RUFF_BASE_IMG}:latest /ruff /usr/local/bin/ruff
COPY --from=${{ env.RUFF_BASE_IMG }}:latest /ruff /usr/local/bin/ruff
ENTRYPOINT []
CMD ["/usr/local/bin/ruff"]
EOF
@@ -199,8 +193,8 @@ jobs:
# Loop through all base tags and append its docker metadata pattern to the list
# Order is on purpose such that the label org.opencontainers.image.version has the first pattern with the full version
IFS=','; for TAG in ${BASE_TAGS}; do
TAG_PATTERNS="${TAG_PATTERNS}type=pep440,pattern={{ version }},suffix=-${TAG},value=${TAG_VALUE}\n"
TAG_PATTERNS="${TAG_PATTERNS}type=pep440,pattern={{ major }}.{{ minor }},suffix=-${TAG},value=${TAG_VALUE}\n"
TAG_PATTERNS="${TAG_PATTERNS}type=pep440,pattern={{ version }},suffix=-${TAG},value=${{ fromJson(inputs.plan).announcement_tag }}\n"
TAG_PATTERNS="${TAG_PATTERNS}type=pep440,pattern={{ major }}.{{ minor }},suffix=-${TAG},value=${{ fromJson(inputs.plan).announcement_tag }}\n"
TAG_PATTERNS="${TAG_PATTERNS}type=raw,value=${TAG}\n"
done
@@ -208,14 +202,14 @@ jobs:
TAG_PATTERNS="${TAG_PATTERNS%\\n}"
# Export image cache name
echo "IMAGE_REF=${BASE_IMAGE//:/-}" >> "$GITHUB_ENV"
echo "IMAGE_REF=${BASE_IMAGE//:/-}" >> $GITHUB_ENV
# Export tag patterns using the multiline env var syntax
{
echo "TAG_PATTERNS<<EOF"
echo -e "${TAG_PATTERNS}"
echo EOF
} >> "$GITHUB_ENV"
} >> $GITHUB_ENV
- name: Extract metadata (tags, labels) for Docker
id: meta
@@ -291,9 +285,7 @@ jobs:
# The final command becomes `docker buildx imagetools create -t tag1 -t tag2 ... <RUFF_BASE_IMG>@sha256:<sha256_1> <RUFF_BASE_IMG>@sha256:<sha256_2> ...`
run: |
readarray -t lines <<< "$DOCKER_METADATA_OUTPUT_ANNOTATIONS"; annotations=(); for line in "${lines[@]}"; do annotations+=(--annotation "$line"); done
# shellcheck disable=SC2046
docker buildx imagetools create \
"${annotations[@]}" \
$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf "${RUFF_BASE_IMG}@sha256:%s " *)
$(printf '${{ env.RUFF_BASE_IMG }}@sha256:%s ' *)
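
The comments above describe how the final `imagetools create` invocation is assembled; viewed as a single step it may be easier to follow. In this sketch the step name and working directory are assumptions, `DOCKER_METADATA_OUTPUT_JSON` is the JSON summary that `docker/metadata-action` exposes to later steps, and `RUFF_BASE_IMG` is the workflow-level image name already referenced above:

    - name: Merge digests into a multi-arch manifest   # illustrative name
      working-directory: /tmp/digests                  # assumed: one file per image digest
      run: |
        # jq turns the metadata tag list into "-t tag1 -t tag2 ..." arguments;
        # printf expands every digest file into "<image>@sha256:<digest>".
        # shellcheck disable=SC2046  # the word splitting of $(...) is intentional
        docker buildx imagetools create \
          $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
          $(printf "${RUFF_BASE_IMG}@sha256:%s " *)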


@@ -1,7 +1,5 @@
name: CI
permissions: {}
on:
push:
branches: [main]
@@ -61,7 +59,6 @@ jobs:
- Cargo.toml
- Cargo.lock
- crates/**
- "!crates/red_knot*/**"
- "!crates/ruff_python_formatter/**"
- "!crates/ruff_formatter/**"
- "!crates/ruff_dev/**"
@@ -119,11 +116,11 @@ jobs:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: Swatinem/rust-cache@v2
- name: "Install Rust toolchain"
run: |
rustup component add clippy
rustup target add wasm32-unknown-unknown
- uses: Swatinem/rust-cache@v2
- name: "Clippy"
run: cargo clippy --workspace --all-targets --all-features --locked -- -D warnings
- name: "Clippy (wasm)"
@@ -133,13 +130,12 @@ jobs:
name: "cargo test (linux)"
runs-on: depot-ubuntu-22.04-16
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: Swatinem/rust-cache@v2
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
@@ -152,6 +148,7 @@ jobs:
uses: taiki-e/install-action@v2
with:
tool: cargo-insta
- uses: Swatinem/rust-cache@v2
- name: "Run tests"
shell: bash
env:
@@ -179,13 +176,12 @@ jobs:
name: "cargo test (linux, release)"
runs-on: depot-ubuntu-22.04-16
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: Swatinem/rust-cache@v2
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
@@ -198,6 +194,7 @@ jobs:
uses: taiki-e/install-action@v2
with:
tool: cargo-insta
- uses: Swatinem/rust-cache@v2
- name: "Run tests"
shell: bash
env:
@@ -206,25 +203,24 @@ jobs:
cargo-test-windows:
name: "cargo test (windows)"
runs-on: github-windows-2025-x86_64-16
runs-on: windows-latest-xlarge
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: Swatinem/rust-cache@v2
- name: "Install Rust toolchain"
run: rustup show
- name: "Install cargo nextest"
uses: taiki-e/install-action@v2
with:
tool: cargo-nextest
- uses: Swatinem/rust-cache@v2
- name: "Run tests"
shell: bash
env:
NEXTEST_PROFILE: "ci"
# Workaround for <https://github.com/nextest-rs/nextest/issues/1493>.
RUSTUP_WINDOWS_PATH_ADD_BIN: 1
run: |
@@ -235,13 +231,12 @@ jobs:
name: "cargo test (wasm)"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: Swatinem/rust-cache@v2
- name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown
- uses: actions/setup-node@v4
@@ -252,6 +247,7 @@ jobs:
- uses: jetli/wasm-pack-action@v0.4.0
with:
version: v0.13.1
- uses: Swatinem/rust-cache@v2
- name: "Test ruff_wasm"
run: |
cd crates/ruff_wasm
@@ -270,19 +266,19 @@ jobs:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: Swatinem/rust-cache@v2
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@v1
- uses: Swatinem/rust-cache@v2
- name: "Build"
run: cargo build --release --locked
cargo-build-msrv:
name: "cargo build (msrv)"
runs-on: depot-ubuntu-latest-8
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@v4
@@ -293,11 +289,8 @@ jobs:
with:
file: "Cargo.toml"
field: "workspace.package.rust-version"
- uses: Swatinem/rust-cache@v2
- name: "Install Rust toolchain"
env:
MSRV: ${{ steps.msrv.outputs.value }}
run: rustup default "${MSRV}"
run: rustup default ${{ steps.msrv.outputs.value }}
- name: "Install mold"
uses: rui314/setup-mold@v1
- name: "Install cargo nextest"
@@ -308,28 +301,28 @@ jobs:
uses: taiki-e/install-action@v2
with:
tool: cargo-insta
- uses: Swatinem/rust-cache@v2
- name: "Run tests"
shell: bash
env:
NEXTEST_PROFILE: "ci"
MSRV: ${{ steps.msrv.outputs.value }}
run: cargo "+${MSRV}" insta test --all-features --unreferenced reject --test-runner nextest
run: cargo +${{ steps.msrv.outputs.value }} insta test --all-features --unreferenced reject --test-runner nextest
cargo-fuzz-build:
name: "cargo fuzz build"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ github.ref == 'refs/heads/main' || needs.determine_changes.outputs.fuzz == 'true' || needs.determine_changes.outputs.code == 'true' }}
if: ${{ github.ref == 'refs/heads/main' || needs.determine_changes.outputs.fuzz == 'true' }}
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v2
with:
workspaces: "fuzz -> target"
- name: "Install Rust toolchain"
run: rustup show
- name: "Install cargo-binstall"
uses: cargo-bins/cargo-binstall@main
with:
@@ -345,7 +338,7 @@ jobs:
needs:
- cargo-test-linux
- determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && needs.determine_changes.outputs.parser == 'true' }}
if: ${{ needs.determine_changes.outputs.parser == 'true' }}
timeout-minutes: 20
env:
FORCE_COLOR: 1
@@ -353,7 +346,7 @@ jobs:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: astral-sh/setup-uv@v5
- uses: astral-sh/setup-uv@v4
- uses: actions/download-artifact@v4
name: Download Ruff binary to test
id: download-cached-binary
@@ -361,18 +354,16 @@ jobs:
name: ruff
path: ruff-to-test
- name: Fuzz
env:
DOWNLOAD_PATH: ${{ steps.download-cached-binary.outputs.download-path }}
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x "${DOWNLOAD_PATH}/ruff"
chmod +x ${{ steps.download-cached-binary.outputs.download-path }}/ruff
(
uvx \
--python="${PYTHON_VERSION}" \
--python=${{ env.PYTHON_VERSION }} \
--from=./python/py-fuzzer \
fuzz \
--test-executable="${DOWNLOAD_PATH}/ruff" \
--test-executable=${{ steps.download-cached-binary.outputs.download-path }}/ruff \
--bin=ruff \
0-500
)
@@ -381,22 +372,16 @@ jobs:
name: "test scripts"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 5
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: Swatinem/rust-cache@v2
- name: "Install Rust toolchain"
run: rustup component add rustfmt
# Run all code generation scripts, and verify that the current output is
# already checked into git.
- run: python crates/ruff_python_ast/generate.py
- run: python crates/ruff_python_formatter/generate.py
- run: test -z "$(git status --porcelain)"
# Verify that adding a plugin or rule produces clean code.
- run: ./scripts/add_rule.py --name DoTheThing --prefix F --code 999 --linter pyflakes
- uses: Swatinem/rust-cache@v2
- run: ./scripts/add_rule.py --name DoTheThing --prefix PL --code C0999 --linter pylint
- run: cargo check
- run: cargo fmt --all --check
- run: |
@@ -413,7 +398,7 @@ jobs:
- determine_changes
# Only runs on pull requests, since that is the only we way we can find the base version for comparison.
# Ecosystem check needs linter and/or formatter changes.
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && github.event_name == 'pull_request' && needs.determine_changes.outputs.code == 'true' }}
if: ${{ github.event_name == 'pull_request' && needs.determine_changes.outputs.code == 'true' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@v4
@@ -430,7 +415,7 @@ jobs:
name: ruff
path: target/debug
- uses: dawidd6/action-download-artifact@v8
- uses: dawidd6/action-download-artifact@v7
name: Download baseline Ruff binary
with:
name: ruff
@@ -444,72 +429,64 @@ jobs:
- name: Run `ruff check` stable ecosystem check
if: ${{ needs.determine_changes.outputs.linter == 'true' }}
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
chmod +x ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff
# Set pipefail to avoid hiding errors with tee
set -eo pipefail
ruff-ecosystem check ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown | tee ecosystem-result-check-stable
ruff-ecosystem check ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff --cache ./checkouts --output-format markdown | tee ecosystem-result-check-stable
cat ecosystem-result-check-stable > "$GITHUB_STEP_SUMMARY"
cat ecosystem-result-check-stable > $GITHUB_STEP_SUMMARY
echo "### Linter (stable)" > ecosystem-result
cat ecosystem-result-check-stable >> ecosystem-result
echo "" >> ecosystem-result
- name: Run `ruff check` preview ecosystem check
if: ${{ needs.determine_changes.outputs.linter == 'true' }}
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
chmod +x ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff
# Set pipefail to avoid hiding errors with tee
set -eo pipefail
ruff-ecosystem check ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-check-preview
ruff-ecosystem check ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-check-preview
cat ecosystem-result-check-preview > "$GITHUB_STEP_SUMMARY"
cat ecosystem-result-check-preview > $GITHUB_STEP_SUMMARY
echo "### Linter (preview)" >> ecosystem-result
cat ecosystem-result-check-preview >> ecosystem-result
echo "" >> ecosystem-result
- name: Run `ruff format` stable ecosystem check
if: ${{ needs.determine_changes.outputs.formatter == 'true' }}
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
chmod +x ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff
# Set pipefail to avoid hiding errors with tee
set -eo pipefail
ruff-ecosystem format ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown | tee ecosystem-result-format-stable
ruff-ecosystem format ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff --cache ./checkouts --output-format markdown | tee ecosystem-result-format-stable
cat ecosystem-result-format-stable > "$GITHUB_STEP_SUMMARY"
cat ecosystem-result-format-stable > $GITHUB_STEP_SUMMARY
echo "### Formatter (stable)" >> ecosystem-result
cat ecosystem-result-format-stable >> ecosystem-result
echo "" >> ecosystem-result
- name: Run `ruff format` preview ecosystem check
if: ${{ needs.determine_changes.outputs.formatter == 'true' }}
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
chmod +x ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff
# Set pipefail to avoid hiding errors with tee
set -eo pipefail
ruff-ecosystem format ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-format-preview
ruff-ecosystem format ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-format-preview
cat ecosystem-result-format-preview > "$GITHUB_STEP_SUMMARY"
cat ecosystem-result-format-preview > $GITHUB_STEP_SUMMARY
echo "### Formatter (preview)" >> ecosystem-result
cat ecosystem-result-format-preview >> ecosystem-result
echo "" >> ecosystem-result
@@ -547,7 +524,6 @@ jobs:
name: "python package"
runs-on: ubuntu-latest
timeout-minutes: 20
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') }}
steps:
- uses: actions/checkout@v4
with:
@@ -565,7 +541,7 @@ jobs:
args: --out dist
- name: "Test wheel"
run: |
pip install --force-reinstall --find-links dist "${PACKAGE_NAME}"
pip install --force-reinstall --find-links dist ${{ env.PACKAGE_NAME }}
ruff --help
python -m ruff --help
- name: "Remove wheels from cache"
@@ -582,9 +558,9 @@ jobs:
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: Swatinem/rust-cache@v2
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v2
- name: "Install pre-commit"
run: pip install pre-commit
- name: "Cache pre-commit"
@@ -594,14 +570,13 @@ jobs:
key: pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
- name: "Run pre-commit"
run: |
echo '```console' > "$GITHUB_STEP_SUMMARY"
echo '```console' > $GITHUB_STEP_SUMMARY
# Enable color output for pre-commit and remove it for the summary
# Use --hook-stage=manual to enable slower pre-commit hooks that are skipped by default
SKIP=cargo-fmt,clippy,dev-generate-all pre-commit run --all-files --show-diff-on-failure --color=always --hook-stage=manual | \
tee >(sed -E 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})*)?[mGK]//g' >> "$GITHUB_STEP_SUMMARY") >&1
exit_code="${PIPESTATUS[0]}"
echo '```' >> "$GITHUB_STEP_SUMMARY"
exit "$exit_code"
SKIP=cargo-fmt,clippy,dev-generate-all pre-commit run --all-files --show-diff-on-failure --color=always | \
tee >(sed -E 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})*)?[mGK]//g' >> $GITHUB_STEP_SUMMARY) >&1
exit_code=${PIPESTATUS[0]}
echo '```' >> $GITHUB_STEP_SUMMARY
exit $exit_code
docs:
name: "mkdocs"
@@ -616,7 +591,6 @@ jobs:
- uses: actions/setup-python@v5
with:
python-version: "3.13"
- uses: Swatinem/rust-cache@v2
- name: "Add SSH key"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
uses: webfactory/ssh-agent@v0.9.0
@@ -625,7 +599,8 @@ jobs:
- name: "Install Rust toolchain"
run: rustup show
- name: Install uv
uses: astral-sh/setup-uv@v5
uses: astral-sh/setup-uv@v4
- uses: Swatinem/rust-cache@v2
- name: "Install Insiders dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: uv pip install -r docs/requirements-insiders.txt --system
@@ -649,19 +624,20 @@ jobs:
name: "formatter instabilities and black similarity"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.formatter == 'true' || github.ref == 'refs/heads/main') }}
if: needs.determine_changes.outputs.formatter == 'true' || github.ref == 'refs/heads/main'
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: Swatinem/rust-cache@v2
- name: "Install Rust toolchain"
run: rustup show
- name: "Cache rust"
uses: Swatinem/rust-cache@v2
- name: "Run checks"
run: scripts/formatter_ecosystem_checks.sh
- name: "Github step summary"
run: cat target/formatter-ecosystem/stats.txt > "$GITHUB_STEP_SUMMARY"
run: cat target/formatter-ecosystem/stats.txt > $GITHUB_STEP_SUMMARY
- name: "Remove checkouts from cache"
run: rm -r target/formatter-ecosystem
@@ -672,7 +648,7 @@ jobs:
needs:
- cargo-test-linux
- determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
steps:
- uses: extractions/setup-just@v2
env:
@@ -700,13 +676,11 @@ jobs:
just install
- name: Run ruff-lsp tests
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: |
# Setup development binary
pip uninstall --yes ruff
chmod +x "${DOWNLOAD_PATH}/ruff"
export PATH="${DOWNLOAD_PATH}:${PATH}"
chmod +x ${{ steps.ruff-target.outputs.download-path }}/ruff
export PATH=${{ steps.ruff-target.outputs.download-path }}:$PATH
ruff version
just test
@@ -714,7 +688,7 @@ jobs:
benchmarks:
runs-on: ubuntu-22.04
needs: determine_changes
if: ${{ github.repository == 'astral-sh/ruff' && !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
if: ${{ github.repository == 'astral-sh/ruff' && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
timeout-minutes: 20
steps:
- name: "Checkout Branch"
@@ -722,8 +696,6 @@ jobs:
with:
persist-credentials: false
- uses: Swatinem/rust-cache@v2
- name: "Install Rust toolchain"
run: rustup show
@@ -732,6 +704,8 @@ jobs:
with:
tool: cargo-codspeed
- uses: Swatinem/rust-cache@v2
- name: "Build benchmarks"
run: cargo codspeed build --features codspeed -p ruff_benchmark
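
One idiom from the pre-commit step earlier in this workflow is easy to misread: piping pre-commit through `tee` would normally make the step exit with `tee`'s (successful) status, so the script captures the linter's exit code from `PIPESTATUS` and re-raises it. A simplified, self-contained sketch of that pattern (it omits the ANSI-stripping `sed` from the real step):

    - name: "Run pre-commit"
      run: |
        # tee would mask a pre-commit failure, so take the exit code of the
        # first command in the pipeline and exit with it explicitly.
        pre-commit run --all-files --color=always | tee -a "$GITHUB_STEP_SUMMARY"
        exit_code="${PIPESTATUS[0]}"
        exit "$exit_code"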


@@ -34,7 +34,7 @@ jobs:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: astral-sh/setup-uv@v5
- uses: astral-sh/setup-uv@v4
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
@@ -46,7 +46,6 @@ jobs:
run: cargo build --locked
- name: Fuzz
run: |
# shellcheck disable=SC2046
(
uvx \
--python=3.12 \
@@ -73,6 +72,6 @@ jobs:
owner: "astral-sh",
repo: "ruff",
title: `Daily parser fuzz failed on ${new Date().toDateString()}`,
body: "Run listed here: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
body: "Runs listed here: https://github.com/astral-sh/ruff/actions/workflows/daily_fuzz.yml",
labels: ["bug", "parser", "fuzzer"],
})


@@ -1,71 +0,0 @@
name: Daily property test run
on:
workflow_dispatch:
schedule:
- cron: "0 12 * * *"
pull_request:
paths:
- ".github/workflows/daily_property_tests.yaml"
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
FORCE_COLOR: 1
jobs:
property_tests:
name: Property tests
runs-on: ubuntu-latest
timeout-minutes: 20
# Don't run the cron job on forks:
if: ${{ github.repository == 'astral-sh/ruff' || github.event_name != 'schedule' }}
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@v1
- uses: Swatinem/rust-cache@v2
- name: Build Red Knot
# A release build takes longer (2 min vs 1 min), but the property tests run much faster in release
# mode (1.5 min vs 14 min), so the overall time is shorter with a release build.
run: cargo build --locked --release --package red_knot_python_semantic --tests
- name: Run property tests
shell: bash
run: |
export QUICKCHECK_TESTS=100000
for _ in {1..5}; do
cargo test --locked --release --package red_knot_python_semantic -- --ignored types::property_tests::stable
done
create-issue-on-failure:
name: Create an issue if the daily property test run surfaced any bugs
runs-on: ubuntu-latest
needs: property_tests
if: ${{ github.repository == 'astral-sh/ruff' && always() && github.event_name == 'schedule' && needs.property_tests.result == 'failure' }}
permissions:
issues: write
steps:
- uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
await github.rest.issues.create({
owner: "astral-sh",
repo: "ruff",
title: `Daily property test run failed on ${new Date().toDateString()}`,
body: "Run listed here: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
labels: ["bug", "red-knot", "testing"],
})


@@ -10,13 +10,14 @@ on:
description: The ecosystem workflow that triggers the workflow run
required: true
permissions:
pull-requests: write
jobs:
comment:
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- uses: dawidd6/action-download-artifact@v8
- uses: dawidd6/action-download-artifact@v7
name: Download pull request number
with:
name: pr-number
@@ -29,10 +30,10 @@ jobs:
run: |
if [[ -f pr-number ]]
then
echo "pr-number=$(<pr-number)" >> "$GITHUB_OUTPUT"
echo "pr-number=$(<pr-number)" >> $GITHUB_OUTPUT
fi
- uses: dawidd6/action-download-artifact@v8
- uses: dawidd6/action-download-artifact@v7
name: "Download ecosystem results"
id: download-ecosystem-result
if: steps.pr-number.outputs.pr-number
@@ -65,9 +66,9 @@ jobs:
cat pr/ecosystem/ecosystem-result >> comment.txt
echo "" >> comment.txt
echo 'comment<<EOF' >> "$GITHUB_OUTPUT"
cat comment.txt >> "$GITHUB_OUTPUT"
echo 'EOF' >> "$GITHUB_OUTPUT"
echo 'comment<<EOF' >> $GITHUB_OUTPUT
cat comment.txt >> $GITHUB_OUTPUT
echo 'EOF' >> $GITHUB_OUTPUT
- name: Find existing comment
uses: peter-evans/find-comment@v3
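
The `comment<<EOF` lines above use GitHub Actions' multi-line output syntax: everything between the `name<<EOF` marker and the closing `EOF` written to `$GITHUB_OUTPUT` becomes the value of that output. A small self-contained sketch of the pattern (step id and file name are illustrative):

    - name: Build comment body
      id: generate-comment
      run: |
        # Heredoc-style markers are required for multi-line step outputs.
        {
          echo 'comment<<EOF'
          cat comment.txt
          echo 'EOF'
        } >> "$GITHUB_OUTPUT"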


@@ -33,9 +33,8 @@ jobs:
python-version: 3.12
- name: "Set docs version"
env:
version: ${{ (inputs.plan != '' && fromJson(inputs.plan).announcement_tag) || inputs.ref }}
run: |
version="${{ (inputs.plan != '' && fromJson(inputs.plan).announcement_tag) || inputs.ref }}"
# if version is missing, use 'latest'
if [ -z "$version" ]; then
echo "Using 'latest' as version"
@@ -45,19 +44,21 @@ jobs:
# Use version as display name for now
display_name="$version"
echo "version=$version" >> "$GITHUB_ENV"
echo "display_name=$display_name" >> "$GITHUB_ENV"
echo "version=$version" >> $GITHUB_ENV
echo "display_name=$display_name" >> $GITHUB_ENV
- name: "Set branch name"
run: |
version="${{ env.version }}"
display_name="${{ env.display_name }}"
timestamp="$(date +%s)"
# create branch_display_name from display_name by replacing all
# characters disallowed in git branch names with hyphens
branch_display_name="$(echo "${display_name}" | tr -c '[:alnum:]._' '-' | tr -s '-')"
branch_display_name="$(echo "$display_name" | tr -c '[:alnum:]._' '-' | tr -s '-')"
echo "branch_name=update-docs-$branch_display_name-$timestamp" >> "$GITHUB_ENV"
echo "timestamp=$timestamp" >> "$GITHUB_ENV"
echo "branch_name=update-docs-$branch_display_name-$timestamp" >> $GITHUB_ENV
echo "timestamp=$timestamp" >> $GITHUB_ENV
- name: "Add SSH key"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
@@ -92,7 +93,9 @@ jobs:
run: mkdocs build --strict -f mkdocs.public.yml
- name: "Clone docs repo"
run: git clone https://${{ secrets.ASTRAL_DOCS_PAT }}@github.com/astral-sh/docs.git astral-docs
run: |
version="${{ env.version }}"
git clone https://${{ secrets.ASTRAL_DOCS_PAT }}@github.com/astral-sh/docs.git astral-docs
- name: "Copy docs"
run: rm -rf astral-docs/site/ruff && mkdir -p astral-docs/site && cp -r site/ruff astral-docs/site/
@@ -100,10 +103,12 @@ jobs:
- name: "Commit docs"
working-directory: astral-docs
run: |
branch_name="${{ env.branch_name }}"
git config user.name "astral-docs-bot"
git config user.email "176161322+astral-docs-bot@users.noreply.github.com"
git checkout -b "${branch_name}"
git checkout -b $branch_name
git add site/ruff
git commit -m "Update ruff documentation for $version"
@@ -112,8 +117,12 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.ASTRAL_DOCS_PAT }}
run: |
version="${{ env.version }}"
display_name="${{ env.display_name }}"
branch_name="${{ env.branch_name }}"
# set the PR title
pull_request_title="Update ruff documentation for ${display_name}"
pull_request_title="Update ruff documentation for $display_name"
# Delete any existing pull requests that are open for this version
# by checking against pull_request_title because the new PR will
@@ -122,15 +131,13 @@ jobs:
xargs -I {} gh pr close {}
# push the branch to GitHub
git push origin "${branch_name}"
git push origin $branch_name
# create the PR
gh pr create \
--base=main \
--head="${branch_name}" \
--title="${pull_request_title}" \
--body="Automated documentation update for ${display_name}" \
--label="documentation"
gh pr create --base main --head $branch_name \
--title "$pull_request_title" \
--body "Automated documentation update for $display_name" \
--label "documentation"
- name: "Merge Pull Request"
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
@@ -138,7 +145,9 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.ASTRAL_DOCS_PAT }}
run: |
branch_name="${{ env.branch_name }}"
# auto-merge the PR if the build was triggered by a release. Manual builds should be reviewed by a human.
# give the PR a few seconds to be created before trying to auto-merge it
sleep 10
gh pr merge --squash "${branch_name}"
gh pr merge --squash $branch_name


@@ -49,7 +49,7 @@ jobs:
working-directory: playground
- name: "Deploy to Cloudflare Pages"
if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
uses: cloudflare/wrangler-action@v3.13.1
uses: cloudflare/wrangler-action@v3.13.0
with:
apiToken: ${{ secrets.CF_API_TOKEN }}
accountId: ${{ secrets.CF_ACCOUNT_ID }}


@@ -22,7 +22,7 @@ jobs:
id-token: write
steps:
- name: "Install uv"
uses: astral-sh/setup-uv@v5
uses: astral-sh/setup-uv@v4
- uses: actions/download-artifact@v4
with:
pattern: wheels-*


@@ -31,7 +31,7 @@ jobs:
with:
repository: python/typeshed
path: typeshed
persist-credentials: false
persist-credentials: true
- name: Setup git
run: |
git config --global user.name typeshedbot
@@ -59,7 +59,7 @@ jobs:
run: |
cd ruff
git push --force origin typeshedbot/sync-typeshed
gh pr list --repo "$GITHUB_REPOSITORY" --head typeshedbot/sync-typeshed --json id --jq length | grep 1 && exit 0 # exit if there is existing pr
gh pr list --repo $GITHUB_REPOSITORY --head typeshedbot/sync-typeshed --json id --jq length | grep 1 && exit 0 # exit if there is existing pr
gh pr create --title "Sync vendored typeshed stubs" --body "Close and reopen this PR to trigger CI" --label "internal"
create-issue-on-failure:
@@ -78,6 +78,5 @@ jobs:
owner: "astral-sh",
repo: "ruff",
title: `Automated typeshed sync failed on ${new Date().toDateString()}`,
body: "Run listed here: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
labels: ["bug", "red-knot"],
body: "Runs are listed here: https://github.com/astral-sh/ruff/actions/workflows/sync_typeshed.yaml",
})

.github/zizmor.yml (19 changed lines)

@@ -1,19 +0,0 @@
# Configuration for the zizmor static analysis tool, run via pre-commit in CI
# https://woodruffw.github.io/zizmor/configuration/
#
# TODO: can we remove the ignores here so that our workflows are more secure?
rules:
dangerous-triggers:
ignore:
- pr-comment.yaml
cache-poisoning:
ignore:
- build-docker.yml
- publish-playground.yml
excessive-permissions:
# it's hard to test what the impact of removing these ignores would be
# without actually running the release workflow...
ignore:
- build-docker.yml
- publish-playground.yml
- publish-docs.yml

.gitignore (4 changed lines)

@@ -29,10 +29,6 @@ tracing.folded
tracing-flamechart.svg
tracing-flamegraph.svg
# insta
*.rs.pending-snap
###
# Rust.gitignore
###


@@ -21,11 +21,3 @@ MD014: false
MD024:
# Allow when nested under different parents e.g. CHANGELOG.md
siblings_only: true
# MD046/code-block-style
#
# Ignore this because it conflicts with the code block style used in content
# tabs of mkdocs-material which is to add a blank line after the content title.
#
# Ref: https://github.com/astral-sh/ruff/pull/15011#issuecomment-2544790854
MD046: false


@@ -2,10 +2,8 @@ fail_fast: false
exclude: |
(?x)^(
.github/workflows/release.yml|
crates/red_knot_vendored/vendor/.*|
crates/red_knot_project/resources/.*|
crates/ruff_benchmark/resources/.*|
crates/red_knot_workspace/resources/.*|
crates/ruff_linter/resources/.*|
crates/ruff_linter/src/rules/.*/snapshots/.*|
crates/ruff_notebook/resources/.*|
@@ -24,12 +22,13 @@ repos:
- id: validate-pyproject
- repo: https://github.com/executablebooks/mdformat
rev: 0.7.22
rev: 0.7.19
hooks:
- id: mdformat
additional_dependencies:
- mdformat-mkdocs==4.0.0
- mdformat-footnote==0.1.1
- mdformat-mkdocs
- mdformat-admon
- mdformat-footnote
exclude: |
(?x)^(
docs/formatter/black\.md
@@ -37,7 +36,7 @@ repos:
)$
- repo: https://github.com/igorshubovych/markdownlint-cli
rev: v0.44.0
rev: v0.43.0
hooks:
- id: markdownlint-fix
exclude: |
@@ -57,10 +56,10 @@ repos:
.*?invalid(_.+)*_syntax\.md
)$
additional_dependencies:
- black==25.1.0
- black==24.10.0
- repo: https://github.com/crate-ci/typos
rev: v1.29.5
rev: v1.28.2
hooks:
- id: typos
@@ -74,7 +73,7 @@ repos:
pass_filenames: false # This makes it a lot faster
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.9.5
rev: v0.8.2
hooks:
- id: ruff-format
- id: ruff
@@ -89,37 +88,20 @@ repos:
- id: prettier
types: [yaml]
# zizmor detects security vulnerabilities in GitHub Actions workflows.
# Additional configuration for the tool is found in `.github/zizmor.yml`
- repo: https://github.com/woodruffw/zizmor-pre-commit
rev: v1.3.1
rev: v0.8.0
hooks:
- id: zizmor
# `release.yml` is autogenerated by `dist`; security issues need to be fixed there
# (https://opensource.axo.dev/cargo-dist/)
exclude: .github/workflows/release.yml
# We could consider enabling the low-severity warnings, but they're noisy
args: [--min-severity=medium]
- repo: https://github.com/python-jsonschema/check-jsonschema
rev: 0.31.1
rev: 0.30.0
hooks:
- id: check-github-workflows
# `actionlint` hook, for verifying correct syntax in GitHub Actions workflows.
# Some additional configuration for `actionlint` can be found in `.github/actionlint.yaml`.
- repo: https://github.com/rhysd/actionlint
rev: v1.7.7
hooks:
- id: actionlint
stages:
# This hook is disabled by default, since it's quite slow.
# To run all hooks *including* this hook, use `uvx pre-commit run -a --hook-stage=manual`.
# To run *just* this hook, use `uvx pre-commit run -a actionlint --hook-stage=manual`.
- manual
args:
- "-ignore=SC2129" # ignorable stylistic lint from shellcheck
- "-ignore=SC2016" # another shellcheck lint: seems to have false positives?
additional_dependencies:
# actionlint has a shellcheck integration which extracts shell scripts in `run:` steps from GitHub Actions
# and checks these with shellcheck. This is arguably its most useful feature,
# but the integration only works if shellcheck is installed
- "github.com/wasilibs/go-shellcheck/cmd/shellcheck@v0.10.0"
ci:
skip: [cargo-fmt, dev-generate-all]


@@ -1,9 +1,5 @@
# Breaking Changes
## 0.9.0
Ruff now formats your code according to the 2025 style guide. As a result, your code might now get formatted differently. See the [changelog](./CHANGELOG.md#090) for a detailed list of changes.
## 0.8.0
- **Default to Python 3.9**


@@ -1,477 +1,5 @@
# Changelog
## 0.9.6
### Preview features
- \[`airflow`\] Add `external_task.{ExternalTaskMarker, ExternalTaskSensor}` for `AIR302` ([#16014](https://github.com/astral-sh/ruff/pull/16014))
- \[`flake8-builtins`\] Make strict module name comparison optional (`A005`) ([#15951](https://github.com/astral-sh/ruff/pull/15951))
- \[`flake8-pyi`\] Extend fix to Python \<= 3.9 for `redundant-none-literal` (`PYI061`) ([#16044](https://github.com/astral-sh/ruff/pull/16044))
- \[`pylint`\] Also report when the object isn't a literal (`PLE1310`) ([#15985](https://github.com/astral-sh/ruff/pull/15985))
- \[`ruff`\] Implement `indented-form-feed` (`RUF054`) ([#16049](https://github.com/astral-sh/ruff/pull/16049))
- \[`ruff`\] Skip type definitions for `missing-f-string-syntax` (`RUF027`) ([#16054](https://github.com/astral-sh/ruff/pull/16054))
### Rule changes
- \[`flake8-annotations`\] Correct syntax for `typing.Union` in suggested return type fixes for `ANN20x` rules ([#16025](https://github.com/astral-sh/ruff/pull/16025))
- \[`flake8-builtins`\] Match upstream module name comparison (`A005`) ([#16006](https://github.com/astral-sh/ruff/pull/16006))
- \[`flake8-comprehensions`\] Detect overshadowed `list`/`set`/`dict`, ignore variadics and named expressions (`C417`) ([#15955](https://github.com/astral-sh/ruff/pull/15955))
- \[`flake8-pie`\] Remove following comma correctly when the unpacked dictionary is empty (`PIE800`) ([#16008](https://github.com/astral-sh/ruff/pull/16008))
- \[`flake8-simplify`\] Only trigger `SIM401` on known dictionaries ([#15995](https://github.com/astral-sh/ruff/pull/15995))
- \[`pylint`\] Do not report calls when object type and argument type mismatch, remove custom escape handling logic (`PLE1310`) ([#15984](https://github.com/astral-sh/ruff/pull/15984))
- \[`pyupgrade`\] Comments within parenthesized value ranges should not affect applicability (`UP040`) ([#16027](https://github.com/astral-sh/ruff/pull/16027))
- \[`pyupgrade`\] Don't introduce invalid syntax when upgrading old-style type aliases with parenthesized multiline values (`UP040`) ([#16026](https://github.com/astral-sh/ruff/pull/16026))
- \[`pyupgrade`\] Ensure we do not rename two type parameters to the same name (`UP049`) ([#16038](https://github.com/astral-sh/ruff/pull/16038))
- \[`pyupgrade`\] \[`ruff`\] Don't apply renamings if the new name is shadowed in a scope of one of the references to the binding (`UP049`, `RUF052`) ([#16032](https://github.com/astral-sh/ruff/pull/16032))
- \[`ruff`\] Update `RUF009` to behave similar to `B008` and ignore attributes with immutable types ([#16048](https://github.com/astral-sh/ruff/pull/16048))
### Server
- Root exclusions in the server to project root ([#16043](https://github.com/astral-sh/ruff/pull/16043))
### Bug fixes
- \[`flake8-datetime`\] Ignore `.replace()` calls while looking for `.astimezone` ([#16050](https://github.com/astral-sh/ruff/pull/16050))
- \[`flake8-type-checking`\] Avoid `TC004` false positive where the runtime definition is provided by `__getattr__` ([#16052](https://github.com/astral-sh/ruff/pull/16052))
### Documentation
- Improve `ruff-lsp` migration document ([#16072](https://github.com/astral-sh/ruff/pull/16072))
- Undeprecate `ruff.nativeServer` ([#16039](https://github.com/astral-sh/ruff/pull/16039))
## 0.9.5
### Preview features
- Recognize all symbols named `TYPE_CHECKING` for `in_type_checking_block` ([#15719](https://github.com/astral-sh/ruff/pull/15719))
- \[`flake8-comprehensions`\] Handle builtins at top of file correctly for `unnecessary-dict-comprehension-for-iterable` (`C420`) ([#15837](https://github.com/astral-sh/ruff/pull/15837))
- \[`flake8-logging`\] `.exception()` and `exc_info=` outside exception handlers (`LOG004`, `LOG014`) ([#15799](https://github.com/astral-sh/ruff/pull/15799))
- \[`flake8-pyi`\] Fix incorrect behaviour of `custom-typevar-return-type` preview-mode autofix if `typing` was already imported (`PYI019`) ([#15853](https://github.com/astral-sh/ruff/pull/15853))
- \[`flake8-pyi`\] Fix more complex cases (`PYI019`) ([#15821](https://github.com/astral-sh/ruff/pull/15821))
- \[`flake8-pyi`\] Make `PYI019` autofixable for `.py` files in preview mode as well as stubs ([#15889](https://github.com/astral-sh/ruff/pull/15889))
- \[`flake8-pyi`\] Remove type parameter correctly when it is the last (`PYI019`) ([#15854](https://github.com/astral-sh/ruff/pull/15854))
- \[`pylint`\] Fix missing parens in unsafe fix for `unnecessary-dunder-call` (`PLC2801`) ([#15762](https://github.com/astral-sh/ruff/pull/15762))
- \[`pyupgrade`\] Better messages and diagnostic range (`UP015`) ([#15872](https://github.com/astral-sh/ruff/pull/15872))
- \[`pyupgrade`\] Rename private type parameters in PEP 695 generics (`UP049`) ([#15862](https://github.com/astral-sh/ruff/pull/15862))
- \[`refurb`\] Also report non-name expressions (`FURB169`) ([#15905](https://github.com/astral-sh/ruff/pull/15905))
- \[`refurb`\] Mark fix as unsafe if there are comments (`FURB171`) ([#15832](https://github.com/astral-sh/ruff/pull/15832))
- \[`ruff`\] Classes with mixed type variable style (`RUF053`) ([#15841](https://github.com/astral-sh/ruff/pull/15841))
- \[`airflow`\] `BashOperator` has been moved to `airflow.providers.standard.operators.bash.BashOperator` (`AIR302`) ([#15922](https://github.com/astral-sh/ruff/pull/15922))
- \[`flake8-pyi`\] Add autofix for unused-private-type-var (`PYI018`) ([#15999](https://github.com/astral-sh/ruff/pull/15999))
- \[`flake8-pyi`\] Significantly improve accuracy of `PYI019` if preview mode is enabled ([#15888](https://github.com/astral-sh/ruff/pull/15888))
### Rule changes
- Preserve triple quotes and prefixes for strings ([#15818](https://github.com/astral-sh/ruff/pull/15818))
- \[`flake8-comprehensions`\] Skip when `TypeError` present from too many (kw)args for `C410`,`C411`, and `C418` ([#15838](https://github.com/astral-sh/ruff/pull/15838))
- \[`flake8-pyi`\] Rename `PYI019` and improve its diagnostic message ([#15885](https://github.com/astral-sh/ruff/pull/15885))
- \[`pep8-naming`\] Ignore `@override` methods (`N803`) ([#15954](https://github.com/astral-sh/ruff/pull/15954))
- \[`pyupgrade`\] Reuse replacement logic from `UP046` and `UP047` to preserve more comments (`UP040`) ([#15840](https://github.com/astral-sh/ruff/pull/15840))
- \[`ruff`\] Analyze deferred annotations before enforcing `mutable-(data)class-default` and `function-call-in-dataclass-default-argument` (`RUF008`,`RUF009`,`RUF012`) ([#15921](https://github.com/astral-sh/ruff/pull/15921))
- \[`pycodestyle`\] Exempt `sys.path += ...` calls (`E402`) ([#15980](https://github.com/astral-sh/ruff/pull/15980))
### Configuration
- Config error only when `flake8-import-conventions` alias conflicts with `isort.required-imports` bound name ([#15918](https://github.com/astral-sh/ruff/pull/15918))
- Workaround Even Better TOML crash related to `allOf` ([#15992](https://github.com/astral-sh/ruff/pull/15992))
### Bug fixes
- \[`flake8-comprehensions`\] Unnecessary `list` comprehension (rewrite as a `set` comprehension) (`C403`) - Handle extraneous parentheses around list comprehension ([#15877](https://github.com/astral-sh/ruff/pull/15877))
- \[`flake8-comprehensions`\] Handle trailing comma in fixes for `unnecessary-generator-list/set` (`C400`,`C401`) ([#15929](https://github.com/astral-sh/ruff/pull/15929))
- \[`flake8-pyi`\] Fix several correctness issues with `custom-type-var-return-type` (`PYI019`) ([#15851](https://github.com/astral-sh/ruff/pull/15851))
- \[`pep8-naming`\] Consider any number of leading underscore for `N801` ([#15988](https://github.com/astral-sh/ruff/pull/15988))
- \[`pyflakes`\] Visit forward annotations in `TypeAliasType` as types (`F401`) ([#15829](https://github.com/astral-sh/ruff/pull/15829))
- \[`pylint`\] Correct min/max auto-fix and suggestion for (`PL1730`) ([#15930](https://github.com/astral-sh/ruff/pull/15930))
- \[`refurb`\] Handle unparenthesized tuples correctly (`FURB122`, `FURB142`) ([#15953](https://github.com/astral-sh/ruff/pull/15953))
- \[`refurb`\] Avoid `None | None` as well as better detection and fix (`FURB168`) ([#15779](https://github.com/astral-sh/ruff/pull/15779))
### Documentation
- Add deprecation warning for `ruff-lsp` related settings ([#15850](https://github.com/astral-sh/ruff/pull/15850))
- Docs (`linter.md`): clarify that Python files are always searched for in subdirectories ([#15882](https://github.com/astral-sh/ruff/pull/15882))
- Fix a typo in `non_pep695_generic_class.rs` ([#15946](https://github.com/astral-sh/ruff/pull/15946))
- Improve Docs: Pylint subcategories' codes ([#15909](https://github.com/astral-sh/ruff/pull/15909))
- Remove non-existing `lint.extendIgnore` editor setting ([#15844](https://github.com/astral-sh/ruff/pull/15844))
- Update black deviations ([#15928](https://github.com/astral-sh/ruff/pull/15928))
- Mention `UP049` in `UP046` and `UP047`, add `See also` section to `UP040` ([#15956](https://github.com/astral-sh/ruff/pull/15956))
- Add instance variable examples to `RUF012` ([#15982](https://github.com/astral-sh/ruff/pull/15982))
- Explain precedence for `ignore` and `select` config ([#15883](https://github.com/astral-sh/ruff/pull/15883))
## 0.9.4
### Preview features
- \[`airflow`\] Extend airflow context parameter check for `BaseOperator.execute` (`AIR302`) ([#15713](https://github.com/astral-sh/ruff/pull/15713))
- \[`airflow`\] Update `AIR302` to check for deprecated context keys ([#15144](https://github.com/astral-sh/ruff/pull/15144))
- \[`flake8-bandit`\] Permit suspicious imports within stub files (`S4`) ([#15822](https://github.com/astral-sh/ruff/pull/15822))
- \[`pylint`\] Do not trigger `PLR6201` on empty collections ([#15732](https://github.com/astral-sh/ruff/pull/15732))
- \[`refurb`\] Do not emit diagnostic when loop variables are used outside loop body (`FURB122`) ([#15757](https://github.com/astral-sh/ruff/pull/15757))
- \[`ruff`\] Add support for more `re` patterns (`RUF055`) ([#15764](https://github.com/astral-sh/ruff/pull/15764))
- \[`ruff`\] Check for shadowed `map` before suggesting fix (`RUF058`) ([#15790](https://github.com/astral-sh/ruff/pull/15790))
- \[`ruff`\] Do not emit diagnostic when all arguments to `zip()` are variadic (`RUF058`) ([#15744](https://github.com/astral-sh/ruff/pull/15744))
- \[`ruff`\] Parenthesize fix when argument spans multiple lines for `unnecessary-round` (`RUF057`) ([#15703](https://github.com/astral-sh/ruff/pull/15703))
### Rule changes
- Preserve quote style in generated code ([#15726](https://github.com/astral-sh/ruff/pull/15726), [#15778](https://github.com/astral-sh/ruff/pull/15778), [#15794](https://github.com/astral-sh/ruff/pull/15794))
- \[`flake8-bugbear`\] Exempt `NewType` calls where the original type is immutable (`B008`) ([#15765](https://github.com/astral-sh/ruff/pull/15765))
- \[`pylint`\] Honor banned top-level imports by `TID253` in `PLC0415`. ([#15628](https://github.com/astral-sh/ruff/pull/15628))
- \[`pyupgrade`\] Ignore `is_typeddict` and `TypedDict` for `deprecated-import` (`UP035`) ([#15800](https://github.com/astral-sh/ruff/pull/15800))
### CLI
- Fix formatter warning message for `flake8-quotes` option ([#15788](https://github.com/astral-sh/ruff/pull/15788))
- Implement tab autocomplete for `ruff config` ([#15603](https://github.com/astral-sh/ruff/pull/15603))
### Bug fixes
- \[`flake8-comprehensions`\] Do not emit `unnecessary-map` diagnostic when lambda has different arity (`C417`) ([#15802](https://github.com/astral-sh/ruff/pull/15802))
- \[`flake8-comprehensions`\] Parenthesize `sorted` when needed for `unnecessary-call-around-sorted` (`C413`) ([#15825](https://github.com/astral-sh/ruff/pull/15825))
- \[`pyupgrade`\] Handle end-of-line comments for `quoted-annotation` (`UP037`) ([#15824](https://github.com/astral-sh/ruff/pull/15824))
### Documentation
- Add missing config docstrings ([#15803](https://github.com/astral-sh/ruff/pull/15803))
- Add references to `trio.run_process` and `anyio.run_process` ([#15761](https://github.com/astral-sh/ruff/pull/15761))
- Use `uv init --lib` in tutorial ([#15718](https://github.com/astral-sh/ruff/pull/15718))
## 0.9.3
### Preview features
- \[`airflow`\] Argument `fail_stop` in DAG has been renamed as `fail_fast` (`AIR302`) ([#15633](https://github.com/astral-sh/ruff/pull/15633))
- \[`airflow`\] Extend `AIR303` with more symbols ([#15611](https://github.com/astral-sh/ruff/pull/15611))
- \[`flake8-bandit`\] Report all references to suspicious functions (`S3`) ([#15541](https://github.com/astral-sh/ruff/pull/15541))
- \[`flake8-pytest-style`\] Do not emit diagnostics for empty `for` loops (`PT012`, `PT031`) ([#15542](https://github.com/astral-sh/ruff/pull/15542))
- \[`flake8-simplify`\] Avoid double negations (`SIM103`) ([#15562](https://github.com/astral-sh/ruff/pull/15562))
- \[`pyflakes`\] Fix infinite loop with unused local import in `__init__.py` (`F401`) ([#15517](https://github.com/astral-sh/ruff/pull/15517))
- \[`pylint`\] Do not report methods with only one `EM101`-compatible `raise` (`PLR6301`) ([#15507](https://github.com/astral-sh/ruff/pull/15507))
- \[`pylint`\] Implement `redefined-slots-in-subclass` (`W0244`) ([#9640](https://github.com/astral-sh/ruff/pull/9640))
- \[`pyupgrade`\] Add rules to use PEP 695 generics in classes and functions (`UP046`, `UP047`) ([#15565](https://github.com/astral-sh/ruff/pull/15565), [#15659](https://github.com/astral-sh/ruff/pull/15659))
- \[`refurb`\] Implement `for-loop-writes` (`FURB122`) ([#10630](https://github.com/astral-sh/ruff/pull/10630))
- \[`ruff`\] Implement `needless-else` clause (`RUF047`) ([#15051](https://github.com/astral-sh/ruff/pull/15051))
- \[`ruff`\] Implement `starmap-zip` (`RUF058`) ([#15483](https://github.com/astral-sh/ruff/pull/15483))
### Rule changes
- \[`flake8-bugbear`\] Do not raise error if keyword argument is present and target-python version is less or equals than 3.9 (`B903`) ([#15549](https://github.com/astral-sh/ruff/pull/15549))
- \[`flake8-comprehensions`\] strip parentheses around generators in `unnecessary-generator-set` (`C401`) ([#15553](https://github.com/astral-sh/ruff/pull/15553))
- \[`flake8-pytest-style`\] Rewrite references to `.exception` (`PT027`) ([#15680](https://github.com/astral-sh/ruff/pull/15680))
- \[`flake8-simplify`\] Mark fixes as unsafe (`SIM201`, `SIM202`) ([#15626](https://github.com/astral-sh/ruff/pull/15626))
- \[`flake8-type-checking`\] Fix some safe fixes being labeled unsafe (`TC006`,`TC008`) ([#15638](https://github.com/astral-sh/ruff/pull/15638))
- \[`isort`\] Omit trailing whitespace in `unsorted-imports` (`I001`) ([#15518](https://github.com/astral-sh/ruff/pull/15518))
- \[`pydoclint`\] Allow ignoring one-line docstrings for `DOC` rules ([#13302](https://github.com/astral-sh/ruff/pull/13302))
- \[`pyflakes`\] Apply redefinition fixes by source code order (`F811`) ([#15575](https://github.com/astral-sh/ruff/pull/15575))
- \[`pyflakes`\] Avoid removing too many imports in `redefined-while-unused` (`F811`) ([#15585](https://github.com/astral-sh/ruff/pull/15585))
- \[`pyflakes`\] Group redefinition fixes by source statement (`F811`) ([#15574](https://github.com/astral-sh/ruff/pull/15574))
- \[`pylint`\] Include name of base class in message for `redefined-slots-in-subclass` (`W0244`) ([#15559](https://github.com/astral-sh/ruff/pull/15559))
- \[`ruff`\] Update fix for `RUF055` to use `var == value` ([#15605](https://github.com/astral-sh/ruff/pull/15605))
### Formatter
- Fix bracket spacing for single-element tuples in f-string expressions ([#15537](https://github.com/astral-sh/ruff/pull/15537))
- Fix unstable f-string formatting for expressions containing a trailing comma ([#15545](https://github.com/astral-sh/ruff/pull/15545))
### Performance
- Avoid quadratic membership check in import fixes ([#15576](https://github.com/astral-sh/ruff/pull/15576))
### Server
- Allow `unsafe-fixes` settings for code actions ([#15666](https://github.com/astral-sh/ruff/pull/15666))
### Bug fixes
- \[`flake8-bandit`\] Add missing single-line/dotall regex flag (`S608`) ([#15654](https://github.com/astral-sh/ruff/pull/15654))
- \[`flake8-import-conventions`\] Fix infinite loop between `ICN001` and `I002` (`ICN001`) ([#15480](https://github.com/astral-sh/ruff/pull/15480))
- \[`flake8-simplify`\] Do not emit diagnostics for expressions inside string type annotations (`SIM222`, `SIM223`) ([#15405](https://github.com/astral-sh/ruff/pull/15405))
- \[`pyflakes`\] Treat arguments passed to the `default=` parameter of `TypeVar` as type expressions (`F821`) ([#15679](https://github.com/astral-sh/ruff/pull/15679))
- \[`pyupgrade`\] Avoid syntax error when the iterable is a non-parenthesized tuple (`UP028`) ([#15543](https://github.com/astral-sh/ruff/pull/15543))
- \[`ruff`\] Exempt `NewType` calls where the original type is immutable (`RUF009`) ([#15588](https://github.com/astral-sh/ruff/pull/15588))
- Preserve raw string prefix and escapes in all codegen fixes ([#15694](https://github.com/astral-sh/ruff/pull/15694))
### Documentation
- Generate documentation redirects for lowercase rule codes ([#15564](https://github.com/astral-sh/ruff/pull/15564))
- `TRY300`: Add some extra notes on not catching exceptions you didn't expect ([#15036](https://github.com/astral-sh/ruff/pull/15036))
## 0.9.2
### Preview features
- \[`airflow`\] Fix typo "security_managr" to "security_manager" (`AIR303`) ([#15463](https://github.com/astral-sh/ruff/pull/15463))
- \[`airflow`\] Extend and fix `AIR302` rules ([#15525](https://github.com/astral-sh/ruff/pull/15525))
- \[`fastapi`\] Handle parameters with `Depends` correctly (`FAST003`) ([#15364](https://github.com/astral-sh/ruff/pull/15364))
- \[`flake8-pytest-style`\] Implement pytest.warns diagnostics (`PT029`, `PT030`, `PT031`) ([#15444](https://github.com/astral-sh/ruff/pull/15444))
- \[`flake8-pytest-style`\] Test function parameters with default arguments (`PT028`) ([#15449](https://github.com/astral-sh/ruff/pull/15449))
- \[`flake8-type-checking`\] Avoid false positives for `|` in `TC008` ([#15201](https://github.com/astral-sh/ruff/pull/15201))
### Rule changes
- \[`flake8-todos`\] Allow VSCode GitHub PR extension style links in `missing-todo-link` (`TD003`) ([#15519](https://github.com/astral-sh/ruff/pull/15519))
- \[`pyflakes`\] Show syntax error message for `F722` ([#15523](https://github.com/astral-sh/ruff/pull/15523))
### Formatter
- Fix curly bracket spacing around f-string expressions containing curly braces ([#15471](https://github.com/astral-sh/ruff/pull/15471))
- Fix joining of f-strings with different quotes when using quote style `Preserve` ([#15524](https://github.com/astral-sh/ruff/pull/15524))
### Server
- Avoid indexing the same workspace multiple times ([#15495](https://github.com/astral-sh/ruff/pull/15495))
- Display context for `ruff.configuration` errors ([#15452](https://github.com/astral-sh/ruff/pull/15452))
### Configuration
- Remove `flatten` to improve deserialization error messages ([#15414](https://github.com/astral-sh/ruff/pull/15414))
### Bug fixes
- Parse triple-quoted string annotations as if parenthesized ([#15387](https://github.com/astral-sh/ruff/pull/15387))
- \[`fastapi`\] Update `Annotated` fixes (`FAST002`) ([#15462](https://github.com/astral-sh/ruff/pull/15462))
- \[`flake8-bandit`\] Check for `builtins` instead of `builtin` (`S102`, `PTH123`) ([#15443](https://github.com/astral-sh/ruff/pull/15443))
- \[`flake8-pathlib`\] Fix `--select` for `os-path-dirname` (`PTH120`) ([#15446](https://github.com/astral-sh/ruff/pull/15446))
- \[`ruff`\] Fix false positive on global keyword (`RUF052`) ([#15235](https://github.com/astral-sh/ruff/pull/15235))
## 0.9.1
### Preview features
- \[`pycodestyle`\] Run `too-many-newlines-at-end-of-file` on each cell in notebooks (`W391`) ([#15308](https://github.com/astral-sh/ruff/pull/15308))
- \[`ruff`\] Omit diagnostic for shadowed private function parameters in `used-dummy-variable` (`RUF052`) ([#15376](https://github.com/astral-sh/ruff/pull/15376))
### Rule changes
- \[`flake8-bugbear`\] Improve `assert-raises-exception` message (`B017`) ([#15389](https://github.com/astral-sh/ruff/pull/15389))
### Formatter
- Preserve trailing end-of line comments for the last string literal in implicitly concatenated strings ([#15378](https://github.com/astral-sh/ruff/pull/15378))
### Server
- Fix a bug where the server and client notebooks were out of sync after reordering cells ([#15398](https://github.com/astral-sh/ruff/pull/15398))
### Bug fixes
- \[`flake8-pie`\] Correctly remove wrapping parentheses (`PIE800`) ([#15394](https://github.com/astral-sh/ruff/pull/15394))
- \[`pyupgrade`\] Handle comments and multiline expressions correctly (`UP037`) ([#15337](https://github.com/astral-sh/ruff/pull/15337))
## 0.9.0
Check out the [blog post](https://astral.sh/blog/ruff-v0.9.0) for a migration guide and overview of the changes!
### Breaking changes
Ruff now formats your code according to the 2025 style guide. As a result, your code might now get formatted differently. See the formatter section for a detailed list of changes.
This release doesn't remove or remap any existing stable rules.
### Stabilization
The following rules have been stabilized and are no longer in preview:
- [`stdlib-module-shadowing`](https://docs.astral.sh/ruff/rules/stdlib-module-shadowing/) (`A005`).
This rule has also been renamed: previously, it was called `builtin-module-shadowing`.
- [`builtin-lambda-argument-shadowing`](https://docs.astral.sh/ruff/rules/builtin-lambda-argument-shadowing/) (`A006`)
- [`slice-to-remove-prefix-or-suffix`](https://docs.astral.sh/ruff/rules/slice-to-remove-prefix-or-suffix/) (`FURB188`)
- [`boolean-chained-comparison`](https://docs.astral.sh/ruff/rules/boolean-chained-comparison/) (`PLR1716`)
- [`decimal-from-float-literal`](https://docs.astral.sh/ruff/rules/decimal-from-float-literal/) (`RUF032`)
- [`post-init-default`](https://docs.astral.sh/ruff/rules/post-init-default/) (`RUF033`)
- [`useless-if-else`](https://docs.astral.sh/ruff/rules/useless-if-else/) (`RUF034`)
The following behaviors have been stabilized:
- [`pytest-parametrize-names-wrong-type`](https://docs.astral.sh/ruff/rules/pytest-parametrize-names-wrong-type/) (`PT006`): Detect [`pytest.parametrize`](https://docs.pytest.org/en/7.1.x/how-to/parametrize.html#parametrize) calls outside decorators and calls with keyword arguments.
- [`module-import-not-at-top-of-file`](https://docs.astral.sh/ruff/rules/module-import-not-at-top-of-file/) (`E402`): Ignore [`pytest.importorskip`](https://docs.pytest.org/en/7.1.x/reference/reference.html#pytest-importorskip) calls between import statements.
- [`mutable-dataclass-default`](https://docs.astral.sh/ruff/rules/mutable-dataclass-default/) (`RUF008`) and [`function-call-in-dataclass-default-argument`](https://docs.astral.sh/ruff/rules/function-call-in-dataclass-default-argument/) (`RUF009`): Add support for [`attrs`](https://www.attrs.org/en/stable/).
- [`bad-version-info-comparison`](https://docs.astral.sh/ruff/rules/bad-version-info-comparison/) (`PYI006`): Extend the rule to check non-stub files.
The following fixes or improvements to fixes have been stabilized:
- [`redundant-numeric-union`](https://docs.astral.sh/ruff/rules/redundant-numeric-union/) (`PYI041`)
- [`duplicate-union-members`](https://docs.astral.sh/ruff/rules/duplicate-union-member/) (`PYI016`)
### Formatter
This release introduces the new 2025 stable style ([#13371](https://github.com/astral-sh/ruff/issues/13371)), stabilizing the following changes (two of them are sketched in the example after this list):
- Format expressions in f-string elements ([#7594](https://github.com/astral-sh/ruff/issues/7594))
- Alternate quotes for strings inside f-strings ([#13860](https://github.com/astral-sh/ruff/pull/13860))
- Preserve the casing of hex codes in f-string debug expressions ([#14766](https://github.com/astral-sh/ruff/issues/14766))
- Choose the quote style for each string literal in an implicitly concatenated f-string rather than for the entire string ([#13539](https://github.com/astral-sh/ruff/pull/13539))
- Automatically join an implicitly concatenated string into a single string literal if it fits on a single line ([#9457](https://github.com/astral-sh/ruff/issues/9457))
- Remove the [`ISC001`](https://docs.astral.sh/ruff/rules/single-line-implicit-string-concatenation/) incompatibility warning ([#15123](https://github.com/astral-sh/ruff/pull/15123))
- Prefer parenthesizing the `assert` message over breaking the assertion expression ([#9457](https://github.com/astral-sh/ruff/issues/9457))
- Automatically parenthesize over-long `if` guards in `match` `case` clauses ([#13513](https://github.com/astral-sh/ruff/pull/13513))
- More consistent formatting for `match` `case` patterns ([#6933](https://github.com/astral-sh/ruff/issues/6933))
- Avoid unnecessary parentheses around return type annotations ([#13381](https://github.com/astral-sh/ruff/pull/13381))
- Keep the opening parentheses on the same line as the `if` keyword for comprehensions where the condition has a leading comment ([#12282](https://github.com/astral-sh/ruff/pull/12282))
- More consistent formatting for `with` statements with a single context manager for Python 3.8 or older ([#10276](https://github.com/astral-sh/ruff/pull/10276))
- Correctly calculate the line-width for code blocks in docstrings when using `max-doc-code-line-length = "dynamic"` ([#13523](https://github.com/astral-sh/ruff/pull/13523))
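As a rough before/after illustration of two of the stabilized changes above (joining implicit concatenations and parenthesizing `assert` messages): this is a hand-written sketch, not verbatim formatter output, and the exact breaks depend on the configured line length.
```python
def is_valid(config: dict) -> bool:
    return bool(config)


configuration = {"strict": True}

# Previously, an implicit concatenation could stay split even when it fit on
# one line, and an over-long assert tended to break the asserted expression:
greeting = (
    "Hello, "
    "world!"
)
assert is_valid(
    configuration
), "the configuration should have been validated before reaching this point"

# Under the 2025 style, the literals are joined and the assert message is
# parenthesized instead of breaking the assertion expression:
greeting = "Hello, world!"
assert is_valid(configuration), (
    "the configuration should have been validated before reaching this point"
)
```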
### Preview features
- \[`flake8-bugbear`\] Implement `class-as-data-structure` (`B903`) ([#9601](https://github.com/astral-sh/ruff/pull/9601))
- \[`flake8-type-checking`\] Apply `quoted-type-alias` more eagerly in `TYPE_CHECKING` blocks and ignore it in stubs (`TC008`) ([#15180](https://github.com/astral-sh/ruff/pull/15180))
- \[`pylint`\] Ignore `eq-without-hash` in stub files (`PLW1641`) ([#15310](https://github.com/astral-sh/ruff/pull/15310))
- \[`pyupgrade`\] Split `UP007` into two individual rules: `UP007` for `Union` and `UP045` for `Optional` (`UP007`, `UP045`) ([#15313](https://github.com/astral-sh/ruff/pull/15313)); see the sketch after this list
- \[`ruff`\] New rule that detects classes that are both an enum and a `dataclass` (`RUF049`) ([#15299](https://github.com/astral-sh/ruff/pull/15299))
- \[`ruff`\] Recode `RUF025` to `RUF037` (`RUF037`) ([#15258](https://github.com/astral-sh/ruff/pull/15258))
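A minimal sketch of the annotations affected by the `UP007`/`UP045` split above; the function names are made up, and the modern spellings require Python 3.10+ (or `from __future__ import annotations`):
```python
from typing import Optional, Union


# After the split, `Optional[...]` annotations are reported by UP045 ...
def find_user(user_id: int) -> Optional[str]:
    return None


# ... while `Union[...]` annotations stay under UP007.
def parse(raw: Union[str, bytes]) -> str:
    return raw.decode() if isinstance(raw, bytes) else raw


# The suggested PEP 604 spellings:
def find_user_new(user_id: int) -> str | None:
    return None


def parse_new(raw: str | bytes) -> str:
    return raw.decode() if isinstance(raw, bytes) else raw
```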
### Rule changes
- \[`flake8-builtins`\] Ignore [`stdlib-module-shadowing`](https://docs.astral.sh/ruff/rules/stdlib-module-shadowing/) in stub files (`A005`) ([#15350](https://github.com/astral-sh/ruff/pull/15350))
- \[`flake8-return`\] Add support for functions returning `typing.Never` (`RET503`) ([#15298](https://github.com/astral-sh/ruff/pull/15298))
### Server
- Improve the observability by removing the need for the ["trace" value](https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#traceValue) to turn on or off logging. The server logging is solely controlled using the [`logLevel` server setting](https://docs.astral.sh/ruff/editors/settings/#loglevel)
which defaults to `info`. This addresses the issue where users were notified about an error and told to consult the log, but it didn't contain any messages. ([#15232](https://github.com/astral-sh/ruff/pull/15232))
- Ignore diagnostics from other sources for code action requests ([#15373](https://github.com/astral-sh/ruff/pull/15373))
### CLI
- Improve the error message for `--config key=value` when the `key` is for a table and it's a simple `value`
### Bug fixes
- \[`eradicate`\] Ignore metadata blocks directly followed by normal blocks (`ERA001`) ([#15330](https://github.com/astral-sh/ruff/pull/15330))
- \[`flake8-django`\] Recognize other magic methods (`DJ012`) ([#15365](https://github.com/astral-sh/ruff/pull/15365))
- \[`pycodestyle`\] Avoid false positives related to type aliases (`E252`) ([#15356](https://github.com/astral-sh/ruff/pull/15356))
- \[`pydocstyle`\] Avoid treating newline-separated sections as sub-sections (`D405`) ([#15311](https://github.com/astral-sh/ruff/pull/15311))
- \[`pyflakes`\] Remove call when removing final argument from `format` (`F523`) ([#15309](https://github.com/astral-sh/ruff/pull/15309))
- \[`refurb`\] Mark fix as unsafe when the right-hand side is a string (`FURB171`) ([#15273](https://github.com/astral-sh/ruff/pull/15273))
- \[`ruff`\] Treat `)` as a regex metacharacter (`RUF043`, `RUF055`) ([#15318](https://github.com/astral-sh/ruff/pull/15318))
- \[`ruff`\] Parenthesize the `int`-call argument when removing the `int` call would change semantics (`RUF046`) ([#15277](https://github.com/astral-sh/ruff/pull/15277))
## 0.8.6
### Preview features
- \[`format`\] Preserve multiline implicit concatenated strings in docstring positions ([#15126](https://github.com/astral-sh/ruff/pull/15126))
- \[`ruff`\] Add rule to detect empty literal in deque call (`RUF025`) ([#15104](https://github.com/astral-sh/ruff/pull/15104))
- \[`ruff`\] Avoid reporting when `ndigits` is possibly negative (`RUF057`) ([#15234](https://github.com/astral-sh/ruff/pull/15234))
### Rule changes
- \[`flake8-todos`\] Remove issue code length restriction (`TD003`) ([#15175](https://github.com/astral-sh/ruff/pull/15175))
- \[`pyflakes`\] Ignore errors in `@no_type_check` string annotations (`F722`, `F821`) ([#15215](https://github.com/astral-sh/ruff/pull/15215))
### CLI
- Show errors for attempted fixes only when passed `--verbose` ([#15237](https://github.com/astral-sh/ruff/pull/15237))
### Bug fixes
- \[`ruff`\] Avoid syntax error when removing int over multiple lines (`RUF046`) ([#15230](https://github.com/astral-sh/ruff/pull/15230))
- \[`pyupgrade`\] Revert "Add all PEP-585 names to `UP006` rule" ([#15250](https://github.com/astral-sh/ruff/pull/15250))
## 0.8.5
### Preview features
- \[`airflow`\] Extend names moved from core to provider (`AIR303`) ([#15145](https://github.com/astral-sh/ruff/pull/15145), [#15159](https://github.com/astral-sh/ruff/pull/15159), [#15196](https://github.com/astral-sh/ruff/pull/15196), [#15216](https://github.com/astral-sh/ruff/pull/15216))
- \[`airflow`\] Extend rule to check class attributes, methods, arguments (`AIR302`) ([#15054](https://github.com/astral-sh/ruff/pull/15054), [#15083](https://github.com/astral-sh/ruff/pull/15083))
- \[`fastapi`\] Update `FAST002` to check keyword-only arguments ([#15119](https://github.com/astral-sh/ruff/pull/15119))
- \[`flake8-type-checking`\] Disable `TC006` and `TC007` in stub files ([#15179](https://github.com/astral-sh/ruff/pull/15179))
- \[`pylint`\] Detect nested methods correctly (`PLW1641`) ([#15032](https://github.com/astral-sh/ruff/pull/15032))
- \[`ruff`\] Detect more strict-integer expressions (`RUF046`) ([#14833](https://github.com/astral-sh/ruff/pull/14833))
- \[`ruff`\] Implement `falsy-dict-get-fallback` (`RUF056`) ([#15160](https://github.com/astral-sh/ruff/pull/15160))
- \[`ruff`\] Implement `unnecessary-round` (`RUF057`) ([#14828](https://github.com/astral-sh/ruff/pull/14828))
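To illustrate the last entry, a small sketch of the kind of call `unnecessary-round` (`RUF057`) is described as targeting; the values are invented and the snippet is not taken from the Ruff documentation:
```python
count = 123

# Flagged as unnecessary: rounding a value that is already an integer is a
# no-op when `ndigits` is omitted or non-negative.
total = round(count)
total_again = round(count, 2)

# Not a no-op: a negative `ndigits` can change an integer (this yields 120).
rounded_down = round(count, -1)

# The simplification is simply the value itself.
total = count
```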
### Rule changes
- Visit PEP 764 inline `TypedDict` keys as non-type-expressions ([#15073](https://github.com/astral-sh/ruff/pull/15073))
- \[`flake8-comprehensions`\] Skip `C416` if comprehension contains unpacking ([#14909](https://github.com/astral-sh/ruff/pull/14909))
- \[`flake8-pie`\] Allow `cast(SomeType, ...)` (`PIE796`) ([#15141](https://github.com/astral-sh/ruff/pull/15141))
- \[`flake8-simplify`\] More precise inference for dictionaries (`SIM300`) ([#15164](https://github.com/astral-sh/ruff/pull/15164))
- \[`flake8-use-pathlib`\] Catch redundant joins in `PTH201` and avoid syntax errors ([#15177](https://github.com/astral-sh/ruff/pull/15177))
- \[`pycodestyle`\] Preserve original value format (`E731`) ([#15097](https://github.com/astral-sh/ruff/pull/15097))
- \[`pydocstyle`\] Split on first whitespace character (`D403`) ([#15082](https://github.com/astral-sh/ruff/pull/15082))
- \[`pyupgrade`\] Add all PEP-585 names to `UP006` rule ([#5454](https://github.com/astral-sh/ruff/pull/5454))
### Configuration
- \[`flake8-type-checking`\] Improve flexibility of `runtime-evaluated-decorators` ([#15204](https://github.com/astral-sh/ruff/pull/15204))
- \[`pydocstyle`\] Add setting to ignore missing documentation for `*args` and `**kwargs` parameters (`D417`) ([#15210](https://github.com/astral-sh/ruff/pull/15210))
- \[`ruff`\] Add an allowlist for `unsafe-markup-use` (`RUF035`) ([#15076](https://github.com/astral-sh/ruff/pull/15076))
### Bug fixes
- Fix type subscript on older python versions ([#15090](https://github.com/astral-sh/ruff/pull/15090))
- Use `TypeChecker` for detecting `fastapi` routes ([#15093](https://github.com/astral-sh/ruff/pull/15093))
- \[`pycodestyle`\] Avoid false positives and negatives related to type parameter default syntax (`E225`, `E251`) ([#15214](https://github.com/astral-sh/ruff/pull/15214))
### Documentation
- Fix incorrect doc in `shebang-not-executable` (`EXE001`) and add git+windows solution to executable bit ([#15208](https://github.com/astral-sh/ruff/pull/15208))
- Rename rules currently not conforming to naming convention ([#15102](https://github.com/astral-sh/ruff/pull/15102))
## 0.8.4
### Preview features
- \[`airflow`\] Extend `AIR302` with additional functions and classes ([#15015](https://github.com/astral-sh/ruff/pull/15015))
- \[`airflow`\] Implement `moved-to-provider-in-3` for modules that have been moved to Airflow providers (`AIR303`) ([#14764](https://github.com/astral-sh/ruff/pull/14764))
- \[`flake8-use-pathlib`\] Extend check for invalid path suffix to include the case `"."` (`PTH210`) ([#14902](https://github.com/astral-sh/ruff/pull/14902))
- \[`perflint`\] Fix panic in `PERF401` when list variable is after the `for` loop ([#14971](https://github.com/astral-sh/ruff/pull/14971))
- \[`perflint`\] Simplify finding the loop target in `PERF401` ([#15025](https://github.com/astral-sh/ruff/pull/15025))
- \[`pylint`\] Preserve original value format (`PLR6104`) ([#14978](https://github.com/astral-sh/ruff/pull/14978))
- \[`ruff`\] Avoid false positives for `RUF027` for typing context bindings ([#15037](https://github.com/astral-sh/ruff/pull/15037))
- \[`ruff`\] Check for ambiguous pattern passed to `pytest.raises()` (`RUF043`) ([#14966](https://github.com/astral-sh/ruff/pull/14966))
### Rule changes
- \[`flake8-bandit`\] Check `S105` for annotated assignment ([#15059](https://github.com/astral-sh/ruff/pull/15059))
- \[`flake8-pyi`\] More autofixes for `redundant-none-literal` (`PYI061`) ([#14872](https://github.com/astral-sh/ruff/pull/14872))
- \[`pydocstyle`\] Skip leading whitespace for `D403` ([#14963](https://github.com/astral-sh/ruff/pull/14963))
- \[`ruff`\] Skip `SQLModel` base classes for `mutable-class-default` (`RUF012`) ([#14949](https://github.com/astral-sh/ruff/pull/14949))
### Bug fixes
- \[`perflint`\] Parenthesize walrus expressions in autofix for `manual-list-comprehension` (`PERF401`) ([#15050](https://github.com/astral-sh/ruff/pull/15050))
### Server
- Check diagnostic refresh support from client capability which enables dynamic configuration for various editors ([#15014](https://github.com/astral-sh/ruff/pull/15014))
## 0.8.3
### Preview features
- Fix f-string formatting removing an overlong implicit concatenated string in the expression part ([#14811](https://github.com/astral-sh/ruff/pull/14811))
- \[`airflow`\] Add fix to remove deprecated keyword arguments (`AIR302`) ([#14887](https://github.com/astral-sh/ruff/pull/14887))
- \[`airflow`\] Extend rule to include deprecated names for Airflow 3.0 (`AIR302`) ([#14765](https://github.com/astral-sh/ruff/pull/14765) and [#14804](https://github.com/astral-sh/ruff/pull/14804))
- \[`flake8-bugbear`\] Improve error messages for `except*` (`B025`, `B029`, `B030`, `B904`) ([#14815](https://github.com/astral-sh/ruff/pull/14815))
- \[`flake8-bugbear`\] `itertools.batched()` without explicit `strict` (`B911`) ([#14408](https://github.com/astral-sh/ruff/pull/14408))
- \[`flake8-use-pathlib`\] Dotless suffix passed to `Path.with_suffix()` (`PTH210`) ([#14779](https://github.com/astral-sh/ruff/pull/14779))
- \[`pylint`\] Include parentheses and multiple comparators in check for `boolean-chained-comparison` (`PLR1716`) ([#14781](https://github.com/astral-sh/ruff/pull/14781))
- \[`ruff`\] Do not simplify `round()` calls (`RUF046`) ([#14832](https://github.com/astral-sh/ruff/pull/14832))
- \[`ruff`\] Don't emit `used-dummy-variable` on function parameters (`RUF052`) ([#14818](https://github.com/astral-sh/ruff/pull/14818))
- \[`ruff`\] Implement `if-key-in-dict-del` (`RUF051`) ([#14553](https://github.com/astral-sh/ruff/pull/14553)); see the sketch after this list
- \[`ruff`\] Mark autofix for `RUF052` as always unsafe ([#14824](https://github.com/astral-sh/ruff/pull/14824))
- \[`ruff`\] Teach autofix for `used-dummy-variable` about TypeVars etc. (`RUF052`) ([#14819](https://github.com/astral-sh/ruff/pull/14819))
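And a minimal sketch of the pattern `if-key-in-dict-del` (`RUF051`) from the list above targets; the dictionary and key are invented for illustration:
```python
headers = {"x-debug": "1", "content-type": "text/plain"}
key = "x-debug"

# Flagged by RUF051: a membership test followed immediately by `del` of the same key.
if key in headers:
    del headers[key]

# Equivalent single call that the rule points to:
headers.pop(key, None)
```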
### Rule changes
- \[`flake8-bugbear`\] Offer unsafe autofix for `no-explicit-stacklevel` (`B028`) ([#14829](https://github.com/astral-sh/ruff/pull/14829))
- \[`flake8-pyi`\] Skip all type definitions in `string-or-bytes-too-long` (`PYI053`) ([#14797](https://github.com/astral-sh/ruff/pull/14797))
- \[`pyupgrade`\] Do not report when a UTF-8 comment is followed by a non-UTF-8 one (`UP009`) ([#14728](https://github.com/astral-sh/ruff/pull/14728))
- \[`pyupgrade`\] Mark fixes for `convert-typed-dict-functional-to-class` and `convert-named-tuple-functional-to-class` as unsafe if they will remove comments (`UP013`, `UP014`) ([#14842](https://github.com/astral-sh/ruff/pull/14842))
### Bug fixes
- Raise syntax error for mixing `except` and `except*` ([#14895](https://github.com/astral-sh/ruff/pull/14895))
- \[`flake8-bugbear`\] Fix `B028` to allow `stacklevel` to be explicitly assigned as a positional argument ([#14868](https://github.com/astral-sh/ruff/pull/14868))
- \[`flake8-bugbear`\] Skip `B028` if `warnings.warn` is called with `*args` or `**kwargs` ([#14870](https://github.com/astral-sh/ruff/pull/14870))
- \[`flake8-comprehensions`\] Skip iterables with named expressions in `unnecessary-map` (`C417`) ([#14827](https://github.com/astral-sh/ruff/pull/14827))
- \[`flake8-pyi`\] Also remove `self` and `cls`'s annotation (`PYI034`) ([#14801](https://github.com/astral-sh/ruff/pull/14801))
- \[`flake8-pytest-style`\] Fix `pytest-parametrize-names-wrong-type` (`PT006`) to edit both `argnames` and `argvalues` if both of them are single-element tuples/lists ([#14699](https://github.com/astral-sh/ruff/pull/14699))
- \[`perflint`\] Improve autofix for `PERF401` ([#14369](https://github.com/astral-sh/ruff/pull/14369))
- \[`pylint`\] Fix `PLW1508` false positive for default string created via a mult operation ([#14841](https://github.com/astral-sh/ruff/pull/14841))
## 0.8.2
### Preview features

View File

@@ -467,7 +467,7 @@ cargo build --release && hyperfine --warmup 10 \
"./target/release/ruff check ./crates/ruff_linter/resources/test/cpython/ --no-cache -e --select W505,E501"
```
You can run `uv venv --project ./scripts/benchmarks`, activate the venv and then run `uv sync --project ./scripts/benchmarks` to create a working environment for the
You can run `poetry install` from `./scripts/benchmarks` to create a working environment for the
above. All reported benchmarks were computed using the versions specified by
`./scripts/benchmarks/pyproject.toml` on Python 3.11.

1573
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -13,7 +13,6 @@ license = "MIT"
[workspace.dependencies]
ruff = { path = "crates/ruff" }
ruff_annotate_snippets = { path = "crates/ruff_annotate_snippets" }
ruff_cache = { path = "crates/ruff_cache" }
ruff_db = { path = "crates/ruff_db", default-features = false }
ruff_diagnostics = { path = "crates/ruff_diagnostics" }
@@ -41,11 +40,10 @@ ruff_workspace = { path = "crates/ruff_workspace" }
red_knot_python_semantic = { path = "crates/red_knot_python_semantic" }
red_knot_server = { path = "crates/red_knot_server" }
red_knot_test = { path = "crates/red_knot_test" }
red_knot_project = { path = "crates/red_knot_project", default-features = false }
red_knot_workspace = { path = "crates/red_knot_workspace", default-features = false }
aho-corasick = { version = "1.1.3" }
anstream = { version = "0.6.18" }
anstyle = { version = "1.0.10" }
annotate-snippets = { version = "0.9.2", features = ["color"] }
anyhow = { version = "1.0.80" }
assert_fs = { version = "1.1.0" }
argfile = { version = "0.2.0" }
@@ -57,9 +55,9 @@ camino = { version = "1.1.7" }
chrono = { version = "0.4.35", default-features = false, features = ["clock"] }
clap = { version = "4.5.3", features = ["derive"] }
clap_complete_command = { version = "0.6.0" }
clearscreen = { version = "4.0.0" }
clearscreen = { version = "3.0.0" }
codspeed-criterion-compat = { version = "2.6.0", default-features = false }
colored = { version = "3.0.0" }
colored = { version = "2.1.0" }
console_error_panic_hook = { version = "0.1.7" }
console_log = { version = "1.0.0" }
countme = { version = "3.0.1" }
@@ -74,13 +72,11 @@ env_logger = { version = "0.11.0" }
etcetera = { version = "0.8.0" }
fern = { version = "0.7.0" }
filetime = { version = "0.2.23" }
getrandom = { version = "0.3.1" }
glob = { version = "0.3.1" }
globset = { version = "0.4.14" }
globwalk = { version = "0.9.1" }
hashbrown = { version = "0.15.0", default-features = false, features = [
"raw-entry",
"equivalent",
"inline-more",
] }
ignore = { version = "0.4.22" }
@@ -93,7 +89,7 @@ insta = { version = "1.35.1" }
insta-cmd = { version = "0.6.0" }
is-macro = { version = "0.3.5" }
is-wsl = { version = "0.4.0" }
itertools = { version = "0.14.0" }
itertools = { version = "0.13.0" }
js-sys = { version = "0.3.69" }
jod-thread = { version = "0.1.2" }
libc = { version = "0.2.153" }
@@ -107,7 +103,7 @@ matchit = { version = "0.8.1" }
memchr = { version = "2.7.1" }
mimalloc = { version = "0.1.39" }
natord = { version = "1.0.9" }
notify = { version = "8.0.0" }
notify = { version = "7.0.0" }
ordermap = { version = "0.5.0" }
path-absolutize = { version = "3.1.1" }
path-slash = { version = "0.2.1" }
@@ -118,12 +114,11 @@ proc-macro2 = { version = "1.0.79" }
pyproject-toml = { version = "0.13.4" }
quick-junit = { version = "0.5.0" }
quote = { version = "1.0.23" }
rand = { version = "0.9.0" }
rand = { version = "0.8.5" }
rayon = { version = "1.10.0" }
regex = { version = "1.10.2" }
rustc-hash = { version = "2.0.0" }
# When updating salsa, make sure to also update the revision in `fuzz/Cargo.toml`
salsa = { git = "https://github.com/salsa-rs/salsa.git", rev = "88a1d7774d78f048fbd77d40abca9ebd729fd1f0" }
salsa = { git = "https://github.com/salsa-rs/salsa.git", rev = "254c749b02cde2fd29852a7463a33e800b771758" }
schemars = { version = "0.8.16" }
seahash = { version = "4.1.0" }
serde = { version = "1.0.197", features = ["derive"] }
@@ -136,15 +131,9 @@ serde_with = { version = "3.6.0", default-features = false, features = [
shellexpand = { version = "3.0.0" }
similar = { version = "2.4.0", features = ["inline"] }
smallvec = { version = "1.13.2" }
snapbox = { version = "0.6.0", features = [
"diff",
"term-svg",
"cmd",
"examples",
] }
static_assertions = "1.1.0"
strum = { version = "0.27.0", features = ["strum_macros"] }
strum_macros = { version = "0.27.0" }
strum = { version = "0.26.0", features = ["strum_macros"] }
strum_macros = { version = "0.26.0" }
syn = { version = "2.0.55" }
tempfile = { version = "3.9.0" }
test-case = { version = "3.3.1" }
@@ -159,13 +148,13 @@ tracing-subscriber = { version = "0.3.18", default-features = false, features =
"fmt",
] }
tracing-tree = { version = "0.4.0" }
tryfn = { version = "0.2.1" }
typed-arena = { version = "2.0.2" }
unic-ucd-category = { version = "0.9" }
unicode-ident = { version = "1.0.12" }
unicode-width = { version = "0.2.0" }
unicode_names2 = { version = "1.2.2" }
unicode-normalization = { version = "0.1.23" }
ureq = { version = "2.9.6" }
url = { version = "2.5.0" }
uuid = { version = "1.6.1", features = [
"v4",
@@ -179,10 +168,6 @@ wasm-bindgen-test = { version = "0.3.42" }
wild = { version = "2" }
zip = { version = "0.6.6", default-features = false }
[workspace.metadata.cargo-shear]
ignored = ["getrandom"]
[workspace.lints.rust]
unsafe_code = "warn"
unreachable_pub = "warn"
@@ -225,9 +210,6 @@ redundant_clone = "warn"
debug_assert_with_mut_call = "warn"
unused_peekable = "warn"
# Diagnostics are not actionable: Enable once https://github.com/rust-lang/rust-clippy/issues/13774 is resolved.
large_stack_arrays = "allow"
[profile.release]
# Note that we set these explicitly, and these values
# were chosen based on a trade-off between compile times
@@ -315,11 +297,7 @@ local-artifacts-jobs = ["./build-binaries", "./build-docker"]
# Publish jobs to run in CI
publish-jobs = ["./publish-pypi", "./publish-wasm"]
# Post-announce jobs to run in CI
post-announce-jobs = [
"./notify-dependents",
"./publish-docs",
"./publish-playground",
]
post-announce-jobs = ["./notify-dependents", "./publish-docs", "./publish-playground"]
# Custom permissions for GitHub Jobs
github-custom-job-permissions = { "build-docker" = { packages = "write", contents = "read" }, "publish-wasm" = { contents = "read", id-token = "write", packages = "write" } }
# Whether to install an updater program

View File

@@ -116,21 +116,12 @@ For more, see the [documentation](https://docs.astral.sh/ruff/).
### Installation
Ruff is available as [`ruff`](https://pypi.org/project/ruff/) on PyPI.
Invoke Ruff directly with [`uvx`](https://docs.astral.sh/uv/):
```shell
uvx ruff check # Lint all files in the current directory.
uvx ruff format # Format all files in the current directory.
```
Or install Ruff with `uv` (recommended), `pip`, or `pipx`:
Ruff is available as [`ruff`](https://pypi.org/project/ruff/) on PyPI:
```shell
# With uv.
uv tool install ruff@latest # Install Ruff globally.
uv add --dev ruff # Or add Ruff to your project.
uv add --dev ruff # to add ruff to your project
uv tool install ruff # to install ruff globally
# With pip.
pip install ruff
@@ -149,8 +140,8 @@ curl -LsSf https://astral.sh/ruff/install.sh | sh
powershell -c "irm https://astral.sh/ruff/install.ps1 | iex"
# For a specific version.
curl -LsSf https://astral.sh/ruff/0.9.6/install.sh | sh
powershell -c "irm https://astral.sh/ruff/0.9.6/install.ps1 | iex"
curl -LsSf https://astral.sh/ruff/0.8.2/install.sh | sh
powershell -c "irm https://astral.sh/ruff/0.8.2/install.ps1 | iex"
```
You can also install Ruff via [Homebrew](https://formulae.brew.sh/formula/ruff), [Conda](https://anaconda.org/conda-forge/ruff),
@@ -183,7 +174,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com/) hook via [`ruff
```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.9.6
rev: v0.8.2
hooks:
# Run the linter.
- id: ruff
@@ -205,7 +196,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: astral-sh/ruff-action@v3
- uses: astral-sh/ruff-action@v1
```
### Configuration<a id="configuration"></a>
@@ -452,7 +443,6 @@ Ruff is used by a number of major open-source projects and companies, including:
- ING Bank ([popmon](https://github.com/ing-bank/popmon), [probatus](https://github.com/ing-bank/probatus))
- [Ibis](https://github.com/ibis-project/ibis)
- [ivy](https://github.com/unifyai/ivy)
- [JAX](https://github.com/jax-ml/jax)
- [Jupyter](https://github.com/jupyter-server/jupyter_server)
- [Kraken Tech](https://kraken.tech/)
- [LangChain](https://github.com/hwchase17/langchain)

View File

@@ -1,9 +1,10 @@
[files]
# https://github.com/crate-ci/typos/issues/868
extend-exclude = [
"crates/red_knot_vendored/vendor/**/*",
"**/resources/**/*",
"**/snapshots/**/*",
"crates/red_knot_vendored/vendor/**/*",
"**/resources/**/*",
"**/snapshots/**/*",
"crates/red_knot_workspace/src/workspace/pyproject/package_name.rs"
]
[default.extend-words]
@@ -20,10 +21,7 @@ Numer = "Numer" # Library name 'NumerBlox' in "Who's Using Ruff?"
[default]
extend-ignore-re = [
# Line ignore with trailing "spellchecker:disable-line"
"(?Rm)^.*#\\s*spellchecker:disable-line$",
"LICENSEs",
# Line ignore with trailing "spellchecker:disable-line"
"(?Rm)^.*#\\s*spellchecker:disable-line$",
"LICENSEs",
]
[default.extend-identifiers]
"FrIeNdLy" = "FrIeNdLy"

View File

@@ -13,7 +13,7 @@ license.workspace = true
[dependencies]
red_knot_python_semantic = { workspace = true }
red_knot_project = { workspace = true, features = ["zstd"] }
red_knot_workspace = { workspace = true, features = ["zstd"] }
red_knot_server = { workspace = true }
ruff_db = { workspace = true, features = ["os", "cache"] }
@@ -32,15 +32,9 @@ tracing-flame = { workspace = true }
tracing-tree = { workspace = true }
[dev-dependencies]
ruff_db = { workspace = true, features = ["testing"] }
ruff_python_trivia = { workspace = true }
insta = { workspace = true, features = ["filters"] }
insta-cmd = { workspace = true }
filetime = { workspace = true }
regex = { workspace = true }
tempfile = { workspace = true }
toml = { workspace = true }
ruff_db = { workspace = true, features = ["testing"] }
[lints]
workspace = true

View File

@@ -1,104 +0,0 @@
use std::{
fs,
path::{Path, PathBuf},
process::Command,
};
fn main() {
// The workspace root directory is not available without walking up the tree
// https://github.com/rust-lang/cargo/issues/3946
let workspace_root = Path::new(&std::env::var("CARGO_MANIFEST_DIR").unwrap())
.join("..")
.join("..");
commit_info(&workspace_root);
#[allow(clippy::disallowed_methods)]
let target = std::env::var("TARGET").unwrap();
println!("cargo::rustc-env=RUST_HOST_TARGET={target}");
}
fn commit_info(workspace_root: &Path) {
// If not in a git repository, do not attempt to retrieve commit information
let git_dir = workspace_root.join(".git");
if !git_dir.exists() {
return;
}
if let Some(git_head_path) = git_head(&git_dir) {
println!("cargo:rerun-if-changed={}", git_head_path.display());
let git_head_contents = fs::read_to_string(git_head_path);
if let Ok(git_head_contents) = git_head_contents {
// The contents are either a commit or a reference in the following formats
// - "<commit>" when the head is detached
// - "ref <ref>" when working on a branch
// If a commit, checking if the HEAD file has changed is sufficient
// If a ref, we need to add the head file for that ref to rebuild on commit
let mut git_ref_parts = git_head_contents.split_whitespace();
git_ref_parts.next();
if let Some(git_ref) = git_ref_parts.next() {
let git_ref_path = git_dir.join(git_ref);
println!("cargo:rerun-if-changed={}", git_ref_path.display());
}
}
}
let output = match Command::new("git")
.arg("log")
.arg("-1")
.arg("--date=short")
.arg("--abbrev=9")
.arg("--format=%H %h %cd %(describe)")
.output()
{
Ok(output) if output.status.success() => output,
_ => return,
};
let stdout = String::from_utf8(output.stdout).unwrap();
let mut parts = stdout.split_whitespace();
let mut next = || parts.next().unwrap();
let _commit_hash = next();
println!("cargo::rustc-env=RED_KNOT_COMMIT_SHORT_HASH={}", next());
println!("cargo::rustc-env=RED_KNOT_COMMIT_DATE={}", next());
// Describe can fail for some commits
// https://git-scm.com/docs/pretty-formats#Documentation/pretty-formats.txt-emdescribeoptionsem
if let Some(describe) = parts.next() {
let mut describe_parts = describe.split('-');
let _last_tag = describe_parts.next().unwrap();
// If this is the tagged commit, this component will be missing
println!(
"cargo::rustc-env=RED_KNOT_LAST_TAG_DISTANCE={}",
describe_parts.next().unwrap_or("0")
);
}
}
fn git_head(git_dir: &Path) -> Option<PathBuf> {
// The typical case is a standard git repository.
let git_head_path = git_dir.join("HEAD");
if git_head_path.exists() {
return Some(git_head_path);
}
if !git_dir.is_file() {
return None;
}
// If `.git/HEAD` doesn't exist and `.git` is actually a file,
// then let's try to attempt to read it as a worktree. If it's
// a worktree, then its contents will look like this, e.g.:
//
// gitdir: /home/andrew/astral/uv/main/.git/worktrees/pr2
//
// And the HEAD file we want to watch will be at:
//
// /home/andrew/astral/uv/main/.git/worktrees/pr2/HEAD
let contents = fs::read_to_string(git_dir).ok()?;
let (label, worktree_path) = contents.split_once(':')?;
if label != "gitdir" {
return None;
}
let worktree_path = worktree_path.trim();
Some(PathBuf::from(worktree_path))
}

View File

@@ -1,201 +0,0 @@
use crate::logging::Verbosity;
use crate::python_version::PythonVersion;
use clap::{ArgAction, ArgMatches, Error, Parser};
use red_knot_project::metadata::options::{EnvironmentOptions, Options};
use red_knot_project::metadata::value::{RangedValue, RelativePathBuf};
use red_knot_python_semantic::lint;
use ruff_db::system::SystemPathBuf;
#[derive(Debug, Parser)]
#[command(
author,
name = "red-knot",
about = "An extremely fast Python type checker."
)]
#[command(version)]
pub(crate) struct Args {
#[command(subcommand)]
pub(crate) command: Command,
}
#[derive(Debug, clap::Subcommand)]
pub(crate) enum Command {
/// Check a project for type errors.
Check(CheckCommand),
/// Start the language server
Server,
/// Display Red Knot's version
Version,
}
#[derive(Debug, Parser)]
pub(crate) struct CheckCommand {
/// Run the command within the given project directory.
///
/// All `pyproject.toml` files will be discovered by walking up the directory tree from the given project directory,
/// as will the project's virtual environment (`.venv`) unless the `venv-path` option is set.
///
/// Other command-line arguments (such as relative paths) will be resolved relative to the current working directory.
#[arg(long, value_name = "PROJECT")]
pub(crate) project: Option<SystemPathBuf>,
/// Path to the virtual environment the project uses.
///
/// If provided, red-knot will use the `site-packages` directory of this virtual environment
/// to resolve type information for the project's third-party dependencies.
#[arg(long, value_name = "PATH")]
pub(crate) venv_path: Option<SystemPathBuf>,
/// Custom directory to use for stdlib typeshed stubs.
#[arg(long, value_name = "PATH", alias = "custom-typeshed-dir")]
pub(crate) typeshed: Option<SystemPathBuf>,
/// Additional path to use as a module-resolution source (can be passed multiple times).
#[arg(long, value_name = "PATH")]
pub(crate) extra_search_path: Option<Vec<SystemPathBuf>>,
/// Python version to assume when resolving types.
#[arg(long, value_name = "VERSION", alias = "target-version")]
pub(crate) python_version: Option<PythonVersion>,
#[clap(flatten)]
pub(crate) verbosity: Verbosity,
#[clap(flatten)]
pub(crate) rules: RulesArg,
/// Use exit code 1 if there are any warning-level diagnostics.
#[arg(long, conflicts_with = "exit_zero")]
pub(crate) error_on_warning: bool,
/// Always use exit code 0, even when there are error-level diagnostics.
#[arg(long)]
pub(crate) exit_zero: bool,
/// Run in watch mode by re-running whenever files change.
#[arg(long, short = 'W')]
pub(crate) watch: bool,
}
impl CheckCommand {
pub(crate) fn into_options(self) -> Options {
let rules = if self.rules.is_empty() {
None
} else {
Some(
self.rules
.into_iter()
.map(|(rule, level)| (RangedValue::cli(rule), RangedValue::cli(level)))
.collect(),
)
};
Options {
environment: Some(EnvironmentOptions {
python_version: self
.python_version
.map(|version| RangedValue::cli(version.into())),
venv_path: self.venv_path.map(RelativePathBuf::cli),
typeshed: self.typeshed.map(RelativePathBuf::cli),
extra_paths: self.extra_search_path.map(|extra_search_paths| {
extra_search_paths
.into_iter()
.map(RelativePathBuf::cli)
.collect()
}),
..EnvironmentOptions::default()
}),
rules,
..Default::default()
}
}
}
/// A list of rules to enable or disable with a given severity.
///
/// This type is used to parse the `--error`, `--warn`, and `--ignore` arguments
/// while preserving the order in which they were specified (arguments last override previous severities).
#[derive(Debug)]
pub(crate) struct RulesArg(Vec<(String, lint::Level)>);
impl RulesArg {
fn is_empty(&self) -> bool {
self.0.is_empty()
}
fn into_iter(self) -> impl Iterator<Item = (String, lint::Level)> {
self.0.into_iter()
}
}
impl clap::FromArgMatches for RulesArg {
fn from_arg_matches(matches: &ArgMatches) -> Result<Self, Error> {
let mut rules = Vec::new();
for (level, arg_id) in [
(lint::Level::Ignore, "ignore"),
(lint::Level::Warn, "warn"),
(lint::Level::Error, "error"),
] {
let indices = matches.indices_of(arg_id).into_iter().flatten();
let levels = matches.get_many::<String>(arg_id).into_iter().flatten();
rules.extend(
indices
.zip(levels)
.map(|(index, rule)| (index, rule, level)),
);
}
// Sort by their index so that values specified later override earlier ones.
rules.sort_by_key(|(index, _, _)| *index);
Ok(Self(
rules
.into_iter()
.map(|(_, rule, level)| (rule.to_owned(), level))
.collect(),
))
}
fn update_from_arg_matches(&mut self, matches: &ArgMatches) -> Result<(), Error> {
self.0 = Self::from_arg_matches(matches)?.0;
Ok(())
}
}
impl clap::Args for RulesArg {
fn augment_args(cmd: clap::Command) -> clap::Command {
const HELP_HEADING: &str = "Enabling / disabling rules";
cmd.arg(
clap::Arg::new("error")
.long("error")
.action(ArgAction::Append)
.help("Treat the given rule as having severity 'error'. Can be specified multiple times.")
.value_name("RULE")
.help_heading(HELP_HEADING),
)
.arg(
clap::Arg::new("warn")
.long("warn")
.action(ArgAction::Append)
.help("Treat the given rule as having severity 'warn'. Can be specified multiple times.")
.value_name("RULE")
.help_heading(HELP_HEADING),
)
.arg(
clap::Arg::new("ignore")
.long("ignore")
.action(ArgAction::Append)
.help("Disables the rule. Can be specified multiple times.")
.value_name("RULE")
.help_heading(HELP_HEADING),
)
}
fn augment_args_for_update(cmd: clap::Command) -> clap::Command {
Self::augment_args(cmd)
}
}

View File

@@ -1,29 +1,125 @@
use std::io::{self, BufWriter, Write};
use std::process::{ExitCode, Termination};
use anyhow::Result;
use std::sync::Mutex;
use crate::args::{Args, CheckCommand, Command};
use crate::logging::setup_tracing;
use anyhow::{anyhow, Context};
use clap::Parser;
use colored::Colorize;
use crossbeam::channel as crossbeam_channel;
use red_knot_project::metadata::options::Options;
use red_knot_project::watch;
use red_knot_project::watch::ProjectWatcher;
use red_knot_project::{ProjectDatabase, ProjectMetadata};
use red_knot_python_semantic::SitePackages;
use red_knot_server::run_server;
use ruff_db::diagnostic::{Diagnostic, Severity};
use red_knot_workspace::db::RootDatabase;
use red_knot_workspace::watch;
use red_knot_workspace::watch::WorkspaceWatcher;
use red_knot_workspace::workspace::settings::Configuration;
use red_knot_workspace::workspace::WorkspaceMetadata;
use ruff_db::diagnostic::Diagnostic;
use ruff_db::system::{OsSystem, System, SystemPath, SystemPathBuf};
use salsa::plumbing::ZalsaDatabase;
use target_version::TargetVersion;
use crate::logging::{setup_tracing, Verbosity};
mod args;
mod logging;
mod python_version;
mod target_version;
mod verbosity;
mod version;
#[derive(Debug, Parser)]
#[command(
author,
name = "red-knot",
about = "An extremely fast Python type checker."
)]
#[command(version)]
struct Args {
#[command(subcommand)]
pub(crate) command: Option<Command>,
#[arg(
long,
help = "Changes the current working directory.",
long_help = "Changes the current working directory before any specified operations. This affects the workspace and configuration discovery.",
value_name = "PATH"
)]
current_directory: Option<SystemPathBuf>,
#[arg(
long,
help = "Path to the virtual environment the project uses",
long_help = "\
Path to the virtual environment the project uses. \
If provided, red-knot will use the `site-packages` directory of this virtual environment \
to resolve type information for the project's third-party dependencies.",
value_name = "PATH"
)]
venv_path: Option<SystemPathBuf>,
#[arg(
long,
value_name = "DIRECTORY",
help = "Custom directory to use for stdlib typeshed stubs"
)]
custom_typeshed_dir: Option<SystemPathBuf>,
#[arg(
long,
value_name = "PATH",
help = "Additional path to use as a module-resolution source (can be passed multiple times)"
)]
extra_search_path: Option<Vec<SystemPathBuf>>,
#[arg(
long,
help = "Python version to assume when resolving types",
value_name = "VERSION"
)]
target_version: Option<TargetVersion>,
#[clap(flatten)]
verbosity: Verbosity,
#[arg(
long,
help = "Run in watch mode by re-running whenever files change",
short = 'W'
)]
watch: bool,
}
impl Args {
fn to_configuration(&self, cli_cwd: &SystemPath) -> Configuration {
let mut configuration = Configuration::default();
if let Some(target_version) = self.target_version {
configuration.target_version = Some(target_version.into());
}
if let Some(venv_path) = &self.venv_path {
configuration.search_paths.site_packages = Some(SitePackages::Derived {
venv_path: SystemPath::absolute(venv_path, cli_cwd),
});
}
if let Some(custom_typeshed_dir) = &self.custom_typeshed_dir {
configuration.search_paths.custom_typeshed =
Some(SystemPath::absolute(custom_typeshed_dir, cli_cwd));
}
if let Some(extra_search_paths) = &self.extra_search_path {
configuration.search_paths.extra_paths = extra_search_paths
.iter()
.map(|path| Some(SystemPath::absolute(path, cli_cwd)))
.collect();
}
configuration
}
}
#[derive(Debug, clap::Subcommand)]
pub enum Command {
/// Start the language server
Server,
}
#[allow(clippy::print_stdout, clippy::unnecessary_wraps, clippy::print_stderr)]
pub fn main() -> ExitStatus {
@@ -50,21 +146,10 @@ pub fn main() -> ExitStatus {
fn run() -> anyhow::Result<ExitStatus> {
let args = Args::parse_from(std::env::args());
match args.command {
Command::Server => run_server().map(|()| ExitStatus::Success),
Command::Check(check_args) => run_check(check_args),
Command::Version => version().map(|()| ExitStatus::Success),
if matches!(args.command, Some(Command::Server)) {
return run_server().map(|()| ExitStatus::Success);
}
}
pub(crate) fn version() -> Result<()> {
let mut stdout = BufWriter::new(io::stdout().lock());
let version_info = crate::version::version();
writeln!(stdout, "red knot {}", &version_info)?;
Ok(())
}
fn run_check(args: CheckCommand) -> anyhow::Result<ExitStatus> {
let verbosity = args.verbosity.level();
countme::enable(verbosity.is_trace());
let _guard = setup_tracing(verbosity)?;
@@ -82,34 +167,33 @@ fn run_check(args: CheckCommand) -> anyhow::Result<ExitStatus> {
};
let cwd = args
.project
.current_directory
.as_ref()
.map(|cwd| {
if cwd.as_std_path().is_dir() {
Ok(SystemPath::absolute(cwd, &cli_base_path))
} else {
Err(anyhow!("Provided project path `{cwd}` is not a directory"))
Err(anyhow!(
"Provided current-directory path `{cwd}` is not a directory"
))
}
})
.transpose()?
.unwrap_or_else(|| cli_base_path.clone());
let system = OsSystem::new(cwd);
let watch = args.watch;
let exit_zero = args.exit_zero;
let min_error_severity = if args.error_on_warning {
Severity::Warning
} else {
Severity::Error
};
let system = OsSystem::new(cwd.clone());
let cli_configuration = args.to_configuration(&cwd);
let workspace_metadata = WorkspaceMetadata::discover(
system.current_directory(),
&system,
Some(&cli_configuration),
)?;
let cli_options = args.into_options();
let mut workspace_metadata = ProjectMetadata::discover(system.current_directory(), &system)?;
workspace_metadata.apply_cli_options(cli_options.clone());
// TODO: Use the `program_settings` to compute the key for the database's persistent
// cache and load the cache if it exists.
let mut db = RootDatabase::new(workspace_metadata, system)?;
let mut db = ProjectDatabase::new(workspace_metadata, system)?;
let (main_loop, main_loop_cancellation_token) = MainLoop::new(cli_options, min_error_severity);
let (main_loop, main_loop_cancellation_token) = MainLoop::new(cli_configuration);
// Listen to Ctrl+C and abort the watch mode.
let main_loop_cancellation_token = Mutex::new(Some(main_loop_cancellation_token));
@@ -121,7 +205,7 @@ fn run_check(args: CheckCommand) -> anyhow::Result<ExitStatus> {
}
})?;
let exit_status = if watch {
let exit_status = if args.watch {
main_loop.watch(&mut db)?
} else {
main_loop.run(&mut db)
@@ -131,11 +215,7 @@ fn run_check(args: CheckCommand) -> anyhow::Result<ExitStatus> {
std::mem::forget(db);
if exit_zero {
Ok(ExitStatus::Success)
} else {
Ok(exit_status)
}
Ok(exit_status)
}
#[derive(Copy, Clone)]
@@ -164,21 +244,13 @@ struct MainLoop {
receiver: crossbeam_channel::Receiver<MainLoopMessage>,
/// The file system watcher, if running in watch mode.
watcher: Option<ProjectWatcher>,
watcher: Option<WorkspaceWatcher>,
cli_options: Options,
/// The minimum severity to consider an error when deciding the exit status.
///
/// TODO(micha): Get from the terminal settings.
min_error_severity: Severity,
cli_configuration: Configuration,
}
impl MainLoop {
fn new(
cli_options: Options,
min_error_severity: Severity,
) -> (Self, MainLoopCancellationToken) {
fn new(cli_configuration: Configuration) -> (Self, MainLoopCancellationToken) {
let (sender, receiver) = crossbeam_channel::bounded(10);
(
@@ -186,28 +258,27 @@ impl MainLoop {
sender: sender.clone(),
receiver,
watcher: None,
cli_options,
min_error_severity,
cli_configuration,
},
MainLoopCancellationToken { sender },
)
}
fn watch(mut self, db: &mut ProjectDatabase) -> anyhow::Result<ExitStatus> {
fn watch(mut self, db: &mut RootDatabase) -> anyhow::Result<ExitStatus> {
tracing::debug!("Starting watch mode");
let sender = self.sender.clone();
let watcher = watch::directory_watcher(move |event| {
sender.send(MainLoopMessage::ApplyChanges(event)).unwrap();
})?;
self.watcher = Some(ProjectWatcher::new(watcher, db));
self.watcher = Some(WorkspaceWatcher::new(watcher, db));
self.run(db);
Ok(ExitStatus::Success)
}
fn run(mut self, db: &mut ProjectDatabase) -> ExitStatus {
fn run(mut self, db: &mut RootDatabase) -> ExitStatus {
self.sender.send(MainLoopMessage::CheckWorkspace).unwrap();
let result = self.main_loop(db);
@@ -217,7 +288,7 @@ impl MainLoop {
result
}
fn main_loop(&mut self, db: &mut ProjectDatabase) -> ExitStatus {
fn main_loop(&mut self, db: &mut RootDatabase) -> ExitStatus {
// Schedule the first check.
tracing::debug!("Starting main loop");
@@ -226,10 +297,10 @@ impl MainLoop {
while let Ok(message) = self.receiver.recv() {
match message {
MainLoopMessage::CheckWorkspace => {
let db = db.clone();
let db = db.snapshot();
let sender = self.sender.clone();
// Spawn a new task that checks the project. This needs to be done in a separate thread
// Spawn a new task that checks the workspace. This needs to be done in a separate thread
// to prevent blocking the main loop here.
rayon::spawn(move || {
if let Ok(result) = db.check() {
@@ -245,10 +316,7 @@ impl MainLoop {
result,
revision: check_revision,
} => {
let failed = result
.iter()
.any(|diagnostic| diagnostic.severity() >= self.min_error_severity);
let has_diagnostics = !result.is_empty();
if check_revision == revision {
#[allow(clippy::print_stdout)]
for diagnostic in result {
@@ -261,7 +329,7 @@ impl MainLoop {
}
if self.watcher.is_none() {
return if failed {
return if has_diagnostics {
ExitStatus::Failure
} else {
ExitStatus::Success
@@ -274,7 +342,7 @@ impl MainLoop {
MainLoopMessage::ApplyChanges(changes) => {
revision += 1;
// Automatically cancels any pending queries and waits for them to complete.
db.apply_changes(changes, Some(&self.cli_options));
db.apply_changes(changes, Some(&self.cli_configuration));
if let Some(watcher) = self.watcher.as_mut() {
watcher.update(db);
}

View File

@@ -1,68 +0,0 @@
/// Enumeration of all supported Python versions
///
/// TODO: unify with the `PythonVersion` enum in the linter/formatter crates?
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq, PartialOrd, Ord, Default, clap::ValueEnum)]
pub enum PythonVersion {
#[value(name = "3.7")]
Py37,
#[value(name = "3.8")]
Py38,
#[default]
#[value(name = "3.9")]
Py39,
#[value(name = "3.10")]
Py310,
#[value(name = "3.11")]
Py311,
#[value(name = "3.12")]
Py312,
#[value(name = "3.13")]
Py313,
}
impl PythonVersion {
const fn as_str(self) -> &'static str {
match self {
Self::Py37 => "3.7",
Self::Py38 => "3.8",
Self::Py39 => "3.9",
Self::Py310 => "3.10",
Self::Py311 => "3.11",
Self::Py312 => "3.12",
Self::Py313 => "3.13",
}
}
}
impl std::fmt::Display for PythonVersion {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(self.as_str())
}
}
impl From<PythonVersion> for red_knot_python_semantic::PythonVersion {
fn from(value: PythonVersion) -> Self {
match value {
PythonVersion::Py37 => Self::PY37,
PythonVersion::Py38 => Self::PY38,
PythonVersion::Py39 => Self::PY39,
PythonVersion::Py310 => Self::PY310,
PythonVersion::Py311 => Self::PY311,
PythonVersion::Py312 => Self::PY312,
PythonVersion::Py313 => Self::PY313,
}
}
}
#[cfg(test)]
mod tests {
use crate::python_version::PythonVersion;
#[test]
fn same_default_as_python_version() {
assert_eq!(
red_knot_python_semantic::PythonVersion::from(PythonVersion::default()),
red_knot_python_semantic::PythonVersion::default()
);
}
}

View File

@@ -0,0 +1,62 @@
/// Enumeration of all supported Python versions
///
/// TODO: unify with the `PythonVersion` enum in the linter/formatter crates?
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq, PartialOrd, Ord, Default, clap::ValueEnum)]
pub enum TargetVersion {
Py37,
Py38,
#[default]
Py39,
Py310,
Py311,
Py312,
Py313,
}
impl TargetVersion {
const fn as_str(self) -> &'static str {
match self {
Self::Py37 => "py37",
Self::Py38 => "py38",
Self::Py39 => "py39",
Self::Py310 => "py310",
Self::Py311 => "py311",
Self::Py312 => "py312",
Self::Py313 => "py313",
}
}
}
impl std::fmt::Display for TargetVersion {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(self.as_str())
}
}
impl From<TargetVersion> for red_knot_python_semantic::PythonVersion {
fn from(value: TargetVersion) -> Self {
match value {
TargetVersion::Py37 => Self::PY37,
TargetVersion::Py38 => Self::PY38,
TargetVersion::Py39 => Self::PY39,
TargetVersion::Py310 => Self::PY310,
TargetVersion::Py311 => Self::PY311,
TargetVersion::Py312 => Self::PY312,
TargetVersion::Py313 => Self::PY313,
}
}
}
#[cfg(test)]
mod tests {
use crate::target_version::TargetVersion;
use red_knot_python_semantic::PythonVersion;
#[test]
fn same_default_as_python_version() {
assert_eq!(
PythonVersion::from(TargetVersion::default()),
PythonVersion::default()
);
}
}

View File

@@ -1,105 +0,0 @@
//! Code for representing Red Knot's release version number.
use std::fmt;
/// Information about the git repository where Red Knot was built from.
pub(crate) struct CommitInfo {
short_commit_hash: String,
commit_date: String,
commits_since_last_tag: u32,
}
/// Red Knot's version.
pub(crate) struct VersionInfo {
/// Red Knot's version, such as "0.5.1"
version: String,
/// Information about the git commit we may have been built from.
///
/// `None` if not built from a git repo or if retrieval failed.
commit_info: Option<CommitInfo>,
}
impl fmt::Display for VersionInfo {
/// Formatted version information: `<version>[+<commits>] (<commit> <date>)`
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.version)?;
if let Some(ref ci) = self.commit_info {
if ci.commits_since_last_tag > 0 {
write!(f, "+{}", ci.commits_since_last_tag)?;
}
write!(f, " ({} {})", ci.short_commit_hash, ci.commit_date)?;
}
Ok(())
}
}
/// Returns information about Red Knot's version.
pub(crate) fn version() -> VersionInfo {
// Environment variables are only read at compile-time
macro_rules! option_env_str {
($name:expr) => {
option_env!($name).map(|s| s.to_string())
};
}
// This version is pulled from Cargo.toml and set by Cargo
let version = option_env_str!("CARGO_PKG_VERSION").unwrap();
// Commit info is pulled from git and set by `build.rs`
let commit_info =
option_env_str!("RED_KNOT_COMMIT_SHORT_HASH").map(|short_commit_hash| CommitInfo {
short_commit_hash,
commit_date: option_env_str!("RED_KNOT_COMMIT_DATE").unwrap(),
commits_since_last_tag: option_env_str!("RED_KNOT_LAST_TAG_DISTANCE")
.as_deref()
.map_or(0, |value| value.parse::<u32>().unwrap_or(0)),
});
VersionInfo {
version,
commit_info,
}
}
#[cfg(test)]
mod tests {
use insta::assert_snapshot;
use super::{CommitInfo, VersionInfo};
#[test]
fn version_formatting() {
let version = VersionInfo {
version: "0.0.0".to_string(),
commit_info: None,
};
assert_snapshot!(version, @"0.0.0");
}
#[test]
fn version_formatting_with_commit_info() {
let version = VersionInfo {
version: "0.0.0".to_string(),
commit_info: Some(CommitInfo {
short_commit_hash: "53b0f5d92".to_string(),
commit_date: "2023-10-19".to_string(),
commits_since_last_tag: 0,
}),
};
assert_snapshot!(version, @"0.0.0 (53b0f5d92 2023-10-19)");
}
#[test]
fn version_formatting_with_commits_since_last_tag() {
let version = VersionInfo {
version: "0.0.0".to_string(),
commit_info: Some(CommitInfo {
short_commit_hash: "53b0f5d92".to_string(),
commit_date: "2023-10-19".to_string(),
commits_since_last_tag: 24,
}),
};
assert_snapshot!(version, @"0.0.0+24 (53b0f5d92 2023-10-19)");
}
}

View File

@@ -1,769 +0,0 @@
use anyhow::Context;
use insta::internals::SettingsBindDropGuard;
use insta_cmd::{assert_cmd_snapshot, get_cargo_bin};
use std::path::{Path, PathBuf};
use std::process::Command;
use tempfile::TempDir;
/// Specifying an option on the CLI should take precedence over the same setting in the
/// project's configuration.
#[test]
fn config_override() -> anyhow::Result<()> {
let case = TestCase::with_files([
(
"pyproject.toml",
r#"
[tool.knot.environment]
python-version = "3.11"
"#,
),
(
"test.py",
r#"
import sys
# Access `sys.last_exc` that was only added in Python 3.12
print(sys.last_exc)
"#,
),
])?;
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error: lint:unresolved-attribute
--> <temp_dir>/test.py:5:7
|
4 | # Access `sys.last_exc` that was only added in Python 3.12
5 | print(sys.last_exc)
| ^^^^^^^^^^^^ Type `<module 'sys'>` has no attribute `last_exc`
|
----- stderr -----
"###);
assert_cmd_snapshot!(case.command().arg("--python-version").arg("3.12"), @r"
success: true
exit_code: 0
----- stdout -----
----- stderr -----
");
Ok(())
}
/// Paths specified on the CLI are relative to the current working directory and not the project root.
///
/// We test this by adding an extra search path from the CLI to the libs directory when
/// running the CLI from the child directory (using relative paths).
///
/// Project layout:
/// ```
/// - libs
/// |- utils.py
/// - child
/// | - test.py
/// - pyproject.toml
/// ```
///
/// And the command is run in the `child` directory.
#[test]
fn cli_arguments_are_relative_to_the_current_directory() -> anyhow::Result<()> {
let case = TestCase::with_files([
(
"pyproject.toml",
r#"
[tool.knot.environment]
python-version = "3.11"
"#,
),
(
"libs/utils.py",
r#"
def add(a: int, b: int) -> int:
a + b
"#,
),
(
"child/test.py",
r#"
from utils import add
stat = add(10, 15)
"#,
),
])?;
// Make sure that the CLI fails when the `libs` directory is not in the search path.
assert_cmd_snapshot!(case.command().current_dir(case.project_dir().join("child")), @r###"
success: false
exit_code: 1
----- stdout -----
error: lint:unresolved-import
--> <temp_dir>/child/test.py:2:6
|
2 | from utils import add
| ^^^^^ Cannot resolve import `utils`
3 |
4 | stat = add(10, 15)
|
----- stderr -----
"###);
assert_cmd_snapshot!(case.command().current_dir(case.project_dir().join("child")).arg("--extra-search-path").arg("../libs"), @r"
success: true
exit_code: 0
----- stdout -----
----- stderr -----
");
Ok(())
}
/// Paths specified in a configuration file are relative to the project root.
///
/// We test this by adding `libs` (as a relative path) to the extra search path in the configuration and running
/// the CLI from a subdirectory.
///
/// Project layout:
/// ```
/// - libs
/// |- utils.py
/// - child
/// | - test.py
/// - pyproject.toml
/// ```
#[test]
fn paths_in_configuration_files_are_relative_to_the_project_root() -> anyhow::Result<()> {
let case = TestCase::with_files([
(
"pyproject.toml",
r#"
[tool.knot.environment]
python-version = "3.11"
extra-paths = ["libs"]
"#,
),
(
"libs/utils.py",
r#"
def add(a: int, b: int) -> int:
a + b
"#,
),
(
"child/test.py",
r#"
from utils import add
stat = add(10, 15)
"#,
),
])?;
assert_cmd_snapshot!(case.command().current_dir(case.project_dir().join("child")), @r"
success: true
exit_code: 0
----- stdout -----
----- stderr -----
");
Ok(())
}
/// The rule severity can be changed in the configuration file
#[test]
fn configuration_rule_severity() -> anyhow::Result<()> {
let case = TestCase::with_file(
"test.py",
r#"
y = 4 / 0
for a in range(0, y):
x = a
print(x) # possibly-unresolved-reference
"#,
)?;
// Assert that there's a possibly unresolved reference diagnostic
// and that division-by-zero has a severity of error by default.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error: lint:division-by-zero
--> <temp_dir>/test.py:2:5
|
2 | y = 4 / 0
| ^^^^^ Cannot divide object of type `Literal[4]` by zero
3 |
4 | for a in range(0, y):
|
warning: lint:possibly-unresolved-reference
--> <temp_dir>/test.py:7:7
|
5 | x = a
6 |
7 | print(x) # possibly-unresolved-reference
| - Name `x` used when possibly not defined
|
----- stderr -----
"###);
case.write_file(
"pyproject.toml",
r#"
[tool.knot.rules]
division-by-zero = "warn" # demote to warn
possibly-unresolved-reference = "ignore"
"#,
)?;
assert_cmd_snapshot!(case.command(), @r###"
success: true
exit_code: 0
----- stdout -----
warning: lint:division-by-zero
--> <temp_dir>/test.py:2:5
|
2 | y = 4 / 0
| ----- Cannot divide object of type `Literal[4]` by zero
3 |
4 | for a in range(0, y):
|
----- stderr -----
"###);
Ok(())
}
/// The rule severity can be changed using `--ignore`, `--warn`, and `--error`
#[test]
fn cli_rule_severity() -> anyhow::Result<()> {
let case = TestCase::with_file(
"test.py",
r#"
import does_not_exit
y = 4 / 0
for a in range(0, y):
x = a
print(x) # possibly-unresolved-reference
"#,
)?;
// Assert that there's a possibly unresolved reference diagnostic
// and that division-by-zero has a severity of error by default.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error: lint:unresolved-import
--> <temp_dir>/test.py:2:8
|
2 | import does_not_exit
| ^^^^^^^^^^^^^ Cannot resolve import `does_not_exit`
3 |
4 | y = 4 / 0
|
error: lint:division-by-zero
--> <temp_dir>/test.py:4:5
|
2 | import does_not_exit
3 |
4 | y = 4 / 0
| ^^^^^ Cannot divide object of type `Literal[4]` by zero
5 |
6 | for a in range(0, y):
|
warning: lint:possibly-unresolved-reference
--> <temp_dir>/test.py:9:7
|
7 | x = a
8 |
9 | print(x) # possibly-unresolved-reference
| - Name `x` used when possibly not defined
|
----- stderr -----
"###);
assert_cmd_snapshot!(
case
.command()
.arg("--ignore")
.arg("possibly-unresolved-reference")
.arg("--warn")
.arg("division-by-zero")
.arg("--warn")
.arg("unresolved-import"),
@r###"
success: true
exit_code: 0
----- stdout -----
warning: lint:unresolved-import
--> <temp_dir>/test.py:2:8
|
2 | import does_not_exit
| ------------- Cannot resolve import `does_not_exit`
3 |
4 | y = 4 / 0
|
warning: lint:division-by-zero
--> <temp_dir>/test.py:4:5
|
2 | import does_not_exit
3 |
4 | y = 4 / 0
| ----- Cannot divide object of type `Literal[4]` by zero
5 |
6 | for a in range(0, y):
|
----- stderr -----
"###
);
Ok(())
}
/// The rule severity can be changed using `--ignore`, `--warn`, and `--error` and
/// values specified last override previous severities.
#[test]
fn cli_rule_severity_precedence() -> anyhow::Result<()> {
let case = TestCase::with_file(
"test.py",
r#"
y = 4 / 0
for a in range(0, y):
x = a
print(x) # possibly-unresolved-reference
"#,
)?;
// Assert that there's a possibly unresolved reference diagnostic
// and that division-by-zero has a severity of error by default.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error: lint:division-by-zero
--> <temp_dir>/test.py:2:5
|
2 | y = 4 / 0
| ^^^^^ Cannot divide object of type `Literal[4]` by zero
3 |
4 | for a in range(0, y):
|
warning: lint:possibly-unresolved-reference
--> <temp_dir>/test.py:7:7
|
5 | x = a
6 |
7 | print(x) # possibly-unresolved-reference
| - Name `x` used when possibly not defined
|
----- stderr -----
"###);
assert_cmd_snapshot!(
case
.command()
.arg("--error")
.arg("possibly-unresolved-reference")
.arg("--warn")
.arg("division-by-zero")
// Override the error severity by ignoring the rule
.arg("--ignore")
.arg("possibly-unresolved-reference"),
@r###"
success: true
exit_code: 0
----- stdout -----
warning: lint:division-by-zero
--> <temp_dir>/test.py:2:5
|
2 | y = 4 / 0
| ----- Cannot divide object of type `Literal[4]` by zero
3 |
4 | for a in range(0, y):
|
----- stderr -----
"###
);
Ok(())
}
/// Red Knot warns about unknown rules specified in a configuration file
#[test]
fn configuration_unknown_rules() -> anyhow::Result<()> {
let case = TestCase::with_files([
(
"pyproject.toml",
r#"
[tool.knot.rules]
division-by-zer = "warn" # incorrect rule name
"#,
),
("test.py", "print(10)"),
])?;
assert_cmd_snapshot!(case.command(), @r###"
success: true
exit_code: 0
----- stdout -----
warning: unknown-rule
--> <temp_dir>/pyproject.toml:3:1
|
2 | [tool.knot.rules]
3 | division-by-zer = "warn" # incorrect rule name
| --------------- Unknown lint rule `division-by-zer`
|
----- stderr -----
"###);
Ok(())
}
/// Red Knot warns about unknown rules specified in a CLI argument
#[test]
fn cli_unknown_rules() -> anyhow::Result<()> {
let case = TestCase::with_file("test.py", "print(10)")?;
assert_cmd_snapshot!(case.command().arg("--ignore").arg("division-by-zer"), @r###"
success: true
exit_code: 0
----- stdout -----
warning: unknown-rule: Unknown lint rule `division-by-zer`
----- stderr -----
"###);
Ok(())
}
#[test]
fn exit_code_only_warnings() -> anyhow::Result<()> {
let case = TestCase::with_file("test.py", r"print(x) # [unresolved-reference]")?;
assert_cmd_snapshot!(case.command(), @r###"
success: true
exit_code: 0
----- stdout -----
warning: lint:unresolved-reference
--> <temp_dir>/test.py:1:7
|
1 | print(x) # [unresolved-reference]
| - Name `x` used when not defined
|
----- stderr -----
"###);
Ok(())
}
#[test]
fn exit_code_only_info() -> anyhow::Result<()> {
let case = TestCase::with_file(
"test.py",
r#"
from typing_extensions import reveal_type
reveal_type(1)
"#,
)?;
assert_cmd_snapshot!(case.command(), @r###"
success: true
exit_code: 0
----- stdout -----
info: revealed-type
--> <temp_dir>/test.py:3:1
|
2 | from typing_extensions import reveal_type
3 | reveal_type(1)
| -------------- info: Revealed type is `Literal[1]`
|
----- stderr -----
"###);
Ok(())
}
#[test]
fn exit_code_only_info_and_error_on_warning_is_true() -> anyhow::Result<()> {
let case = TestCase::with_file(
"test.py",
r#"
from typing_extensions import reveal_type
reveal_type(1)
"#,
)?;
assert_cmd_snapshot!(case.command().arg("--error-on-warning"), @r###"
success: true
exit_code: 0
----- stdout -----
info: revealed-type
--> <temp_dir>/test.py:3:1
|
2 | from typing_extensions import reveal_type
3 | reveal_type(1)
| -------------- info: Revealed type is `Literal[1]`
|
----- stderr -----
"###);
Ok(())
}
#[test]
fn exit_code_no_errors_but_error_on_warning_is_true() -> anyhow::Result<()> {
let case = TestCase::with_file("test.py", r"print(x) # [unresolved-reference]")?;
assert_cmd_snapshot!(case.command().arg("--error-on-warning"), @r###"
success: false
exit_code: 1
----- stdout -----
warning: lint:unresolved-reference
--> <temp_dir>/test.py:1:7
|
1 | print(x) # [unresolved-reference]
| - Name `x` used when not defined
|
----- stderr -----
"###);
Ok(())
}
#[test]
fn exit_code_both_warnings_and_errors() -> anyhow::Result<()> {
let case = TestCase::with_file(
"test.py",
r#"
print(x) # [unresolved-reference]
print(4[1]) # [non-subscriptable]
"#,
)?;
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
warning: lint:unresolved-reference
--> <temp_dir>/test.py:2:7
|
2 | print(x) # [unresolved-reference]
| - Name `x` used when not defined
3 | print(4[1]) # [non-subscriptable]
|
error: lint:non-subscriptable
--> <temp_dir>/test.py:3:7
|
2 | print(x) # [unresolved-reference]
3 | print(4[1]) # [non-subscriptable]
| ^ Cannot subscript object of type `Literal[4]` with no `__getitem__` method
|
----- stderr -----
"###);
Ok(())
}
#[test]
fn exit_code_both_warnings_and_errors_and_error_on_warning_is_true() -> anyhow::Result<()> {
let case = TestCase::with_file(
"test.py",
r###"
print(x) # [unresolved-reference]
print(4[1]) # [non-subscriptable]
"###,
)?;
assert_cmd_snapshot!(case.command().arg("--error-on-warning"), @r###"
success: false
exit_code: 1
----- stdout -----
warning: lint:unresolved-reference
--> <temp_dir>/test.py:2:7
|
2 | print(x) # [unresolved-reference]
| - Name `x` used when not defined
3 | print(4[1]) # [non-subscriptable]
|
error: lint:non-subscriptable
--> <temp_dir>/test.py:3:7
|
2 | print(x) # [unresolved-reference]
3 | print(4[1]) # [non-subscriptable]
| ^ Cannot subscript object of type `Literal[4]` with no `__getitem__` method
|
----- stderr -----
"###);
Ok(())
}
#[test]
fn exit_code_exit_zero_is_true() -> anyhow::Result<()> {
let case = TestCase::with_file(
"test.py",
r#"
print(x) # [unresolved-reference]
print(4[1]) # [non-subscriptable]
"#,
)?;
assert_cmd_snapshot!(case.command().arg("--exit-zero"), @r###"
success: true
exit_code: 0
----- stdout -----
warning: lint:unresolved-reference
--> <temp_dir>/test.py:2:7
|
2 | print(x) # [unresolved-reference]
| - Name `x` used when not defined
3 | print(4[1]) # [non-subscriptable]
|
error: lint:non-subscriptable
--> <temp_dir>/test.py:3:7
|
2 | print(x) # [unresolved-reference]
3 | print(4[1]) # [non-subscriptable]
| ^ Cannot subscript object of type `Literal[4]` with no `__getitem__` method
|
----- stderr -----
"###);
Ok(())
}
struct TestCase {
_temp_dir: TempDir,
_settings_scope: SettingsBindDropGuard,
project_dir: PathBuf,
}
impl TestCase {
fn new() -> anyhow::Result<Self> {
let temp_dir = TempDir::new()?;
// Canonicalize the tempdir path because macOS uses symlinks for tempdirs
// and that doesn't play well with our snapshot filtering.
let project_dir = temp_dir
.path()
.canonicalize()
.context("Failed to canonicalize project path")?;
let mut settings = insta::Settings::clone_current();
settings.add_filter(&tempdir_filter(&project_dir), "<temp_dir>/");
settings.add_filter(r#"\\(\w\w|\s|\.|")"#, "/$1");
let settings_scope = settings.bind_to_scope();
Ok(Self {
project_dir,
_temp_dir: temp_dir,
_settings_scope: settings_scope,
})
}
fn with_files<'a>(files: impl IntoIterator<Item = (&'a str, &'a str)>) -> anyhow::Result<Self> {
let case = Self::new()?;
case.write_files(files)?;
Ok(case)
}
fn with_file(path: impl AsRef<Path>, content: &str) -> anyhow::Result<Self> {
let case = Self::new()?;
case.write_file(path, content)?;
Ok(case)
}
fn write_files<'a>(
&self,
files: impl IntoIterator<Item = (&'a str, &'a str)>,
) -> anyhow::Result<()> {
for (path, content) in files {
self.write_file(path, content)?;
}
Ok(())
}
fn write_file(&self, path: impl AsRef<Path>, content: &str) -> anyhow::Result<()> {
let path = path.as_ref();
let path = self.project_dir.join(path);
if let Some(parent) = path.parent() {
std::fs::create_dir_all(parent)
.with_context(|| format!("Failed to create directory `{}`", parent.display()))?;
}
std::fs::write(&path, &*ruff_python_trivia::textwrap::dedent(content))
.with_context(|| format!("Failed to write file `{path}`", path = path.display()))?;
Ok(())
}
fn project_dir(&self) -> &Path {
&self.project_dir
}
fn command(&self) -> Command {
let mut command = Command::new(get_cargo_bin("red_knot"));
command.current_dir(&self.project_dir).arg("check");
command
}
}
fn tempdir_filter(path: &Path) -> String {
format!(r"{}\\?/?", regex::escape(path.to_str().unwrap()))
}

File diff suppressed because it is too large

View File

@@ -1,17 +0,0 @@
"""
Regression test that makes sure we do not short-circuit after determining
that the overall type will be `Never`, and that we still infer a type for
the second tuple element `2`.
Relevant discussion:
https://github.com/astral-sh/ruff/pull/15218#discussion_r1900811073
"""
from typing_extensions import Never
def never() -> Never:
return never()
(never(), 2)

View File

@@ -1,190 +0,0 @@
use std::{collections::HashMap, hash::BuildHasher};
use red_knot_python_semantic::{PythonPlatform, PythonVersion, SitePackages};
use ruff_db::system::SystemPathBuf;
/// Combine two values, preferring the values in `self`.
///
/// The logic should follow that of Cargo's `config.toml`:
///
/// > If a key is specified in multiple config files, the values will get merged together.
/// > Numbers, strings, and booleans will use the value in the deeper config directory taking
/// > precedence over ancestor directories, where the home directory is the lowest priority.
/// > Arrays will be joined together with higher precedence items being placed later in the
/// > merged array.
///
/// ## uv Compatibility
///
/// The merging behavior differs from uv in that values with higher precedence in arrays
/// are placed later in the merged array. This is because we want to support overriding
/// earlier values and values from other configurations, including unsetting them.
/// For example, in file inclusion and exclusion patterns, patterns that come last
/// can override earlier ones, matching the `gitignore` behavior.
/// Generally speaking, it feels more intuitive if later values override earlier values
/// than the other way around: `knot --exclude png --exclude "!important.png"`.
///
/// The main downside of this approach is that the ordering can be surprising in cases
/// where the option has "first match" semantics rather than "last match wins" semantics.
/// One such example is `extra-paths`, where the semantics are dictated by Python:
/// the module on the first matching search path wins.
///
/// ```toml
/// [environment]
/// extra-paths = ["b", "c"]
/// ```
///
/// ```bash
/// knot --extra-paths a
/// ```
///
/// That's why a user might expect that this configuration results in `["a", "b", "c"]`,
/// because the CLI has higher precedence. However, the current implementation results in a
/// resolved extra search path of `["b", "c", "a"]`, which means `a` will be tried last.
///
/// There's an argument here that the user should be able to specify the order of the paths,
/// because only then is the user in full control of where to insert the path when specifying `extra-paths`
/// in multiple sources.
///
/// ## Macro
/// You can automatically derive `Combine` for structs with named fields by using `derive(ruff_macros::Combine)`.
pub trait Combine {
#[must_use]
fn combine(mut self, other: Self) -> Self
where
Self: Sized,
{
self.combine_with(other);
self
}
fn combine_with(&mut self, other: Self);
}
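// A minimal sketch (not part of the original diff) of the documented array semantics:
// the higher-precedence value (`self`) ends up *after* the other value's items in the
// merged array. The test name is made up for illustration.
#[test]
fn combine_array_precedence_sketch() {
    let from_cli = Some(vec!["a".to_string()]); // higher precedence
    let from_file = Some(vec!["b".to_string(), "c".to_string()]);
    assert_eq!(
        from_cli.combine(from_file),
        Some(vec!["b".to_string(), "c".to_string(), "a".to_string()])
    );
}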
impl<T> Combine for Option<T>
where
T: Combine,
{
fn combine(self, other: Self) -> Self
where
Self: Sized,
{
match (self, other) {
(Some(a), Some(b)) => Some(a.combine(b)),
(None, Some(b)) => Some(b),
(a, _) => a,
}
}
fn combine_with(&mut self, other: Self) {
match (self, other) {
(Some(a), Some(b)) => {
a.combine_with(b);
}
(a @ None, Some(b)) => {
*a = Some(b);
}
_ => {}
}
}
}
impl<T> Combine for Vec<T> {
fn combine_with(&mut self, mut other: Self) {
// `self` takes precedence over `other` but values with higher precedence must be placed after.
// Swap the vectors so that `other` is the one that gets extended, so that the values of `self` come after.
std::mem::swap(self, &mut other);
self.extend(other);
}
}
impl<K, V, S> Combine for HashMap<K, V, S>
where
K: Eq + std::hash::Hash,
S: BuildHasher,
{
fn combine_with(&mut self, mut other: Self) {
// `self` takes precedence over `other` but `extend` overrides existing values.
// Swap the hash maps so that `self` is the one that gets extended.
std::mem::swap(self, &mut other);
self.extend(other);
}
}
/// Implements [`Combine`] for a value that always returns `self` when combined with another value.
macro_rules! impl_noop_combine {
($name:ident) => {
impl Combine for $name {
#[inline(always)]
fn combine_with(&mut self, _other: Self) {}
#[inline(always)]
fn combine(self, _other: Self) -> Self {
self
}
}
};
}
impl_noop_combine!(SystemPathBuf);
impl_noop_combine!(PythonPlatform);
impl_noop_combine!(SitePackages);
impl_noop_combine!(PythonVersion);
// std types
impl_noop_combine!(bool);
impl_noop_combine!(usize);
impl_noop_combine!(u8);
impl_noop_combine!(u16);
impl_noop_combine!(u32);
impl_noop_combine!(u64);
impl_noop_combine!(u128);
impl_noop_combine!(isize);
impl_noop_combine!(i8);
impl_noop_combine!(i16);
impl_noop_combine!(i32);
impl_noop_combine!(i64);
impl_noop_combine!(i128);
impl_noop_combine!(String);
#[cfg(test)]
mod tests {
use crate::combine::Combine;
use std::collections::HashMap;
#[test]
fn combine_option() {
assert_eq!(Some(1).combine(Some(2)), Some(1));
assert_eq!(None.combine(Some(2)), Some(2));
assert_eq!(Some(1).combine(None), Some(1));
}
#[test]
fn combine_vec() {
assert_eq!(None.combine(Some(vec![1, 2, 3])), Some(vec![1, 2, 3]));
assert_eq!(Some(vec![1, 2, 3]).combine(None), Some(vec![1, 2, 3]));
assert_eq!(
Some(vec![1, 2, 3]).combine(Some(vec![4, 5, 6])),
Some(vec![4, 5, 6, 1, 2, 3])
);
}
#[test]
fn combine_map() {
let a: HashMap<u32, _> = HashMap::from_iter([(1, "a"), (2, "a"), (3, "a")]);
let b: HashMap<u32, _> = HashMap::from_iter([(0, "b"), (2, "b"), (5, "b")]);
assert_eq!(None.combine(Some(b.clone())), Some(b.clone()));
assert_eq!(Some(a.clone()).combine(None), Some(a.clone()));
assert_eq!(
Some(a).combine(Some(b)),
Some(HashMap::from_iter([
(0, "b"),
// The value from `a` takes precedence
(1, "a"),
(2, "a"),
(3, "a"),
(5, "b")
]))
);
}
}

View File

@@ -1,510 +0,0 @@
#![allow(clippy::ref_option)]
use crate::metadata::options::OptionDiagnostic;
pub use db::{Db, ProjectDatabase};
use files::{Index, Indexed, IndexedFiles};
pub use metadata::{ProjectDiscoveryError, ProjectMetadata};
use red_knot_python_semantic::lint::{LintRegistry, LintRegistryBuilder, RuleSelection};
use red_knot_python_semantic::register_lints;
use red_knot_python_semantic::types::check_types;
use ruff_db::diagnostic::{Diagnostic, DiagnosticId, ParseDiagnostic, Severity};
use ruff_db::files::{system_path_to_file, File};
use ruff_db::parsed::parsed_module;
use ruff_db::source::{source_text, SourceTextError};
use ruff_db::system::walk_directory::WalkState;
use ruff_db::system::{FileType, SystemPath};
use ruff_python_ast::PySourceType;
use ruff_text_size::TextRange;
use rustc_hash::{FxBuildHasher, FxHashSet};
use salsa::Durability;
use salsa::Setter;
use std::borrow::Cow;
use std::sync::Arc;
pub mod combine;
mod db;
mod files;
pub mod metadata;
pub mod watch;
pub static DEFAULT_LINT_REGISTRY: std::sync::LazyLock<LintRegistry> =
std::sync::LazyLock::new(default_lints_registry);
pub fn default_lints_registry() -> LintRegistry {
let mut builder = LintRegistryBuilder::default();
register_lints(&mut builder);
builder.build()
}
/// The project as a Salsa ingredient.
///
/// ## How is a project different from a program?
/// There are two (related) motivations:
///
/// 1. Program is defined in `ruff_db` and it can't reference the settings types for the linter and formatter
/// without introducing a cyclic dependency. The project is defined in a higher level crate
/// where it can reference these setting types.
/// 2. Running `ruff check` with different target versions results in different programs (settings) but
/// it remains the same project. That's why a program is a narrowed view of the project,
/// holding only the most fundamental settings required for checking.
#[salsa::input]
pub struct Project {
/// The files that are open in the project.
///
/// Setting the open files to a non-`None` value changes `check` to only check the
/// open files rather than all files in the project.
#[return_ref]
#[default]
open_fileset: Option<Arc<FxHashSet<File>>>,
/// The first-party files of this project.
#[default]
#[return_ref]
file_set: IndexedFiles,
/// The metadata describing the project, including the unresolved options.
#[return_ref]
pub metadata: ProjectMetadata,
}
#[salsa::tracked]
impl Project {
pub fn from_metadata(db: &dyn Db, metadata: ProjectMetadata) -> Self {
Project::builder(metadata)
.durability(Durability::MEDIUM)
.open_fileset_durability(Durability::LOW)
.file_set_durability(Durability::LOW)
.new(db)
}
pub fn root(self, db: &dyn Db) -> &SystemPath {
self.metadata(db).root()
}
pub fn name(self, db: &dyn Db) -> &str {
self.metadata(db).name()
}
pub fn reload(self, db: &mut dyn Db, metadata: ProjectMetadata) {
tracing::debug!("Reloading project");
assert_eq!(self.root(db), metadata.root());
if &metadata != self.metadata(db) {
self.set_metadata(db).to(metadata);
}
self.reload_files(db);
}
pub fn rule_selection(self, db: &dyn Db) -> &RuleSelection {
let (selection, _) = self.rule_selection_with_diagnostics(db);
selection
}
#[salsa::tracked(return_ref)]
fn rule_selection_with_diagnostics(
self,
db: &dyn Db,
) -> (RuleSelection, Vec<OptionDiagnostic>) {
self.metadata(db).options().to_rule_selection(db)
}
/// Checks all open files in the project and its dependencies.
pub(crate) fn check(self, db: &ProjectDatabase) -> Vec<Box<dyn Diagnostic>> {
let project_span = tracing::debug_span!("Project::check");
let _span = project_span.enter();
tracing::debug!("Checking project '{name}'", name = self.name(db));
let mut diagnostics: Vec<Box<dyn Diagnostic>> = Vec::new();
let (_, options_diagnostics) = self.rule_selection_with_diagnostics(db);
diagnostics.extend(options_diagnostics.iter().map(|diagnostic| {
let diagnostic: Box<dyn Diagnostic> = Box::new(diagnostic.clone());
diagnostic
}));
let result = Arc::new(std::sync::Mutex::new(diagnostics));
let inner_result = Arc::clone(&result);
let db = db.clone();
let project_span = project_span.clone();
rayon::scope(move |scope| {
let files = ProjectFiles::new(&db, self);
for file in &files {
let result = inner_result.clone();
let db = db.clone();
let project_span = project_span.clone();
scope.spawn(move |_| {
let check_file_span = tracing::debug_span!(parent: &project_span, "check_file", file=%file.path(&db));
let _entered = check_file_span.entered();
let file_diagnostics = check_file_impl(&db, file);
result.lock().unwrap().extend(file_diagnostics);
});
}
});
Arc::into_inner(result).unwrap().into_inner().unwrap()
}
pub(crate) fn check_file(self, db: &dyn Db, file: File) -> Vec<Box<dyn Diagnostic>> {
let (_, options_diagnostics) = self.rule_selection_with_diagnostics(db);
let mut file_diagnostics: Vec<_> = options_diagnostics
.iter()
.map(|diagnostic| {
let diagnostic: Box<dyn Diagnostic> = Box::new(diagnostic.clone());
diagnostic
})
.collect();
let check_diagnostics = check_file_impl(db, file);
file_diagnostics.extend(check_diagnostics);
file_diagnostics
}
/// Opens a file in the project.
///
/// This changes the behavior of `check` to only check the open files rather than all files in the project.
pub fn open_file(self, db: &mut dyn Db, file: File) {
tracing::debug!("Opening file `{}`", file.path(db));
let mut open_files = self.take_open_files(db);
open_files.insert(file);
self.set_open_files(db, open_files);
}
/// Closes a file in the project.
pub fn close_file(self, db: &mut dyn Db, file: File) -> bool {
tracing::debug!("Closing file `{}`", file.path(db));
let mut open_files = self.take_open_files(db);
let removed = open_files.remove(&file);
if removed {
self.set_open_files(db, open_files);
}
removed
}
/// Returns the open files in the project or `None` if the entire project should be checked.
pub fn open_files(self, db: &dyn Db) -> Option<&FxHashSet<File>> {
self.open_fileset(db).as_deref()
}
/// Sets the open files in the project.
///
/// This changes the behavior of `check` to only check the open files rather than all files in the project.
#[tracing::instrument(level = "debug", skip(self, db))]
pub fn set_open_files(self, db: &mut dyn Db, open_files: FxHashSet<File>) {
tracing::debug!("Set open project files (count: {})", open_files.len());
self.set_open_fileset(db).to(Some(Arc::new(open_files)));
}
/// This takes the open files from the project and returns them.
///
/// This changes the behavior of `check` to check all files in the project instead of just the open files.
fn take_open_files(self, db: &mut dyn Db) -> FxHashSet<File> {
tracing::debug!("Take open project files");
// Salsa will cancel any pending queries and remove its own reference to `open_files`
// so that the reference count of `open_files` drops to 1.
let open_files = self.set_open_fileset(db).to(None);
if let Some(open_files) = open_files {
Arc::try_unwrap(open_files).unwrap()
} else {
FxHashSet::default()
}
}
/// Returns `true` if the file is open in the project.
///
/// A file is considered open when:
/// * it is explicitly set as an open file using [`open_file`](Self::open_file)
/// * it has a [`SystemPath`] and belongs to a package's `src` files
/// * it has a [`SystemVirtualPath`](ruff_db::system::SystemVirtualPath)
pub fn is_file_open(self, db: &dyn Db, file: File) -> bool {
if let Some(open_files) = self.open_files(db) {
open_files.contains(&file)
} else if file.path(db).is_system_path() {
self.contains_file(db, file)
} else {
file.path(db).is_system_virtual_path()
}
}
/// Returns `true` if `file` is a first-party file part of this package.
pub fn contains_file(self, db: &dyn Db, file: File) -> bool {
self.files(db).contains(&file)
}
#[tracing::instrument(level = "debug", skip(db))]
pub fn remove_file(self, db: &mut dyn Db, file: File) {
tracing::debug!(
"Removing file `{}` from project `{}`",
file.path(db),
self.name(db)
);
let Some(mut index) = IndexedFiles::indexed_mut(db, self) else {
return;
};
index.remove(file);
}
pub fn add_file(self, db: &mut dyn Db, file: File) {
tracing::debug!(
"Adding file `{}` to project `{}`",
file.path(db),
self.name(db)
);
let Some(mut index) = IndexedFiles::indexed_mut(db, self) else {
return;
};
index.insert(file);
}
/// Returns the files belonging to this project.
pub fn files(self, db: &dyn Db) -> Indexed<'_> {
let files = self.file_set(db);
let indexed = match files.get() {
Index::Lazy(vacant) => {
let _entered =
tracing::debug_span!("Project::index_files", package = %self.name(db))
.entered();
let files = discover_project_files(db, self);
tracing::info!("Found {} files in project `{}`", files.len(), self.name(db));
vacant.set(files)
}
Index::Indexed(indexed) => indexed,
};
indexed
}
pub fn reload_files(self, db: &mut dyn Db) {
tracing::debug!("Reloading files for project `{}`", self.name(db));
if !self.file_set(db).is_lazy() {
// Force a re-index of the files in the next revision.
self.set_file_set(db).to(IndexedFiles::lazy());
}
}
}
fn check_file_impl(db: &dyn Db, file: File) -> Vec<Box<dyn Diagnostic>> {
let mut diagnostics: Vec<Box<dyn Diagnostic>> = Vec::new();
// Abort checking if there are IO errors.
let source = source_text(db.upcast(), file);
if let Some(read_error) = source.read_error() {
diagnostics.push(Box::new(IOErrorDiagnostic {
file,
error: read_error.clone(),
}));
return diagnostics;
}
let parsed = parsed_module(db.upcast(), file);
diagnostics.extend(parsed.errors().iter().map(|error| {
let diagnostic: Box<dyn Diagnostic> = Box::new(ParseDiagnostic::new(file, error.clone()));
diagnostic
}));
diagnostics.extend(check_types(db.upcast(), file).iter().map(|diagnostic| {
let boxed: Box<dyn Diagnostic> = Box::new(diagnostic.clone());
boxed
}));
diagnostics.sort_unstable_by_key(|diagnostic| diagnostic.range().unwrap_or_default().start());
diagnostics
}
fn discover_project_files(db: &dyn Db, project: Project) -> FxHashSet<File> {
let paths = std::sync::Mutex::new(Vec::new());
db.system().walk_directory(project.root(db)).run(|| {
Box::new(|entry| {
match entry {
Ok(entry) => {
// Skip over any non python files to avoid creating too many entries in `Files`.
match entry.file_type() {
FileType::File => {
if entry
.path()
.extension()
.and_then(PySourceType::try_from_extension)
.is_some()
{
let mut paths = paths.lock().unwrap();
paths.push(entry.into_path());
}
}
FileType::Directory | FileType::Symlink => {}
}
}
Err(error) => {
// TODO Handle error
tracing::error!("Failed to walk path: {error}");
}
}
WalkState::Continue
})
});
let paths = paths.into_inner().unwrap();
let mut files = FxHashSet::with_capacity_and_hasher(paths.len(), FxBuildHasher);
for path in paths {
// If this returns an `Err`, then the file was deleted between the `walk_directory` call and now.
// We can ignore this.
if let Ok(file) = system_path_to_file(db.upcast(), &path) {
files.insert(file);
}
}
files
}
#[derive(Debug)]
enum ProjectFiles<'a> {
OpenFiles(&'a FxHashSet<File>),
Indexed(files::Indexed<'a>),
}
impl<'a> ProjectFiles<'a> {
fn new(db: &'a dyn Db, project: Project) -> Self {
if let Some(open_files) = project.open_files(db) {
ProjectFiles::OpenFiles(open_files)
} else {
ProjectFiles::Indexed(project.files(db))
}
}
}
impl<'a> IntoIterator for &'a ProjectFiles<'a> {
type Item = File;
type IntoIter = ProjectFilesIter<'a>;
fn into_iter(self) -> Self::IntoIter {
match self {
ProjectFiles::OpenFiles(files) => ProjectFilesIter::OpenFiles(files.iter()),
ProjectFiles::Indexed(indexed) => ProjectFilesIter::Indexed {
files: indexed.into_iter(),
},
}
}
}
enum ProjectFilesIter<'db> {
OpenFiles(std::collections::hash_set::Iter<'db, File>),
Indexed { files: files::IndexedIter<'db> },
}
impl Iterator for ProjectFilesIter<'_> {
type Item = File;
fn next(&mut self) -> Option<Self::Item> {
match self {
ProjectFilesIter::OpenFiles(files) => files.next().copied(),
ProjectFilesIter::Indexed { files } => files.next(),
}
}
}
#[derive(Debug)]
pub struct IOErrorDiagnostic {
file: File,
error: SourceTextError,
}
impl Diagnostic for IOErrorDiagnostic {
fn id(&self) -> DiagnosticId {
DiagnosticId::Io
}
fn message(&self) -> Cow<str> {
self.error.to_string().into()
}
fn file(&self) -> Option<File> {
Some(self.file)
}
fn range(&self) -> Option<TextRange> {
None
}
fn severity(&self) -> Severity {
Severity::Error
}
}
#[cfg(test)]
mod tests {
use crate::db::tests::TestDb;
use crate::{check_file_impl, ProjectMetadata};
use red_knot_python_semantic::types::check_types;
use ruff_db::diagnostic::Diagnostic;
use ruff_db::files::system_path_to_file;
use ruff_db::source::source_text;
use ruff_db::system::{DbWithTestSystem, SystemPath, SystemPathBuf};
use ruff_db::testing::assert_function_query_was_not_run;
use ruff_python_ast::name::Name;
#[test]
fn check_file_skips_type_checking_when_file_cant_be_read() -> ruff_db::system::Result<()> {
let project = ProjectMetadata::new(Name::new_static("test"), SystemPathBuf::from("/"));
let mut db = TestDb::new(project);
let path = SystemPath::new("test.py");
db.write_file(path, "x = 10")?;
let file = system_path_to_file(&db, path).unwrap();
// Now the file gets deleted before we had a chance to read its source text.
db.memory_file_system().remove_file(path)?;
file.sync(&mut db);
assert_eq!(source_text(&db, file).as_str(), "");
assert_eq!(
check_file_impl(&db, file)
.into_iter()
.map(|diagnostic| diagnostic.message().into_owned())
.collect::<Vec<_>>(),
vec!["Failed to read file: No such file or directory".to_string()]
);
let events = db.take_salsa_events();
assert_function_query_was_not_run(&db, check_types, file, &events);
// The user now creates a new file with an empty text. The source text
// content returned by `source_text` remains unchanged, but the diagnostics should get updated.
db.write_file(path, "").unwrap();
assert_eq!(source_text(&db, file).as_str(), "");
assert_eq!(
check_file_impl(&db, file)
.into_iter()
.map(|diagnostic| diagnostic.message().into_owned())
.collect::<Vec<_>>(),
vec![] as Vec<String>
);
Ok(())
}
}

View File

@@ -1,531 +0,0 @@
use red_knot_python_semantic::ProgramSettings;
use ruff_db::system::{System, SystemPath, SystemPathBuf};
use ruff_python_ast::name::Name;
use std::sync::Arc;
use thiserror::Error;
use crate::combine::Combine;
use crate::metadata::pyproject::{Project, PyProject, PyProjectError};
use crate::metadata::value::ValueSource;
use options::KnotTomlError;
use options::Options;
pub mod options;
pub mod pyproject;
pub mod value;
#[derive(Debug, PartialEq, Eq)]
#[cfg_attr(test, derive(serde::Serialize))]
pub struct ProjectMetadata {
pub(super) name: Name,
pub(super) root: SystemPathBuf,
/// The raw options
pub(super) options: Options,
}
impl ProjectMetadata {
/// Creates a project with the given name and root that uses the default options.
pub fn new(name: Name, root: SystemPathBuf) -> Self {
Self {
name,
root,
options: Options::default(),
}
}
/// Loads a project from a `pyproject.toml` file.
pub(crate) fn from_pyproject(pyproject: PyProject, root: SystemPathBuf) -> Self {
Self::from_options(
pyproject
.tool
.and_then(|tool| tool.knot)
.unwrap_or_default(),
root,
pyproject.project.as_ref(),
)
}
/// Loads a project from a set of options with an optional pyproject-project table.
pub(crate) fn from_options(
options: Options,
root: SystemPathBuf,
project: Option<&Project>,
) -> Self {
let name = project
.and_then(|project| project.name.as_ref())
.map(|name| Name::new(&***name))
.unwrap_or_else(|| Name::new(root.file_name().unwrap_or("root")));
// TODO(https://github.com/astral-sh/ruff/issues/15491): Respect requires-python
Self {
name,
root,
options,
}
}
/// Discovers the closest project at `path` and returns its metadata.
///
/// The algorithm traverses upwards in the `path`'s ancestor chain and uses the following precedence
/// to resolve the project's root:
///
/// 1. The closest `pyproject.toml` with a `tool.knot` section or `knot.toml`.
/// 1. The closest `pyproject.toml`.
/// 1. Fallback to use `path` as the root and use the default settings.
pub fn discover(
path: &SystemPath,
system: &dyn System,
) -> Result<ProjectMetadata, ProjectDiscoveryError> {
tracing::debug!("Searching for a project in '{path}'");
if !system.is_directory(path) {
return Err(ProjectDiscoveryError::NotADirectory(path.to_path_buf()));
}
let mut closest_project: Option<ProjectMetadata> = None;
for project_root in path.ancestors() {
let pyproject_path = project_root.join("pyproject.toml");
let pyproject = if let Ok(pyproject_str) = system.read_to_string(&pyproject_path) {
match PyProject::from_toml_str(
&pyproject_str,
ValueSource::File(Arc::new(pyproject_path.clone())),
) {
Ok(pyproject) => Some(pyproject),
Err(error) => {
return Err(ProjectDiscoveryError::InvalidPyProject {
path: pyproject_path,
source: Box::new(error),
})
}
}
} else {
None
};
// A `knot.toml` takes precedence over a `pyproject.toml`.
let knot_toml_path = project_root.join("knot.toml");
if let Ok(knot_str) = system.read_to_string(&knot_toml_path) {
let options = match Options::from_toml_str(
&knot_str,
ValueSource::File(Arc::new(knot_toml_path.clone())),
) {
Ok(options) => options,
Err(error) => {
return Err(ProjectDiscoveryError::InvalidKnotToml {
path: knot_toml_path,
source: Box::new(error),
})
}
};
if pyproject
.as_ref()
.is_some_and(|project| project.knot().is_some())
{
// TODO: Consider using a diagnostic here
tracing::warn!("Ignoring the `tool.knot` section in `{pyproject_path}` because `{knot_toml_path}` takes precedence.");
}
tracing::debug!("Found project at '{}'", project_root);
return Ok(ProjectMetadata::from_options(
options,
project_root.to_path_buf(),
pyproject
.as_ref()
.and_then(|pyproject| pyproject.project.as_ref()),
));
}
if let Some(pyproject) = pyproject {
let has_knot_section = pyproject.knot().is_some();
let metadata =
ProjectMetadata::from_pyproject(pyproject, project_root.to_path_buf());
if has_knot_section {
tracing::debug!("Found project at '{}'", project_root);
return Ok(metadata);
}
// Not a project itself, keep looking for an enclosing project.
if closest_project.is_none() {
closest_project = Some(metadata);
}
}
}
// No project found, but maybe a pyproject.toml was found.
let metadata = if let Some(closest_project) = closest_project {
tracing::debug!(
"Project without `tool.knot` section: '{}'",
closest_project.root()
);
closest_project
} else {
tracing::debug!("The ancestor directories contain no `pyproject.toml`. Falling back to a virtual project.");
// Create a project with a default configuration
Self::new(
path.file_name().unwrap_or("root").into(),
path.to_path_buf(),
)
};
Ok(metadata)
}
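    // Illustrative walk-through (not from the original diff) of the precedence above,
    // assuming this hypothetical layout:
    //
    //   /repo/pyproject.toml       (no `[tool.knot]` table)
    //   /repo/app/knot.toml
    //   /repo/app/src/main.py
    //
    // Discovering from `/repo/app/src` walks up the ancestor chain and finds
    // `/repo/app/knot.toml` first, so `/repo/app` becomes the project root.
    // Without that `knot.toml`, discovery would keep walking up and fall back to
    // `/repo`, the closest directory with a `pyproject.toml`.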
pub fn root(&self) -> &SystemPath {
&self.root
}
pub fn name(&self) -> &str {
&self.name
}
pub fn options(&self) -> &Options {
&self.options
}
pub fn to_program_settings(&self, system: &dyn System) -> ProgramSettings {
self.options.to_program_settings(self.root(), system)
}
/// Combine the project options with the CLI options where the CLI options take precedence.
pub fn apply_cli_options(&mut self, options: Options) {
self.options = options.combine(std::mem::take(&mut self.options));
}
/// Combine the project options with the user options where project options take precedence.
pub fn apply_user_options(&mut self, options: Options) {
self.options.combine_with(options);
}
}
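// Taken together, the two methods above layer configuration as
// CLI > project (`pyproject.toml`/`knot.toml`) > user-level options.
// A hypothetical caller would apply them roughly like this:
//
//     let mut metadata = ProjectMetadata::discover(path, system)?;
//     metadata.apply_cli_options(cli_options);   // highest precedence
//     metadata.apply_user_options(user_options); // lowest precedence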
#[derive(Debug, Error)]
pub enum ProjectDiscoveryError {
#[error("project path '{0}' is not a directory")]
NotADirectory(SystemPathBuf),
#[error("{path} is not a valid `pyproject.toml`: {source}")]
InvalidPyProject {
source: Box<PyProjectError>,
path: SystemPathBuf,
},
#[error("{path} is not a valid `knot.toml`: {source}")]
InvalidKnotToml {
source: Box<KnotTomlError>,
path: SystemPathBuf,
},
}
#[cfg(test)]
mod tests {
//! Integration tests for project discovery
use crate::snapshot_project;
use anyhow::{anyhow, Context};
use insta::assert_ron_snapshot;
use ruff_db::system::{SystemPathBuf, TestSystem};
use crate::{ProjectDiscoveryError, ProjectMetadata};
#[test]
fn project_without_pyproject() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_files([(root.join("foo.py"), ""), (root.join("bar.py"), "")])
.context("Failed to write files")?;
let project =
ProjectMetadata::discover(&root, &system).context("Failed to discover project")?;
assert_eq!(project.root(), &*root);
snapshot_project!(project);
Ok(())
}
#[test]
fn project_with_pyproject() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_files([
(
root.join("pyproject.toml"),
r#"
[project]
name = "backend"
"#,
),
(root.join("db/__init__.py"), ""),
])
.context("Failed to write files")?;
let project =
ProjectMetadata::discover(&root, &system).context("Failed to discover project")?;
assert_eq!(project.root(), &*root);
snapshot_project!(project);
// Discovering the same package from a subdirectory should give the same result
let from_src = ProjectMetadata::discover(&root.join("db"), &system)
.context("Failed to discover project from src sub-directory")?;
assert_eq!(from_src, project);
Ok(())
}
#[test]
fn project_with_invalid_pyproject() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_files([
(
root.join("pyproject.toml"),
r#"
[project]
name = "backend"
[tool.knot
"#,
),
(root.join("db/__init__.py"), ""),
])
.context("Failed to write files")?;
let Err(error) = ProjectMetadata::discover(&root, &system) else {
return Err(anyhow!("Expected project discovery to fail because of invalid syntax in the pyproject.toml"));
};
assert_error_eq(
&error,
r#"/app/pyproject.toml is not a valid `pyproject.toml`: TOML parse error at line 5, column 31
|
5 | [tool.knot
| ^
invalid table header
expected `.`, `]`
"#,
);
Ok(())
}
#[test]
fn nested_projects_in_sub_project() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_files([
(
root.join("pyproject.toml"),
r#"
[project]
name = "project-root"
[tool.knot.src]
root = "src"
"#,
),
(
root.join("packages/a/pyproject.toml"),
r#"
[project]
name = "nested-project"
[tool.knot.src]
root = "src"
"#,
),
])
.context("Failed to write files")?;
let sub_project = ProjectMetadata::discover(&root.join("packages/a"), &system)?;
snapshot_project!(sub_project);
Ok(())
}
#[test]
fn nested_projects_in_root_project() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_files([
(
root.join("pyproject.toml"),
r#"
[project]
name = "project-root"
[tool.knot.src]
root = "src"
"#,
),
(
root.join("packages/a/pyproject.toml"),
r#"
[project]
name = "nested-project"
[tool.knot.src]
root = "src"
"#,
),
])
.context("Failed to write files")?;
let root = ProjectMetadata::discover(&root, &system)?;
snapshot_project!(root);
Ok(())
}
#[test]
fn nested_projects_without_knot_sections() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_files([
(
root.join("pyproject.toml"),
r#"
[project]
name = "project-root"
"#,
),
(
root.join("packages/a/pyproject.toml"),
r#"
[project]
name = "nested-project"
"#,
),
])
.context("Failed to write files")?;
let sub_project = ProjectMetadata::discover(&root.join("packages/a"), &system)?;
snapshot_project!(sub_project);
Ok(())
}
#[test]
fn nested_projects_with_outer_knot_section() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_files([
(
root.join("pyproject.toml"),
r#"
[project]
name = "project-root"
[tool.knot.environment]
python-version = "3.10"
"#,
),
(
root.join("packages/a/pyproject.toml"),
r#"
[project]
name = "nested-project"
"#,
),
])
.context("Failed to write files")?;
let root = ProjectMetadata::discover(&root.join("packages/a"), &system)?;
snapshot_project!(root);
Ok(())
}
/// A `knot.toml` takes precedence over any `pyproject.toml`.
///
/// However, the `pyproject.toml` is still loaded to get the project name and, in the future,
/// the requires-python constraint.
#[test]
fn project_with_knot_and_pyproject_toml() -> anyhow::Result<()> {
let system = TestSystem::default();
let root = SystemPathBuf::from("/app");
system
.memory_file_system()
.write_files([
(
root.join("pyproject.toml"),
r#"
[project]
name = "super-app"
requires-python = ">=3.12"
[tool.knot.src]
root = "this_option_is_ignored"
"#,
),
(
root.join("knot.toml"),
r#"
[src]
root = "src"
"#,
),
])
.context("Failed to write files")?;
let root = ProjectMetadata::discover(&root, &system)?;
snapshot_project!(root);
Ok(())
}
#[track_caller]
fn assert_error_eq(error: &ProjectDiscoveryError, message: &str) {
assert_eq!(error.to_string().replace('\\', "/"), message);
}
/// Snapshots a project but with all paths using unix separators.
#[macro_export]
macro_rules! snapshot_project {
($project:expr) => {{
assert_ron_snapshot!($project,{
".root" => insta::dynamic_redaction(|content, _content_path| {
content.as_str().unwrap().replace("\\", "/")
}),
});
}};
}
}

View File

@@ -1,369 +0,0 @@
use crate::metadata::value::{RangedValue, RelativePathBuf, ValueSource, ValueSourceGuard};
use crate::Db;
use red_knot_python_semantic::lint::{GetLintError, Level, LintSource, RuleSelection};
use red_knot_python_semantic::{
ProgramSettings, PythonPlatform, PythonVersion, SearchPathSettings, SitePackages,
};
use ruff_db::diagnostic::{Diagnostic, DiagnosticId, Severity};
use ruff_db::files::{system_path_to_file, File};
use ruff_db::system::{System, SystemPath};
use ruff_macros::Combine;
use ruff_text_size::TextRange;
use rustc_hash::FxHashMap;
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use std::fmt::Debug;
use thiserror::Error;
/// The options for the project.
#[derive(Debug, Default, Clone, PartialEq, Eq, Combine, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Options {
/// Configures the type checking environment.
#[serde(skip_serializing_if = "Option::is_none")]
pub environment: Option<EnvironmentOptions>,
#[serde(skip_serializing_if = "Option::is_none")]
pub src: Option<SrcOptions>,
/// Configures the enabled lints and their severity.
#[serde(skip_serializing_if = "Option::is_none")]
pub rules: Option<Rules>,
}
impl Options {
pub(crate) fn from_toml_str(content: &str, source: ValueSource) -> Result<Self, KnotTomlError> {
let _guard = ValueSourceGuard::new(source);
let options = toml::from_str(content)?;
Ok(options)
}
pub(crate) fn to_program_settings(
&self,
project_root: &SystemPath,
system: &dyn System,
) -> ProgramSettings {
let (python_version, python_platform) = self
.environment
.as_ref()
.map(|env| {
(
env.python_version.as_deref().copied(),
env.python_platform.as_deref(),
)
})
.unwrap_or_default();
ProgramSettings {
python_version: python_version.unwrap_or_default(),
python_platform: python_platform.cloned().unwrap_or_default(),
search_paths: self.to_search_path_settings(project_root, system),
}
}
fn to_search_path_settings(
&self,
project_root: &SystemPath,
system: &dyn System,
) -> SearchPathSettings {
let src_roots = if let Some(src_root) = self.src.as_ref().and_then(|src| src.root.as_ref())
{
vec![src_root.absolute(project_root, system)]
} else {
let src = project_root.join("src");
// Default to `src` and the project root if `src` exists and the root hasn't been specified.
if system.is_directory(&src) {
vec![project_root.to_path_buf(), src]
} else {
vec![project_root.to_path_buf()]
}
};
let (extra_paths, python, typeshed) = self
.environment
.as_ref()
.map(|env| {
(
env.extra_paths.clone(),
env.venv_path.clone(),
env.typeshed.clone(),
)
})
.unwrap_or_default();
SearchPathSettings {
extra_paths: extra_paths
.unwrap_or_default()
.into_iter()
.map(|path| path.absolute(project_root, system))
.collect(),
src_roots,
custom_typeshed: typeshed.map(|path| path.absolute(project_root, system)),
site_packages: python
.map(|venv_path| SitePackages::Derived {
venv_path: venv_path.absolute(project_root, system),
})
.unwrap_or(SitePackages::Known(vec![])),
}
}
#[must_use]
pub(crate) fn to_rule_selection(&self, db: &dyn Db) -> (RuleSelection, Vec<OptionDiagnostic>) {
let registry = db.lint_registry();
let mut diagnostics = Vec::new();
// Initialize the selection with the defaults
let mut selection = RuleSelection::from_registry(registry);
let rules = self
.rules
.as_ref()
.into_iter()
.flat_map(|rules| rules.inner.iter());
for (rule_name, level) in rules {
let source = rule_name.source();
match registry.get(rule_name) {
Ok(lint) => {
let lint_source = match source {
ValueSource::File(_) => LintSource::File,
ValueSource::Cli => LintSource::Cli,
};
if let Ok(severity) = Severity::try_from(**level) {
selection.enable(lint, severity, lint_source);
} else {
selection.disable(lint);
}
}
Err(error) => {
// `system_path_to_file` can return `Err` if the file was deleted since the configuration
// was read. This should be rare and it should be okay to default to not showing a configuration
// file in that case.
let file = source
.file()
.and_then(|path| system_path_to_file(db.upcast(), path).ok());
// TODO: Add a note if the value was configured on the CLI
let diagnostic = match error {
GetLintError::Unknown(_) => OptionDiagnostic::new(
DiagnosticId::UnknownRule,
format!("Unknown lint rule `{rule_name}`"),
Severity::Warning,
),
GetLintError::PrefixedWithCategory { suggestion, .. } => {
OptionDiagnostic::new(
DiagnosticId::UnknownRule,
format!(
"Unknown lint rule `{rule_name}`. Did you mean `{suggestion}`?"
),
Severity::Warning,
)
}
GetLintError::Removed(_) => OptionDiagnostic::new(
DiagnosticId::UnknownRule,
format!("Unknown lint rule `{rule_name}`"),
Severity::Warning,
),
};
diagnostics.push(diagnostic.with_file(file).with_range(rule_name.range()));
}
}
}
(selection, diagnostics)
}
}
#[derive(Debug, Default, Clone, Eq, PartialEq, Combine, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct EnvironmentOptions {
/// Specifies the version of Python that will be used to execute the source code.
/// The version should be specified as a string in the format `M.m` where `M` is the major version
/// and `m` is the minor (e.g. "3.0" or "3.6").
/// If a version is provided, knot will generate errors if the source code makes use of language features
/// that are not supported in that version.
/// It will also tailor its use of type stub files, which conditionalize type definitions based on the version.
#[serde(skip_serializing_if = "Option::is_none")]
pub python_version: Option<RangedValue<PythonVersion>>,
/// Specifies the target platform that will be used to execute the source code.
/// If specified, Red Knot will tailor its use of type stub files,
/// which conditionalize type definitions based on the platform.
///
/// If no platform is specified, knot defaults to `all`, or to the current platform in the LSP use case.
#[serde(skip_serializing_if = "Option::is_none")]
pub python_platform: Option<RangedValue<PythonPlatform>>,
/// List of user-provided paths that should take first priority in the module resolution.
/// Examples in other type checkers are mypy's MYPYPATH environment variable,
/// or pyright's stubPath configuration setting.
#[serde(skip_serializing_if = "Option::is_none")]
pub extra_paths: Option<Vec<RelativePathBuf>>,
/// Optional path to a "typeshed" directory on disk for us to use for standard-library types.
/// If this is not provided, we will fall back to our vendored typeshed stubs for the stdlib,
/// bundled as a zip file in the binary.
#[serde(skip_serializing_if = "Option::is_none")]
pub typeshed: Option<RelativePathBuf>,
// TODO: Rename to python, see https://github.com/astral-sh/ruff/issues/15530
/// The path to the user's `site-packages` directory, where third-party packages from ``PyPI`` are installed.
#[serde(skip_serializing_if = "Option::is_none")]
pub venv_path: Option<RelativePathBuf>,
}
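// Illustrative `knot.toml` fragment (not from the original diff; values are made up)
// exercising the environment options above:
//
//     [environment]
//     python-version = "3.12"
//     extra-paths = ["libs"]
//     venv-path = ".venv"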
#[derive(Debug, Default, Clone, Eq, PartialEq, Combine, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct SrcOptions {
/// The root of the project, used for finding first-party modules.
#[serde(skip_serializing_if = "Option::is_none")]
pub root: Option<RelativePathBuf>,
}
#[derive(Debug, Default, Clone, Eq, PartialEq, Combine, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", transparent)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Rules {
#[cfg_attr(feature = "schemars", schemars(with = "schema::Rules"))]
inner: FxHashMap<RangedValue<String>, RangedValue<Level>>,
}
impl FromIterator<(RangedValue<String>, RangedValue<Level>)> for Rules {
fn from_iter<T: IntoIterator<Item = (RangedValue<String>, RangedValue<Level>)>>(
iter: T,
) -> Self {
Self {
inner: iter.into_iter().collect(),
}
}
}
#[cfg(feature = "schemars")]
mod schema {
use crate::DEFAULT_LINT_REGISTRY;
use red_knot_python_semantic::lint::Level;
use schemars::gen::SchemaGenerator;
use schemars::schema::{
InstanceType, Metadata, ObjectValidation, Schema, SchemaObject, SubschemaValidation,
};
use schemars::JsonSchema;
pub(super) struct Rules;
impl JsonSchema for Rules {
fn schema_name() -> String {
"Rules".to_string()
}
fn json_schema(gen: &mut SchemaGenerator) -> Schema {
let registry = &*DEFAULT_LINT_REGISTRY;
let level_schema = gen.subschema_for::<Level>();
let properties: schemars::Map<String, Schema> = registry
.lints()
.iter()
.map(|lint| {
(
lint.name().to_string(),
Schema::Object(SchemaObject {
metadata: Some(Box::new(Metadata {
title: Some(lint.summary().to_string()),
description: Some(lint.documentation()),
deprecated: lint.status.is_deprecated(),
default: Some(lint.default_level.to_string().into()),
..Metadata::default()
})),
subschemas: Some(Box::new(SubschemaValidation {
one_of: Some(vec![level_schema.clone()]),
..Default::default()
})),
..Default::default()
}),
)
})
.collect();
Schema::Object(SchemaObject {
instance_type: Some(InstanceType::Object.into()),
object: Some(Box::new(ObjectValidation {
properties,
// Allow unknown rules: Red Knot will warn about them.
// It gives a better experience when using an older Red Knot version because
// the schema will not deny rules that have been removed in newer versions.
additional_properties: Some(Box::new(level_schema)),
..ObjectValidation::default()
})),
..Default::default()
})
}
}
}
#[derive(Error, Debug)]
pub enum KnotTomlError {
#[error(transparent)]
TomlSyntax(#[from] toml::de::Error),
}
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct OptionDiagnostic {
id: DiagnosticId,
message: String,
severity: Severity,
file: Option<File>,
range: Option<TextRange>,
}
impl OptionDiagnostic {
pub fn new(id: DiagnosticId, message: String, severity: Severity) -> Self {
Self {
id,
message,
severity,
file: None,
range: None,
}
}
#[must_use]
fn with_file(mut self, file: Option<File>) -> Self {
self.file = file;
self
}
#[must_use]
fn with_range(mut self, range: Option<TextRange>) -> Self {
self.range = range;
self
}
}
impl Diagnostic for OptionDiagnostic {
fn id(&self) -> DiagnosticId {
self.id
}
fn message(&self) -> Cow<str> {
Cow::Borrowed(&self.message)
}
fn file(&self) -> Option<File> {
self.file
}
fn range(&self) -> Option<TextRange> {
self.range
}
fn severity(&self) -> Severity {
self.severity
}
}

View File

@@ -1,330 +0,0 @@
use crate::combine::Combine;
use crate::Db;
use ruff_db::system::{System, SystemPath, SystemPathBuf};
use ruff_macros::Combine;
use ruff_text_size::{TextRange, TextSize};
use serde::{Deserialize, Deserializer};
use std::cell::RefCell;
use std::cmp::Ordering;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::ops::{Deref, DerefMut};
use std::sync::Arc;
use toml::Spanned;
#[derive(Clone, Debug)]
pub enum ValueSource {
/// Value loaded from a project's configuration file.
///
/// Ideally, we'd use [`ruff_db::files::File`] but we can't because the database hasn't been
/// created when loading the configuration.
File(Arc<SystemPathBuf>),
/// The value comes from a CLI argument; whether it was specified via a short argument,
/// a long argument (`--extra-paths`), or `--config key=value` is left open.
Cli,
}
impl ValueSource {
pub fn file(&self) -> Option<&SystemPath> {
match self {
ValueSource::File(path) => Some(&**path),
ValueSource::Cli => None,
}
}
}
thread_local! {
/// Serde doesn't provide any easy means to pass a value to a [`Deserialize`] implementation,
/// but we want to associate each deserialized [`RelativePath`] with the source from
/// which it originated. We use a thread local variable to work around this limitation.
///
/// Use the [`ValueSourceGuard`] to initialize the thread local before calling into any
/// deserialization code. It ensures that the thread local variable gets cleaned up
/// once deserialization is done (once the guard gets dropped).
static VALUE_SOURCE: RefCell<Option<ValueSource>> = const { RefCell::new(None) };
}
/// Guard to safely change the [`VALUE_SOURCE`] for the current thread.
#[must_use]
pub(super) struct ValueSourceGuard {
prev_value: Option<ValueSource>,
}
impl ValueSourceGuard {
pub(super) fn new(source: ValueSource) -> Self {
let prev = VALUE_SOURCE.replace(Some(source));
Self { prev_value: prev }
}
}
impl Drop for ValueSourceGuard {
fn drop(&mut self) {
VALUE_SOURCE.set(self.prev_value.take());
}
}
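// A hedged usage sketch (not part of the original file) of how the guard is meant
// to wrap deserialization so that every `RangedValue` constructed below picks up
// the installed source. The generic `T` stands in for whatever options type the
// caller deserializes; the function itself is illustrative only.
#[allow(dead_code)]
fn deserialize_with_source<T: serde::de::DeserializeOwned>(
    source: ValueSource,
    content: &str,
) -> Result<T, toml::de::Error> {
    // Install the source for the duration of deserialization ...
    let _guard = ValueSourceGuard::new(source);
    // ... so `RangedValue::deserialize` can read it from the thread local.
    toml::from_str(content)
    // `_guard` is dropped here, restoring the previous thread-local value.
}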
/// A value that "remembers" where it comes from (source) and its range in source.
///
/// ## Equality, Hash, and Ordering
/// The equality, hash, and ordering are solely based on the value. They disregard the value's range
/// or source.
///
/// This ensures that two resolved configurations are identical even if the position of a value has changed
/// or if the values were loaded from different sources.
#[derive(Clone, serde::Serialize)]
#[serde(transparent)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct RangedValue<T> {
value: T,
#[serde(skip)]
source: ValueSource,
/// The byte range of `value` in `source`.
///
/// Can be `None` because not all sources support a range.
/// For example, arguments provided on the CLI won't have a range attached.
#[serde(skip)]
range: Option<TextRange>,
}
impl<T> RangedValue<T> {
pub fn new(value: T, source: ValueSource) -> Self {
Self::with_range(value, source, TextRange::default())
}
pub fn cli(value: T) -> Self {
Self::with_range(value, ValueSource::Cli, TextRange::default())
}
pub fn with_range(value: T, source: ValueSource, range: TextRange) -> Self {
Self {
value,
range: Some(range),
source,
}
}
pub fn range(&self) -> Option<TextRange> {
self.range
}
pub fn source(&self) -> &ValueSource {
&self.source
}
#[must_use]
pub fn with_source(mut self, source: ValueSource) -> Self {
self.source = source;
self
}
pub fn into_inner(self) -> T {
self.value
}
}
impl<T> Combine for RangedValue<T> {
fn combine(self, _other: Self) -> Self
where
Self: Sized,
{
self
}
fn combine_with(&mut self, _other: Self) {}
}
impl<T> IntoIterator for RangedValue<T>
where
T: IntoIterator,
{
type Item = T::Item;
type IntoIter = T::IntoIter;
fn into_iter(self) -> Self::IntoIter {
self.value.into_iter()
}
}
// The type already has an `iter` method thanks to `Deref`.
#[allow(clippy::into_iter_without_iter)]
impl<'a, T> IntoIterator for &'a RangedValue<T>
where
&'a T: IntoIterator,
{
type Item = <&'a T as IntoIterator>::Item;
type IntoIter = <&'a T as IntoIterator>::IntoIter;
fn into_iter(self) -> Self::IntoIter {
self.value.into_iter()
}
}
// The type already has an `iter_mut` method thanks to `DerefMut`.
#[allow(clippy::into_iter_without_iter)]
impl<'a, T> IntoIterator for &'a mut RangedValue<T>
where
&'a mut T: IntoIterator,
{
type Item = <&'a mut T as IntoIterator>::Item;
type IntoIter = <&'a mut T as IntoIterator>::IntoIter;
fn into_iter(self) -> Self::IntoIter {
self.value.into_iter()
}
}
impl<T> fmt::Debug for RangedValue<T>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.value.fmt(f)
}
}
impl<T> fmt::Display for RangedValue<T>
where
T: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.value.fmt(f)
}
}
impl<T> Deref for RangedValue<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.value
}
}
impl<T> DerefMut for RangedValue<T> {
fn deref_mut(&mut self) -> &mut T {
&mut self.value
}
}
impl<T, U: ?Sized> AsRef<U> for RangedValue<T>
where
T: AsRef<U>,
{
fn as_ref(&self) -> &U {
self.value.as_ref()
}
}
impl<T: PartialEq> PartialEq for RangedValue<T> {
fn eq(&self, other: &Self) -> bool {
self.value.eq(&other.value)
}
}
impl<T: PartialEq<T>> PartialEq<T> for RangedValue<T> {
fn eq(&self, other: &T) -> bool {
self.value.eq(other)
}
}
impl<T: Eq> Eq for RangedValue<T> {}
impl<T: Hash> Hash for RangedValue<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.value.hash(state);
}
}
impl<T: PartialOrd> PartialOrd for RangedValue<T> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.value.partial_cmp(&other.value)
}
}
impl<T: PartialOrd<T>> PartialOrd<T> for RangedValue<T> {
fn partial_cmp(&self, other: &T) -> Option<Ordering> {
self.value.partial_cmp(other)
}
}
impl<T: Ord> Ord for RangedValue<T> {
fn cmp(&self, other: &Self) -> Ordering {
self.value.cmp(&other.value)
}
}
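// A hedged illustration (not part of the original file) of the equality semantics
// documented on `RangedValue`: the payload alone decides equality, so the same
// value loaded from a configuration file and passed on the CLI compare equal.
// The file name is made up for the example.
#[cfg(test)]
mod ranged_value_equality {
    use super::*;

    #[test]
    fn equality_ignores_source_and_range() {
        let from_cli = RangedValue::cli("3.13".to_string());
        let from_file = RangedValue::new(
            "3.13".to_string(),
            ValueSource::File(Arc::new(SystemPathBuf::from("knot.toml"))),
        );

        // The differing sources are disregarded; only the value is compared.
        assert_eq!(from_cli, from_file);
    }
}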
impl<'de, T> Deserialize<'de> for RangedValue<T>
where
T: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let spanned: Spanned<T> = Spanned::deserialize(deserializer)?;
let span = spanned.span();
let range = TextRange::new(
TextSize::try_from(span.start).expect("Configuration file to be smaller than 4GB"),
TextSize::try_from(span.end).expect("Configuration file to be smaller than 4GB"),
);
Ok(VALUE_SOURCE.with_borrow(|source| {
let source = source.clone().unwrap();
Self::with_range(spanned.into_inner(), source, range)
}))
}
}
/// A possibly relative path in a configuration file.
///
/// Relative paths in configuration files or from CLI options
/// require different anchoring:
///
/// * CLI: The path is relative to the current working directory
/// * Configuration file: The path is relative to the project's root.
#[derive(
Debug,
Clone,
serde::Serialize,
serde::Deserialize,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Combine,
)]
#[serde(transparent)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct RelativePathBuf(RangedValue<SystemPathBuf>);
impl RelativePathBuf {
pub fn new(path: impl AsRef<SystemPath>, source: ValueSource) -> Self {
Self(RangedValue::new(path.as_ref().to_path_buf(), source))
}
pub fn cli(path: impl AsRef<SystemPath>) -> Self {
Self::new(path, ValueSource::Cli)
}
/// Returns the relative path as specified by the user.
pub fn path(&self) -> &SystemPath {
&self.0
}
/// Returns the owned relative path.
pub fn into_path_buf(self) -> SystemPathBuf {
self.0.into_inner()
}
/// Resolves the absolute path for `self` based on its origin.
pub fn absolute_with_db(&self, db: &dyn Db) -> SystemPathBuf {
self.absolute(db.project().root(db), db.system())
}
/// Resolves the absolute path for `self` based on its origin.
pub fn absolute(&self, project_root: &SystemPath, system: &dyn System) -> SystemPathBuf {
let relative_to = match &self.0.source {
ValueSource::File(_) => project_root,
ValueSource::Cli => system.current_directory(),
};
SystemPath::absolute(&self.0, relative_to)
}
}
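// A hedged illustration (not part of the original file) of the two anchoring modes
// described above. Only construction is shown; resolving the paths via `absolute`
// additionally needs a `System` and a project root. Paths are made up.
#[cfg(test)]
mod relative_path_buf_sources {
    use super::*;

    #[test]
    fn cli_and_file_values_remember_their_origin() {
        // A path passed on the CLI is later resolved against the current
        // working directory ...
        let from_cli = RelativePathBuf::cli("src");

        // ... while the same path read from a configuration file is resolved
        // against the project root.
        let from_file = RelativePathBuf::new(
            "src",
            ValueSource::File(Arc::new(SystemPathBuf::from("/app/knot.toml"))),
        );

        // Equality ignores the origin, even though resolution does not.
        assert_eq!(from_cli, from_file);
    }
}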

View File

@@ -1,13 +0,0 @@
---
source: crates/red_knot_project/src/metadata.rs
expression: root
---
ProjectMetadata(
name: Name("project-root"),
root: "/app",
options: Options(
src: Some(SrcOptions(
root: Some("src"),
)),
),
)

View File

@@ -1,13 +0,0 @@
---
source: crates/red_knot_project/src/metadata.rs
expression: sub_project
---
ProjectMetadata(
name: Name("nested-project"),
root: "/app/packages/a",
options: Options(
src: Some(SrcOptions(
root: Some("src"),
)),
),
)

View File

@@ -1,13 +0,0 @@
---
source: crates/red_knot_project/src/metadata.rs
expression: root
---
ProjectMetadata(
name: Name("project-root"),
root: "/app",
options: Options(
environment: Some(EnvironmentOptions(
r#python-version: Some("3.10"),
)),
),
)

View File

@@ -1,9 +0,0 @@
---
source: crates/red_knot_project/src/metadata.rs
expression: sub_project
---
ProjectMetadata(
name: Name("nested-project"),
root: "/app/packages/a",
options: Options(),
)

View File

@@ -1,13 +0,0 @@
---
source: crates/red_knot_project/src/metadata.rs
expression: root
---
ProjectMetadata(
name: Name("super-app"),
root: "/app",
options: Options(
src: Some(SrcOptions(
root: Some("src"),
)),
),
)

View File

@@ -1,9 +0,0 @@
---
source: crates/red_knot_project/src/metadata.rs
expression: project
---
ProjectMetadata(
name: Name("backend"),
root: "/app",
options: Options(),
)

View File

@@ -1,9 +0,0 @@
---
source: crates/red_knot_project/src/metadata.rs
expression: project
---
ProjectMetadata(
name: Name("app"),
root: "/app",
options: Options(),
)

View File

@@ -20,14 +20,12 @@ ruff_python_stdlib = { workspace = true }
ruff_source_file = { workspace = true }
ruff_text_size = { workspace = true }
ruff_python_literal = { workspace = true }
ruff_python_trivia = { workspace = true }
anyhow = { workspace = true }
bitflags = { workspace = true }
camino = { workspace = true }
compact_str = { workspace = true }
countme = { workspace = true }
drop_bomb = { workspace = true }
indexmap = { workspace = true }
itertools = { workspace = true }
ordermap = { workspace = true }
@@ -36,7 +34,6 @@ thiserror = { workspace = true }
tracing = { workspace = true }
rustc-hash = { workspace = true }
hashbrown = { workspace = true }
schemars = { workspace = true, optional = true }
serde = { workspace = true, optional = true }
smallvec = { workspace = true }
static_assertions = { workspace = true }
@@ -56,8 +53,5 @@ tempfile = { workspace = true }
quickcheck = { version = "1.0.3", default-features = false }
quickcheck_macros = { version = "1.0.0" }
[features]
serde = ["ruff_db/serde", "dep:serde"]
[lints]
workspace = true

View File

@@ -1,220 +0,0 @@
"""A runner for Markdown-based tests for Red Knot"""
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "rich",
# "watchfiles",
# ]
# ///
from __future__ import annotations
import json
import os
import subprocess
from pathlib import Path
from typing import Final, Literal, Never, assert_never
from rich.console import Console
from watchfiles import Change, watch
CRATE_NAME: Final = "red_knot_python_semantic"
CRATE_ROOT: Final = Path(__file__).resolve().parent
MDTEST_DIR: Final = CRATE_ROOT / "resources" / "mdtest"
class MDTestRunner:
mdtest_executable: Path | None
console: Console
def __init__(self) -> None:
self.mdtest_executable = None
self.console = Console()
def _run_cargo_test(self, *, message_format: Literal["human", "json"]) -> str:
return subprocess.check_output(
[
"cargo",
"test",
"--package",
CRATE_NAME,
"--no-run",
"--color=always",
"--message-format",
message_format,
],
cwd=CRATE_ROOT,
env=dict(os.environ, CLI_COLOR="1"),
stderr=subprocess.STDOUT,
text=True,
)
def _recompile_tests(
self, status_message: str, *, message_on_success: bool = True
) -> bool:
with self.console.status(status_message):
# Run it with 'human' format in case there are errors:
try:
self._run_cargo_test(message_format="human")
except subprocess.CalledProcessError as e:
print(e.output)
return False
# Run it again with 'json' format to find the mdtest executable:
try:
json_output = self._run_cargo_test(message_format="json")
except subprocess.CalledProcessError as _:
# `cargo test` can still fail if something changed in between the two runs.
# Here we don't have a human-readable output, so just show a generic message:
self.console.print("[red]Error[/red]: Failed to compile tests")
return False
if json_output:
self._get_executable_path_from_json(json_output)
if message_on_success:
self.console.print("[dim]Tests compiled successfully[/dim]")
return True
def _get_executable_path_from_json(self, json_output: str) -> None:
for json_line in json_output.splitlines():
try:
data = json.loads(json_line)
except json.JSONDecodeError:
continue
if data.get("target", {}).get("name") == "mdtest":
self.mdtest_executable = Path(data["executable"])
break
else:
raise RuntimeError(
"Could not find mdtest executable after successful compilation"
)
def _run_mdtest(
self, arguments: list[str] | None = None, *, capture_output: bool = False
) -> subprocess.CompletedProcess:
assert self.mdtest_executable is not None
arguments = arguments or []
return subprocess.run(
[self.mdtest_executable, *arguments],
cwd=CRATE_ROOT,
env=dict(os.environ, CLICOLOR_FORCE="1"),
capture_output=capture_output,
text=True,
check=False,
)
def _run_mdtests_for_file(self, markdown_file: Path) -> None:
path_mangled = (
markdown_file.as_posix()
.replace("/", "_")
.replace("-", "_")
.removesuffix(".md")
)
test_name = f"mdtest__{path_mangled}"
output = self._run_mdtest(["--exact", test_name], capture_output=True)
if output.returncode == 0:
if "running 0 tests\n" in output.stdout:
self.console.log(
f"[yellow]Warning[/yellow]: No tests were executed with filter '{test_name}'"
)
else:
self.console.print(
f"Test for [bold green]{markdown_file}[/bold green] succeeded"
)
else:
self.console.print()
self.console.rule(
f"Test for [bold red]{markdown_file}[/bold red] failed",
style="gray",
)
self._print_trimmed_cargo_test_output(
output.stdout + output.stderr, test_name
)
def _print_trimmed_cargo_test_output(self, output: str, test_name: str) -> None:
# Skip 'cargo test' boilerplate at the beginning:
lines = output.splitlines()
start_index = 0
for i, line in enumerate(lines):
if f"{test_name} stdout" in line:
start_index = i
break
for line in lines[start_index + 1 :]:
if "MDTEST_TEST_FILTER" in line:
continue
if line.strip() == "-" * 50:
# Skip 'cargo test' boilerplate at the end
break
print(line)
def watch(self) -> Never:
self._recompile_tests("Compiling tests...", message_on_success=False)
self.console.print("[dim]Ready to watch for changes...[/dim]")
for changes in watch(CRATE_ROOT):
new_md_files = set()
changed_md_files = set()
rust_code_has_changed = False
for change, path_str in changes:
path = Path(path_str)
if path.suffix == ".rs":
rust_code_has_changed = True
continue
if path.suffix != ".md":
continue
try:
relative_path = Path(path).relative_to(MDTEST_DIR)
except ValueError:
continue
match change:
case Change.added:
# When saving a file, some editors (looking at you, Vim) might first
# save the file with a temporary name (e.g. `file.md~`) and then rename
# it to the final name. This creates a `deleted` and `added` change.
# We treat those files as `changed` here.
if (Change.deleted, path_str) in changes:
changed_md_files.add(relative_path)
else:
new_md_files.add(relative_path)
case Change.modified:
changed_md_files.add(relative_path)
case Change.deleted:
# No need to do anything when a Markdown test is deleted
pass
case _ as unreachable:
assert_never(unreachable)
if rust_code_has_changed:
if self._recompile_tests("Rust code has changed, recompiling tests..."):
self._run_mdtest()
elif new_md_files:
files = " ".join(file.as_posix() for file in new_md_files)
self._recompile_tests(
f"New Markdown test [yellow]{files}[/yellow] detected, recompiling tests..."
)
for path in new_md_files | changed_md_files:
self._run_mdtests_for_file(path)
def main() -> None:
try:
runner = MDTestRunner()
runner.watch()
except KeyboardInterrupt:
print()
if __name__ == "__main__":
main()

View File

@@ -1,141 +0,0 @@
version = 1
requires-python = ">=3.11"
[manifest]
requirements = [
{ name = "rich" },
{ name = "watchfiles" },
]
[[package]]
name = "anyio"
version = "4.8.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "idna" },
{ name = "sniffio" },
{ name = "typing-extensions", marker = "python_full_version < '3.13'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/a3/73/199a98fc2dae33535d6b8e8e6ec01f8c1d76c9adb096c6b7d64823038cde/anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a", size = 181126 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/46/eb/e7f063ad1fec6b3178a3cd82d1a3c4de82cccf283fc42746168188e1cdd5/anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a", size = 96041 },
]
[[package]]
name = "idna"
version = "3.10"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 },
]
[[package]]
name = "markdown-it-py"
version = "3.0.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "mdurl" },
]
sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528 },
]
[[package]]
name = "mdurl"
version = "0.1.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979 },
]
[[package]]
name = "pygments"
version = "2.19.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 },
]
[[package]]
name = "rich"
version = "13.9.4"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "markdown-it-py" },
{ name = "pygments" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ab/3a/0316b28d0761c6734d6bc14e770d85506c986c85ffb239e688eeaab2c2bc/rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098", size = 223149 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/19/71/39c7c0d87f8d4e6c020a393182060eaefeeae6c01dab6a84ec346f2567df/rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90", size = 242424 },
]
[[package]]
name = "sniffio"
version = "1.3.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 },
]
[[package]]
name = "typing-extensions"
version = "4.12.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 },
]
[[package]]
name = "watchfiles"
version = "1.0.4"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
]
sdist = { url = "https://files.pythonhosted.org/packages/f5/26/c705fc77d0a9ecdb9b66f1e2976d95b81df3cae518967431e7dbf9b5e219/watchfiles-1.0.4.tar.gz", hash = "sha256:6ba473efd11062d73e4f00c2b730255f9c1bdd73cd5f9fe5b5da8dbd4a717205", size = 94625 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/0f/bb/8461adc4b1fed009546fb797fc0d5698dcfe5e289cb37e1b8f16a93cdc30/watchfiles-1.0.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:2a9f93f8439639dc244c4d2902abe35b0279102bca7bbcf119af964f51d53c19", size = 394869 },
{ url = "https://files.pythonhosted.org/packages/55/88/9ebf36b3547176d1709c320de78c1fa3263a46be31b5b1267571d9102686/watchfiles-1.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9eea33ad8c418847dd296e61eb683cae1c63329b6d854aefcd412e12d94ee235", size = 384905 },
{ url = "https://files.pythonhosted.org/packages/03/8a/04335ce23ef78d8c69f0913e8b20cf7d9233e3986543aeef95ef2d6e43d2/watchfiles-1.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31f1a379c9dcbb3f09cf6be1b7e83b67c0e9faabed0471556d9438a4a4e14202", size = 449944 },
{ url = "https://files.pythonhosted.org/packages/17/4e/c8d5dcd14fe637f4633616dabea8a4af0a10142dccf3b43e0f081ba81ab4/watchfiles-1.0.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab594e75644421ae0a2484554832ca5895f8cab5ab62de30a1a57db460ce06c6", size = 456020 },
{ url = "https://files.pythonhosted.org/packages/5e/74/3e91e09e1861dd7fbb1190ce7bd786700dc0fbc2ccd33bb9fff5de039229/watchfiles-1.0.4-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc2eb5d14a8e0d5df7b36288979176fbb39672d45184fc4b1c004d7c3ce29317", size = 482983 },
{ url = "https://files.pythonhosted.org/packages/a1/3d/e64de2d1ce4eb6a574fd78ce3a28c279da263be9ef3cfcab6f708df192f2/watchfiles-1.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f68d8e9d5a321163ddacebe97091000955a1b74cd43724e346056030b0bacee", size = 520320 },
{ url = "https://files.pythonhosted.org/packages/2c/bd/52235f7063b57240c66a991696ed27e2a18bd6fcec8a1ea5a040b70d0611/watchfiles-1.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9ce064e81fe79faa925ff03b9f4c1a98b0bbb4a1b8c1b015afa93030cb21a49", size = 500988 },
{ url = "https://files.pythonhosted.org/packages/3a/b0/ff04194141a5fe650c150400dd9e42667916bc0f52426e2e174d779b8a74/watchfiles-1.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b77d5622ac5cc91d21ae9c2b284b5d5c51085a0bdb7b518dba263d0af006132c", size = 452573 },
{ url = "https://files.pythonhosted.org/packages/3d/9d/966164332c5a178444ae6d165082d4f351bd56afd9c3ec828eecbf190e6a/watchfiles-1.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1941b4e39de9b38b868a69b911df5e89dc43767feeda667b40ae032522b9b5f1", size = 615114 },
{ url = "https://files.pythonhosted.org/packages/94/df/f569ae4c1877f96ad4086c153a8eee5a19a3b519487bf5c9454a3438c341/watchfiles-1.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4f8c4998506241dedf59613082d1c18b836e26ef2a4caecad0ec41e2a15e4226", size = 613076 },
{ url = "https://files.pythonhosted.org/packages/15/ae/8ce5f29e65d5fa5790e3c80c289819c55e12be2e1b9f5b6a0e55e169b97d/watchfiles-1.0.4-cp311-cp311-win32.whl", hash = "sha256:4ebbeca9360c830766b9f0df3640b791be569d988f4be6c06d6fae41f187f105", size = 271013 },
{ url = "https://files.pythonhosted.org/packages/a4/c6/79dc4a7c598a978e5fafa135090aaf7bbb03b8dec7bada437dfbe578e7ed/watchfiles-1.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:05d341c71f3d7098920f8551d4df47f7b57ac5b8dad56558064c3431bdfc0b74", size = 284229 },
{ url = "https://files.pythonhosted.org/packages/37/3d/928633723211753f3500bfb138434f080363b87a1b08ca188b1ce54d1e05/watchfiles-1.0.4-cp311-cp311-win_arm64.whl", hash = "sha256:32b026a6ab64245b584acf4931fe21842374da82372d5c039cba6bf99ef722f3", size = 276824 },
{ url = "https://files.pythonhosted.org/packages/5b/1a/8f4d9a1461709756ace48c98f07772bc6d4519b1e48b5fa24a4061216256/watchfiles-1.0.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:229e6ec880eca20e0ba2f7e2249c85bae1999d330161f45c78d160832e026ee2", size = 391345 },
{ url = "https://files.pythonhosted.org/packages/bc/d2/6750b7b3527b1cdaa33731438432e7238a6c6c40a9924049e4cebfa40805/watchfiles-1.0.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5717021b199e8353782dce03bd8a8f64438832b84e2885c4a645f9723bf656d9", size = 381515 },
{ url = "https://files.pythonhosted.org/packages/4e/17/80500e42363deef1e4b4818729ed939aaddc56f82f4e72b2508729dd3c6b/watchfiles-1.0.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0799ae68dfa95136dde7c472525700bd48777875a4abb2ee454e3ab18e9fc712", size = 449767 },
{ url = "https://files.pythonhosted.org/packages/10/37/1427fa4cfa09adbe04b1e97bced19a29a3462cc64c78630787b613a23f18/watchfiles-1.0.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:43b168bba889886b62edb0397cab5b6490ffb656ee2fcb22dec8bfeb371a9e12", size = 455677 },
{ url = "https://files.pythonhosted.org/packages/c5/7a/39e9397f3a19cb549a7d380412fd9e507d4854eddc0700bfad10ef6d4dba/watchfiles-1.0.4-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb2c46e275fbb9f0c92e7654b231543c7bbfa1df07cdc4b99fa73bedfde5c844", size = 482219 },
{ url = "https://files.pythonhosted.org/packages/45/2d/7113931a77e2ea4436cad0c1690c09a40a7f31d366f79c6f0a5bc7a4f6d5/watchfiles-1.0.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:857f5fc3aa027ff5e57047da93f96e908a35fe602d24f5e5d8ce64bf1f2fc733", size = 518830 },
{ url = "https://files.pythonhosted.org/packages/f9/1b/50733b1980fa81ef3c70388a546481ae5fa4c2080040100cd7bf3bf7b321/watchfiles-1.0.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55ccfd27c497b228581e2838d4386301227fc0cb47f5a12923ec2fe4f97b95af", size = 497997 },
{ url = "https://files.pythonhosted.org/packages/2b/b4/9396cc61b948ef18943e7c85ecfa64cf940c88977d882da57147f62b34b1/watchfiles-1.0.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c11ea22304d17d4385067588123658e9f23159225a27b983f343fcffc3e796a", size = 452249 },
{ url = "https://files.pythonhosted.org/packages/fb/69/0c65a5a29e057ad0dc691c2fa6c23b2983c7dabaa190ba553b29ac84c3cc/watchfiles-1.0.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:74cb3ca19a740be4caa18f238298b9d472c850f7b2ed89f396c00a4c97e2d9ff", size = 614412 },
{ url = "https://files.pythonhosted.org/packages/7f/b9/319fcba6eba5fad34327d7ce16a6b163b39741016b1996f4a3c96b8dd0e1/watchfiles-1.0.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c7cce76c138a91e720d1df54014a047e680b652336e1b73b8e3ff3158e05061e", size = 611982 },
{ url = "https://files.pythonhosted.org/packages/f1/47/143c92418e30cb9348a4387bfa149c8e0e404a7c5b0585d46d2f7031b4b9/watchfiles-1.0.4-cp312-cp312-win32.whl", hash = "sha256:b045c800d55bc7e2cadd47f45a97c7b29f70f08a7c2fa13241905010a5493f94", size = 271822 },
{ url = "https://files.pythonhosted.org/packages/ea/94/b0165481bff99a64b29e46e07ac2e0df9f7a957ef13bec4ceab8515f44e3/watchfiles-1.0.4-cp312-cp312-win_amd64.whl", hash = "sha256:c2acfa49dd0ad0bf2a9c0bb9a985af02e89345a7189be1efc6baa085e0f72d7c", size = 285441 },
{ url = "https://files.pythonhosted.org/packages/11/de/09fe56317d582742d7ca8c2ca7b52a85927ebb50678d9b0fa8194658f536/watchfiles-1.0.4-cp312-cp312-win_arm64.whl", hash = "sha256:22bb55a7c9e564e763ea06c7acea24fc5d2ee5dfc5dafc5cfbedfe58505e9f90", size = 277141 },
{ url = "https://files.pythonhosted.org/packages/08/98/f03efabec64b5b1fa58c0daab25c68ef815b0f320e54adcacd0d6847c339/watchfiles-1.0.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:8012bd820c380c3d3db8435e8cf7592260257b378b649154a7948a663b5f84e9", size = 390954 },
{ url = "https://files.pythonhosted.org/packages/16/09/4dd49ba0a32a45813debe5fb3897955541351ee8142f586303b271a02b40/watchfiles-1.0.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa216f87594f951c17511efe5912808dfcc4befa464ab17c98d387830ce07b60", size = 381133 },
{ url = "https://files.pythonhosted.org/packages/76/59/5aa6fc93553cd8d8ee75c6247763d77c02631aed21551a97d94998bf1dae/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c9953cf85529c05b24705639ffa390f78c26449e15ec34d5339e8108c7c407", size = 449516 },
{ url = "https://files.pythonhosted.org/packages/4c/aa/df4b6fe14b6317290b91335b23c96b488d365d65549587434817e06895ea/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7cf684aa9bba4cd95ecb62c822a56de54e3ae0598c1a7f2065d51e24637a3c5d", size = 454820 },
{ url = "https://files.pythonhosted.org/packages/5e/71/185f8672f1094ce48af33252c73e39b48be93b761273872d9312087245f6/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f44a39aee3cbb9b825285ff979ab887a25c5d336e5ec3574f1506a4671556a8d", size = 481550 },
{ url = "https://files.pythonhosted.org/packages/85/d7/50ebba2c426ef1a5cb17f02158222911a2e005d401caf5d911bfca58f4c4/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38320582736922be8c865d46520c043bff350956dfc9fbaee3b2df4e1740a4b", size = 518647 },
{ url = "https://files.pythonhosted.org/packages/f0/7a/4c009342e393c545d68987e8010b937f72f47937731225b2b29b7231428f/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39f4914548b818540ef21fd22447a63e7be6e24b43a70f7642d21f1e73371590", size = 497547 },
{ url = "https://files.pythonhosted.org/packages/0f/7c/1cf50b35412d5c72d63b2bf9a4fffee2e1549a245924960dd087eb6a6de4/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f12969a3765909cf5dc1e50b2436eb2c0e676a3c75773ab8cc3aa6175c16e902", size = 452179 },
{ url = "https://files.pythonhosted.org/packages/d6/a9/3db1410e1c1413735a9a472380e4f431ad9a9e81711cda2aaf02b7f62693/watchfiles-1.0.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0986902677a1a5e6212d0c49b319aad9cc48da4bd967f86a11bde96ad9676ca1", size = 614125 },
{ url = "https://files.pythonhosted.org/packages/f2/e1/0025d365cf6248c4d1ee4c3d2e3d373bdd3f6aff78ba4298f97b4fad2740/watchfiles-1.0.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:308ac265c56f936636e3b0e3f59e059a40003c655228c131e1ad439957592303", size = 611911 },
{ url = "https://files.pythonhosted.org/packages/55/55/035838277d8c98fc8c917ac9beeb0cd6c59d675dc2421df5f9fcf44a0070/watchfiles-1.0.4-cp313-cp313-win32.whl", hash = "sha256:aee397456a29b492c20fda2d8961e1ffb266223625346ace14e4b6d861ba9c80", size = 271152 },
{ url = "https://files.pythonhosted.org/packages/f0/e5/96b8e55271685ddbadc50ce8bc53aa2dff278fb7ac4c2e473df890def2dc/watchfiles-1.0.4-cp313-cp313-win_amd64.whl", hash = "sha256:d6097538b0ae5c1b88c3b55afa245a66793a8fec7ada6755322e465fb1a0e8cc", size = 285216 },
]

View File

@@ -1,94 +0,0 @@
# `Annotated`
`Annotated` attaches arbitrary metadata to a given type.
## Usages
`Annotated[T, ...]` is equivalent to `T`: All metadata arguments are simply ignored.
```py
from typing_extensions import Annotated
def _(x: Annotated[int, "foo"]):
reveal_type(x) # revealed: int
def _(x: Annotated[int, lambda: 0 + 1 * 2 // 3, _(4)]):
reveal_type(x) # revealed: int
def _(x: Annotated[int, "arbitrary", "metadata", "elements", "are", "fine"]):
reveal_type(x) # revealed: int
def _(x: Annotated[tuple[str, int], bytes]):
reveal_type(x) # revealed: tuple[str, int]
```
## Parameterization
It is invalid to parameterize `Annotated` with fewer than two arguments.
```py
from typing_extensions import Annotated
# error: [invalid-type-form] "`Annotated` requires at least two arguments when used in an annotation or type expression"
def _(x: Annotated):
reveal_type(x) # revealed: Unknown
def _(flag: bool):
if flag:
X = Annotated
else:
X = bool
# error: [invalid-type-form] "`Annotated` requires at least two arguments when used in an annotation or type expression"
def f(y: X):
reveal_type(y) # revealed: Unknown | bool
# error: [invalid-type-form] "`Annotated` requires at least two arguments when used in an annotation or type expression"
def _(x: Annotated | bool):
reveal_type(x) # revealed: Unknown | bool
# error: [invalid-type-form]
def _(x: Annotated[()]):
reveal_type(x) # revealed: Unknown
# error: [invalid-type-form]
def _(x: Annotated[int]):
# `Annotated[T]` is invalid and will raise an error at runtime,
# but we treat it the same as `T` to provide better diagnostics later on.
# The subscription itself is still reported, regardless.
# Same for the `(int,)` form below.
reveal_type(x) # revealed: int
# error: [invalid-type-form]
def _(x: Annotated[(int,)]):
reveal_type(x) # revealed: int
```
## Inheritance
### Correctly parameterized
Inheriting from `Annotated[T, ...]` is equivalent to inheriting from `T` itself.
```py
from typing_extensions import Annotated
# TODO: False positive
# error: [invalid-base]
class C(Annotated[int, "foo"]): ...
# TODO: Should be `tuple[Literal[C], Literal[int], Literal[object]]`
reveal_type(C.__mro__) # revealed: tuple[Literal[C], Unknown, Literal[object]]
```
### Not parameterized
```py
from typing_extensions import Annotated
# At runtime, this is an error.
# error: [invalid-base]
class C(Annotated): ...
reveal_type(C.__mro__) # revealed: tuple[Literal[C], Unknown, Literal[object]]
```

View File

@@ -34,7 +34,8 @@ If you define your own class named `Any`, using that in a type expression refers
isn't a spelling of the Any type.
```py
class Any: ...
class Any:
pass
x: Any
@@ -58,7 +59,8 @@ assignable to `int`.
```py
from typing import Any
class Subclass(Any): ...
class Subclass(Any):
pass
reveal_type(Subclass.__mro__) # revealed: tuple[Literal[Subclass], Any, Literal[object]]
@@ -66,18 +68,8 @@ x: Subclass = 1 # error: [invalid-assignment]
# TODO: no diagnostic
y: int = Subclass() # error: [invalid-assignment]
def _(s: Subclass):
reveal_type(s) # revealed: Subclass
```
## Invalid
`Any` cannot be parameterized:
```py
from typing import Any
# error: [invalid-type-form] "Type `typing.Any` expected no type parameter"
def f(x: Any[int]):
reveal_type(x) # revealed: Unknown
def f() -> Subclass:
pass
reveal_type(f()) # revealed: Subclass
```

View File

@@ -1,46 +0,0 @@
# Deferred annotations
## Deferred annotations in stubs always resolve
`mod.pyi`:
```pyi
def get_foo() -> Foo: ...
class Foo: ...
```
```py
from mod import get_foo
reveal_type(get_foo()) # revealed: Foo
```
## Deferred annotations in regular code fail
In (regular) source files, annotations are *not* deferred. This also tests that imports from
`__future__` that are not `annotations` are ignored.
```py
from __future__ import with_statement as annotations
# error: [unresolved-reference]
def get_foo() -> Foo: ...
class Foo: ...
reveal_type(get_foo()) # revealed: Unknown
```
## Deferred annotations in regular code with `__future__.annotations`
If `__future__.annotations` is imported, annotations *are* deferred.
```py
from __future__ import annotations
def get_foo() -> Foo: ...
class Foo: ...
reveal_type(get_foo()) # revealed: Foo
```

View File

@@ -1,155 +0,0 @@
# Literal
<https://typing.readthedocs.io/en/latest/spec/literal.html#literals>
## Parameterization
```py
from typing import Literal
from enum import Enum
mode: Literal["w", "r"]
a1: Literal[26]
a2: Literal[0x1A]
a3: Literal[-4]
a4: Literal["hello world"]
a5: Literal[b"hello world"]
a6: Literal[True]
a7: Literal[None]
a8: Literal[Literal[1]]
class Color(Enum):
RED = 0
GREEN = 1
BLUE = 2
b1: Literal[Color.RED]
def f():
reveal_type(mode) # revealed: Literal["w", "r"]
reveal_type(a1) # revealed: Literal[26]
reveal_type(a2) # revealed: Literal[26]
reveal_type(a3) # revealed: Literal[-4]
reveal_type(a4) # revealed: Literal["hello world"]
reveal_type(a5) # revealed: Literal[b"hello world"]
reveal_type(a6) # revealed: Literal[True]
reveal_type(a7) # revealed: None
reveal_type(a8) # revealed: Literal[1]
# TODO: This should be Color.RED
reveal_type(b1) # revealed: Unknown | Literal[0]
# error: [invalid-type-form]
invalid1: Literal[3 + 4]
# error: [invalid-type-form]
invalid2: Literal[4 + 3j]
# error: [invalid-type-form]
invalid3: Literal[(3, 4)]
hello = "hello"
invalid4: Literal[
1 + 2, # error: [invalid-type-form]
"foo",
hello, # error: [invalid-type-form]
(1, 2, 3), # error: [invalid-type-form]
]
```
## Shortening unions of literals
When a Literal is parameterized with more than one value, it's treated as exactly equivalent to
the union of those types.
```py
from typing import Literal
def x(
a1: Literal[Literal[Literal[1, 2, 3], "foo"], 5, None],
a2: Literal["w"] | Literal["r"],
a3: Literal[Literal["w"], Literal["r"], Literal[Literal["w+"]]],
a4: Literal[True] | Literal[1, 2] | Literal["foo"],
):
reveal_type(a1) # revealed: Literal[1, 2, 3, "foo", 5] | None
reveal_type(a2) # revealed: Literal["w", "r"]
reveal_type(a3) # revealed: Literal["w", "r", "w+"]
reveal_type(a4) # revealed: Literal[True, 1, 2, "foo"]
```
## Display of heterogeneous unions of literals
```py
from typing import Literal, Union
def foo(x: int) -> int:
return x + 1
def bar(s: str) -> str:
return s
class A: ...
class B: ...
def union_example(
x: Union[
# unknown type
# error: [unresolved-reference]
y,
Literal[-1],
Literal["A"],
Literal[b"A"],
Literal[b"\x00"],
Literal[b"\x07"],
Literal[0],
Literal[1],
Literal["B"],
Literal["foo"],
Literal["bar"],
Literal["B"],
Literal[True],
None,
],
):
reveal_type(x) # revealed: Unknown | Literal[-1, "A", b"A", b"\x00", b"\x07", 0, 1, "B", "foo", "bar", True] | None
```
## Detecting Literal outside typing and typing_extensions
Only a `Literal` that is defined in the `typing` or `typing_extensions` modules is detected as the
special `Literal` form.
`other.pyi`:
```pyi
from typing import _SpecialForm
Literal: _SpecialForm
```
```py
from other import Literal
a1: Literal[26]
def f():
reveal_type(a1) # revealed: @Todo(generics)
```
## Detecting typing_extensions.Literal
```py
from typing_extensions import Literal
a1: Literal[26]
def f():
reveal_type(a1) # revealed: Literal[26]
```
## Invalid
```py
from typing import Literal
# error: [invalid-type-form] "`Literal` requires at least one argument when used in a type expression"
def _(x: Literal):
reveal_type(x) # revealed: Unknown
```

View File

@@ -27,19 +27,19 @@ def f():
```py
from typing_extensions import Literal, LiteralString
bad_union: Literal["hello", LiteralString] # error: [invalid-type-form]
bad_nesting: Literal[LiteralString] # error: [invalid-type-form]
bad_union: Literal["hello", LiteralString] # error: [invalid-literal-parameter]
bad_nesting: Literal[LiteralString] # error: [invalid-literal-parameter]
```
### Parameterized
### Parametrized
`LiteralString` cannot be parameterized.
`LiteralString` cannot be parametrized.
```py
from typing_extensions import LiteralString
a: LiteralString[str] # error: [invalid-type-form]
b: LiteralString["foo"] # error: [invalid-type-form]
a: LiteralString[str] # error: [invalid-type-parameter]
b: LiteralString["foo"] # error: [invalid-type-parameter]
```
### As a base class
@@ -73,12 +73,12 @@ qux = (foo, bar)
reveal_type(qux) # revealed: tuple[Literal["foo"], Literal["bar"]]
# TODO: Infer "LiteralString"
reveal_type(foo.join(qux)) # revealed: @Todo(Attribute access on `StringLiteral` types)
reveal_type(foo.join(qux)) # revealed: @Todo(call todo)
template: LiteralString = "{}, {}"
reveal_type(template) # revealed: Literal["{}, {}"]
# TODO: Infer `LiteralString`
reveal_type(template.format(foo, bar)) # revealed: @Todo(Attribute access on `StringLiteral` types)
reveal_type(template.format(foo, bar)) # revealed: @Todo(call todo)
```
### Assignability
@@ -89,26 +89,28 @@ vice versa.
```py
from typing_extensions import Literal, LiteralString
def _(flag: bool):
foo_1: Literal["foo"] = "foo"
bar_1: LiteralString = foo_1 # fine
def coinflip() -> bool:
return True
foo_2 = "foo" if flag else "bar"
reveal_type(foo_2) # revealed: Literal["foo", "bar"]
bar_2: LiteralString = foo_2 # fine
foo_1: Literal["foo"] = "foo"
bar_1: LiteralString = foo_1 # fine
foo_3: LiteralString = "foo" * 1_000_000_000
bar_3: str = foo_2 # fine
foo_2 = "foo" if coinflip() else "bar"
reveal_type(foo_2) # revealed: Literal["foo", "bar"]
bar_2: LiteralString = foo_2 # fine
baz_1: str = repr(object())
qux_1: LiteralString = baz_1 # error: [invalid-assignment]
foo_3: LiteralString = "foo" * 1_000_000_000
bar_3: str = foo_2 # fine
baz_2: LiteralString = "baz" * 1_000_000_000
qux_2: Literal["qux"] = baz_2 # error: [invalid-assignment]
baz_1: str = str()
qux_1: LiteralString = baz_1 # error: [invalid-assignment]
baz_3 = "foo" if flag else 1
reveal_type(baz_3) # revealed: Literal["foo", 1]
qux_3: LiteralString = baz_3 # error: [invalid-assignment]
baz_2: LiteralString = "baz" * 1_000_000_000
qux_2: Literal["qux"] = baz_2 # error: [invalid-assignment]
baz_3 = "foo" if coinflip() else 1
reveal_type(baz_3) # revealed: Literal["foo"] | Literal[1]
qux_3: LiteralString = baz_3 # error: [invalid-assignment]
```
### Narrowing
@@ -135,7 +137,7 @@ if "" < lorem == "ipsum":
```toml
[environment]
python-version = "3.11"
target-version = "3.11"
```
```py

View File

@@ -21,7 +21,7 @@ reveal_type(stop())
```py
from typing_extensions import NoReturn, Never, Any
# error: [invalid-type-form] "Type `typing.Never` expected no type parameter"
# error: [invalid-type-parameter] "Type `typing.Never` expected no type parameter"
x: Never[int]
a1: NoReturn
a2: Never
@@ -47,29 +47,18 @@ def f():
## `typing.Never`
`typing.Never` is only available in Python 3.11 and later.
### Python 3.11
`typing.Never` is only available in Python 3.11 and later:
```toml
[environment]
python-version = "3.11"
target-version = "3.11"
```
```py
from typing import Never
reveal_type(Never) # revealed: typing.Never
```
x: Never
### Python 3.10
```toml
[environment]
python-version = "3.10"
```
```py
# error: [unresolved-import]
from typing import Never
def f():
reveal_type(x) # revealed: Never
```

View File

@@ -1,127 +0,0 @@
# Typing-module aliases to other stdlib classes
The `typing` module has various aliases to other stdlib classes. These are a legacy feature, but
still need to be supported by a type checker.
## Correspondence
All of the following symbols can be mapped one-to-one with the actual type:
```py
import typing
def f(
list_bare: typing.List,
list_parametrized: typing.List[int],
dict_bare: typing.Dict,
dict_parametrized: typing.Dict[int, str],
set_bare: typing.Set,
set_parametrized: typing.Set[int],
frozen_set_bare: typing.FrozenSet,
frozen_set_parametrized: typing.FrozenSet[str],
chain_map_bare: typing.ChainMap,
chain_map_parametrized: typing.ChainMap[int],
counter_bare: typing.Counter,
counter_parametrized: typing.Counter[int],
default_dict_bare: typing.DefaultDict,
default_dict_parametrized: typing.DefaultDict[str, int],
deque_bare: typing.Deque,
deque_parametrized: typing.Deque[str],
ordered_dict_bare: typing.OrderedDict,
ordered_dict_parametrized: typing.OrderedDict[int, str],
):
reveal_type(list_bare) # revealed: list
reveal_type(list_parametrized) # revealed: list
reveal_type(dict_bare) # revealed: dict
reveal_type(dict_parametrized) # revealed: dict
reveal_type(set_bare) # revealed: set
reveal_type(set_parametrized) # revealed: set
reveal_type(frozen_set_bare) # revealed: frozenset
reveal_type(frozen_set_parametrized) # revealed: frozenset
reveal_type(chain_map_bare) # revealed: ChainMap
reveal_type(chain_map_parametrized) # revealed: ChainMap
reveal_type(counter_bare) # revealed: Counter
reveal_type(counter_parametrized) # revealed: Counter
reveal_type(default_dict_bare) # revealed: defaultdict
reveal_type(default_dict_parametrized) # revealed: defaultdict
reveal_type(deque_bare) # revealed: deque
reveal_type(deque_parametrized) # revealed: deque
reveal_type(ordered_dict_bare) # revealed: OrderedDict
reveal_type(ordered_dict_parametrized) # revealed: OrderedDict
```
## Inheritance
The aliases can be inherited from. Some of these are still partially or wholly TODOs.
```py
import typing
####################
### Built-ins
class ListSubclass(typing.List): ...
# TODO: should have `Generic`, should not have `Unknown`
# revealed: tuple[Literal[ListSubclass], Literal[list], Unknown, Literal[object]]
reveal_type(ListSubclass.__mro__)
class DictSubclass(typing.Dict): ...
# TODO: should have `Generic`, should not have `Unknown`
# revealed: tuple[Literal[DictSubclass], Literal[dict], Unknown, Literal[object]]
reveal_type(DictSubclass.__mro__)
class SetSubclass(typing.Set): ...
# TODO: should have `Generic`, should not have `Unknown`
# revealed: tuple[Literal[SetSubclass], Literal[set], Unknown, Literal[object]]
reveal_type(SetSubclass.__mro__)
class FrozenSetSubclass(typing.FrozenSet): ...
# TODO: should have `Generic`, should not have `Unknown`
# revealed: tuple[Literal[FrozenSetSubclass], Literal[frozenset], Unknown, Literal[object]]
reveal_type(FrozenSetSubclass.__mro__)
####################
### `collections`
class ChainMapSubclass(typing.ChainMap): ...
# TODO: Should be (ChainMapSubclass, ChainMap, MutableMapping, Mapping, Collection, Sized, Iterable, Container, Generic, object)
# revealed: tuple[Literal[ChainMapSubclass], Literal[ChainMap], Unknown, Literal[object]]
reveal_type(ChainMapSubclass.__mro__)
class CounterSubclass(typing.Counter): ...
# TODO: Should be (CounterSubclass, Counter, dict, MutableMapping, Mapping, Collection, Sized, Iterable, Container, Generic, object)
# revealed: tuple[Literal[CounterSubclass], Literal[Counter], Unknown, Literal[object]]
reveal_type(CounterSubclass.__mro__)
class DefaultDictSubclass(typing.DefaultDict): ...
# TODO: Should be (DefaultDictSubclass, defaultdict, dict, MutableMapping, Mapping, Collection, Sized, Iterable, Container, Generic, object)
# revealed: tuple[Literal[DefaultDictSubclass], Literal[defaultdict], Unknown, Literal[object]]
reveal_type(DefaultDictSubclass.__mro__)
class DequeSubclass(typing.Deque): ...
# TODO: Should be (DequeSubclass, deque, MutableSequence, Sequence, Reversible, Collection, Sized, Iterable, Container, Generic, object)
# revealed: tuple[Literal[DequeSubclass], Literal[deque], Unknown, Literal[object]]
reveal_type(DequeSubclass.__mro__)
class OrderedDictSubclass(typing.OrderedDict): ...
# TODO: Should be (OrderedDictSubclass, OrderedDict, dict, MutableMapping, Mapping, Collection, Sized, Iterable, Container, Generic, object)
# revealed: tuple[Literal[OrderedDictSubclass], Literal[OrderedDict], Unknown, Literal[object]]
reveal_type(OrderedDictSubclass.__mro__)
```

View File

@@ -3,56 +3,75 @@
## Simple
```py
def f(v: "int"):
reveal_type(v) # revealed: int
def f() -> "int":
return 1
reveal_type(f()) # revealed: int
```
## Nested
```py
def f(v: "'int'"):
reveal_type(v) # revealed: int
def f() -> "'int'":
return 1
reveal_type(f()) # revealed: int
```
## Type expression
```py
def f1(v: "int | str", w: "tuple[int, str]"):
reveal_type(v) # revealed: int | str
reveal_type(w) # revealed: tuple[int, str]
def f1() -> "int | str":
return 1
def f2() -> "tuple[int, str]":
return 1
reveal_type(f1()) # revealed: int | str
reveal_type(f2()) # revealed: tuple[int, str]
```
## Partial
```py
def f(v: tuple[int, "str"]):
reveal_type(v) # revealed: tuple[int, str]
def f() -> tuple[int, "str"]:
return 1
reveal_type(f()) # revealed: tuple[int, str]
```
## Deferred
```py
def f(v: "Foo"):
reveal_type(v) # revealed: Foo
def f() -> "Foo":
return Foo()
class Foo: ...
class Foo:
pass
reveal_type(f()) # revealed: Foo
```
## Deferred (undefined)
```py
# error: [unresolved-reference]
def f(v: "Foo"):
reveal_type(v) # revealed: Unknown
def f() -> "Foo":
pass
reveal_type(f()) # revealed: Unknown
```
## Partial deferred
```py
def f(v: int | "Foo"):
reveal_type(v) # revealed: int | Foo
def f() -> int | "Foo":
return 1
class Foo: ...
class Foo:
pass
reveal_type(f()) # revealed: int | Foo
```
## `typing.Literal`
@@ -60,43 +79,65 @@ class Foo: ...
```py
from typing import Literal
def f1(v: Literal["Foo", "Bar"], w: 'Literal["Foo", "Bar"]'):
reveal_type(v) # revealed: Literal["Foo", "Bar"]
reveal_type(w) # revealed: Literal["Foo", "Bar"]
def f1() -> Literal["Foo", "Bar"]:
return "Foo"
class Foo: ...
def f2() -> 'Literal["Foo", "Bar"]':
return "Foo"
class Foo:
pass
reveal_type(f1()) # revealed: Literal["Foo", "Bar"]
reveal_type(f2()) # revealed: Literal["Foo", "Bar"]
```
## Various string kinds
```py
def f1(
# error: [raw-string-type-annotation] "Type expressions cannot use raw string literal"
a: r"int",
# error: [fstring-type-annotation] "Type expressions cannot use f-strings"
b: f"int",
# error: [byte-string-type-annotation] "Type expressions cannot use bytes literal"
c: b"int",
d: "int",
# error: [implicit-concatenated-string-type-annotation] "Type expressions cannot span multiple string literals"
e: "in" "t",
# error: [escape-character-in-forward-annotation] "Type expressions cannot contain escape characters"
f: "\N{LATIN SMALL LETTER I}nt",
# error: [escape-character-in-forward-annotation] "Type expressions cannot contain escape characters"
g: "\x69nt",
h: """int""",
# error: [byte-string-type-annotation] "Type expressions cannot use bytes literal"
i: "b'int'",
):
reveal_type(a) # revealed: Unknown
reveal_type(b) # revealed: Unknown
reveal_type(c) # revealed: Unknown
reveal_type(d) # revealed: int
reveal_type(e) # revealed: Unknown
reveal_type(f) # revealed: Unknown
reveal_type(g) # revealed: Unknown
reveal_type(h) # revealed: int
reveal_type(i) # revealed: Unknown
# error: [raw-string-type-annotation] "Type expressions cannot use raw string literal"
def f1() -> r"int":
return 1
# error: [fstring-type-annotation] "Type expressions cannot use f-strings"
def f2() -> f"int":
return 1
# error: [byte-string-type-annotation] "Type expressions cannot use bytes literal"
def f3() -> b"int":
return 1
def f4() -> "int":
return 1
# error: [implicit-concatenated-string-type-annotation] "Type expressions cannot span multiple string literals"
def f5() -> "in" "t":
return 1
# error: [escape-character-in-forward-annotation] "Type expressions cannot contain escape characters"
def f6() -> "\N{LATIN SMALL LETTER I}nt":
return 1
# error: [escape-character-in-forward-annotation] "Type expressions cannot contain escape characters"
def f7() -> "\x69nt":
return 1
def f8() -> """int""":
return 1
# error: [byte-string-type-annotation] "Type expressions cannot use bytes literal"
def f9() -> "b'int'":
return 1
reveal_type(f1()) # revealed: Unknown
reveal_type(f2()) # revealed: Unknown
reveal_type(f3()) # revealed: Unknown
reveal_type(f4()) # revealed: int
reveal_type(f5()) # revealed: Unknown
reveal_type(f6()) # revealed: Unknown
reveal_type(f7()) # revealed: Unknown
reveal_type(f8()) # revealed: int
reveal_type(f9()) # revealed: Unknown
```
## Various string kinds in `typing.Literal`
@@ -104,8 +145,10 @@ def f1(
```py
from typing import Literal
def f(v: Literal["a", r"b", b"c", "d" "e", "\N{LATIN SMALL LETTER F}", "\x67", """h"""]):
reveal_type(v) # revealed: Literal["a", "b", b"c", "de", "f", "g", "h"]
def f() -> Literal["a", r"b", b"c", "d" "e", "\N{LATIN SMALL LETTER F}", "\x67", """h"""]:
return "normal"
reveal_type(f()) # revealed: Literal["a", "b", "de", "f", "g", "h"] | Literal[b"c"]
```
## Class variables
@@ -132,7 +175,8 @@ c: "Foo"
# error: [invalid-assignment] "Object of type `Literal[1]` is not assignable to `Foo`"
d: "Foo" = 1
class Foo: ...
class Foo:
pass
c = Foo()
@@ -173,40 +217,3 @@ p: "call()"
r: "[1, 2]"
s: "(1, 2)"
```
## Multi line annotation
Quoted type annotations should be parsed as if surrounded by parentheses.
```py
def valid(
a1: """(
int |
str
)
""",
a2: """
int |
str
""",
):
reveal_type(a1) # revealed: int | str
reveal_type(a2) # revealed: int | str
def invalid(
# error: [invalid-syntax-in-forward-annotation]
a1: """
int |
str)
""",
# error: [invalid-syntax-in-forward-annotation]
a2: """
int) |
str
""",
# error: [invalid-syntax-in-forward-annotation]
a3: """
(int)) """,
):
pass
```

View File

@@ -1,71 +0,0 @@
# Unsupported special forms
## Not yet supported
Several special forms are currently unsupported by Red Knot. However, we also don't emit
false-positive errors if you use one in an annotation:
```py
from typing_extensions import Self, TypeVarTuple, Unpack, TypeGuard, TypeIs, Concatenate, ParamSpec, TypeAlias, Callable, TypeVar
P = ParamSpec("P")
Ts = TypeVarTuple("Ts")
R_co = TypeVar("R_co", covariant=True)
Alias: TypeAlias = int
def f(*args: Unpack[Ts]) -> tuple[Unpack[Ts]]:
# TODO: should understand the annotation
reveal_type(args) # revealed: tuple
reveal_type(Alias) # revealed: @Todo(Unsupported or invalid type in a type expression)
def g() -> TypeGuard[int]: ...
def h() -> TypeIs[int]: ...
def i(callback: Callable[Concatenate[int, P], R_co], *args: P.args, **kwargs: P.kwargs) -> R_co:
# TODO: should understand the annotation
reveal_type(args) # revealed: tuple
# TODO: should understand the annotation
reveal_type(kwargs) # revealed: dict
return callback(42, *args, **kwargs)
class Foo:
def method(self, x: Self):
reveal_type(x) # revealed: @Todo(Unsupported or invalid type in a type expression)
```
## Inheritance
You can't inherit from most of these. `typing.Callable` is an exception.
```py
from typing import Callable
from typing_extensions import Self, Unpack, TypeGuard, TypeIs, Concatenate
class A(Self): ... # error: [invalid-base]
class B(Unpack): ... # error: [invalid-base]
class C(TypeGuard): ... # error: [invalid-base]
class D(TypeIs): ... # error: [invalid-base]
class E(Concatenate): ... # error: [invalid-base]
class F(Callable): ...
reveal_type(F.__mro__) # revealed: tuple[Literal[F], @Todo(Support for Callable as a base class), Literal[object]]
```
## Subscriptability
Some of these are not subscriptable:
```py
from typing_extensions import Self, TypeAlias
X: TypeAlias[T] = int # error: [invalid-type-form]
class Foo[T]:
# error: [invalid-type-form] "Special form `typing.Self` expected no type parameter"
# error: [invalid-type-form] "Special form `typing.Self` expected no type parameter"
def method(self: Self[int]) -> Self[int]:
reveal_type(self) # revealed: Unknown
```

View File

@@ -1,34 +0,0 @@
# Unsupported type qualifiers
## Not yet supported
Several type qualifiers are currently unsupported by Red Knot. However, we also don't emit
false-positive errors if you use one in an annotation:
```py
from typing_extensions import Final, Required, NotRequired, ReadOnly, TypedDict
X: Final = 42
Y: Final[int] = 42
# TODO: `TypedDict` is actually valid as a base
# error: [invalid-base]
class Bar(TypedDict):
x: Required[int]
y: NotRequired[str]
z: ReadOnly[bytes]
```
## Inheritance
You can't inherit from a type qualifier.
```py
from typing_extensions import Final, ClassVar, Required, NotRequired, ReadOnly
class A(Final): ... # error: [invalid-base]
class B(ClassVar): ... # error: [invalid-base]
class C(Required): ... # error: [invalid-base]
class D(NotRequired): ... # error: [invalid-base]
class E(ReadOnly): ... # error: [invalid-base]
```

View File

@@ -25,9 +25,7 @@ x = "foo" # error: [invalid-assignment] "Object of type `Literal["foo"]` is not
## Tuple annotations are understood
`module.py`:
```py
```py path=module.py
from typing_extensions import Unpack
a: tuple[()] = ()
@@ -35,6 +33,8 @@ b: tuple[int] = (42,)
c: tuple[str, int] = ("42", 42)
d: tuple[tuple[str, str], tuple[int, int]] = (("foo", "foo"), (42, 42))
e: tuple[str, ...] = ()
# TODO: we should not emit this error
# error: [call-possibly-unbound-method] "Method `__class_getitem__` of type `Literal[tuple]` is possibly unbound"
f: tuple[str, *tuple[int, ...], bytes] = ("42", b"42")
g: tuple[str, Unpack[tuple[int, ...]], bytes] = ("42", b"42")
h: tuple[list[int], list[int]] = ([], [])
@@ -42,9 +42,7 @@ i: tuple[str | int, str | int] = (42, 42)
j: tuple[str | int] = (42,)
```
`script.py`:
```py
```py path=script.py
from module import a, b, c, d, e, f, g, h, i, j
reveal_type(a) # revealed: tuple[()]
@@ -80,10 +78,20 @@ c: tuple[str | int, str] = ([], "foo")
## PEP-604 annotations are supported
```py
def foo(v: str | int | None, w: str | str | None, x: str | str):
reveal_type(v) # revealed: str | int | None
reveal_type(w) # revealed: str | None
reveal_type(x) # revealed: str
def foo() -> str | int | None:
return None
reveal_type(foo()) # revealed: str | int | None
def bar() -> str | str | None:
return None
reveal_type(bar()) # revealed: str | None
def baz() -> str | str:
return "Hello, world!"
reveal_type(baz()) # revealed: str
```
## Attribute expressions in type annotations are understood
@@ -110,7 +118,8 @@ from __future__ import annotations
x: Foo
class Foo: ...
class Foo:
pass
x = Foo()
reveal_type(x) # revealed: Foo
@@ -118,18 +127,12 @@ reveal_type(x) # revealed: Foo
## Annotations in stub files are deferred
```pyi
```pyi path=main.pyi
x: Foo
class Foo: ...
class Foo:
pass
x = Foo()
reveal_type(x) # revealed: Foo
```
## Annotated assignments in stub files are inferred correctly
```pyi
x: int = 1
reveal_type(x) # revealed: Literal[1]
```

View File

@@ -40,125 +40,143 @@ class C:
return 42
x = C()
# error: [invalid-argument-type]
x -= 1
# TODO: should error, once operand type check is implemented
reveal_type(x) # revealed: int
```
## Method union
```py
def _(flag: bool):
class Foo:
if flag:
def __iadd__(self, other: int) -> str:
return "Hello, world!"
else:
def __iadd__(self, other: int) -> int:
return 42
def bool_instance() -> bool:
return True
f = Foo()
f += 12
flag = bool_instance()
reveal_type(f) # revealed: str | int
class Foo:
if bool_instance():
def __iadd__(self, other: int) -> str:
return "Hello, world!"
else:
def __iadd__(self, other: int) -> int:
return 42
f = Foo()
f += 12
reveal_type(f) # revealed: str | int
```
## Partially bound `__iadd__`
```py
def _(flag: bool):
class Foo:
if flag:
def __iadd__(self, other: str) -> int:
return 42
def bool_instance() -> bool:
return True
f = Foo()
class Foo:
if bool_instance():
def __iadd__(self, other: str) -> int:
return 42
# TODO: We should emit an `unsupported-operator` error here, possibly with the information
# that `Foo.__iadd__` may be unbound as additional context.
f += "Hello, world!"
f = Foo()
reveal_type(f) # revealed: int | Unknown
# TODO: We should emit an `unsupported-operator` error here, possibly with the information
# that `Foo.__iadd__` may be unbound as additional context.
f += "Hello, world!"
reveal_type(f) # revealed: int | Unknown
```
## Partially bound with `__add__`
```py
def _(flag: bool):
class Foo:
def __add__(self, other: str) -> str:
return "Hello, world!"
if flag:
def __iadd__(self, other: str) -> int:
return 42
def bool_instance() -> bool:
return True
f = Foo()
f += "Hello, world!"
class Foo:
def __add__(self, other: str) -> str:
return "Hello, world!"
if bool_instance():
def __iadd__(self, other: str) -> int:
return 42
reveal_type(f) # revealed: int | str
f = Foo()
f += "Hello, world!"
reveal_type(f) # revealed: int | str
```
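As a plain-Python sketch of the fallback this section relies on (not an mdtest assertion): when a
class defines no `__iadd__`, an augmented assignment falls back to `__add__` and rebinds the target.

```py
class OnlyAdd:
    def __add__(self, other: str) -> str:
        return f"added {other}"

x = OnlyAdd()
# No `__iadd__` is defined, so `x += "!"` is evaluated as `x = x.__add__("!")`.
x += "!"
print(x)  # added !
```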
## Partially bound target union
```py
def _(flag1: bool, flag2: bool):
class Foo:
def __add__(self, other: int) -> str:
return "Hello, world!"
if flag1:
def __iadd__(self, other: int) -> int:
return 42
def bool_instance() -> bool:
return True
if flag2:
f = Foo()
else:
f = 42.0
f += 12
class Foo:
def __add__(self, other: int) -> str:
return "Hello, world!"
if bool_instance():
def __iadd__(self, other: int) -> int:
return 42
reveal_type(f) # revealed: int | str | float
if bool_instance():
f = Foo()
else:
f = 42.0
f += 12
reveal_type(f) # revealed: int | str | float
```
## Target union
```py
def _(flag: bool):
class Foo:
def __iadd__(self, other: int) -> str:
return "Hello, world!"
def bool_instance() -> bool:
return True
if flag:
f = Foo()
else:
f = 42.0
f += 12
flag = bool_instance()
reveal_type(f) # revealed: str | float
class Foo:
def __iadd__(self, other: int) -> str:
return "Hello, world!"
if flag:
f = Foo()
else:
f = 42.0
f += 12
reveal_type(f) # revealed: str | float
```
## Partially bound target union with `__add__`
```py
def f(flag: bool, flag2: bool):
class Foo:
def __add__(self, other: int) -> str:
return "Hello, world!"
if flag:
def __iadd__(self, other: int) -> int:
return 42
def bool_instance() -> bool:
return True
class Bar:
def __add__(self, other: int) -> bytes:
return b"Hello, world!"
flag = bool_instance()
def __iadd__(self, other: int) -> float:
return 42.0
class Foo:
def __add__(self, other: int) -> str:
return "Hello, world!"
if bool_instance():
def __iadd__(self, other: int) -> int:
return 42
if flag2:
f = Foo()
else:
f = Bar()
f += 12
class Bar:
def __add__(self, other: int) -> bytes:
return b"Hello, world!"
reveal_type(f) # revealed: int | str | float
def __iadd__(self, other: int) -> float:
return 42.0
if flag:
f = Foo()
else:
f = Bar()
f += 12
reveal_type(f) # revealed: int | str | float
```

File diff suppressed because it is too large

View File

@@ -46,48 +46,3 @@ reveal_type(a | b) # revealed: Literal[True]
reveal_type(b | a) # revealed: Literal[True]
reveal_type(b | b) # revealed: Literal[False]
```
## Arithmetic with a variable
```py
def _(a: bool):
def lhs_is_int(x: int):
reveal_type(x + a) # revealed: int
reveal_type(x - a) # revealed: int
reveal_type(x * a) # revealed: int
reveal_type(x // a) # revealed: int
reveal_type(x / a) # revealed: float
reveal_type(x % a) # revealed: int
def rhs_is_int(x: int):
reveal_type(a + x) # revealed: int
reveal_type(a - x) # revealed: int
reveal_type(a * x) # revealed: int
reveal_type(a // x) # revealed: int
reveal_type(a / x) # revealed: float
reveal_type(a % x) # revealed: int
def lhs_is_bool(x: bool):
reveal_type(x + a) # revealed: int
reveal_type(x - a) # revealed: int
reveal_type(x * a) # revealed: int
reveal_type(x // a) # revealed: int
reveal_type(x / a) # revealed: float
reveal_type(x % a) # revealed: int
def rhs_is_bool(x: bool):
reveal_type(a + x) # revealed: int
reveal_type(a - x) # revealed: int
reveal_type(a * x) # revealed: int
reveal_type(a // x) # revealed: int
reveal_type(a / x) # revealed: float
reveal_type(a % x) # revealed: int
def both_are_bool(x: bool, y: bool):
reveal_type(x + y) # revealed: int
reveal_type(x - y) # revealed: int
reveal_type(x * y) # revealed: int
reveal_type(x // y) # revealed: int
reveal_type(x / y) # revealed: float
reveal_type(x % y) # revealed: int
```

View File

@@ -1,27 +0,0 @@
# Binary operations on classes
## Union of two classes
Unioning two classes via the `|` operator is only available in Python 3.10 and later.
```toml
[environment]
python-version = "3.10"
```
```py
class A: ...
class B: ...
reveal_type(A | B) # revealed: UnionType
```
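For reference, a minimal runtime sketch (plain Python, assuming 3.10 or newer, not part of the test
above) showing that `A | B` evaluates to a `types.UnionType` instance:

```py
import types

class A: ...
class B: ...

u = A | B
print(type(u))                         # <class 'types.UnionType'>
print(isinstance(u, types.UnionType))  # True
```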
## Union of two classes (prior to 3.10)
```py
class A: ...
class B: ...
# error: "Operator `|` is unsupported between objects of type `Literal[A]` and `Literal[B]`"
reveal_type(A | B) # revealed: Unknown
```

View File

@@ -1,379 +0,0 @@
# Custom binary operations
## Class instances
```py
from typing import Literal
class Yes:
def __add__(self, other) -> Literal["+"]:
return "+"
def __sub__(self, other) -> Literal["-"]:
return "-"
def __mul__(self, other) -> Literal["*"]:
return "*"
def __matmul__(self, other) -> Literal["@"]:
return "@"
def __truediv__(self, other) -> Literal["/"]:
return "/"
def __mod__(self, other) -> Literal["%"]:
return "%"
def __pow__(self, other) -> Literal["**"]:
return "**"
def __lshift__(self, other) -> Literal["<<"]:
return "<<"
def __rshift__(self, other) -> Literal[">>"]:
return ">>"
def __or__(self, other) -> Literal["|"]:
return "|"
def __xor__(self, other) -> Literal["^"]:
return "^"
def __and__(self, other) -> Literal["&"]:
return "&"
def __floordiv__(self, other) -> Literal["//"]:
return "//"
class Sub(Yes): ...
class No: ...
# Yes implements all of the dunder methods.
reveal_type(Yes() + Yes()) # revealed: Literal["+"]
reveal_type(Yes() - Yes()) # revealed: Literal["-"]
reveal_type(Yes() * Yes()) # revealed: Literal["*"]
reveal_type(Yes() @ Yes()) # revealed: Literal["@"]
reveal_type(Yes() / Yes()) # revealed: Literal["/"]
reveal_type(Yes() % Yes()) # revealed: Literal["%"]
reveal_type(Yes() ** Yes()) # revealed: Literal["**"]
reveal_type(Yes() << Yes()) # revealed: Literal["<<"]
reveal_type(Yes() >> Yes()) # revealed: Literal[">>"]
reveal_type(Yes() | Yes()) # revealed: Literal["|"]
reveal_type(Yes() ^ Yes()) # revealed: Literal["^"]
reveal_type(Yes() & Yes()) # revealed: Literal["&"]
reveal_type(Yes() // Yes()) # revealed: Literal["//"]
# Sub inherits Yes's implementation of the dunder methods.
reveal_type(Sub() + Sub()) # revealed: Literal["+"]
reveal_type(Sub() - Sub()) # revealed: Literal["-"]
reveal_type(Sub() * Sub()) # revealed: Literal["*"]
reveal_type(Sub() @ Sub()) # revealed: Literal["@"]
reveal_type(Sub() / Sub()) # revealed: Literal["/"]
reveal_type(Sub() % Sub()) # revealed: Literal["%"]
reveal_type(Sub() ** Sub()) # revealed: Literal["**"]
reveal_type(Sub() << Sub()) # revealed: Literal["<<"]
reveal_type(Sub() >> Sub()) # revealed: Literal[">>"]
reveal_type(Sub() | Sub()) # revealed: Literal["|"]
reveal_type(Sub() ^ Sub()) # revealed: Literal["^"]
reveal_type(Sub() & Sub()) # revealed: Literal["&"]
reveal_type(Sub() // Sub()) # revealed: Literal["//"]
# No does not implement any of the dunder methods.
# error: [unsupported-operator] "Operator `+` is unsupported between objects of type `No` and `No`"
reveal_type(No() + No()) # revealed: Unknown
# error: [unsupported-operator] "Operator `-` is unsupported between objects of type `No` and `No`"
reveal_type(No() - No()) # revealed: Unknown
# error: [unsupported-operator] "Operator `*` is unsupported between objects of type `No` and `No`"
reveal_type(No() * No()) # revealed: Unknown
# error: [unsupported-operator] "Operator `@` is unsupported between objects of type `No` and `No`"
reveal_type(No() @ No()) # revealed: Unknown
# error: [unsupported-operator] "Operator `/` is unsupported between objects of type `No` and `No`"
reveal_type(No() / No()) # revealed: Unknown
# error: [unsupported-operator] "Operator `%` is unsupported between objects of type `No` and `No`"
reveal_type(No() % No()) # revealed: Unknown
# error: [unsupported-operator] "Operator `**` is unsupported between objects of type `No` and `No`"
reveal_type(No() ** No()) # revealed: Unknown
# error: [unsupported-operator] "Operator `<<` is unsupported between objects of type `No` and `No`"
reveal_type(No() << No()) # revealed: Unknown
# error: [unsupported-operator] "Operator `>>` is unsupported between objects of type `No` and `No`"
reveal_type(No() >> No()) # revealed: Unknown
# error: [unsupported-operator] "Operator `|` is unsupported between objects of type `No` and `No`"
reveal_type(No() | No()) # revealed: Unknown
# error: [unsupported-operator] "Operator `^` is unsupported between objects of type `No` and `No`"
reveal_type(No() ^ No()) # revealed: Unknown
# error: [unsupported-operator] "Operator `&` is unsupported between objects of type `No` and `No`"
reveal_type(No() & No()) # revealed: Unknown
# error: [unsupported-operator] "Operator `//` is unsupported between objects of type `No` and `No`"
reveal_type(No() // No()) # revealed: Unknown
# Yes does not implement any of the reflected dunder methods.
# error: [unsupported-operator] "Operator `+` is unsupported between objects of type `No` and `Yes`"
reveal_type(No() + Yes()) # revealed: Unknown
# error: [unsupported-operator] "Operator `-` is unsupported between objects of type `No` and `Yes`"
reveal_type(No() - Yes()) # revealed: Unknown
# error: [unsupported-operator] "Operator `*` is unsupported between objects of type `No` and `Yes`"
reveal_type(No() * Yes()) # revealed: Unknown
# error: [unsupported-operator] "Operator `@` is unsupported between objects of type `No` and `Yes`"
reveal_type(No() @ Yes()) # revealed: Unknown
# error: [unsupported-operator] "Operator `/` is unsupported between objects of type `No` and `Yes`"
reveal_type(No() / Yes()) # revealed: Unknown
# error: [unsupported-operator] "Operator `%` is unsupported between objects of type `No` and `Yes`"
reveal_type(No() % Yes()) # revealed: Unknown
# error: [unsupported-operator] "Operator `**` is unsupported between objects of type `No` and `Yes`"
reveal_type(No() ** Yes()) # revealed: Unknown
# error: [unsupported-operator] "Operator `<<` is unsupported between objects of type `No` and `Yes`"
reveal_type(No() << Yes()) # revealed: Unknown
# error: [unsupported-operator] "Operator `>>` is unsupported between objects of type `No` and `Yes`"
reveal_type(No() >> Yes()) # revealed: Unknown
# error: [unsupported-operator] "Operator `|` is unsupported between objects of type `No` and `Yes`"
reveal_type(No() | Yes()) # revealed: Unknown
# error: [unsupported-operator] "Operator `^` is unsupported between objects of type `No` and `Yes`"
reveal_type(No() ^ Yes()) # revealed: Unknown
# error: [unsupported-operator] "Operator `&` is unsupported between objects of type `No` and `Yes`"
reveal_type(No() & Yes()) # revealed: Unknown
# error: [unsupported-operator] "Operator `//` is unsupported between objects of type `No` and `Yes`"
reveal_type(No() // Yes()) # revealed: Unknown
```
## Subclass reflections override superclass dunders
```py
from typing import Literal
class Yes:
def __add__(self, other) -> Literal["+"]:
return "+"
def __sub__(self, other) -> Literal["-"]:
return "-"
def __mul__(self, other) -> Literal["*"]:
return "*"
def __matmul__(self, other) -> Literal["@"]:
return "@"
def __truediv__(self, other) -> Literal["/"]:
return "/"
def __mod__(self, other) -> Literal["%"]:
return "%"
def __pow__(self, other) -> Literal["**"]:
return "**"
def __lshift__(self, other) -> Literal["<<"]:
return "<<"
def __rshift__(self, other) -> Literal[">>"]:
return ">>"
def __or__(self, other) -> Literal["|"]:
return "|"
def __xor__(self, other) -> Literal["^"]:
return "^"
def __and__(self, other) -> Literal["&"]:
return "&"
def __floordiv__(self, other) -> Literal["//"]:
return "//"
class Sub(Yes):
def __radd__(self, other) -> Literal["r+"]:
return "r+"
def __rsub__(self, other) -> Literal["r-"]:
return "r-"
def __rmul__(self, other) -> Literal["r*"]:
return "r*"
def __rmatmul__(self, other) -> Literal["r@"]:
return "r@"
def __rtruediv__(self, other) -> Literal["r/"]:
return "r/"
def __rmod__(self, other) -> Literal["r%"]:
return "r%"
def __rpow__(self, other) -> Literal["r**"]:
return "r**"
def __rlshift__(self, other) -> Literal["r<<"]:
return "r<<"
def __rrshift__(self, other) -> Literal["r>>"]:
return "r>>"
def __ror__(self, other) -> Literal["r|"]:
return "r|"
def __rxor__(self, other) -> Literal["r^"]:
return "r^"
def __rand__(self, other) -> Literal["r&"]:
return "r&"
def __rfloordiv__(self, other) -> Literal["r//"]:
return "r//"
class No:
def __radd__(self, other) -> Literal["r+"]:
return "r+"
def __rsub__(self, other) -> Literal["r-"]:
return "r-"
def __rmul__(self, other) -> Literal["r*"]:
return "r*"
def __rmatmul__(self, other) -> Literal["r@"]:
return "r@"
def __rtruediv__(self, other) -> Literal["r/"]:
return "r/"
def __rmod__(self, other) -> Literal["r%"]:
return "r%"
def __rpow__(self, other) -> Literal["r**"]:
return "r**"
def __rlshift__(self, other) -> Literal["r<<"]:
return "r<<"
def __rrshift__(self, other) -> Literal["r>>"]:
return "r>>"
def __ror__(self, other) -> Literal["r|"]:
return "r|"
def __rxor__(self, other) -> Literal["r^"]:
return "r^"
def __rand__(self, other) -> Literal["r&"]:
return "r&"
def __rfloordiv__(self, other) -> Literal["r//"]:
return "r//"
# Subclass reflected dunder methods take precedence over the superclass's regular dunders.
reveal_type(Yes() + Sub()) # revealed: Literal["r+"]
reveal_type(Yes() - Sub()) # revealed: Literal["r-"]
reveal_type(Yes() * Sub()) # revealed: Literal["r*"]
reveal_type(Yes() @ Sub()) # revealed: Literal["r@"]
reveal_type(Yes() / Sub()) # revealed: Literal["r/"]
reveal_type(Yes() % Sub()) # revealed: Literal["r%"]
reveal_type(Yes() ** Sub()) # revealed: Literal["r**"]
reveal_type(Yes() << Sub()) # revealed: Literal["r<<"]
reveal_type(Yes() >> Sub()) # revealed: Literal["r>>"]
reveal_type(Yes() | Sub()) # revealed: Literal["r|"]
reveal_type(Yes() ^ Sub()) # revealed: Literal["r^"]
reveal_type(Yes() & Sub()) # revealed: Literal["r&"]
reveal_type(Yes() // Sub()) # revealed: Literal["r//"]
# But for an unrelated class, the superclass regular dunders are used.
reveal_type(Yes() + No()) # revealed: Literal["+"]
reveal_type(Yes() - No()) # revealed: Literal["-"]
reveal_type(Yes() * No()) # revealed: Literal["*"]
reveal_type(Yes() @ No()) # revealed: Literal["@"]
reveal_type(Yes() / No()) # revealed: Literal["/"]
reveal_type(Yes() % No()) # revealed: Literal["%"]
reveal_type(Yes() ** No()) # revealed: Literal["**"]
reveal_type(Yes() << No()) # revealed: Literal["<<"]
reveal_type(Yes() >> No()) # revealed: Literal[">>"]
reveal_type(Yes() | No()) # revealed: Literal["|"]
reveal_type(Yes() ^ No()) # revealed: Literal["^"]
reveal_type(Yes() & No()) # revealed: Literal["&"]
reveal_type(Yes() // No()) # revealed: Literal["//"]
```
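A plain-Python sketch of the precedence rule exercised above (hypothetical `Base`/`Derived` names,
not part of the test): when the right operand's type is a proper subclass of the left operand's type
and overrides the reflected dunder, the reflected method is tried first.

```py
class Base:
    def __add__(self, other):
        return "Base.__add__"

class Derived(Base):
    def __radd__(self, other):
        return "Derived.__radd__"

print(Base() + Derived())  # Derived.__radd__ -- the subclass's reflected method wins
print(Base() + Base())     # Base.__add__
```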
## Classes
Dunder methods defined in a class are available to instances of that class, but not to the class
itself. (For these operators to work on the class itself, they would have to be defined on the
class's type, i.e. `type`.)
```py
from typing import Literal
class Yes:
def __add__(self, other) -> Literal["+"]:
return "+"
class Sub(Yes): ...
class No: ...
# error: [unsupported-operator] "Operator `+` is unsupported between objects of type `Literal[Yes]` and `Literal[Yes]`"
reveal_type(Yes + Yes) # revealed: Unknown
# error: [unsupported-operator] "Operator `+` is unsupported between objects of type `Literal[Sub]` and `Literal[Sub]`"
reveal_type(Sub + Sub) # revealed: Unknown
# error: [unsupported-operator] "Operator `+` is unsupported between objects of type `Literal[No]` and `Literal[No]`"
reveal_type(No + No) # revealed: Unknown
```
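A plain-Python sketch of why this also fails at runtime (the class `C` below is hypothetical):
binary operators are looked up on the *type* of the operand, so `__add__` defined on `C` applies to
instances of `C`, while `C + C` would need `__add__` to be defined on `type`.

```py
class C:
    def __add__(self, other):
        return "+"

print(C() + C())  # "+": `__add__` is found on `type(C()) is C`

try:
    C + C  # looked up on `type(C) is type`, which defines no `__add__`
except TypeError as exc:
    print(type(exc).__name__)  # TypeError
```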
## Subclass
```py
from typing import Literal
class Yes:
def __add__(self, other) -> Literal["+"]:
return "+"
class Sub(Yes): ...
class No: ...
def yes() -> type[Yes]:
return Yes
def sub() -> type[Sub]:
return Sub
def no() -> type[No]:
return No
# error: [unsupported-operator] "Operator `+` is unsupported between objects of type `type[Yes]` and `type[Yes]`"
reveal_type(yes() + yes()) # revealed: Unknown
# error: [unsupported-operator] "Operator `+` is unsupported between objects of type `type[Sub]` and `type[Sub]`"
reveal_type(sub() + sub()) # revealed: Unknown
# error: [unsupported-operator] "Operator `+` is unsupported between objects of type `type[No]` and `type[No]`"
reveal_type(no() + no()) # revealed: Unknown
```
## Function literals
```py
def f():
pass
# error: [unsupported-operator] "Operator `+` is unsupported between objects of type `Literal[f]` and `Literal[f]`"
reveal_type(f + f) # revealed: Unknown
# error: [unsupported-operator] "Operator `-` is unsupported between objects of type `Literal[f]` and `Literal[f]`"
reveal_type(f - f) # revealed: Unknown
# error: [unsupported-operator] "Operator `*` is unsupported between objects of type `Literal[f]` and `Literal[f]`"
reveal_type(f * f) # revealed: Unknown
# error: [unsupported-operator] "Operator `@` is unsupported between objects of type `Literal[f]` and `Literal[f]`"
reveal_type(f @ f) # revealed: Unknown
# error: [unsupported-operator] "Operator `/` is unsupported between objects of type `Literal[f]` and `Literal[f]`"
reveal_type(f / f) # revealed: Unknown
# error: [unsupported-operator] "Operator `%` is unsupported between objects of type `Literal[f]` and `Literal[f]`"
reveal_type(f % f) # revealed: Unknown
# error: [unsupported-operator] "Operator `**` is unsupported between objects of type `Literal[f]` and `Literal[f]`"
reveal_type(f**f) # revealed: Unknown
# error: [unsupported-operator] "Operator `<<` is unsupported between objects of type `Literal[f]` and `Literal[f]`"
reveal_type(f << f) # revealed: Unknown
# error: [unsupported-operator] "Operator `>>` is unsupported between objects of type `Literal[f]` and `Literal[f]`"
reveal_type(f >> f) # revealed: Unknown
# error: [unsupported-operator] "Operator `|` is unsupported between objects of type `Literal[f]` and `Literal[f]`"
reveal_type(f | f) # revealed: Unknown
# error: [unsupported-operator] "Operator `^` is unsupported between objects of type `Literal[f]` and `Literal[f]`"
reveal_type(f ^ f) # revealed: Unknown
# error: [unsupported-operator] "Operator `&` is unsupported between objects of type `Literal[f]` and `Literal[f]`"
reveal_type(f & f) # revealed: Unknown
# error: [unsupported-operator] "Operator `//` is unsupported between objects of type `Literal[f]` and `Literal[f]`"
reveal_type(f // f) # revealed: Unknown
```

View File

@@ -262,8 +262,7 @@ class A:
class B:
__add__ = A()
# TODO: this could be `int` if we declare `B.__add__` using a `Callable` type
reveal_type(B() + B()) # revealed: Unknown | int
reveal_type(B() + B()) # revealed: int
```
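A plain-Python sketch of why this works at runtime (hypothetical `Adder` helper, not part of the
test): the operator machinery looks `__add__` up on the class and calls whatever callable it finds
there.

```py
class Adder:
    def __call__(self, other) -> int:
        return 42

class B:
    # A callable instance stored on the class acts as the `__add__` implementation.
    # `Adder()` is not a descriptor, so it is called with just the right-hand operand.
    __add__ = Adder()

print(B() + B())  # 42
```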
## Integration test: numbers from typeshed
@@ -282,12 +281,20 @@ reveal_type(42 + 4.2) # revealed: int
# TODO should be complex, need to check arg type and fall back to `rhs.__radd__`
reveal_type(3 + 3j) # revealed: int
def _(x: bool, y: int):
reveal_type(x + y) # revealed: int
reveal_type(4.2 + x) # revealed: float
def returns_int() -> int:
return 42
# TODO should be float, need to check arg type and fall back to `rhs.__radd__`
reveal_type(y + 4.12) # revealed: int
def returns_bool() -> bool:
return True
x = returns_bool()
y = returns_int()
reveal_type(x + y) # revealed: int
reveal_type(4.2 + x) # revealed: float
# TODO should be float, need to check arg type and fall back to `rhs.__radd__`
reveal_type(y + 4.12) # revealed: int
```
## With literal types

View File

@@ -9,34 +9,6 @@ reveal_type(3 * -1) # revealed: Literal[-3]
reveal_type(-3 // 3) # revealed: Literal[-1]
reveal_type(-3 / 3) # revealed: float
reveal_type(5 % 3) # revealed: Literal[2]
# TODO: We don't currently verify that the actual parameter to int.__add__ matches the declared
# formal parameter type.
reveal_type(2 + "f") # revealed: int
def lhs(x: int):
reveal_type(x + 1) # revealed: int
reveal_type(x - 4) # revealed: int
reveal_type(x * -1) # revealed: int
reveal_type(x // 3) # revealed: int
reveal_type(x / 3) # revealed: float
reveal_type(x % 3) # revealed: int
def rhs(x: int):
reveal_type(2 + x) # revealed: int
reveal_type(3 - x) # revealed: int
reveal_type(3 * x) # revealed: int
reveal_type(-3 // x) # revealed: int
reveal_type(-3 / x) # revealed: float
reveal_type(5 % x) # revealed: int
def both(x: int):
reveal_type(x + x) # revealed: int
reveal_type(x - x) # revealed: int
reveal_type(x * x) # revealed: int
reveal_type(x // x) # revealed: int
reveal_type(x / x) # revealed: float
reveal_type(x % x) # revealed: int
```
## Power
@@ -49,11 +21,6 @@ largest_u32 = 4_294_967_295
reveal_type(2**2) # revealed: Literal[4]
reveal_type(1 ** (largest_u32 + 1)) # revealed: int
reveal_type(2**largest_u32) # revealed: int
def variable(x: int):
reveal_type(x**2) # revealed: @Todo(return type)
reveal_type(2**x) # revealed: @Todo(return type)
reveal_type(x**x) # revealed: @Todo(return type)
```
## Division by Zero

View File

@@ -7,61 +7,72 @@ Similarly, in `and` expressions, if the left-hand side is falsy, the right-hand
evaluated.
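A plain-Python sketch of the runtime behavior described here (the `demo` helper is hypothetical):
when the left-hand side of `or` is truthy, the walrus target on the right-hand side is never bound.

```py
def demo(flag: bool) -> None:
    if flag or (x := 1):
        try:
            print(x)
        except UnboundLocalError:
            print("x was never bound")

demo(True)   # x was never bound
demo(False)  # 1
```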
```py
def _(flag: bool):
if flag or (x := 1):
# error: [possibly-unresolved-reference]
reveal_type(x) # revealed: Literal[1]
def bool_instance() -> bool:
return True
if flag and (x := 1):
# error: [possibly-unresolved-reference]
reveal_type(x) # revealed: Literal[1]
if bool_instance() or (x := 1):
# error: [possibly-unresolved-reference]
reveal_type(x) # revealed: Literal[1]
if bool_instance() and (x := 1):
# error: [possibly-unresolved-reference]
reveal_type(x) # revealed: Literal[1]
```
## First expression is always evaluated
```py
def _(flag: bool):
if (x := 1) or flag:
reveal_type(x) # revealed: Literal[1]
def bool_instance() -> bool:
return True
if (x := 1) and flag:
reveal_type(x) # revealed: Literal[1]
if (x := 1) or bool_instance():
reveal_type(x) # revealed: Literal[1]
if (x := 1) and bool_instance():
reveal_type(x) # revealed: Literal[1]
```
## Statically known truthiness
```py
if True or (x := 1):
# error: [unresolved-reference]
reveal_type(x) # revealed: Unknown
# TODO: infer that the second arm is never executed, and raise `unresolved-reference`.
# error: [possibly-unresolved-reference]
reveal_type(x) # revealed: Literal[1]
if True and (x := 1):
# TODO: infer that the second arm is always executed, do not raise a diagnostic
# error: [possibly-unresolved-reference]
reveal_type(x) # revealed: Literal[1]
```
## Later expressions can always use variables from earlier expressions
```py
def _(flag: bool):
flag or (x := 1) or reveal_type(x) # revealed: Literal[1]
def bool_instance() -> bool:
return True
# error: [unresolved-reference]
flag or reveal_type(y) or (y := 1) # revealed: Unknown
bool_instance() or (x := 1) or reveal_type(x) # revealed: Literal[1]
# error: [unresolved-reference]
bool_instance() or reveal_type(y) or (y := 1) # revealed: Unknown
```
## Nested expressions
```py
def _(flag1: bool, flag2: bool):
if flag1 or ((x := 1) and flag2):
# error: [possibly-unresolved-reference]
reveal_type(x) # revealed: Literal[1]
if ((y := 1) and flag1) or flag2:
reveal_type(y) # revealed: Literal[1]
def bool_instance() -> bool:
return True
if bool_instance() or ((x := 1) and bool_instance()):
# error: [possibly-unresolved-reference]
if (flag1 and (z := 1)) or reveal_type(z): # revealed: Literal[1]
# error: [possibly-unresolved-reference]
reveal_type(z) # revealed: Literal[1]
reveal_type(x) # revealed: Literal[1]
if ((y := 1) and bool_instance()) or bool_instance():
reveal_type(y) # revealed: Literal[1]
# error: [possibly-unresolved-reference]
if (bool_instance() and (z := 1)) or reveal_type(z): # revealed: Literal[1]
# error: [possibly-unresolved-reference]
reveal_type(z) # revealed: Literal[1]
```

View File

@@ -1,289 +0,0 @@
# Boundness and declaredness: public uses
This document demonstrates how type-inference and diagnostics work for *public* uses of a symbol,
that is, a use of a symbol from another scope. If a symbol has a declared type in its local scope
(e.g. `int`), we use that as the symbol's "public type" (the type of the symbol from the perspective
of other scopes) even if there is a more precise local inferred type for the symbol (`Literal[1]`).
If a symbol has no declared type, we use the union of `Unknown` with the inferred type as the public
type. If there is no declaration, then the symbol can be reassigned to any type from another scope;
the union with `Unknown` reflects that its type must at least be as large as the type of the
assigned value, but could be arbitrarily larger.
We test the whole matrix of possible boundness and declaredness states. The current behavior is
summarized in the following table, while the tests below demonstrate each case. Note that some of
this behavior is questionable and might change in the future. See the TODOs in `symbol_by_id`
(`types.rs`) and [this issue](https://github.com/astral-sh/ruff/issues/14297) for more information.
In particular, we should raise errors in the "possibly-undeclared-and-unbound" as well as the
"undeclared-and-possibly-unbound" cases (marked with a "?").
| **Public type** | declared | possibly-undeclared | undeclared |
| ---------------- | ------------ | -------------------------- | ----------------------- |
| bound | `T_declared` | `T_declared \| T_inferred` | `Unknown \| T_inferred` |
| possibly-unbound | `T_declared` | `T_declared \| T_inferred` | `Unknown \| T_inferred` |
| unbound | `T_declared` | `T_declared` | `Unknown` |
| **Diagnostic** | declared | possibly-undeclared | undeclared |
| ---------------- | -------- | ------------------------- | ------------------- |
| bound | | | |
| possibly-unbound | | `possibly-unbound-import` | ? |
| unbound | | ? | `unresolved-import` |
## Declared
### Declared and bound
If a symbol has a declared type (`int`), we use that even if there is a more precise inferred type
(`Literal[1]`), or a conflicting inferred type (`str` vs. `Literal[2]` below):
`mod.py`:
```py
from typing import Any
def any() -> Any: ...
a: int = 1
b: str = 2 # error: [invalid-assignment]
c: Any = 3
d: int = any()
```
```py
from mod import a, b, c, d
reveal_type(a) # revealed: int
reveal_type(b) # revealed: str
reveal_type(c) # revealed: Any
reveal_type(d) # revealed: int
```
### Declared and possibly unbound
If a symbol is declared and *possibly* unbound, we trust the other module and use the declared type
without raising an error.
`mod.py`:
```py
from typing import Any
def any() -> Any: ...
def flag() -> bool: ...
a: int
b: str
c: Any
d: int
if flag:
a = 1
b = 2 # error: [invalid-assignment]
c = 3
d = any()
```
```py
from mod import a, b, c, d
reveal_type(a) # revealed: int
reveal_type(b) # revealed: str
reveal_type(c) # revealed: Any
reveal_type(d) # revealed: int
```
### Declared and unbound
Similarly, if a symbol is declared but unbound, we do not raise an error. We trust that this symbol
is available somehow and simply use the declared type.
`mod.py`:
```py
from typing import Any
a: int
b: Any
```
```py
from mod import a, b
reveal_type(a) # revealed: int
reveal_type(b) # revealed: Any
```
## Possibly undeclared
### Possibly undeclared and bound
If a symbol is possibly undeclared but definitely bound, we use the union of the declared and
inferred types:
`mod.py`:
```py
from typing import Any
def any() -> Any: ...
def flag() -> bool: ...
a = 1
b = 2
c = 3
d = any()
if flag():
a: int
b: Any
c: str # error: [invalid-declaration]
d: int
```
```py
from mod import a, b, c, d
reveal_type(a) # revealed: int
reveal_type(b) # revealed: Literal[2] | Any
reveal_type(c) # revealed: Literal[3] | Unknown
reveal_type(d) # revealed: Any | int
# External modifications of `a` that violate the declared type are not allowed:
# error: [invalid-assignment]
a = None
```
### Possibly undeclared and possibly unbound
If a symbol is possibly undeclared and possibly unbound, we also use the union of the declared and
inferred types. This case is interesting because the "possibly declared" definition might not be the
same as the "possibly bound" definition (symbol `b`). Note that we raise a `possibly-unbound-import`
error for both `a` and `b`:
`mod.py`:
```py
from typing import Any
def flag() -> bool: ...
if flag():
a: Any = 1
b = 2
else:
b: str
```
```py
# error: [possibly-unbound-import]
# error: [possibly-unbound-import]
from mod import a, b
reveal_type(a) # revealed: Literal[1] | Any
reveal_type(b) # revealed: Literal[2] | str
# External modifications of `b` that violate the declared type are not allowed:
# error: [invalid-assignment]
b = None
```
### Possibly undeclared and unbound
If a symbol is possibly undeclared and definitely unbound, we currently do not raise an error. This
seems inconsistent when compared to the case just above.
`mod.py`:
```py
def flag() -> bool: ...
if flag():
a: int
```
```py
# TODO: this should raise an error. Once we fix this, update the section description and the table
# on top of this document.
from mod import a
reveal_type(a) # revealed: int
# External modifications to `a` that violate the declared type are not allowed:
# error: [invalid-assignment]
a = None
```
## Undeclared
### Undeclared but bound
If a symbol is *undeclared*, we use the union of `Unknown` with the inferred type. Note that we
treat this case differently from the case where a symbol is implicitly declared with `Unknown`,
possibly due to the usage of an unknown name in the annotation:
`mod.py`:
```py
# Undeclared:
a = 1
# Implicitly declared with `Unknown`, due to the usage of an unknown name in the annotation:
b: SomeUnknownName = 1 # error: [unresolved-reference]
```
```py
from mod import a, b
reveal_type(a) # revealed: Unknown | Literal[1]
reveal_type(b) # revealed: Unknown
# All external modifications of `a` are allowed:
a = None
```
### Undeclared and possibly unbound
If a symbol is undeclared and *possibly* unbound, we currently do not raise an error. This seems
inconsistent when compared to the "possibly-undeclared-and-possibly-unbound" case.
`mod.py`:
```py
def flag() -> bool: ...
if flag:
a = 1
b: SomeUnknownName = 1 # error: [unresolved-reference]
```
```py
# TODO: this should raise an error. Once we fix this, update the section description and the table
# on top of this document.
from mod import a, b
reveal_type(a) # revealed: Unknown | Literal[1]
reveal_type(b) # revealed: Unknown
# All external modifications of `a` are allowed:
a = None
```
### Undeclared and unbound
If a symbol is undeclared *and* unbound, we infer `Unknown` and raise an error.
`mod.py`:
```py
if False:
a: int = 1
```
```py
# error: [unresolved-import]
from mod import a
reveal_type(a) # revealed: Unknown
# Modifications allowed in this case:
a = None
```

View File

@@ -22,27 +22,29 @@ reveal_type(b) # revealed: Unknown
## Possibly unbound `__call__` method
```py
def _(flag: bool):
class PossiblyNotCallable:
if flag:
def __call__(self) -> int: ...
def flag() -> bool: ...
a = PossiblyNotCallable()
result = a() # error: "Object of type `PossiblyNotCallable` is not callable (possibly unbound `__call__` method)"
reveal_type(result) # revealed: int
class PossiblyNotCallable:
if flag():
def __call__(self) -> int: ...
a = PossiblyNotCallable()
result = a() # error: "Object of type `PossiblyNotCallable` is not callable (possibly unbound `__call__` method)"
reveal_type(result) # revealed: int
```
## Possibly unbound callable
```py
def _(flag: bool):
if flag:
class PossiblyUnbound:
def __call__(self) -> int: ...
def flag() -> bool: ...
# error: [possibly-unresolved-reference]
a = PossiblyUnbound()
reveal_type(a()) # revealed: int
if flag():
class PossiblyUnbound:
def __call__(self) -> int: ...
# error: [possibly-unresolved-reference]
a = PossiblyUnbound()
reveal_type(a()) # revealed: int
```
## Non-callable `__call__`
@@ -52,50 +54,22 @@ class NonCallable:
__call__ = 1
a = NonCallable()
# error: "Object of type `Unknown | Literal[1]` is not callable (due to union element `Literal[1]`)"
# error: "Object of type `NonCallable` is not callable"
reveal_type(a()) # revealed: Unknown
```
## Possibly non-callable `__call__`
```py
def _(flag: bool):
class NonCallable:
if flag:
__call__ = 1
else:
def __call__(self) -> int: ...
def flag() -> bool: ...
a = NonCallable()
# error: "Object of type `Literal[1] | Literal[__call__]` is not callable (due to union element `Literal[1]`)"
reveal_type(a()) # revealed: Unknown | int
```
## Call binding errors
### Wrong argument type
```py
class C:
def __call__(self, x: int) -> int:
return 1
c = C()
# error: 15 [invalid-argument-type] "Object of type `Literal["foo"]` cannot be assigned to parameter 2 (`x`) of function `__call__`; expected type `int`"
reveal_type(c("foo")) # revealed: int
```
### Wrong argument type on `self`
```py
class C:
# TODO this definition should also be an error; `C` must be assignable to type of `self`
def __call__(self: int) -> int:
return 1
c = C()
# error: 13 [invalid-argument-type] "Object of type `C` cannot be assigned to parameter 1 (`self`) of function `__call__`; expected type `int`"
reveal_type(c()) # revealed: int
class NonCallable:
if flag():
__call__ = 1
else:
def __call__(self) -> int: ...
a = NonCallable()
# error: "Object of type `Literal[1] | Literal[__call__]` is not callable (due to union element `Literal[1]`)"
reveal_type(a()) # revealed: Unknown | int
```

View File

@@ -57,276 +57,12 @@ x = nonsense() # error: "Object of type `Literal[123]` is not callable"
## Potentially unbound function
```py
def _(flag: bool):
if flag:
def foo() -> int:
return 42
# error: [possibly-unresolved-reference]
reveal_type(foo()) # revealed: int
```
## Wrong argument type
### Positional argument, positional-or-keyword parameter
```py
def f(x: int) -> int:
return 1
# error: 15 [invalid-argument-type] "Object of type `Literal["foo"]` cannot be assigned to parameter 1 (`x`) of function `f`; expected type `int`"
reveal_type(f("foo")) # revealed: int
```
### Positional argument, positional-only parameter
```py
def f(x: int, /) -> int:
return 1
# error: 15 [invalid-argument-type] "Object of type `Literal["foo"]` cannot be assigned to parameter 1 (`x`) of function `f`; expected type `int`"
reveal_type(f("foo")) # revealed: int
```
### Positional argument, variadic parameter
```py
def f(*args: int) -> int:
return 1
# error: 15 [invalid-argument-type] "Object of type `Literal["foo"]` cannot be assigned to parameter `*args` of function `f`; expected type `int`"
reveal_type(f("foo")) # revealed: int
```
### Keyword argument, positional-or-keyword parameter
```py
def f(x: int) -> int:
return 1
# error: 15 [invalid-argument-type] "Object of type `Literal["foo"]` cannot be assigned to parameter `x` of function `f`; expected type `int`"
reveal_type(f(x="foo")) # revealed: int
```
### Keyword argument, keyword-only parameter
```py
def f(*, x: int) -> int:
return 1
# error: 15 [invalid-argument-type] "Object of type `Literal["foo"]` cannot be assigned to parameter `x` of function `f`; expected type `int`"
reveal_type(f(x="foo")) # revealed: int
```
### Keyword argument, keywords parameter
```py
def f(**kwargs: int) -> int:
return 1
# error: 15 [invalid-argument-type] "Object of type `Literal["foo"]` cannot be assigned to parameter `**kwargs` of function `f`; expected type `int`"
reveal_type(f(x="foo")) # revealed: int
```
### Correctly match keyword out-of-order
```py
def f(x: int = 1, y: str = "foo") -> int:
return 1
# error: 15 [invalid-argument-type] "Object of type `Literal[2]` cannot be assigned to parameter `y` of function `f`; expected type `str`"
# error: 20 [invalid-argument-type] "Object of type `Literal["bar"]` cannot be assigned to parameter `x` of function `f`; expected type `int`"
reveal_type(f(y=2, x="bar")) # revealed: int
```
## Too many positional arguments
### One too many
```py
def f() -> int:
return 1
# error: 15 [too-many-positional-arguments] "Too many positional arguments to function `f`: expected 0, got 1"
reveal_type(f("foo")) # revealed: int
```
### Two too many
```py
def f() -> int:
return 1
# error: 15 [too-many-positional-arguments] "Too many positional arguments to function `f`: expected 0, got 2"
reveal_type(f("foo", "bar")) # revealed: int
```
### No too-many-positional if variadic is taken
```py
def f(*args: int) -> int:
return 1
reveal_type(f(1, 2, 3)) # revealed: int
```
### Multiple keyword arguments map to keyword variadic parameter
```py
def f(**kwargs: int) -> int:
return 1
reveal_type(f(foo=1, bar=2)) # revealed: int
```
## Missing arguments
### No defaults or variadic
```py
def f(x: int) -> int:
return 1
# error: 13 [missing-argument] "No argument provided for required parameter `x` of function `f`"
reveal_type(f()) # revealed: int
```
### With default
```py
def f(x: int, y: str = "foo") -> int:
return 1
# error: 13 [missing-argument] "No argument provided for required parameter `x` of function `f`"
reveal_type(f()) # revealed: int
```
### Defaulted argument is not required
```py
def f(x: int = 1) -> int:
return 1
reveal_type(f()) # revealed: int
```
### With variadic
```py
def f(x: int, *y: str) -> int:
return 1
# error: 13 [missing-argument] "No argument provided for required parameter `x` of function `f`"
reveal_type(f()) # revealed: int
```
### Variadic argument is not required
```py
def f(*args: int) -> int:
return 1
reveal_type(f()) # revealed: int
```
### Keywords argument is not required
```py
def f(**kwargs: int) -> int:
return 1
reveal_type(f()) # revealed: int
```
### Multiple
```py
def f(x: int, y: int) -> int:
return 1
# error: 13 [missing-argument] "No arguments provided for required parameters `x`, `y` of function `f`"
reveal_type(f()) # revealed: int
```
## Unknown argument
```py
def f(x: int) -> int:
return 1
# error: 20 [unknown-argument] "Argument `y` does not match any known parameter of function `f`"
reveal_type(f(x=1, y=2)) # revealed: int
```
## Parameter already assigned
```py
def f(x: int) -> int:
return 1
# error: 18 [parameter-already-assigned] "Multiple values provided for parameter `x` of function `f`"
reveal_type(f(1, x=2)) # revealed: int
```
## Special functions
Some functions require special handling in type inference. Here, we make sure that we still emit
proper diagnostics in case of missing or superfluous arguments.
### `reveal_type`
```py
from typing_extensions import reveal_type
# error: [missing-argument] "No argument provided for required parameter `obj` of function `reveal_type`"
reveal_type() # revealed: Unknown
# error: [too-many-positional-arguments] "Too many positional arguments to function `reveal_type`: expected 1, got 2"
reveal_type(1, 2) # revealed: Literal[1]
```
### `static_assert`
```py
from knot_extensions import static_assert
# error: [missing-argument] "No argument provided for required parameter `condition` of function `static_assert`"
# error: [static-assert-error]
static_assert()
# error: [too-many-positional-arguments] "Too many positional arguments to function `static_assert`: expected 2, got 3"
static_assert(True, 2, 3)
```
### `len`
```py
# error: [missing-argument] "No argument provided for required parameter `obj` of function `len`"
len()
# error: [too-many-positional-arguments] "Too many positional arguments to function `len`: expected 1, got 2"
len([], 1)
```
### Type API predicates
```py
from knot_extensions import is_subtype_of, is_fully_static
# error: [missing-argument]
is_subtype_of()
# error: [missing-argument]
is_subtype_of(int)
# error: [too-many-positional-arguments]
is_subtype_of(int, int, int)
# error: [too-many-positional-arguments]
is_subtype_of(int, int, int, int)
# error: [missing-argument]
is_fully_static()
# error: [too-many-positional-arguments]
is_fully_static(int, int)
def flag() -> bool: ...
if flag():
def foo() -> int:
return 42
# error: [possibly-unresolved-reference]
reveal_type(foo()) # revealed: int
```

View File

@@ -1,44 +0,0 @@
# Invalid signatures
## Multiple arguments with the same name
We always map a keyword argument to the first parameter of that name.
```py
# error: [invalid-syntax] "Duplicate parameter "x""
def f(x: int, x: str) -> int:
return 1
# error: 13 [missing-argument] "No argument provided for required parameter `x` of function `f`"
# error: 18 [parameter-already-assigned] "Multiple values provided for parameter `x` of function `f`"
reveal_type(f(1, x=2)) # revealed: int
```
## Positional after non-positional
When parameter kinds are given in an invalid order, we emit a diagnostic and implicitly reorder them
to the valid order:
```py
# error: [invalid-syntax] "Parameter cannot follow var-keyword parameter"
def f(**kw: int, x: str) -> int:
return 1
# error: 15 [invalid-argument-type] "Object of type `Literal[1]` cannot be assigned to parameter 1 (`x`) of function `f`; expected type `str`"
reveal_type(f(1)) # revealed: int
```
## Non-defaulted after defaulted
We emit a syntax diagnostic for this, but it doesn't cause any problems for binding.
```py
# error: [invalid-syntax] "Parameter without a default cannot follow a parameter with a default"
def f(x: int = 1, y: str) -> int:
return 1
reveal_type(f(y="foo")) # revealed: int
# error: [invalid-argument-type] "Object of type `Literal["foo"]` cannot be assigned to parameter 1 (`x`) of function `f`; expected type `int`"
# error: [missing-argument] "No argument provided for required parameter `y` of function `f`"
reveal_type(f("foo")) # revealed: int
```

View File

@@ -3,14 +3,22 @@
## Union of return types
```py
def _(flag: bool):
if flag:
def f() -> int:
return 1
else:
def f() -> str:
return "foo"
reveal_type(f()) # revealed: int | str
def bool_instance() -> bool:
return True
flag = bool_instance()
if flag:
def f() -> int:
return 1
else:
def f() -> str:
return "foo"
reveal_type(f()) # revealed: int | str
```
## Calling with an unknown union
@@ -18,10 +26,13 @@ def _(flag: bool):
```py
from nonexistent import f # error: [unresolved-import] "Cannot resolve import `nonexistent`"
def coinflip() -> bool:
def bool_instance() -> bool:
return True
if coinflip():
flag = bool_instance()
if flag:
def f() -> int:
return 1
@@ -33,14 +44,20 @@ reveal_type(f()) # revealed: Unknown | int
Calling a union with a non-callable element should emit a diagnostic.
```py
def _(flag: bool):
if flag:
f = 1
else:
def f() -> int:
return 1
x = f() # error: "Object of type `Literal[1] | Literal[f]` is not callable (due to union element `Literal[1]`)"
reveal_type(x) # revealed: Unknown | int
def bool_instance() -> bool:
return True
flag = bool_instance()
if flag:
f = 1
else:
def f() -> int:
return 1
x = f() # error: "Object of type `Literal[1] | Literal[f]` is not callable (due to union element `Literal[1]`)"
reveal_type(x) # revealed: Unknown | int
```
## Multiple non-callable elements in a union
@@ -48,17 +65,23 @@ def _(flag: bool):
Calling a union with multiple non-callable elements should mention all of them in the diagnostic.
```py
def _(flag: bool, flag2: bool):
if flag:
f = 1
elif flag2:
f = "foo"
else:
def f() -> int:
return 1
# error: "Object of type `Literal[1, "foo"] | Literal[f]` is not callable (due to union elements Literal[1], Literal["foo"])"
# revealed: Unknown | int
reveal_type(f())
def bool_instance() -> bool:
return True
flag, flag2 = bool_instance(), bool_instance()
if flag:
f = 1
elif flag2:
f = "foo"
else:
def f() -> int:
return 1
# error: "Object of type `Literal[1] | Literal["foo"] | Literal[f]` is not callable (due to union elements Literal[1], Literal["foo"])"
# revealed: Unknown | int
reveal_type(f())
```
## All non-callable union elements
@@ -66,12 +89,16 @@ def _(flag: bool, flag2: bool):
Calling a union with no callable elements can emit a simpler diagnostic.
```py
def _(flag: bool):
if flag:
f = 1
else:
f = "foo"
def bool_instance() -> bool:
return True
x = f() # error: "Object of type `Literal[1, "foo"]` is not callable"
reveal_type(x) # revealed: Unknown
flag = bool_instance()
if flag:
f = 1
else:
f = "foo"
x = f() # error: "Object of type `Literal[1] | Literal["foo"]` is not callable"
reveal_type(x) # revealed: Unknown
```

View File

@@ -3,31 +3,38 @@
```py
class A: ...
def _(a1: A, a2: A, o: object):
n1 = None
n2 = None
def get_a() -> A: ...
def get_object() -> object: ...
reveal_type(a1 is a1) # revealed: bool
reveal_type(a1 is a2) # revealed: bool
a1 = get_a()
a2 = get_a()
reveal_type(n1 is n1) # revealed: Literal[True]
reveal_type(n1 is n2) # revealed: Literal[True]
n1 = None
n2 = None
reveal_type(a1 is n1) # revealed: Literal[False]
reveal_type(n1 is a1) # revealed: Literal[False]
o = get_object()
reveal_type(a1 is o) # revealed: bool
reveal_type(n1 is o) # revealed: bool
reveal_type(a1 is a1) # revealed: bool
reveal_type(a1 is a2) # revealed: bool
reveal_type(a1 is not a1) # revealed: bool
reveal_type(a1 is not a2) # revealed: bool
reveal_type(n1 is n1) # revealed: Literal[True]
reveal_type(n1 is n2) # revealed: Literal[True]
reveal_type(n1 is not n1) # revealed: Literal[False]
reveal_type(n1 is not n2) # revealed: Literal[False]
reveal_type(a1 is n1) # revealed: Literal[False]
reveal_type(n1 is a1) # revealed: Literal[False]
reveal_type(a1 is not n1) # revealed: Literal[True]
reveal_type(n1 is not a1) # revealed: Literal[True]
reveal_type(a1 is o) # revealed: bool
reveal_type(n1 is o) # revealed: bool
reveal_type(a1 is not o) # revealed: bool
reveal_type(n1 is not o) # revealed: bool
reveal_type(a1 is not a1) # revealed: bool
reveal_type(a1 is not a2) # revealed: bool
reveal_type(n1 is not n1) # revealed: Literal[False]
reveal_type(n1 is not n2) # revealed: Literal[False]
reveal_type(a1 is not n1) # revealed: Literal[True]
reveal_type(n1 is not a1) # revealed: Literal[True]
reveal_type(a1 is not o) # revealed: bool
reveal_type(n1 is not o) # revealed: bool
```

View File

@@ -312,9 +312,17 @@ reveal_type(1 <= 2j) # revealed: bool
reveal_type(1 > 2j) # revealed: bool
reveal_type(1 >= 2j) # revealed: bool
def f(x: bool, y: int):
reveal_type(x < y) # revealed: bool
reveal_type(y < x) # revealed: bool
reveal_type(4.2 < x) # revealed: bool
reveal_type(x < 4.2) # revealed: bool
def bool_instance() -> bool:
return True
def int_instance() -> int:
return 42
x = bool_instance()
y = int_instance()
reveal_type(x < y) # revealed: bool
reveal_type(y < x) # revealed: bool
reveal_type(4.2 < x) # revealed: bool
reveal_type(x < 4.2) # revealed: bool
```

View File

@@ -20,8 +20,10 @@ reveal_type(1 <= "" and 0 < 1) # revealed: bool
```py
# TODO: implement lookup of `__eq__` on typeshed `int` stub.
def _(a: int, b: int):
reveal_type(1 == a) # revealed: bool
reveal_type(9 < a) # revealed: bool
reveal_type(a < b) # revealed: bool
def int_instance() -> int:
return 42
reveal_type(1 == int_instance()) # revealed: bool
reveal_type(9 < int_instance()) # revealed: bool
reveal_type(int_instance() < int_instance()) # revealed: bool
```

View File

@@ -6,8 +6,6 @@ If we have an intersection type `A & B` and we get a definitive true/false answe
types, we can infer that the result for the intersection type is also true/false:
```py
from typing import Literal
class Base: ...
class Child1(Base):
@@ -16,19 +14,21 @@ class Child1(Base):
class Child2(Base): ...
def _(x: Base):
c1 = Child1()
def get_base() -> Base: ...
# Create an intersection type through narrowing:
if isinstance(x, Child1):
if isinstance(x, Child2):
reveal_type(x) # revealed: Child1 & Child2
x = get_base()
c1 = Child1()
reveal_type(x == 1) # revealed: Literal[True]
# Create an intersection type through narrowing:
if isinstance(x, Child1):
if isinstance(x, Child2):
reveal_type(x) # revealed: Child1 & Child2
# Other comparison operators fall back to the base type:
reveal_type(x > 1) # revealed: bool
reveal_type(x is c1) # revealed: bool
reveal_type(x == 1) # revealed: Literal[True]
# Other comparison operators fall back to the base type:
reveal_type(x > 1) # revealed: bool
reveal_type(x is c1) # revealed: bool
```
## Negative contributions
@@ -73,15 +73,18 @@ if x != "abc":
#### Integers
```py
def _(x: int):
if x != 1:
reveal_type(x) # revealed: int & ~Literal[1]
def get_int() -> int: ...
reveal_type(x != 1) # revealed: Literal[True]
reveal_type(x != 2) # revealed: bool
x = get_int()
reveal_type(x == 1) # revealed: Literal[False]
reveal_type(x == 2) # revealed: bool
if x != 1:
reveal_type(x) # revealed: int & ~Literal[1]
reveal_type(x != 1) # revealed: Literal[True]
reveal_type(x != 2) # revealed: bool
reveal_type(x == 1) # revealed: Literal[False]
reveal_type(x == 2) # revealed: bool
```
### Identity comparisons
@@ -89,14 +92,18 @@ def _(x: int):
```py
class A: ...
def _(o: object):
a = A()
n = None
def get_object() -> object: ...
if o is not None:
reveal_type(o) # revealed: ~None
reveal_type(o is n) # revealed: Literal[False]
reveal_type(o is not n) # revealed: Literal[True]
o = object()
a = A()
n = None
if o is not None:
reveal_type(o) # revealed: object & ~None
reveal_type(o is n) # revealed: Literal[False]
reveal_type(o is not n) # revealed: Literal[True]
```
## Diagnostics
@@ -112,13 +119,16 @@ class Container:
class NonContainer: ...
def _(x: object):
if isinstance(x, Container):
if isinstance(x, NonContainer):
reveal_type(x) # revealed: Container & NonContainer
def get_object() -> object: ...
# error: [unsupported-operator] "Operator `in` is not supported for types `int` and `NonContainer`"
reveal_type(2 in x) # revealed: bool
x = get_object()
if isinstance(x, Container):
if isinstance(x, NonContainer):
reveal_type(x) # revealed: Container & NonContainer
# error: [unsupported-operator] "Operator `in` is not supported for types `int` and `NonContainer`"
reveal_type(2 in x) # revealed: bool
```
### Unsupported operators for negative contributions
@@ -132,11 +142,14 @@ class Container:
class NonContainer: ...
def _(x: object):
if isinstance(x, Container):
if not isinstance(x, NonContainer):
reveal_type(x) # revealed: Container & ~NonContainer
def get_object() -> object: ...
# No error here!
reveal_type(2 in x) # revealed: bool
x = get_object()
if isinstance(x, Container):
if not isinstance(x, NonContainer):
reveal_type(x) # revealed: Container & ~NonContainer
# No error here!
reveal_type(2 in x) # revealed: bool
```

View File

@@ -31,10 +31,10 @@ class C:
def __lt__(self, other) -> C: ...
x = A() < B() < C()
reveal_type(x) # revealed: A & ~AlwaysTruthy | B
reveal_type(x) # revealed: A | B
y = 0 < 1 < A() < 3
reveal_type(y) # revealed: Literal[False] | A
reveal_type(y) # revealed: bool | A
z = 10 < 0 < A() < B() < C()
reveal_type(z) # revealed: Literal[False]

View File

@@ -3,17 +3,18 @@
## String literals
```py
def _(x: str):
reveal_type("abc" == "abc") # revealed: Literal[True]
reveal_type("ab_cd" <= "ab_ce") # revealed: Literal[True]
reveal_type("abc" in "ab cd") # revealed: Literal[False]
reveal_type("" not in "hello") # revealed: Literal[False]
reveal_type("--" is "--") # revealed: bool
reveal_type("A" is "B") # revealed: Literal[False]
reveal_type("--" is not "--") # revealed: bool
reveal_type("A" is not "B") # revealed: Literal[True]
reveal_type(x < "...") # revealed: bool
def str_instance() -> str: ...
# ensure we're not comparing the interned salsa symbols, which compare by order of declaration.
reveal_type("ab" < "ab_cd") # revealed: Literal[True]
reveal_type("abc" == "abc") # revealed: Literal[True]
reveal_type("ab_cd" <= "ab_ce") # revealed: Literal[True]
reveal_type("abc" in "ab cd") # revealed: Literal[False]
reveal_type("" not in "hello") # revealed: Literal[False]
reveal_type("--" is "--") # revealed: bool
reveal_type("A" is "B") # revealed: Literal[False]
reveal_type("--" is not "--") # revealed: bool
reveal_type("A" is not "B") # revealed: Literal[True]
reveal_type(str_instance() < "...") # revealed: bool
# ensure we're not comparing the interned salsa symbols, which compare by order of declaration.
reveal_type("ab" < "ab_cd") # revealed: Literal[True]
```

View File

@@ -33,7 +33,7 @@ reveal_type(a >= b) # revealed: Literal[False]
Even when tuples have different lengths, comparisons should be handled appropriately.
```py
```py path=different_length.py
a = (1, 2, 3)
b = (1, 2, 3, 4)
@@ -58,23 +58,28 @@ reveal_type(c >= d) # revealed: Literal[True]
#### Results with Ambiguity
```py
def _(x: bool, y: int):
a = (x,)
b = (y,)
def bool_instance() -> bool:
return True
reveal_type(a == a) # revealed: bool
reveal_type(a != a) # revealed: bool
reveal_type(a < a) # revealed: bool
reveal_type(a <= a) # revealed: bool
reveal_type(a > a) # revealed: bool
reveal_type(a >= a) # revealed: bool
def int_instance() -> int:
return 42
reveal_type(a == b) # revealed: bool
reveal_type(a != b) # revealed: bool
reveal_type(a < b) # revealed: bool
reveal_type(a <= b) # revealed: bool
reveal_type(a > b) # revealed: bool
reveal_type(a >= b) # revealed: bool
a = (bool_instance(),)
b = (int_instance(),)
reveal_type(a == a) # revealed: bool
reveal_type(a != a) # revealed: bool
reveal_type(a < a) # revealed: bool
reveal_type(a <= a) # revealed: bool
reveal_type(a > a) # revealed: bool
reveal_type(a >= a) # revealed: bool
reveal_type(a == b) # revealed: bool
reveal_type(a != b) # revealed: bool
reveal_type(a < b) # revealed: bool
reveal_type(a <= b) # revealed: bool
reveal_type(a > b) # revealed: bool
reveal_type(a >= b) # revealed: bool
```
#### Comparison Unsupported
@@ -102,7 +107,7 @@ reveal_type(a >= b) # revealed: bool
However, if the lexicographic comparison completes without reaching a point where str and int are
compared, Python will still produce a result based on the prior elements.
```py
```py path=short_circuit.py
a = (1, 2)
b = (999999, "hello")
@@ -192,7 +197,7 @@ reveal_type((A(), B()) < (A(), B())) # revealed: float | set | Literal[False]
#### Special Handling of Eq and NotEq in Lexicographic Comparisons
> Example: `(<int instance>, "foo") == (<int instance>, "bar")`
> Example: `(int_instance(), "foo") == (int_instance(), "bar")`
`Eq` and `NotEq` have unique behavior compared to other operators in lexicographic comparisons.
Specifically, for `Eq`, if any non-equal pair exists within the tuples being compared, we can immediately conclude that the tuples are not equal.
@@ -203,38 +208,42 @@ In contrast, with operators like `<` and `>`, the comparison must consider each
sequentially, and the final outcome might remain ambiguous until all pairs are compared.
```py
def _(x: str, y: int):
reveal_type("foo" == "bar") # revealed: Literal[False]
reveal_type(("foo",) == ("bar",)) # revealed: Literal[False]
reveal_type((4, "foo") == (4, "bar")) # revealed: Literal[False]
reveal_type((y, "foo") == (y, "bar")) # revealed: Literal[False]
def str_instance() -> str:
return "hello"
a = (x, y, "foo")
def int_instance() -> int:
return 42
reveal_type(a == a) # revealed: bool
reveal_type(a != a) # revealed: bool
reveal_type(a < a) # revealed: bool
reveal_type(a <= a) # revealed: bool
reveal_type(a > a) # revealed: bool
reveal_type(a >= a) # revealed: bool
reveal_type("foo" == "bar") # revealed: Literal[False]
reveal_type(("foo",) == ("bar",)) # revealed: Literal[False]
reveal_type((4, "foo") == (4, "bar")) # revealed: Literal[False]
reveal_type((int_instance(), "foo") == (int_instance(), "bar")) # revealed: Literal[False]
b = (x, y, "bar")
a = (str_instance(), int_instance(), "foo")
reveal_type(a == b) # revealed: Literal[False]
reveal_type(a != b) # revealed: Literal[True]
reveal_type(a < b) # revealed: bool
reveal_type(a <= b) # revealed: bool
reveal_type(a > b) # revealed: bool
reveal_type(a >= b) # revealed: bool
reveal_type(a == a) # revealed: bool
reveal_type(a != a) # revealed: bool
reveal_type(a < a) # revealed: bool
reveal_type(a <= a) # revealed: bool
reveal_type(a > a) # revealed: bool
reveal_type(a >= a) # revealed: bool
c = (x, y, "foo", "different_length")
b = (str_instance(), int_instance(), "bar")
reveal_type(a == c) # revealed: Literal[False]
reveal_type(a != c) # revealed: Literal[True]
reveal_type(a < c) # revealed: bool
reveal_type(a <= c) # revealed: bool
reveal_type(a > c) # revealed: bool
reveal_type(a >= c) # revealed: bool
reveal_type(a == b) # revealed: Literal[False]
reveal_type(a != b) # revealed: Literal[True]
reveal_type(a < b) # revealed: bool
reveal_type(a <= b) # revealed: bool
reveal_type(a > b) # revealed: bool
reveal_type(a >= b) # revealed: bool
c = (str_instance(), int_instance(), "foo", "different_length")
reveal_type(a == c) # revealed: Literal[False]
reveal_type(a != c) # revealed: Literal[True]
reveal_type(a < c) # revealed: bool
reveal_type(a <= c) # revealed: bool
reveal_type(a > c) # revealed: bool
reveal_type(a >= c) # revealed: bool
```
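To make the short-circuiting above concrete, here is a minimal sketch of the folding logic, using
plain values as stand-ins for inferred element types. `infer_tuple_eq` and `known_unequal` are
hypothetical helpers, not red-knot APIs:

```py
def known_unequal(left: object, right: object) -> bool:
    # Toy rule: two literals of the same kind that differ are "known unequal".
    return type(left) is type(right) and left != right

def infer_tuple_eq(left: tuple, right: tuple) -> str:
    if len(left) != len(right):
        return "Literal[False]"  # tuples of different lengths are never equal
    for l, r in zip(left, right):
        if known_unequal(l, r):
            return "Literal[False]"  # one unequal pair decides the whole comparison
    return "bool"  # otherwise the result may still be ambiguous

print(infer_tuple_eq((4, "foo"), (4, "bar")))  # Literal[False]
```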
#### Error Propagation
@@ -243,36 +252,42 @@ Errors occurring within a tuple comparison should propagate outward. However, if
the comparison can clearly conclude before encountering an error, the error should not be raised.
```py
def _(n: int, s: str):
class A: ...
# error: [unsupported-operator] "Operator `<` is not supported for types `A` and `A`"
A() < A()
# error: [unsupported-operator] "Operator `<=` is not supported for types `A` and `A`"
A() <= A()
# error: [unsupported-operator] "Operator `>` is not supported for types `A` and `A`"
A() > A()
# error: [unsupported-operator] "Operator `>=` is not supported for types `A` and `A`"
A() >= A()
def int_instance() -> int:
return 42
a = (0, n, A())
def str_instance() -> str:
return "hello"
# error: [unsupported-operator] "Operator `<` is not supported for types `A` and `A`, in comparing `tuple[Literal[0], int, A]` with `tuple[Literal[0], int, A]`"
reveal_type(a < a) # revealed: Unknown
# error: [unsupported-operator] "Operator `<=` is not supported for types `A` and `A`, in comparing `tuple[Literal[0], int, A]` with `tuple[Literal[0], int, A]`"
reveal_type(a <= a) # revealed: Unknown
# error: [unsupported-operator] "Operator `>` is not supported for types `A` and `A`, in comparing `tuple[Literal[0], int, A]` with `tuple[Literal[0], int, A]`"
reveal_type(a > a) # revealed: Unknown
# error: [unsupported-operator] "Operator `>=` is not supported for types `A` and `A`, in comparing `tuple[Literal[0], int, A]` with `tuple[Literal[0], int, A]`"
reveal_type(a >= a) # revealed: Unknown
class A: ...
# Comparison between `a` and `b` should only involve the first elements, `Literal[0]` and `Literal[99999]`,
# and should terminate immediately.
b = (99999, n, A())
# error: [unsupported-operator] "Operator `<` is not supported for types `A` and `A`"
A() < A()
# error: [unsupported-operator] "Operator `<=` is not supported for types `A` and `A`"
A() <= A()
# error: [unsupported-operator] "Operator `>` is not supported for types `A` and `A`"
A() > A()
# error: [unsupported-operator] "Operator `>=` is not supported for types `A` and `A`"
A() >= A()
reveal_type(a < b) # revealed: Literal[True]
reveal_type(a <= b) # revealed: Literal[True]
reveal_type(a > b) # revealed: Literal[False]
reveal_type(a >= b) # revealed: Literal[False]
a = (0, int_instance(), A())
# error: [unsupported-operator] "Operator `<` is not supported for types `A` and `A`, in comparing `tuple[Literal[0], int, A]` with `tuple[Literal[0], int, A]`"
reveal_type(a < a) # revealed: Unknown
# error: [unsupported-operator] "Operator `<=` is not supported for types `A` and `A`, in comparing `tuple[Literal[0], int, A]` with `tuple[Literal[0], int, A]`"
reveal_type(a <= a) # revealed: Unknown
# error: [unsupported-operator] "Operator `>` is not supported for types `A` and `A`, in comparing `tuple[Literal[0], int, A]` with `tuple[Literal[0], int, A]`"
reveal_type(a > a) # revealed: Unknown
# error: [unsupported-operator] "Operator `>=` is not supported for types `A` and `A`, in comparing `tuple[Literal[0], int, A]` with `tuple[Literal[0], int, A]`"
reveal_type(a >= a) # revealed: Unknown
# Comparison between `a` and `b` should only involve the first elements, `Literal[0]` and `Literal[99999]`,
# and should terminate immediately.
b = (99999, int_instance(), A())
reveal_type(a < b) # revealed: Literal[True]
reveal_type(a <= b) # revealed: Literal[True]
reveal_type(a > b) # revealed: Literal[False]
reveal_type(a >= b) # revealed: Literal[False]
```
### Membership Test Comparisons
@@ -280,20 +295,22 @@ def _(n: int, s: str):
"Membership Test Comparisons" refers to the operators `in` and `not in`.
```py
def _(n: int):
a = (1, 2)
b = ((3, 4), (1, 2))
c = ((1, 2, 3), (4, 5, 6))
d = ((n, n), (n, n))
def int_instance() -> int:
return 42
reveal_type(a in b) # revealed: Literal[True]
reveal_type(a not in b) # revealed: Literal[False]
a = (1, 2)
b = ((3, 4), (1, 2))
c = ((1, 2, 3), (4, 5, 6))
d = ((int_instance(), int_instance()), (int_instance(), int_instance()))
reveal_type(a in c) # revealed: Literal[False]
reveal_type(a not in c) # revealed: Literal[True]
reveal_type(a in b) # revealed: Literal[True]
reveal_type(a not in b) # revealed: Literal[False]
reveal_type(a in d) # revealed: bool
reveal_type(a not in d) # revealed: bool
reveal_type(a in c) # revealed: Literal[False]
reveal_type(a not in c) # revealed: Literal[True]
reveal_type(a in d) # revealed: bool
reveal_type(a not in d) # revealed: bool
```
### Identity Comparisons

View File

@@ -5,46 +5,49 @@
Comparisons on union types need to consider all possible cases:
```py
def _(flag: bool):
one_or_two = 1 if flag else 2
def bool_instance() -> bool:
return True
reveal_type(one_or_two <= 2) # revealed: Literal[True]
reveal_type(one_or_two <= 1) # revealed: bool
reveal_type(one_or_two <= 0) # revealed: Literal[False]
flag = bool_instance()
one_or_two = 1 if flag else 2
reveal_type(2 >= one_or_two) # revealed: Literal[True]
reveal_type(1 >= one_or_two) # revealed: bool
reveal_type(0 >= one_or_two) # revealed: Literal[False]
reveal_type(one_or_two <= 2) # revealed: Literal[True]
reveal_type(one_or_two <= 1) # revealed: bool
reveal_type(one_or_two <= 0) # revealed: Literal[False]
reveal_type(one_or_two < 1) # revealed: Literal[False]
reveal_type(one_or_two < 2) # revealed: bool
reveal_type(one_or_two < 3) # revealed: Literal[True]
reveal_type(2 >= one_or_two) # revealed: Literal[True]
reveal_type(1 >= one_or_two) # revealed: bool
reveal_type(0 >= one_or_two) # revealed: Literal[False]
reveal_type(one_or_two > 0) # revealed: Literal[True]
reveal_type(one_or_two > 1) # revealed: bool
reveal_type(one_or_two > 2) # revealed: Literal[False]
reveal_type(one_or_two < 1) # revealed: Literal[False]
reveal_type(one_or_two < 2) # revealed: bool
reveal_type(one_or_two < 3) # revealed: Literal[True]
reveal_type(one_or_two == 3) # revealed: Literal[False]
reveal_type(one_or_two == 1) # revealed: bool
reveal_type(one_or_two > 0) # revealed: Literal[True]
reveal_type(one_or_two > 1) # revealed: bool
reveal_type(one_or_two > 2) # revealed: Literal[False]
reveal_type(one_or_two != 3) # revealed: Literal[True]
reveal_type(one_or_two != 1) # revealed: bool
reveal_type(one_or_two == 3) # revealed: Literal[False]
reveal_type(one_or_two == 1) # revealed: bool
a_or_ab = "a" if flag else "ab"
reveal_type(one_or_two != 3) # revealed: Literal[True]
reveal_type(one_or_two != 1) # revealed: bool
reveal_type(a_or_ab in "ab") # revealed: Literal[True]
reveal_type("a" in a_or_ab) # revealed: Literal[True]
a_or_ab = "a" if flag else "ab"
reveal_type("c" not in a_or_ab) # revealed: Literal[True]
reveal_type("a" not in a_or_ab) # revealed: Literal[False]
reveal_type(a_or_ab in "ab") # revealed: Literal[True]
reveal_type("a" in a_or_ab) # revealed: Literal[True]
reveal_type("b" in a_or_ab) # revealed: bool
reveal_type("b" not in a_or_ab) # revealed: bool
reveal_type("c" not in a_or_ab) # revealed: Literal[True]
reveal_type("a" not in a_or_ab) # revealed: Literal[False]
one_or_none = 1 if flag else None
reveal_type("b" in a_or_ab) # revealed: bool
reveal_type("b" not in a_or_ab) # revealed: bool
reveal_type(one_or_none is None) # revealed: bool
reveal_type(one_or_none is not None) # revealed: bool
one_or_none = 1 if flag else None
reveal_type(one_or_none is None) # revealed: bool
reveal_type(one_or_none is not None) # revealed: bool
```
## Union on both sides of the comparison
@@ -53,15 +56,18 @@ With unions on both sides, we need to consider the full cross product of options
resulting (union) type:
```py
def _(flag_s: bool, flag_l: bool):
small = 1 if flag_s else 2
large = 2 if flag_l else 3
def bool_instance() -> bool:
return True
reveal_type(small <= large) # revealed: Literal[True]
reveal_type(small >= large) # revealed: bool
flag_s, flag_l = bool_instance(), bool_instance()
small = 1 if flag_s else 2
large = 2 if flag_l else 3
reveal_type(small < large) # revealed: bool
reveal_type(small > large) # revealed: Literal[False]
reveal_type(small <= large) # revealed: Literal[True]
reveal_type(small >= large) # revealed: bool
reveal_type(small < large) # revealed: bool
reveal_type(small > large) # revealed: Literal[False]
```
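As a rough illustration of that cross product (a sketch with a hypothetical `union_lt_outcomes`
helper, not checker code), the result type is the union of the outcomes of comparing every member
of the left union with every member of the right union:

```py
from itertools import product

def union_lt_outcomes(left_members: list[int], right_members: list[int]) -> set[bool]:
    # Each member stands in for one variant of a union; the set of possible
    # outcomes corresponds to the inferred result type of `<`.
    return {l < r for l, r in product(left_members, right_members)}

# small = Literal[1, 2], large = Literal[2, 3]
print(union_lt_outcomes([1, 2], [2, 3]))  # {False, True} -> plain `bool`
print(union_lt_outcomes([2, 3], [1, 2]))  # {False}       -> `Literal[False]`
```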
## Unsupported operations
@@ -71,9 +77,12 @@ back to `bool` for the result type instead of trying to infer something more pre
(supported) variants:
```py
def _(flag: bool):
x = [1, 2] if flag else 1
def bool_instance() -> bool:
return True
result = 1 in x # error: "Operator `in` is not supported"
reveal_type(result) # revealed: bool
flag = bool_instance()
x = [1, 2] if flag else 1
result = 1 in x # error: "Operator `in` is not supported"
reveal_type(result) # revealed: bool
```

View File

@@ -1,38 +1,42 @@
# Comparison: Unsupported operators
```py
def _(flag: bool, flag1: bool, flag2: bool):
class A: ...
a = 1 in 7 # error: "Operator `in` is not supported for types `Literal[1]` and `Literal[7]`"
reveal_type(a) # revealed: bool
def bool_instance() -> bool:
return True
b = 0 not in 10 # error: "Operator `not in` is not supported for types `Literal[0]` and `Literal[10]`"
reveal_type(b) # revealed: bool
class A: ...
# TODO: should error, once operand type check is implemented
# ("Operator `<` is not supported for types `object` and `int`")
c = object() < 5
# TODO: should be Unknown, once operand type check is implemented
reveal_type(c) # revealed: bool
a = 1 in 7 # error: "Operator `in` is not supported for types `Literal[1]` and `Literal[7]`"
reveal_type(a) # revealed: bool
# TODO: should error, once operand type check is implemented
# ("Operator `<` is not supported for types `int` and `object`")
d = 5 < object()
# TODO: should be Unknown, once operand type check is implemented
reveal_type(d) # revealed: bool
b = 0 not in 10 # error: "Operator `not in` is not supported for types `Literal[0]` and `Literal[10]`"
reveal_type(b) # revealed: bool
int_literal_or_str_literal = 1 if flag else "foo"
# error: "Operator `in` is not supported for types `Literal[42]` and `Literal[1]`, in comparing `Literal[42]` with `Literal[1, "foo"]`"
e = 42 in int_literal_or_str_literal
reveal_type(e) # revealed: bool
# TODO: should error, once operand type check is implemented
# ("Operator `<` is not supported for types `object` and `int`")
c = object() < 5
# TODO: should be Unknown, once operand type check is implemented
reveal_type(c) # revealed: bool
# TODO: should error, need to check if __lt__ signature is valid for right operand
# error may be "Operator `<` is not supported for types `int` and `str`, in comparing `tuple[Literal[1], Literal[2]]` with `tuple[Literal[1], Literal["hello"]]`
f = (1, 2) < (1, "hello")
# TODO: should be Unknown, once operand type check is implemented
reveal_type(f) # revealed: bool
# TODO: should error, once operand type check is implemented
# ("Operator `<` is not supported for types `int` and `object`")
d = 5 < object()
# TODO: should be Unknown, once operand type check is implemented
reveal_type(d) # revealed: bool
# error: [unsupported-operator] "Operator `<` is not supported for types `A` and `A`, in comparing `tuple[bool, A]` with `tuple[bool, A]`"
g = (flag1, A()) < (flag2, A())
reveal_type(g) # revealed: Unknown
flag = bool_instance()
int_literal_or_str_literal = 1 if flag else "foo"
# error: "Operator `in` is not supported for types `Literal[42]` and `Literal[1]`, in comparing `Literal[42]` with `Literal[1] | Literal["foo"]`"
e = 42 in int_literal_or_str_literal
reveal_type(e) # revealed: bool
# TODO: should error, need to check if __lt__ signature is valid for right operand
# error may be "Operator `<` is not supported for types `int` and `str`, in comparing `tuple[Literal[1], Literal[2]]` with `tuple[Literal[1], Literal["hello"]]`
f = (1, 2) < (1, "hello")
# TODO: should be Unknown, once operand type check is implemented
reveal_type(f) # revealed: bool
# error: [unsupported-operator] "Operator `<` is not supported for types `A` and `A`, in comparing `tuple[bool, A]` with `tuple[bool, A]`"
g = (bool_instance(), A()) < (bool_instance(), A())
reveal_type(g) # revealed: Unknown
```

View File

@@ -1,151 +0,0 @@
# Comprehensions
## Basic comprehensions
```py
class IntIterator:
def __next__(self) -> int:
return 42
class IntIterable:
def __iter__(self) -> IntIterator:
return IntIterator()
# revealed: int
[reveal_type(x) for x in IntIterable()]
class IteratorOfIterables:
def __next__(self) -> IntIterable:
return IntIterable()
class IterableOfIterables:
def __iter__(self) -> IteratorOfIterables:
return IteratorOfIterables()
# revealed: tuple[int, IntIterable]
[reveal_type((x, y)) for y in IterableOfIterables() for x in y]
# revealed: int
{reveal_type(x): 0 for x in IntIterable()}
# revealed: int
{0: reveal_type(x) for x in IntIterable()}
```
## Nested comprehension
```py
class IntIterator:
def __next__(self) -> int:
return 42
class IntIterable:
def __iter__(self) -> IntIterator:
return IntIterator()
# TODO: This could be a `tuple[int, int]` if we model that `y` can not be modified in the outer comprehension scope
# revealed: tuple[int, Unknown | int]
[[reveal_type((x, y)) for x in IntIterable()] for y in IntIterable()]
```
## Comprehension referencing outer comprehension
```py
class IntIterator:
def __next__(self) -> int:
return 42
class IntIterable:
def __iter__(self) -> IntIterator:
return IntIterator()
class IteratorOfIterables:
def __next__(self) -> IntIterable:
return IntIterable()
class IterableOfIterables:
def __iter__(self) -> IteratorOfIterables:
return IteratorOfIterables()
# TODO: This could be a `tuple[int, int]` (see above)
# revealed: tuple[int, Unknown | IntIterable]
[[reveal_type((x, y)) for x in y] for y in IterableOfIterables()]
```
## Comprehension with unbound iterable
Iterating over an unbound iterable yields `Unknown`:
```py
# error: [unresolved-reference] "Name `x` used when not defined"
# revealed: Unknown
[reveal_type(z) for z in x]
class IntIterator:
def __next__(self) -> int:
return 42
class IntIterable:
def __iter__(self) -> IntIterator:
return IntIterator()
# error: [not-iterable] "Object of type `int` is not iterable"
# revealed: tuple[int, Unknown]
[reveal_type((x, z)) for x in IntIterable() for z in x]
```
## Starred expressions
Starred expressions must be iterable.
```py
class NotIterable: ...
class Iterator:
def __next__(self) -> int:
return 42
class Iterable:
def __iter__(self) -> Iterator: ...
# This is fine:
x = [*Iterable()]
# error: [not-iterable] "Object of type `NotIterable` is not iterable"
y = [*NotIterable()]
```
## Async comprehensions
### Basic
```py
class AsyncIterator:
async def __anext__(self) -> int:
return 42
class AsyncIterable:
def __aiter__(self) -> AsyncIterator:
return AsyncIterator()
# revealed: @Todo(async iterables/iterators)
[reveal_type(x) async for x in AsyncIterable()]
```
### Invalid async comprehension
This tests that we understand that `async` comprehensions do *not* work according to the synchronous
iteration protocol.
```py
class Iterator:
def __next__(self) -> int:
return 42
class Iterable:
def __iter__(self) -> Iterator:
return Iterator()
# revealed: @Todo(async iterables/iterators)
[reveal_type(x) async for x in Iterable()]
```

View File

@@ -1,43 +0,0 @@
# Comprehensions with invalid syntax
```py
class IntIterator:
def __next__(self) -> int:
return 42
class IntIterable:
def __iter__(self) -> IntIterator:
return IntIterator()
# Missing 'in' keyword.
# It's reasonably clear here what they *meant* to write,
# so we'll still infer the correct type:
# error: [invalid-syntax] "Expected 'in', found name"
# revealed: int
[reveal_type(a) for a IntIterable()]
# Missing iteration variable
# error: [invalid-syntax] "Expected an identifier, but found a keyword 'in' that cannot be used here"
# error: [invalid-syntax] "Expected 'in', found name"
# error: [unresolved-reference]
# revealed: Unknown
[reveal_type(b) for in IntIterable()]
# Missing iterable
# error: [invalid-syntax] "Expected an expression"
# revealed: Unknown
[reveal_type(c) for c in]
# Missing 'in' keyword and missing iterable
# error: [invalid-syntax] "Expected 'in', found ']'"
# revealed: Unknown
[reveal_type(d) for d]
```

View File

@@ -3,35 +3,47 @@
## Simple if-expression
```py
def _(flag: bool):
x = 1 if flag else 2
reveal_type(x) # revealed: Literal[1, 2]
def bool_instance() -> bool:
return True
flag = bool_instance()
x = 1 if flag else 2
reveal_type(x) # revealed: Literal[1, 2]
```
## If-expression with walrus operator
```py
def _(flag: bool):
y = 0
z = 0
x = (y := 1) if flag else (z := 2)
reveal_type(x) # revealed: Literal[1, 2]
reveal_type(y) # revealed: Literal[0, 1]
reveal_type(z) # revealed: Literal[0, 2]
def bool_instance() -> bool:
return True
flag = bool_instance()
y = 0
z = 0
x = (y := 1) if flag else (z := 2)
reveal_type(x) # revealed: Literal[1, 2]
reveal_type(y) # revealed: Literal[0, 1]
reveal_type(z) # revealed: Literal[0, 2]
```
## Nested if-expression
```py
def _(flag: bool, flag2: bool):
x = 1 if flag else 2 if flag2 else 3
reveal_type(x) # revealed: Literal[1, 2, 3]
def bool_instance() -> bool:
return True
flag, flag2 = bool_instance(), bool_instance()
x = 1 if flag else 2 if flag2 else 3
reveal_type(x) # revealed: Literal[1, 2, 3]
```
## None
```py
def _(flag: bool):
x = 1 if flag else None
reveal_type(x) # revealed: Literal[1] | None
def bool_instance() -> bool:
return True
flag = bool_instance()
x = 1 if flag else None
reveal_type(x) # revealed: Literal[1] | None
```

View File

@@ -3,147 +3,128 @@
## Simple if
```py
def _(flag: bool):
y = 1
y = 2
def bool_instance() -> bool:
return True
if flag:
y = 3
flag = bool_instance()
y = 1
y = 2
reveal_type(y) # revealed: Literal[2, 3]
if flag:
y = 3
reveal_type(y) # revealed: Literal[2, 3]
```
## Simple if-elif-else
```py
def _(flag: bool, flag2: bool):
y = 1
y = 2
def bool_instance() -> bool:
return True
if flag:
y = 3
elif flag2:
y = 4
else:
r = y
y = 5
s = y
x = y
flag, flag2 = bool_instance(), bool_instance()
y = 1
y = 2
if flag:
y = 3
elif flag2:
y = 4
else:
r = y
y = 5
s = y
x = y
reveal_type(x) # revealed: Literal[3, 4, 5]
reveal_type(x) # revealed: Literal[3, 4, 5]
# revealed: Literal[2]
# error: [possibly-unresolved-reference]
reveal_type(r)
# revealed: Literal[2]
# error: [possibly-unresolved-reference]
reveal_type(r)
# revealed: Literal[5]
# error: [possibly-unresolved-reference]
reveal_type(s)
# revealed: Literal[5]
# error: [possibly-unresolved-reference]
reveal_type(s)
```
## Single symbol across if-elif-else
```py
def _(flag: bool, flag2: bool):
if flag:
y = 1
elif flag2:
y = 2
else:
y = 3
def bool_instance() -> bool:
return True
reveal_type(y) # revealed: Literal[1, 2, 3]
flag, flag2 = bool_instance(), bool_instance()
if flag:
y = 1
elif flag2:
y = 2
else:
y = 3
reveal_type(y) # revealed: Literal[1, 2, 3]
```
## if-elif-else without else assignment
```py
def _(flag: bool, flag2: bool):
y = 0
def bool_instance() -> bool:
return True
if flag:
y = 1
elif flag2:
y = 2
else:
pass
reveal_type(y) # revealed: Literal[0, 1, 2]
flag, flag2 = bool_instance(), bool_instance()
y = 0
if flag:
y = 1
elif flag2:
y = 2
else:
pass
reveal_type(y) # revealed: Literal[0, 1, 2]
```
## if-elif-else with intervening assignment
```py
def _(flag: bool, flag2: bool):
y = 0
def bool_instance() -> bool:
return True
if flag:
y = 1
z = 3
elif flag2:
y = 2
else:
pass
reveal_type(y) # revealed: Literal[0, 1, 2]
flag, flag2 = bool_instance(), bool_instance()
y = 0
if flag:
y = 1
z = 3
elif flag2:
y = 2
else:
pass
reveal_type(y) # revealed: Literal[0, 1, 2]
```
## Nested if statement
```py
def _(flag: bool, flag2: bool):
y = 0
def bool_instance() -> bool:
return True
if flag:
if flag2:
y = 1
reveal_type(y) # revealed: Literal[0, 1]
flag, flag2 = bool_instance(), bool_instance()
y = 0
if flag:
if flag2:
y = 1
reveal_type(y) # revealed: Literal[0, 1]
```
## if-elif without else
```py
def _(flag: bool, flag2: bool):
y = 1
y = 2
def bool_instance() -> bool:
return True
if flag:
y = 3
elif flag2:
y = 4
flag, flag2 = bool_instance(), bool_instance()
y = 1
y = 2
if flag:
y = 3
elif flag2:
y = 4
reveal_type(y) # revealed: Literal[2, 3, 4]
```
## if-elif with assignment expressions in tests
```py
def check(x: int) -> bool:
return bool(x)
if check(x := 1):
x = 2
elif check(x := 3):
x = 4
reveal_type(x) # revealed: Literal[2, 3, 4]
```
## constraints apply to later test expressions
```py
def check(x) -> bool:
return bool(x)
def _(flag: bool):
x = 1 if flag else None
y = 0
if x is None:
pass
elif check(y := x):
pass
reveal_type(y) # revealed: Literal[0, 1]
reveal_type(y) # revealed: Literal[2, 3, 4]
```

View File

@@ -3,43 +3,39 @@
## With wildcard
```py
def _(target: int):
match target:
case 1:
y = 2
case _:
y = 3
match 0:
case 1:
y = 2
case _:
y = 3
reveal_type(y) # revealed: Literal[2, 3]
reveal_type(y) # revealed: Literal[2, 3]
```
## Without wildcard
```py
def _(target: int):
match target:
case 1:
y = 2
case 2:
y = 3
match 0:
case 1:
y = 2
case 2:
y = 3
# revealed: Literal[2, 3]
# error: [possibly-unresolved-reference]
reveal_type(y)
# revealed: Literal[2, 3]
# error: [possibly-unresolved-reference]
reveal_type(y)
```
## Basic match
```py
def _(target: int):
y = 1
y = 2
y = 1
y = 2
match 0:
case 1:
y = 3
case 2:
y = 4
match target:
case 1:
y = 3
case 2:
y = 4
reveal_type(y) # revealed: Literal[2, 3, 4]
reveal_type(y) # revealed: Literal[2, 3, 4]
```

View File

@@ -10,66 +10,42 @@ x: str # error: [invalid-declaration] "Cannot declare type `str` for inferred t
## Incompatible declarations
```py
def _(flag: bool):
if flag:
x: str
else:
x: int
def bool_instance() -> bool:
return True
x = 1 # error: [conflicting-declarations] "Conflicting declared types for `x`: str, int"
flag = bool_instance()
if flag:
x: str
else:
x: int
x = 1 # error: [conflicting-declarations] "Conflicting declared types for `x`: str, int"
```
## Incompatible declarations for 2 (out of 3) types
## Partial declarations
```py
def _(flag1: bool, flag2: bool):
if flag1:
x: str
elif flag2:
x: int
def bool_instance() -> bool:
return True
# Here, the declared type for `x` is `int | str | Unknown`.
x = 1 # error: [conflicting-declarations] "Conflicting declared types for `x`: str, int"
flag = bool_instance()
if flag:
x: int
x = 1 # error: [conflicting-declarations] "Conflicting declared types for `x`: Unknown, int"
```
## Incompatible declarations with bad assignment
```py
def _(flag: bool):
if flag:
x: str
else:
x: int
def bool_instance() -> bool:
return True
# error: [conflicting-declarations]
# error: [invalid-assignment]
x = b"foo"
```
## No errors
Currently, we avoid raising the `conflicting-declarations` diagnostic for the following cases:
### Partial declarations
```py
def _(flag: bool):
if flag:
x: int
x = 1
```
### Partial declarations in try-except
Refer to <https://github.com/astral-sh/ruff/issues/13966>
```py
def _():
try:
x: int = 1
except:
x = 2
x = 3
flag = bool_instance()
if flag:
x: str
else:
x: int
# error: [conflicting-declarations]
# error: [invalid-assignment]
x = b"foo"
```

View File

@@ -1,199 +0,0 @@
# Descriptor protocol
[Descriptors] let objects customize attribute lookup, storage, and deletion.
A descriptor is an attribute value that has one of the methods in the descriptor protocol. Those
methods are `__get__()`, `__set__()`, and `__delete__()`. If any of those methods are defined for an
attribute, it is said to be a descriptor.
## Basic example
An introductory example, modeled after a [simple example] in the primer on descriptors, involving a
descriptor that returns a constant value:
```py
from typing import Literal
class Ten:
def __get__(self, instance: object, owner: type | None = None) -> Literal[10]:
return 10
def __set__(self, instance: object, value: Literal[10]) -> None:
pass
class C:
ten = Ten()
c = C()
# TODO: this should be `Literal[10]`
reveal_type(c.ten) # revealed: Unknown | Ten
# TODO: This should `Literal[10]`
reveal_type(C.ten) # revealed: Unknown | Ten
# These are fine:
c.ten = 10
C.ten = 10
# TODO: Both of these should be errors
c.ten = 11
C.ten = 11
```
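The examples in this file only exercise `__get__` and `__set__`; for completeness, here is a
minimal, illustrative sketch (not taken from this diff) of a descriptor that also implements the
third protocol method, `__delete__`:

```py
class Managed:
    def __get__(self, instance, owner=None) -> int:
        # Fall back to 0 when the value was never set or has been deleted.
        return 0 if instance is None else getattr(instance, "_managed", 0)

    def __set__(self, instance, value: int) -> None:
        instance._managed = value

    def __delete__(self, instance) -> None:
        # `del d.attr` dispatches here instead of removing the class attribute.
        if hasattr(instance, "_managed"):
            del instance._managed

class D:
    attr = Managed()

d = D()
d.attr = 5
print(d.attr)  # 5
del d.attr     # calls Managed.__delete__
print(d.attr)  # 0
```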
## Different types for `__get__` and `__set__`
The return type of `__get__` and the value type of `__set__` can be different:
```py
class FlexibleInt:
def __init__(self):
self._value: int | None = None
def __get__(self, instance: object, owner: type | None = None) -> int | None:
return self._value
def __set__(self, instance: object, value: int | str) -> None:
self._value = int(value)
class C:
flexible_int = FlexibleInt()
c = C()
# TODO: should be `int | None`
reveal_type(c.flexible_int) # revealed: Unknown | FlexibleInt
c.flexible_int = 42 # okay
c.flexible_int = "42" # also okay!
# TODO: should be `int | None`
reveal_type(c.flexible_int) # revealed: Unknown | FlexibleInt
# TODO: should be an error
c.flexible_int = None # not okay
# TODO: should be `int | None`
reveal_type(c.flexible_int) # revealed: Unknown | FlexibleInt
```
## Built-in `property` descriptor
The built-in `property` decorator creates a descriptor. The types for attribute reads and writes
are determined by the return type of the getter (the `name` method below) and the parameter type
of the setter, respectively.
```py
class C:
_name: str | None = None
@property
def name(self) -> str:
return self._name or "Unset"
# TODO: No diagnostic should be emitted here
# error: [unresolved-attribute] "Type `Literal[name]` has no attribute `setter`"
@name.setter
def name(self, value: str | None) -> None:
self._value = value
c = C()
reveal_type(c._name) # revealed: str | None
# Should be `str`
reveal_type(c.name) # revealed: @Todo(bound method)
# Should be `builtins.property`
reveal_type(C.name) # revealed: Literal[name]
# This is fine:
c.name = "new"
c.name = None
# TODO: this should be an error
c.name = 42
```
## Built-in `classmethod` descriptor
Similarly to `property`, the `classmethod` decorator creates an implicit descriptor that binds the
first argument to the class instead of the instance.
```py
class C:
def __init__(self, value: str) -> None:
self._name: str = value
@classmethod
def factory(cls, value: str) -> "C":
return cls(value)
@classmethod
def get_name(cls) -> str:
return cls.__name__
c1 = C.factory("test") # okay
# TODO: should be `C`
reveal_type(c1) # revealed: @Todo(return type)
# TODO: should be `str`
reveal_type(C.get_name()) # revealed: @Todo(return type)
# TODO: should be `str`
reveal_type(C("42").get_name()) # revealed: @Todo(bound method)
```
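The class-binding behaviour described above can be approximated with a hand-written descriptor.
This is only a simplified sketch of what the builtin roughly does (following the CPython descriptor
how-to), not part of the test suite:

```py
from functools import partial

class my_classmethod:
    """Simplified stand-in for `builtins.classmethod`."""

    def __init__(self, func):
        self.func = func

    def __get__(self, instance, owner=None):
        # Bind the wrapped function to the class, never to the instance.
        cls = owner if owner is not None else type(instance)
        return partial(self.func, cls)

class D:
    @my_classmethod
    def get_name(cls) -> str:
        return cls.__name__

print(D.get_name())    # "D": usable on the class without an instance
print(D().get_name())  # "D": still receives the class, not the instance
```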
## Descriptors only work when used as class variables
From the descriptor guide:
> Descriptors only work when used as class variables. When put in instances, they have no effect.
```py
from typing import Literal
class Ten:
def __get__(self, instance: object, owner: type | None = None) -> Literal[10]:
return 10
class C:
def __init__(self):
self.ten = Ten()
reveal_type(C().ten) # revealed: Unknown | Ten
```
## Descriptors distinguishing between class and instance access
Overloads can be used to distinguish between when a descriptor is accessed on a class object and
when it is accessed on an instance. A real-world example of this is the `__get__` method on
`types.FunctionType`.
```py
from typing_extensions import Literal, LiteralString, overload
class Descriptor:
@overload
def __get__(self, instance: None, owner: type, /) -> Literal["called on class object"]: ...
@overload
def __get__(self, instance: object, owner: type | None = None, /) -> Literal["called on instance"]: ...
def __get__(self, instance, owner=None, /) -> LiteralString:
if instance:
return "called on instance"
else:
return "called on class object"
class C:
d = Descriptor()
# TODO: should be `Literal["called on class object"]
reveal_type(C.d) # revealed: Unknown | Descriptor
# TODO: should be `Literal["called on instance"]
reveal_type(C().d) # revealed: Unknown | Descriptor
```
[descriptors]: https://docs.python.org/3/howto/descriptor.html
[simple example]: https://docs.python.org/3/howto/descriptor.html#simple-example-a-descriptor-that-returns-a-constant

View File

@@ -1,21 +0,0 @@
# Unpacking
<!-- snapshot-diagnostics -->
## Right hand side not iterable
```py
a, b = 1 # error: [not-iterable]
```
## Too many values to unpack
```py
a, b = (1, 2, 3) # error: [invalid-assignment]
```
## Too few values to unpack
```py
a, b = (1,) # error: [invalid-assignment]
```

View File

@@ -1,87 +0,0 @@
# Unresolved import diagnostics
<!-- snapshot-diagnostics -->
## Using `from` with an unresolvable module
This example demonstrates the diagnostic when a `from` style import is used with a module that could
not be found:
```py
from does_not_exist import add # error: [unresolved-import]
stat = add(10, 15)
```
## Using `from` with too many leading dots
This example demonstrates the diagnostic when a `from` style import is used with a presumptively
valid path, but where there are too many leading dots.
`package/__init__.py`:
```py
```
`package/foo.py`:
```py
def add(x, y):
return x + y
```
`package/subpackage/subsubpackage/__init__.py`:
```py
from ....foo import add # error: [unresolved-import]
stat = add(10, 15)
```
## Using `from` with an unknown current module
This is another case handled separately in Red Knot, where a `.` provokes relative module name
resolution, but where the module name is not resolvable.
```py
from .does_not_exist import add # error: [unresolved-import]
stat = add(10, 15)
```
## Using `from` with an unknown nested module
Like the previous test, but with sub-modules to ensure the span is correct.
```py
from .does_not_exist.foo.bar import add # error: [unresolved-import]
stat = add(10, 15)
```
## Using `from` with a resolvable module but unresolvable item
This ensures that diagnostics for an unresolvable item inside a resolvable import highlight the item
and not the entire `from ... import ...` statement.
`a.py`:
```py
does_exist1 = 1
does_exist2 = 2
```
```py
from a import does_exist1, does_not_exist, does_exist2 # error: [unresolved-import]
```
## An unresolvable import that does not use `from`
This ensures that an unresolvable `import ...` statement highlights just the module name and not the
entire statement.
```py
import does_not_exist # error: [unresolved-import]
x = does_not_exist.foo
```

View File

@@ -1,138 +0,0 @@
# `assert_type`
## Basic
```py
from typing_extensions import assert_type
def _(x: int):
assert_type(x, int) # fine
assert_type(x, str) # error: [type-assertion-failure]
```
## Narrowing
The asserted type is checked against the inferred type, not the declared type.
```toml
[environment]
python-version = "3.10"
```
```py
from typing_extensions import assert_type
def _(x: int | str):
if isinstance(x, int):
reveal_type(x) # revealed: int
assert_type(x, int) # fine
```
## Equivalence
The actual type must match the asserted type precisely.
```py
from typing import Any, Type, Union
from typing_extensions import assert_type
# Subtype does not count
def _(x: bool):
assert_type(x, int) # error: [type-assertion-failure]
def _(a: type[int], b: type[Any]):
assert_type(a, type[Any]) # error: [type-assertion-failure]
assert_type(b, type[int]) # error: [type-assertion-failure]
# The expression constructing the type is not taken into account
def _(a: type[int]):
assert_type(a, Type[int]) # fine
```
## Gradual types
```py
from typing import Any
from typing_extensions import Literal, assert_type
from knot_extensions import Unknown
# Any and Unknown are considered equivalent
def _(a: Unknown, b: Any):
reveal_type(a) # revealed: Unknown
assert_type(a, Any) # fine
reveal_type(b) # revealed: Any
assert_type(b, Unknown) # fine
def _(a: type[Unknown], b: type[Any]):
reveal_type(a) # revealed: type[Unknown]
assert_type(a, type[Any]) # fine
reveal_type(b) # revealed: type[Any]
assert_type(b, type[Unknown]) # fine
```
## Tuples
Tuple types with the same elements are the same.
```py
from typing_extensions import Any, assert_type
from knot_extensions import Unknown
def _(a: tuple[int, str, bytes]):
assert_type(a, tuple[int, str, bytes]) # fine
assert_type(a, tuple[int, str]) # error: [type-assertion-failure]
assert_type(a, tuple[int, str, bytes, None]) # error: [type-assertion-failure]
assert_type(a, tuple[int, bytes, str]) # error: [type-assertion-failure]
def _(a: tuple[Any, ...], b: tuple[Unknown, ...]):
assert_type(a, tuple[Any, ...]) # fine
assert_type(a, tuple[Unknown, ...]) # fine
assert_type(b, tuple[Unknown, ...]) # fine
assert_type(b, tuple[Any, ...]) # fine
```
## Unions
Unions with the same elements are the same, regardless of order.
```toml
[environment]
python-version = "3.10"
```
```py
from typing_extensions import assert_type
def _(a: str | int):
assert_type(a, str | int)
assert_type(a, int | str)
```
## Intersections
Intersections are the same when their positive and negative parts are respectively the same,
regardless of order.
```py
from typing_extensions import assert_type
from knot_extensions import Intersection, Not
class A: ...
class B: ...
class C: ...
class D: ...
def _(a: A):
if isinstance(a, B) and not isinstance(a, C) and not isinstance(a, D):
reveal_type(a) # revealed: A & B & ~C & ~D
assert_type(a, Intersection[A, B, Not[C], Not[D]])
assert_type(a, Intersection[B, A, Not[D], Not[C]])
```

View File

@@ -1,27 +0,0 @@
# `cast`
`cast()` takes two arguments, one type and one value, and returns a value of the given type.
The (inferred) type of the value and the given type do not need to have any correlation.
```py
from typing import Literal, cast
reveal_type(True) # revealed: Literal[True]
reveal_type(cast(str, True)) # revealed: str
reveal_type(cast("str", True)) # revealed: str
reveal_type(cast(int | str, 1)) # revealed: int | str
# error: [invalid-type-form]
reveal_type(cast(Literal, True)) # revealed: Unknown
# TODO: These should be errors
cast(1)
cast(str)
cast(str, b"ar", "foo")
# TODO: Either support keyword arguments properly,
# or give a comprehensible error message saying they're unsupported
cast(val="foo", typ=int) # error: [unresolved-reference] "Name `foo` used when not defined"
```

View File

@@ -49,131 +49,12 @@ def foo(
try:
help()
except x as e:
reveal_type(e) # revealed: AttributeError
# TODO: should be `AttributeError`
reveal_type(e) # revealed: @Todo(exception type)
except y as f:
reveal_type(f) # revealed: OSError | RuntimeError
# TODO: should be `OSError | RuntimeError`
reveal_type(f) # revealed: @Todo(exception type)
except z as g:
# TODO: should be `BaseException`
reveal_type(g) # revealed: @Todo(full tuple[...] support)
```
## Invalid exception handlers
```py
try:
pass
# error: [invalid-exception-caught] "Cannot catch object of type `Literal[3]` in an exception handler (must be a `BaseException` subclass or a tuple of `BaseException` subclasses)"
except 3 as e:
reveal_type(e) # revealed: Unknown
try:
pass
# error: [invalid-exception-caught] "Cannot catch object of type `Literal["foo"]` in an exception handler (must be a `BaseException` subclass or a tuple of `BaseException` subclasses)"
# error: [invalid-exception-caught] "Cannot catch object of type `Literal[b"bar"]` in an exception handler (must be a `BaseException` subclass or a tuple of `BaseException` subclasses)"
except (ValueError, OSError, "foo", b"bar") as e:
reveal_type(e) # revealed: ValueError | OSError | Unknown
def foo(
x: type[str],
y: tuple[type[OSError], type[RuntimeError], int],
z: tuple[type[str], ...],
):
try:
help()
# error: [invalid-exception-caught]
except x as e:
reveal_type(e) # revealed: Unknown
# error: [invalid-exception-caught]
except y as f:
reveal_type(f) # revealed: OSError | RuntimeError | Unknown
except z as g:
# TODO: should emit a diagnostic here:
reveal_type(g) # revealed: @Todo(full tuple[...] support)
```
## Object raised is not an exception
```py
try:
raise AttributeError() # fine
except:
...
try:
raise FloatingPointError # fine
except:
...
try:
raise 1 # error: [invalid-raise]
except:
...
try:
raise int # error: [invalid-raise]
except:
...
def _(e: Exception | type[Exception]):
raise e # fine
def _(e: Exception | type[Exception] | None):
raise e # error: [invalid-raise]
```
## Exception cause is not an exception
```py
def _():
try:
raise EOFError() from GeneratorExit # fine
except:
...
def _():
try:
raise StopIteration from MemoryError() # fine
except:
...
def _():
try:
raise BufferError() from None # fine
except:
...
def _():
try:
raise ZeroDivisionError from False # error: [invalid-raise]
except:
...
def _():
try:
raise SystemExit from bool() # error: [invalid-raise]
except:
...
def _():
try:
raise
except KeyboardInterrupt as e: # fine
reveal_type(e) # revealed: KeyboardInterrupt
raise LookupError from e # fine
def _():
try:
raise
except int as e: # error: [invalid-exception-caught]
reveal_type(e) # revealed: Unknown
raise KeyError from e
def _(e: Exception | type[Exception]):
raise ModuleNotFoundError from e # fine
def _(e: Exception | type[Exception] | None):
raise IndexError from e # fine
def _(e: int | None):
raise IndexError from e # error: [invalid-raise]
reveal_type(g) # revealed: @Todo(exception type)
```

Some files were not shown because too many files have changed in this diff.