Compare commits


1 Commit

Author: Zanie
SHA1: d65addffa0
Message: Use https instead of ssh for schemastore authentication
Date: 2023-11-09 08:42:10 -06:00
5,731 changed files with 180,220 additions and 508,650 deletions

View File

@@ -2,9 +2,36 @@
dev = "run --package ruff_dev --bin ruff_dev"
benchmark = "bench -p ruff_benchmark --bench linter --bench formatter --"
# statically link the C runtime so the executable does not depend on
# that shared/dynamic library.
#
# See: https://github.com/astral-sh/ruff/issues/11503
[target.'cfg(all(target_env="msvc", target_os = "windows"))']
rustflags = ["-C", "target-feature=+crt-static"]
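# An assumed local check, not part of this config: on a Windows build,
# `dumpbin /dependents target\release\ruff.exe` should no longer list
# vcruntime140.dll once +crt-static takes effect.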
[target.'cfg(all())']
rustflags = [
# CLIPPY LINT SETTINGS
# This is a workaround to configure lints for the entire workspace, pending the ability to configure this via TOML.
# See: `https://github.com/rust-lang/cargo/issues/5034`
# `https://github.com/EmbarkStudios/rust-ecosystem/issues/22#issuecomment-947011395`
"-Dunsafe_code",
"-Wclippy::pedantic",
# Allowed pedantic lints
"-Wclippy::char_lit_as_u8",
"-Aclippy::collapsible_else_if",
"-Aclippy::collapsible_if",
"-Aclippy::implicit_hasher",
"-Aclippy::match_same_arms",
"-Aclippy::missing_errors_doc",
"-Aclippy::missing_panics_doc",
"-Aclippy::module_name_repetitions",
"-Aclippy::must_use_candidate",
"-Aclippy::similar_names",
"-Aclippy::too_many_lines",
# Disallowed restriction lints
"-Wclippy::print_stdout",
"-Wclippy::print_stderr",
"-Wclippy::dbg_macro",
"-Wclippy::empty_drop",
"-Wclippy::empty_structs_with_brackets",
"-Wclippy::exit",
"-Wclippy::get_unwrap",
"-Wclippy::rc_buffer",
"-Wclippy::rc_mutex",
"-Wclippy::rest_pat_in_fully_bound_structs",
"-Wunreachable_pub"
]
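# A possible alternative, sketched here on the assumption that the workspace
# targets Cargo 1.74 or later: roughly the same settings can live in the
# manifest as a lints table instead of rustflags, e.g.
#   [workspace.lints.rust]
#   unsafe_code = "deny"
#   [workspace.lints.clippy]
#   pedantic = "warn"
#   too_many_lines = "allow"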

View File

@@ -1,8 +0,0 @@
[profile.ci]
# Print out output for failing tests as soon as they fail, and also at the end
# of the run (for easy scrollability).
failure-output = "immediate-final"
# Do not cancel the test run on the first failure.
fail-fast = false
status-level = "skip"
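# Assuming cargo-nextest is installed, this profile can be exercised locally
# with `cargo nextest run --profile ci`.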

View File

@@ -20,7 +20,7 @@
"extensions": [
"ms-python.python",
"rust-lang.rust-analyzer",
"fill-labs.dependi",
"serayuzgur.crates",
"tamasfe.even-better-toml",
"Swellaby.vscode-rust-test-adapter",
"charliermarsh.ruff"

.gitattributes (11 changes)
View File

@@ -2,17 +2,6 @@
crates/ruff_linter/resources/test/fixtures/isort/line_ending_crlf.py text eol=crlf
crates/ruff_linter/resources/test/fixtures/pycodestyle/W605_1.py text eol=crlf
crates/ruff_linter/resources/test/fixtures/pycodestyle/W391_2.py text eol=crlf
crates/ruff_linter/resources/test/fixtures/pycodestyle/W391_3.py text eol=crlf
crates/ruff_python_formatter/resources/test/fixtures/ruff/docstring_code_examples_crlf.py text eol=crlf
crates/ruff_python_formatter/tests/snapshots/format@docstring_code_examples_crlf.py.snap text eol=crlf
crates/ruff_python_parser/resources/invalid/re_lexing/line_continuation_windows_eol.py text eol=crlf
crates/ruff_python_parser/resources/invalid/re_lex_logical_token_windows_eol.py text eol=crlf
crates/ruff_python_parser/resources/invalid/re_lex_logical_token_mac_eol.py text eol=cr
crates/ruff_python_parser/resources/inline linguist-generated=true
ruff.schema.json linguist-generated=true text=auto eol=lf
*.md.snap linguist-language=Markdown

.github/CODEOWNERS (16 changes)
View File

@@ -5,17 +5,5 @@
# - The '*' pattern denotes global owners.
# - Order is important. The last matching pattern has the most precedence.
/crates/ruff_notebook/ @dhruvmanila
/crates/ruff_formatter/ @MichaReiser
/crates/ruff_python_formatter/ @MichaReiser
/crates/ruff_python_parser/ @MichaReiser @dhruvmanila
# flake8-pyi
/crates/ruff_linter/src/rules/flake8_pyi/ @AlexWaygood
# Script for fuzzing the parser
/scripts/fuzz-parser/ @AlexWaygood
# red-knot
/crates/red_knot* @carljm @MichaReiser @AlexWaygood
/crates/ruff_db/ @carljm @MichaReiser @AlexWaygood
# Jupyter
/crates/ruff_linter/src/jupyter/ @dhruvmanila

View File

@@ -3,8 +3,6 @@ Thank you for taking the time to report an issue! We're glad to have you involve
If you're filing a bug report, please consider including the following information:
* List of keywords you searched for before creating this issue. Write them down here so that others can find this issue more easily and help provide feedback.
e.g. "RUF001", "unused variable", "Jupyter notebook"
* A minimal code snippet that reproduces the bug.
* The command you invoked (e.g., `ruff /path/to/file.py --fix`), ideally including the `--isolated` flag.
* The current Ruff settings (any relevant sections from your `pyproject.toml`).

.github/dependabot.yml (new file, 13 additions)
View File

@@ -0,0 +1,13 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
labels: ["internal"]
- package-ecosystem: "cargo"
directory: "/"
schedule:
interval: "weekly"
labels: ["internal"]

.github/renovate.json5 (deleted, 111 lines)
View File

@@ -1,111 +0,0 @@
{
$schema: "https://docs.renovatebot.com/renovate-schema.json",
dependencyDashboard: true,
suppressNotifications: ["prEditedNotification"],
extends: ["config:recommended"],
labels: ["internal"],
schedule: ["before 4am on Monday"],
semanticCommits: "disabled",
separateMajorMinor: false,
prHourlyLimit: 10,
enabledManagers: ["github-actions", "pre-commit", "cargo", "pep621", "pip_requirements", "npm"],
cargo: {
// See https://docs.renovatebot.com/configuration-options/#rangestrategy
rangeStrategy: "update-lockfile",
},
pep621: {
// The default for this package manager is to only search for `pyproject.toml` files
// found at the repository root: https://docs.renovatebot.com/modules/manager/pep621/#file-matching
fileMatch: ["^(python|scripts)/.*pyproject\\.toml$"],
},
pip_requirements: {
// The default for this package manager is to run on all requirements.txt files:
// https://docs.renovatebot.com/modules/manager/pip_requirements/#file-matching
// `fileMatch` doesn't work for excluding files; to exclude `requirements.txt` files
// outside the `doc/` directory, we instead have to use `ignorePaths`. Unlike `fileMatch`,
// which takes a regex string, `ignorePaths` takes a glob string, so we have to use
// a "negative glob pattern".
// See:
// - https://docs.renovatebot.com/modules/manager/#ignoring-files-that-match-the-default-filematch
// - https://docs.renovatebot.com/configuration-options/#ignorepaths
// - https://docs.renovatebot.com/string-pattern-matching/#negative-matching
ignorePaths: ["!docs/requirements*.txt"]
},
npm: {
// The default for this package manager is to only search for `package.json` files
// found at the repository root: https://docs.renovatebot.com/modules/manager/npm/#file-matching
fileMatch: ["^playground/.*package\\.json$"],
},
"pre-commit": {
enabled: true,
},
packageRules: [
{
// Group upload/download artifact updates, the versions are dependent
groupName: "Artifact GitHub Actions dependencies",
matchManagers: ["github-actions"],
matchDatasources: ["gitea-tags", "github-tags"],
matchPackagePatterns: ["actions/.*-artifact"],
description: "Weekly update of artifact-related GitHub Actions dependencies",
},
{
// This package rule disables updates for GitHub runners:
// we'd only pin them to a specific version
// if there was a deliberate reason to do so
groupName: "GitHub runners",
matchManagers: ["github-actions"],
matchDatasources: ["github-runners"],
description: "Disable PRs updating GitHub runners (e.g. 'runs-on: macos-14')",
enabled: false,
},
{
// Disable updates of `zip-rs`; intentionally pinned for now due to ownership change
// See: https://github.com/astral-sh/uv/issues/3642
matchPackagePatterns: ["zip"],
matchManagers: ["cargo"],
enabled: false,
},
{
// `mkdocs-material` requires a manual update to keep the version in sync
// with `mkdocs-material-insider`.
// See: https://squidfunk.github.io/mkdocs-material/insiders/upgrade/
matchManagers: ["pip_requirements"],
matchPackagePatterns: ["mkdocs-material"],
enabled: false,
},
{
groupName: "pre-commit dependencies",
matchManagers: ["pre-commit"],
description: "Weekly update of pre-commit dependencies",
},
{
groupName: "NPM Development dependencies",
matchManagers: ["npm"],
matchDepTypes: ["devDependencies"],
description: "Weekly update of NPM development dependencies",
},
{
groupName: "Monaco",
matchManagers: ["npm"],
matchPackagePatterns: ["monaco"],
description: "Weekly update of the Monaco editor",
},
{
groupName: "strum",
matchManagers: ["cargo"],
matchPackagePatterns: ["strum"],
description: "Weekly update of strum dependencies",
},
{
groupName: "ESLint",
matchManagers: ["npm"],
matchPackageNames: ["eslint"],
allowedVersions: "<9",
description: "Constraint ESLint to version 8 until TypeScript-eslint supports ESLint 9", // https://github.com/typescript-eslint/typescript-eslint/issues/8211
},
],
vulnerabilityAlerts: {
commitMessageSuffix: "",
labels: ["internal", "security"],
},
}

View File

@@ -1,470 +0,0 @@
# Build ruff on all platforms.
#
# Generates both wheels (for PyPI) and archived binaries (for GitHub releases).
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a local
# artifacts job within `cargo-dist`.
name: "Build binaries"
on:
workflow_call:
inputs:
plan:
required: true
type: string
pull_request:
paths:
# When we change pyproject.toml, we want to ensure that the maturin builds still work.
- pyproject.toml
# And when we change this workflow itself...
- .github/workflows/build-binaries.yml
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
PACKAGE_NAME: ruff
MODULE_NAME: ruff
PYTHON_VERSION: "3.11"
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
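# (CARGO_INCREMENTAL=0 is the conventional CI setting: incremental artifacts
# are rarely reused on fresh runners and only bloat the cache. This rationale
# is an assumption; the workflow itself does not state one.)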
jobs:
sdist:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build sdist"
uses: PyO3/maturin-action@v1
with:
command: sdist
args: --out dist
- name: "Test sdist"
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.tar.gz --force-reinstall
${{ env.MODULE_NAME }} --help
python -m ${{ env.MODULE_NAME }} --help
- name: "Upload sdist"
uses: actions/upload-artifact@v4
with:
name: wheels-sdist
path: dist
macos-x86_64:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: macos-12
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels - x86_64"
uses: PyO3/maturin-action@v1
with:
target: x86_64
args: --release --locked --out dist
- name: "Upload wheels"
uses: actions/upload-artifact@v4
with:
name: wheels-macos-x86_64
path: dist
- name: "Archive binary"
run: |
TARGET=x86_64-apple-darwin
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
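# Assumed consumer-side usage: the emitted checksum file can be verified with
# `shasum -a 256 -c ruff-x86_64-apple-darwin.tar.gz.sha256`.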
- name: "Upload binary"
uses: actions/upload-artifact@v4
with:
name: artifacts-macos-x86_64
path: |
*.tar.gz
*.sha256
macos-aarch64:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: macos-14
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: arm64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels - aarch64"
uses: PyO3/maturin-action@v1
with:
target: aarch64
args: --release --locked --out dist
- name: "Test wheel - aarch64"
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
ruff --help
python -m ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
with:
name: wheels-aarch64-apple-darwin
path: dist
- name: "Archive binary"
run: |
TARGET=aarch64-apple-darwin
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
with:
name: artifacts-aarch64-apple-darwin
path: |
*.tar.gz
*.sha256
windows:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: windows-latest
strategy:
matrix:
platform:
- target: x86_64-pc-windows-msvc
arch: x64
- target: i686-pc-windows-msvc
arch: x86
- target: aarch64-pc-windows-msvc
arch: x64
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: ${{ matrix.platform.arch }}
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.platform.target }}
args: --release --locked --out dist
env:
# aarch64 build fails, see https://github.com/PyO3/maturin/issues/2110
XWIN_VERSION: 16
- name: "Test wheel"
if: ${{ !startsWith(matrix.platform.target, 'aarch64') }}
shell: bash
run: |
python -m pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
${{ env.MODULE_NAME }} --help
python -m ${{ env.MODULE_NAME }} --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
with:
name: wheels-${{ matrix.platform.target }}
path: dist
- name: "Archive binary"
shell: bash
run: |
ARCHIVE_FILE=ruff-${{ matrix.platform.target }}.zip
7z a $ARCHIVE_FILE ./target/${{ matrix.platform.target }}/release/ruff.exe
sha256sum $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
with:
name: artifacts-${{ matrix.platform.target }}
path: |
*.zip
*.sha256
linux:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
strategy:
matrix:
target:
- x86_64-unknown-linux-gnu
- i686-unknown-linux-gnu
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.target }}
manylinux: auto
args: --release --locked --out dist
- name: "Test wheel"
if: ${{ startsWith(matrix.target, 'x86_64') }}
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
${{ env.MODULE_NAME }} --help
python -m ${{ env.MODULE_NAME }} --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
with:
name: wheels-${{ matrix.target }}
path: dist
- name: "Archive binary"
shell: bash
run: |
set -euo pipefail
TARGET=${{ matrix.target }}
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
with:
name: artifacts-${{ matrix.target }}
path: |
*.tar.gz
*.sha256
linux-cross:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
strategy:
matrix:
platform:
- target: aarch64-unknown-linux-gnu
arch: aarch64
# see https://github.com/astral-sh/ruff/issues/3791
# and https://github.com/gnzlbg/jemallocator/issues/170#issuecomment-1503228963
maturin_docker_options: -e JEMALLOC_SYS_WITH_LG_PAGE=16
- target: armv7-unknown-linux-gnueabihf
arch: armv7
- target: s390x-unknown-linux-gnu
arch: s390x
- target: powerpc64le-unknown-linux-gnu
arch: ppc64le
# see https://github.com/astral-sh/ruff/issues/10073
maturin_docker_options: -e JEMALLOC_SYS_WITH_LG_PAGE=16
- target: powerpc64-unknown-linux-gnu
arch: ppc64
# see https://github.com/astral-sh/ruff/issues/10073
maturin_docker_options: -e JEMALLOC_SYS_WITH_LG_PAGE=16
- target: arm-unknown-linux-musleabihf
arch: arm
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.platform.target }}
manylinux: auto
docker-options: ${{ matrix.platform.maturin_docker_options }}
args: --release --locked --out dist
- uses: uraimo/run-on-arch-action@v2
if: matrix.platform.arch != 'ppc64'
name: Test wheel
with:
arch: ${{ matrix.platform.arch == 'arm' && 'armv6' || matrix.platform.arch }}
distro: ${{ matrix.platform.arch == 'arm' && 'bullseye' || 'ubuntu20.04' }}
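# (GitHub Actions expressions have no ternary operator; the `cond && a || b`
# pattern above emulates one.)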
githubToken: ${{ github.token }}
install: |
apt-get update
apt-get install -y --no-install-recommends python3 python3-pip
pip3 install -U pip
run: |
pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
with:
name: wheels-${{ matrix.platform.target }}
path: dist
- name: "Archive binary"
shell: bash
run: |
set -euo pipefail
TARGET=${{ matrix.platform.target }}
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
with:
name: artifacts-${{ matrix.platform.target }}
path: |
*.tar.gz
*.sha256
musllinux:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
strategy:
matrix:
target:
- x86_64-unknown-linux-musl
- i686-unknown-linux-musl
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.target }}
manylinux: musllinux_1_2
args: --release --locked --out dist
- name: "Test wheel"
if: matrix.target == 'x86_64-unknown-linux-musl'
uses: addnab/docker-run-action@v3
with:
image: alpine:latest
options: -v ${{ github.workspace }}:/io -w /io
run: |
apk add python3
python -m venv .venv
.venv/bin/pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
.venv/bin/${{ env.MODULE_NAME }} --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
with:
name: wheels-${{ matrix.target }}
path: dist
- name: "Archive binary"
shell: bash
run: |
set -euo pipefail
TARGET=${{ matrix.target }}
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
with:
name: artifacts-${{ matrix.target }}
path: |
*.tar.gz
*.sha256
musllinux-cross:
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-build') }}
runs-on: ubuntu-latest
strategy:
matrix:
platform:
- target: aarch64-unknown-linux-musl
arch: aarch64
maturin_docker_options: -e JEMALLOC_SYS_WITH_LG_PAGE=16
- target: armv7-unknown-linux-musleabihf
arch: armv7
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.platform.target }}
manylinux: musllinux_1_2
args: --release --locked --out dist
docker-options: ${{ matrix.platform.maturin_docker_options }}
- uses: uraimo/run-on-arch-action@v2
name: Test wheel
with:
arch: ${{ matrix.platform.arch }}
distro: alpine_latest
githubToken: ${{ github.token }}
install: |
apk add python3
run: |
python -m venv .venv
.venv/bin/pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
.venv/bin/${{ env.MODULE_NAME }} --help
- name: "Upload wheels"
uses: actions/upload-artifact@v4
with:
name: wheels-${{ matrix.platform.target }}
path: dist
- name: "Archive binary"
shell: bash
run: |
set -euo pipefail
TARGET=${{ matrix.platform.target }}
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
with:
name: artifacts-${{ matrix.platform.target }}
path: |
*.tar.gz
*.sha256

View File

@@ -1,68 +0,0 @@
# Build and publish a Docker image.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a local
# artifacts job within `cargo-dist`.
#
# TODO(charlie): Ideally, the publish step would happen as a publish job within `cargo-dist`, but
# sharing the built image as an artifact between jobs is challenging.
name: "[ruff] Build Docker image"
on:
workflow_call:
inputs:
plan:
required: true
type: string
pull_request:
paths:
- .github/workflows/build-docker.yml
jobs:
docker-publish:
name: Build Docker image (ghcr.io/astral-sh/ruff)
runs-on: ubuntu-latest
environment:
name: release
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: docker/setup-buildx-action@v3
- uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/astral-sh/ruff
- name: Check tag consistency
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
run: |
version=$(grep "version = " pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g')
if [ "${{ fromJson(inputs.plan).announcement_tag }}" != "${version}" ]; then
echo "The input tag does not match the version from pyproject.toml:" >&2
echo "${{ fromJson(inputs.plan).announcement_tag }}" >&2
echo "${version}" >&2
exit 1
else
echo "Releasing ${version}"
fi
- name: "Build and push Docker image"
uses: docker/build-push-action@v6
with:
context: .
platforms: linux/amd64,linux/arm64
# Reuse the builder
cache-from: type=gha
cache-to: type=gha,mode=max
push: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
tags: ghcr.io/astral-sh/ruff:latest,ghcr.io/astral-sh/ruff:${{ (inputs.plan != '' && fromJson(inputs.plan).announcement_tag) || 'dry-run' }}
labels: ${{ steps.meta.outputs.labels }}

View File

@@ -23,35 +23,17 @@ jobs:
name: "Determine changes"
runs-on: ubuntu-latest
outputs:
# Flag that is raised when any code that affects the parser is changed
parser: ${{ steps.changed.outputs.parser_any_changed }}
# Flag that is raised when any code that affects the linter is changed
linter: ${{ steps.changed.outputs.linter_any_changed }}
# Flag that is raised when any code that affects the formatter is changed
formatter: ${{ steps.changed.outputs.formatter_any_changed }}
# Flag that is raised when any code is changed
# This is a superset of the linter and formatter
code: ${{ steps.changed.outputs.code_any_changed }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: tj-actions/changed-files@v45
- uses: tj-actions/changed-files@v40
id: changed
with:
files_yaml: |
parser:
- Cargo.toml
- Cargo.lock
- crates/ruff_python_trivia/**
- crates/ruff_source_file/**
- crates/ruff_text_size/**
- crates/ruff_python_ast/**
- crates/ruff_python_parser/**
- scripts/fuzz-parser/**
- .github/workflows/ci.yaml
linter:
- Cargo.toml
- Cargo.lock
@@ -59,8 +41,8 @@ jobs:
- "!crates/ruff_python_formatter/**"
- "!crates/ruff_formatter/**"
- "!crates/ruff_dev/**"
- "!crates/ruff_shrinking/**"
- scripts/*
- python/**
- .github/workflows/ci.yaml
formatter:
@@ -76,19 +58,11 @@ jobs:
- crates/ruff_python_parser/**
- crates/ruff_dev/**
- scripts/*
- python/**
- .github/workflows/ci.yaml
code:
- "**/*"
- "!**/*.md"
- "!docs/**"
- "!assets/**"
cargo-fmt:
name: "cargo fmt"
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
@@ -98,9 +72,6 @@ jobs:
cargo-clippy:
name: "cargo clippy"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
@@ -109,26 +80,41 @@ jobs:
rustup target add wasm32-unknown-unknown
- uses: Swatinem/rust-cache@v2
- name: "Clippy"
run: cargo clippy --workspace --all-targets --all-features --locked -- -D warnings
run: cargo clippy --workspace --all-targets --all-features -- -D warnings
- name: "Clippy (wasm)"
run: cargo clippy -p ruff_wasm -p red_knot_wasm --target wasm32-unknown-unknown --all-features --locked -- -D warnings
run: cargo clippy -p ruff_wasm --target wasm32-unknown-unknown --all-features -- -D warnings
cargo-test-linux:
name: "cargo test (linux)"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
name: "cargo test (linux)"
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@v1
- name: "Install cargo nextest"
- name: "Install cargo insta"
uses: taiki-e/install-action@v2
with:
tool: cargo-nextest
tool: cargo-insta
- uses: Swatinem/rust-cache@v2
- name: "Run tests"
run: cargo insta test --all --all-features --unreferenced reject
# Check for broken links in the documentation.
- run: cargo doc --all --no-deps
env:
# Setting RUSTDOCFLAGS because `cargo doc --check` isn't yet implemented (https://github.com/rust-lang/cargo/issues/10025).
RUSTDOCFLAGS: "-D warnings"
- uses: actions/upload-artifact@v3
with:
name: ruff
path: target/debug/ruff
cargo-test-windows:
runs-on: windows-latest
name: "cargo test (windows)"
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup show
- name: "Install cargo insta"
uses: taiki-e/install-action@v2
with:
@@ -136,57 +122,12 @@ jobs:
- uses: Swatinem/rust-cache@v2
- name: "Run tests"
shell: bash
env:
NEXTEST_PROFILE: "ci"
run: cargo insta test --all-features --unreferenced reject --test-runner nextest
# Check for broken links in the documentation.
- run: cargo doc --all --no-deps
env:
RUSTDOCFLAGS: "-D warnings"
# Use --document-private-items so that all our doc comments are kept in
# sync, not just public items. Eventually we should do this for all
# crates; for now add crates here as they are warning-clean to prevent
# regression.
- run: cargo doc --no-deps -p red_knot_python_semantic -p red_knot -p red_knot_test -p ruff_db --document-private-items
env:
# Setting RUSTDOCFLAGS because `cargo doc --check` isn't yet implemented (https://github.com/rust-lang/cargo/issues/10025).
RUSTDOCFLAGS: "-D warnings"
- uses: actions/upload-artifact@v4
with:
name: ruff
path: target/debug/ruff
cargo-test-windows:
name: "cargo test (windows)"
runs-on: windows-latest
needs: determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup show
- name: "Install cargo nextest"
uses: taiki-e/install-action@v2
with:
tool: cargo-nextest
- uses: Swatinem/rust-cache@v2
- name: "Run tests"
shell: bash
env:
# Workaround for <https://github.com/nextest-rs/nextest/issues/1493>.
RUSTUP_WINDOWS_PATH_ADD_BIN: 1
run: |
cargo nextest run --all-features --profile ci
cargo test --all-features --doc
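# (nextest does not run doctests, hence the separate `cargo test --doc` pass)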
# We can't reject unreferenced snapshots on Windows because flake8_executable can't run on Windows
run: cargo insta test --all --all-features
cargo-test-wasm:
name: "cargo test (wasm)"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 10
name: "cargo test (wasm)"
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
@@ -198,69 +139,14 @@ jobs:
cache-dependency-path: playground/package-lock.json
- uses: jetli/wasm-pack-action@v0.4.0
- uses: Swatinem/rust-cache@v2
- name: "Test ruff_wasm"
- name: "Run wasm-pack"
run: |
cd crates/ruff_wasm
wasm-pack test --node
- name: "Test red_knot_wasm"
run: |
cd crates/red_knot_wasm
wasm-pack test --node
cargo-build-release:
name: "cargo build (release)"
runs-on: macos-latest
needs: determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@v1
- uses: Swatinem/rust-cache@v2
- name: "Build"
run: cargo build --release --locked
cargo-build-msrv:
name: "cargo build (msrv)"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 20
steps:
- uses: actions/checkout@v4
- uses: SebRollen/toml-action@v1.2.0
id: msrv
with:
file: "Cargo.toml"
field: "workspace.package.rust-version"
- name: "Install Rust toolchain"
run: rustup default ${{ steps.msrv.outputs.value }}
- name: "Install mold"
uses: rui314/setup-mold@v1
- name: "Install cargo nextest"
uses: taiki-e/install-action@v2
with:
tool: cargo-nextest
- name: "Install cargo insta"
uses: taiki-e/install-action@v2
with:
tool: cargo-insta
- uses: Swatinem/rust-cache@v2
- name: "Run tests"
shell: bash
env:
NEXTEST_PROFILE: "ci"
run: cargo +${{ steps.msrv.outputs.value }} insta test --all-features --unreferenced reject --test-runner nextest
cargo-fuzz:
name: "cargo fuzz"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 10
name: "cargo fuzz"
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
@@ -268,53 +154,15 @@ jobs:
- uses: Swatinem/rust-cache@v2
with:
workspaces: "fuzz -> target"
- name: "Install cargo-binstall"
uses: cargo-bins/cargo-binstall@main
with:
tool: cargo-fuzz@0.11.2
- name: "Install cargo-fuzz"
# Download the latest version from quickinstall rather than from the GitHub releases, because the GitHub releases only include MUSL targets.
run: cargo binstall cargo-fuzz --force --disable-strategies crate-meta-data --no-confirm
uses: taiki-e/install-action@v2
with:
tool: cargo-fuzz@0.11
- run: cargo fuzz build -s none
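# (`-s none` builds the fuzz targets without a sanitizer)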
fuzz-parser:
name: "Fuzz the parser"
runs-on: ubuntu-latest
needs:
- cargo-test-linux
- determine_changes
if: ${{ needs.determine_changes.outputs.parser == 'true' }}
timeout-minutes: 20
env:
FORCE_COLOR: 1
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Install uv
run: curl -LsSf https://astral.sh/uv/install.sh | sh
- name: Install Python requirements
run: uv pip install -r scripts/fuzz-parser/requirements.txt --system
- uses: actions/download-artifact@v4
name: Download Ruff binary to test
id: download-cached-binary
with:
name: ruff
path: ruff-to-test
- name: Fuzz
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ${{ steps.download-cached-binary.outputs.download-path }}/ruff
python scripts/fuzz-parser/fuzz.py 0-500 --test-executable ${{ steps.download-cached-binary.outputs.download-path }}/ruff
scripts:
name: "test scripts"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
timeout-minutes: 5
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
@@ -337,27 +185,28 @@ jobs:
- determine_changes
# Only runs on pull requests, since that is the only way we can find the base version for comparison.
# Ecosystem check needs linter and/or formatter changes.
if: ${{ github.event_name == 'pull_request' && needs.determine_changes.outputs.code == 'true' }}
timeout-minutes: 20
if: github.event_name == 'pull_request' && ${{
needs.determine_changes.outputs.linter == 'true' ||
needs.determine_changes.outputs.formatter == 'true'
}}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: actions/download-artifact@v4
- uses: actions/download-artifact@v3
name: Download comparison Ruff binary
id: ruff-target
with:
name: ruff
path: target/debug
- uses: dawidd6/action-download-artifact@v6
- uses: dawidd6/action-download-artifact@v2
name: Download baseline Ruff binary
with:
name: ruff
branch: ${{ github.event.pull_request.base.ref }}
workflow: "ci.yaml"
check_artifacts: true
- name: Install ruff-ecosystem
@@ -432,36 +281,38 @@ jobs:
run: |
echo ${{ github.event.number }} > pr-number
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@v3
name: Upload PR Number
with:
name: pr-number
path: pr-number
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@v3
name: Upload Results
with:
name: ecosystem-result
path: ecosystem-result
cargo-shear:
name: "cargo shear"
cargo-udeps:
name: "cargo udeps"
runs-on: ubuntu-latest
needs: determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
steps:
- uses: actions/checkout@v4
- uses: cargo-bins/cargo-binstall@main
- run: cargo binstall --no-confirm cargo-shear
- run: cargo shear
- name: "Install nightly Rust toolchain"
# Only pinned to make caching work, update freely
run: rustup toolchain install nightly-2023-10-15
- uses: Swatinem/rust-cache@v2
- name: "Install cargo-udeps"
uses: taiki-e/install-action@cargo-udeps
- name: "Run cargo-udeps"
run: cargo +nightly-2023-10-15 udeps
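# (cargo-udeps requires a nightly toolchain, hence the pinned `+nightly-2023-10-15` invocation)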
python-package:
name: "python package"
runs-on: ubuntu-latest
timeout-minutes: 20
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
@@ -483,10 +334,9 @@ jobs:
pre-commit:
name: "pre-commit"
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: "Install Rust toolchain"
@@ -495,7 +345,7 @@ jobs:
- name: "Install pre-commit"
run: pip install pre-commit
- name: "Cache pre-commit"
uses: actions/cache@v4
uses: actions/cache@v3
with:
path: ~/.cache/pre-commit
key: pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
@@ -512,30 +362,25 @@ jobs:
docs:
name: "mkdocs"
runs-on: ubuntu-latest
timeout-minutes: 10
env:
MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: "3.13"
- uses: actions/setup-python@v4
- name: "Add SSH key"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
uses: webfactory/ssh-agent@v0.9.0
uses: webfactory/ssh-agent@v0.8.0
with:
ssh-private-key: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY }}
- name: "Install Rust toolchain"
run: rustup show
- name: Install uv
uses: astral-sh/setup-uv@v3
- uses: Swatinem/rust-cache@v2
- name: "Install Insiders dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: uv pip install -r docs/requirements-insiders.txt --system
run: pip install -r docs/requirements-insiders.txt
- name: "Install dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: uv pip install -r docs/requirements.txt --system
run: pip install -r docs/requirements.txt
- name: "Update README File"
run: python scripts/transform_readme.py --target mkdocs
- name: "Generate docs"
@@ -547,14 +392,13 @@ jobs:
run: mkdocs build --strict -f mkdocs.insiders.yml
- name: "Build docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: mkdocs build --strict -f mkdocs.public.yml
run: mkdocs build --strict -f mkdocs.generated.yml
check-formatter-instability-and-black-similarity:
name: "formatter instabilities and black similarity"
runs-on: ubuntu-latest
needs: determine_changes
if: needs.determine_changes.outputs.formatter == 'true' || github.ref == 'refs/heads/main'
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
@@ -571,13 +415,9 @@ jobs:
check-ruff-lsp:
name: "test ruff-lsp"
runs-on: ubuntu-latest
timeout-minutes: 5
needs:
- cargo-test-linux
- determine_changes
if: ${{ needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main' }}
needs: cargo-test-linux
steps:
- uses: extractions/setup-just@v2
- uses: extractions/setup-just@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -586,11 +426,11 @@ jobs:
with:
repository: "astral-sh/ruff-lsp"
- uses: actions/setup-python@v5
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- uses: actions/download-artifact@v4
- uses: actions/download-artifact@v3
name: Download development ruff binary
id: ruff-target
with:
@@ -612,10 +452,7 @@ jobs:
just test
benchmarks:
runs-on: ubuntu-22.04
needs: determine_changes
if: ${{ github.repository == 'astral-sh/ruff' && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
timeout-minutes: 20
runs-on: ubuntu-latest
steps:
- name: "Checkout Branch"
uses: actions/checkout@v4
@@ -634,7 +471,7 @@ jobs:
run: cargo codspeed build --features codspeed -p ruff_benchmark
- name: "Run benchmarks"
uses: CodSpeedHQ/action@v3
uses: CodSpeedHQ/action@v1
with:
run: cargo codspeed run
token: ${{ secrets.CODSPEED_TOKEN }}

View File

@@ -1,72 +0,0 @@
name: Daily parser fuzz
on:
workflow_dispatch:
schedule:
- cron: "0 0 * * *"
pull_request:
paths:
- ".github/workflows/daily_fuzz.yaml"
permissions:
contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
PACKAGE_NAME: ruff
FORCE_COLOR: 1
jobs:
fuzz:
name: Fuzz
runs-on: ubuntu-latest
timeout-minutes: 20
# Don't run the cron job on forks:
if: ${{ github.repository == 'astral-sh/ruff' || github.event_name != 'schedule' }}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: "3.12"
- name: Install uv
run: curl -LsSf https://astral.sh/uv/install.sh | sh
- name: Install Python requirements
run: uv pip install -r scripts/fuzz-parser/requirements.txt --system
- name: "Install Rust toolchain"
run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@v1
- uses: Swatinem/rust-cache@v2
- name: Build ruff
# A debug build means the script runs slower once it gets started,
# but this is outweighed by the fact that a release build takes *much* longer to compile in CI
run: cargo build --locked
- name: Fuzz
run: python scripts/fuzz-parser/fuzz.py $(shuf -i 0-9999999999999999999 -n 1000) --test-executable target/debug/ruff
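# (`shuf -i 0-9999999999999999999 -n 1000` draws 1000 random seeds from the
# 0..10^19-1 range; each seed is passed through to fuzz.py as one test case)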
create-issue-on-failure:
name: Create an issue if the daily fuzz surfaced any bugs
runs-on: ubuntu-latest
needs: fuzz
if: ${{ github.repository == 'astral-sh/ruff' && always() && github.event_name == 'schedule' && needs.fuzz.result == 'failure' }}
permissions:
issues: write
steps:
- uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
await github.rest.issues.create({
owner: "astral-sh",
repo: "ruff",
title: `Daily parser fuzz failed on ${new Date().toDateString()}`,
body: "Runs listed here: https://github.com/astral-sh/ruff/actions/workflows/daily_fuzz.yml",
labels: ["bug", "parser", "fuzzer"],
})

.github/workflows/docs.yaml (new file, 55 additions)
View File

@@ -0,0 +1,55 @@
name: mkdocs
on:
workflow_dispatch:
inputs:
ref:
description: "The commit SHA, tag, or branch to publish. Uses the default branch if not specified."
default: ""
type: string
release:
types: [published]
jobs:
mkdocs:
runs-on: ubuntu-latest
env:
CF_API_TOKEN_EXISTS: ${{ secrets.CF_API_TOKEN != '' }}
MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.ref }}
- uses: actions/setup-python@v4
- name: "Add SSH key"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
uses: webfactory/ssh-agent@v0.8.0
with:
ssh-private-key: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY }}
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v2
- name: "Install Insiders dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: pip install -r docs/requirements-insiders.txt
- name: "Install dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: pip install -r docs/requirements.txt
- name: "Copy README File"
run: |
python scripts/transform_readme.py --target mkdocs
python scripts/generate_mkdocs.py
- name: "Build Insiders docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: mkdocs build --strict -f mkdocs.insiders.yml
- name: "Build docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: mkdocs build --strict -f mkdocs.generated.yml
- name: "Deploy to Cloudflare Pages"
if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
uses: cloudflare/wrangler-action@v3.3.2
with:
apiToken: ${{ secrets.CF_API_TOKEN }}
accountId: ${{ secrets.CF_ACCOUNT_ID }}
# `github.head_ref` is only set during pull requests; for manual runs or tags, we use `main` to deploy to production
command: pages deploy site --project-name=astral-docs --branch ${{ github.head_ref || 'main' }} --commit-hash ${GITHUB_SHA}

.github/workflows/flake8-to-ruff.yaml (new file, 247 additions)
View File

@@ -0,0 +1,247 @@
name: "[flake8-to-ruff] Release"
on: workflow_dispatch
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
PACKAGE_NAME: flake8-to-ruff
CRATE_NAME: flake8_to_ruff
PYTHON_VERSION: "3.11"
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
jobs:
macos-x86_64:
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Install Rust toolchain"
run: rustup show
- name: "Build wheels - x86_64"
uses: PyO3/maturin-action@v1
with:
target: x86_64
args: --release --out dist --sdist -m ./${{ env.CRATE_NAME }}/Cargo.toml
- name: "Install built wheel - x86_64"
run: |
pip install dist/${{ env.CRATE_NAME }}-*.whl --force-reinstall
- name: "Upload wheels"
uses: actions/upload-artifact@v3
with:
name: wheels
path: dist
macos-universal:
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Install Rust toolchain"
run: rustup show
- name: "Build wheels - universal2"
uses: PyO3/maturin-action@v1
with:
args: --release --target universal2-apple-darwin --out dist -m ./${{ env.CRATE_NAME }}/Cargo.toml
- name: "Install built wheel - universal2"
run: |
pip install dist/${{ env.CRATE_NAME }}-*universal2.whl --force-reinstall
- name: "Upload wheels"
uses: actions/upload-artifact@v3
with:
name: wheels
path: dist
windows:
runs-on: windows-latest
strategy:
matrix:
target: [x64, x86]
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: ${{ matrix.target }}
- name: "Install Rust toolchain"
run: rustup show
- name: "Build wheels"
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.target }}
args: --release --out dist -m ./${{ env.CRATE_NAME }}/Cargo.toml
- name: "Install built wheel"
shell: bash
run: |
python -m pip install dist/${{ env.CRATE_NAME }}-*.whl --force-reinstall
- name: "Upload wheels"
uses: actions/upload-artifact@v3
with:
name: wheels
path: dist
linux:
runs-on: ubuntu-latest
strategy:
matrix:
target: [x86_64, i686]
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Build wheels"
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.target }}
manylinux: auto
args: --release --out dist -m ./${{ env.CRATE_NAME }}/Cargo.toml
- name: "Install built wheel"
if: matrix.target == 'x86_64'
run: |
pip install dist/${{ env.CRATE_NAME }}-*.whl --force-reinstall
- name: "Upload wheels"
uses: actions/upload-artifact@v3
with:
name: wheels
path: dist
linux-cross:
runs-on: ubuntu-latest
strategy:
matrix:
target: [aarch64, armv7, s390x, ppc64le, ppc64]
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: "Build wheels"
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.target }}
manylinux: auto
args: --no-default-features --release --out dist -m ./${{ env.CRATE_NAME }}/Cargo.toml
- uses: uraimo/run-on-arch-action@v2
if: matrix.target != 'ppc64'
name: Install built wheel
with:
arch: ${{ matrix.target }}
distro: ubuntu20.04
githubToken: ${{ github.token }}
install: |
apt-get update
apt-get install -y --no-install-recommends python3 python3-pip
pip3 install -U pip
run: |
pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
- name: "Upload wheels"
uses: actions/upload-artifact@v3
with:
name: wheels
path: dist
musllinux:
runs-on: ubuntu-latest
strategy:
matrix:
target:
- x86_64-unknown-linux-musl
- i686-unknown-linux-musl
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Build wheels"
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.target }}
manylinux: musllinux_1_2
args: --release --out dist -m ./${{ env.CRATE_NAME }}/Cargo.toml
- name: "Install built wheel"
if: matrix.target == 'x86_64-unknown-linux-musl'
uses: addnab/docker-run-action@v3
with:
image: alpine:latest
options: -v ${{ github.workspace }}:/io -w /io
run: |
apk add py3-pip
pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links /io/dist/ --force-reinstall
- name: "Upload wheels"
uses: actions/upload-artifact@v3
with:
name: wheels
path: dist
musllinux-cross:
runs-on: ubuntu-latest
strategy:
matrix:
platform:
- target: aarch64-unknown-linux-musl
arch: aarch64
- target: armv7-unknown-linux-musleabihf
arch: armv7
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: "Build wheels"
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.platform.target }}
manylinux: musllinux_1_2
args: --release --out dist -m ./${{ env.CRATE_NAME }}/Cargo.toml
- uses: uraimo/run-on-arch-action@v2
name: Install built wheel
with:
arch: ${{ matrix.platform.arch }}
distro: alpine_latest
githubToken: ${{ github.token }}
install: |
apk add py3-pip
run: |
pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
- name: "Upload wheels"
uses: actions/upload-artifact@v3
with:
name: wheels
path: dist
release:
name: Release
runs-on: ubuntu-latest
needs:
- macos-universal
- macos-x86_64
- windows
- linux
- linux-cross
- musllinux
- musllinux-cross
steps:
- uses: actions/download-artifact@v3
with:
name: wheels
- uses: actions/setup-python@v4
- name: "Publish to PyPi"
env:
TWINE_USERNAME: __token__
TWINE_PASSWORD: ${{ secrets.FLAKE8_TO_RUFF_TOKEN }}
run: |
pip install --upgrade twine
twine upload --skip-existing *

View File

@@ -1,76 +0,0 @@
name: "Test macOS 12 (x86_64)"
on:
pull_request
env:
PACKAGE_NAME: ruff
MODULE_NAME: ruff
PYTHON_VERSION: "3.11"
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
jobs:
macos-x86_64:
runs-on: macos-14
# env:
# MACOSX_DEPLOYMENT_TARGET: 13.7
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels - x86_64"
uses: PyO3/maturin-action@v1
with:
target: x86_64
args: --release --locked --out dist
- name: "Upload wheels"
uses: actions/upload-artifact@v4
with:
name: wheels-macos-x86_64
path: dist
- name: "Archive binary"
run: |
TARGET=x86_64-apple-darwin
ARCHIVE_NAME=ruff-$TARGET
ARCHIVE_FILE=$ARCHIVE_NAME.tar.gz
mkdir -p $ARCHIVE_NAME
cp target/$TARGET/release/ruff $ARCHIVE_NAME/ruff
tar czvf $ARCHIVE_FILE $ARCHIVE_NAME
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v4
with:
name: artifacts-macos-x86_64
path: |
*.tar.gz
*.sha256
test:
needs: macos-x86_64
runs-on: macos-12
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Download binaries"
uses: actions/download-artifact@v4
with:
name: artifacts-macos-x86_64
- name: "Extract"
run: tar xzvf ruff-x86_64-apple-darwin.tar.gz
- name: "Run Ruff"
run: ./ruff-x86_64-apple-darwin/ruff --help

View File

@@ -1,29 +0,0 @@
# Notify downstream repositories of a new release.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a post-announce
# job within `cargo-dist`.
name: "[ruff] Notify dependents"
on:
workflow_call:
inputs:
plan:
required: true
type: string
jobs:
update-dependents:
name: Notify dependents
runs-on: ubuntu-latest
steps:
- name: "Update pre-commit mirror"
uses: actions/github-script@v7
with:
github-token: ${{ secrets.RUFF_PRE_COMMIT_PAT }}
script: |
github.rest.actions.createWorkflowDispatch({
owner: 'astral-sh',
repo: 'ruff-pre-commit',
workflow_id: 'main.yml',
ref: 'main',
})

.github/workflows/playground.yaml (new file, 48 additions)
View File

@@ -0,0 +1,48 @@
name: "[Playground] Release"
on:
workflow_dispatch:
release:
types: [published]
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
jobs:
publish:
runs-on: ubuntu-latest
env:
CF_API_TOKEN_EXISTS: ${{ secrets.CF_API_TOKEN != '' }}
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown
- uses: actions/setup-node@v4
with:
node-version: 18
cache: "npm"
cache-dependency-path: playground/package-lock.json
- uses: jetli/wasm-pack-action@v0.4.0
- uses: jetli/wasm-bindgen-action@v0.2.0
- name: "Run wasm-pack"
run: wasm-pack build --target web --out-dir ../../playground/src/pkg crates/ruff_wasm
- name: "Install Node dependencies"
run: npm ci
working-directory: playground
- name: "Run TypeScript checks"
run: npm run check
working-directory: playground
- name: "Build JavaScript bundle"
run: npm run build
working-directory: playground
- name: "Deploy to Cloudflare Pages"
if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
uses: cloudflare/wrangler-action@v3.3.2
with:
apiToken: ${{ secrets.CF_API_TOKEN }}
accountId: ${{ secrets.CF_ACCOUNT_ID }}
# `github.head_ref` is only set during pull requests; for manual runs or tags, we use `main` to deploy to production
command: pages deploy playground/dist --project-name=ruff-playground --branch ${{ github.head_ref || 'main' }} --commit-hash ${GITHUB_SHA}

View File

@@ -17,13 +17,12 @@ jobs:
comment:
runs-on: ubuntu-latest
steps:
- uses: dawidd6/action-download-artifact@v6
- uses: dawidd6/action-download-artifact@v2
name: Download pull request number
with:
name: pr-number
run_id: ${{ github.event.workflow_run.id || github.event.inputs.workflow_run_id }}
if_no_artifact_found: ignore
allow_forks: true
- name: Parse pull request number
id: pr-number
@@ -33,7 +32,7 @@ jobs:
echo "pr-number=$(<pr-number)" >> $GITHUB_OUTPUT
fi
- uses: dawidd6/action-download-artifact@v6
- uses: dawidd6/action-download-artifact@v2
name: "Download ecosystem results"
id: download-ecosystem-result
if: steps.pr-number.outputs.pr-number
@@ -44,20 +43,11 @@ jobs:
path: pr/ecosystem
workflow_conclusion: completed
if_no_artifact_found: ignore
allow_forks: true
- name: Generate comment content
id: generate-comment
if: steps.download-ecosystem-result.outputs.found_artifact == 'true'
run: |
# Guard against malicious ecosystem results that symlink to a secret
# file on this runner
if [[ -L pr/ecosystem/ecosystem-result ]]
then
echo "Error: ecosystem-result cannot be a symlink"
exit 1
fi
# Note this identifier is used to find the comment to update on
# subsequent runs
echo '<!-- generated-comment ecosystem -->' >> comment.txt
@@ -71,7 +61,7 @@ jobs:
echo 'EOF' >> $GITHUB_OUTPUT
- name: Find existing comment
uses: peter-evans/find-comment@v3
uses: peter-evans/find-comment@v2
if: steps.generate-comment.outcome == 'success'
id: find-comment
with:
@@ -81,7 +71,7 @@ jobs:
- name: Create or update comment
if: steps.find-comment.outcome == 'success'
uses: peter-evans/create-or-update-comment@v4
uses: peter-evans/create-or-update-comment@v3
with:
comment-id: ${{ steps.find-comment.outputs.comment-id }}
issue-number: ${{ steps.pr-number.outputs.pr-number }}

View File

@@ -1,152 +0,0 @@
# Publish the Ruff documentation.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a post-announce
# job within `cargo-dist`.
name: mkdocs
on:
workflow_dispatch:
inputs:
ref:
description: "The commit SHA, tag, or branch to publish. Uses the default branch if not specified."
default: ""
type: string
workflow_call:
inputs:
plan:
required: true
type: string
jobs:
mkdocs:
runs-on: ubuntu-latest
env:
MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.ref }}
- uses: actions/setup-python@v5
with:
python-version: 3.12
- name: "Set docs version"
run: |
version="${{ (inputs.plan != '' && fromJson(inputs.plan).announcement_tag) || inputs.ref }}"
# if version is missing, use 'latest'
if [ -z "$version" ]; then
echo "Using 'latest' as version"
version="latest"
fi
# Use version as display name for now
display_name="$version"
echo "version=$version" >> $GITHUB_ENV
echo "display_name=$display_name" >> $GITHUB_ENV
- name: "Set branch name"
run: |
version="${{ env.version }}"
display_name="${{ env.display_name }}"
timestamp="$(date +%s)"
# create branch_display_name from display_name by replacing all
# characters disallowed in git branch names with hyphens
branch_display_name="$(echo "$display_name" | tr -c '[:alnum:]._' '-' | tr -s '-')"
echo "branch_name=update-docs-$branch_display_name-$timestamp" >> $GITHUB_ENV
echo "timestamp=$timestamp" >> $GITHUB_ENV
- name: "Add SSH key"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY }}
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v2
- name: "Install Insiders dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: pip install -r docs/requirements-insiders.txt
- name: "Install dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: pip install -r docs/requirements.txt
- name: "Copy README File"
run: |
python scripts/transform_readme.py --target mkdocs
python scripts/generate_mkdocs.py
- name: "Build Insiders docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: mkdocs build --strict -f mkdocs.insiders.yml
- name: "Build docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: mkdocs build --strict -f mkdocs.public.yml
- name: "Clone docs repo"
run: |
version="${{ env.version }}"
git clone https://${{ secrets.ASTRAL_DOCS_PAT }}@github.com/astral-sh/docs.git astral-docs
- name: "Copy docs"
run: rm -rf astral-docs/site/ruff && mkdir -p astral-docs/site && cp -r site/ruff astral-docs/site/
- name: "Commit docs"
working-directory: astral-docs
run: |
branch_name="${{ env.branch_name }}"
git config user.name "astral-docs-bot"
git config user.email "176161322+astral-docs-bot@users.noreply.github.com"
git checkout -b $branch_name
git add site/ruff
git commit -m "Update ruff documentation for $version"
- name: "Create Pull Request"
working-directory: astral-docs
env:
GITHUB_TOKEN: ${{ secrets.ASTRAL_DOCS_PAT }}
run: |
version="${{ env.version }}"
display_name="${{ env.display_name }}"
branch_name="${{ env.branch_name }}"
# set the PR title
pull_request_title="Update ruff documentation for $display_name"
# Delete any existing pull requests that are open for this version
# by checking against pull_request_title because the new PR will
# supersede the old one.
gh pr list --state open --json title,number --jq ".[] | select(.title == \"$pull_request_title\") | .number" | \
xargs -I {} gh pr close {}
# push the branch to GitHub
git push origin $branch_name
# create the PR
gh pr create --base main --head $branch_name \
--title "$pull_request_title" \
--body "Automated documentation update for $display_name" \
--label "documentation"
- name: "Merge Pull Request"
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
working-directory: astral-docs
env:
GITHUB_TOKEN: ${{ secrets.ASTRAL_DOCS_PAT }}
run: |
branch_name="${{ env.branch_name }}"
# auto-merge the PR if the build was triggered by a release. Manual builds should be reviewed by a human.
# give the PR a few seconds to be created before trying to auto-merge it
sleep 10
gh pr merge --squash $branch_name

View File

@@ -1,55 +0,0 @@
# Publish the Ruff playground.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a post-announce
# job within `cargo-dist`.
name: "[Playground] Release"
on:
workflow_dispatch:
workflow_call:
inputs:
plan:
required: true
type: string
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
jobs:
publish:
runs-on: ubuntu-latest
env:
CF_API_TOKEN_EXISTS: ${{ secrets.CF_API_TOKEN != '' }}
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown
- uses: actions/setup-node@v4
with:
node-version: 18
cache: "npm"
cache-dependency-path: playground/package-lock.json
- uses: jetli/wasm-pack-action@v0.4.0
- uses: jetli/wasm-bindgen-action@v0.2.0
- name: "Run wasm-pack"
run: wasm-pack build --target web --out-dir ../../playground/src/pkg crates/ruff_wasm
- name: "Install Node dependencies"
run: npm ci
working-directory: playground
- name: "Run TypeScript checks"
run: npm run check
working-directory: playground
- name: "Build JavaScript bundle"
run: npm run build
working-directory: playground
- name: "Deploy to Cloudflare Pages"
if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
uses: cloudflare/wrangler-action@v3.7.0
with:
apiToken: ${{ secrets.CF_API_TOKEN }}
accountId: ${{ secrets.CF_ACCOUNT_ID }}
# `github.head_ref` is only set during pull requests; for manual runs or tags, we use `main` to deploy to production
command: pages deploy playground/dist --project-name=ruff-playground --branch ${{ github.head_ref || 'main' }} --commit-hash ${GITHUB_SHA}

View File

@@ -1,34 +0,0 @@
# Publish a release to PyPI.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a publish job
# within `cargo-dist`.
name: "[ruff] Publish to PyPI"
on:
workflow_call:
inputs:
plan:
required: true
type: string
jobs:
pypi-publish:
name: Upload to PyPI
runs-on: ubuntu-latest
environment:
name: release
permissions:
# For PyPI's trusted publishing.
id-token: write
steps:
- uses: actions/download-artifact@v4
with:
pattern: wheels-*
path: wheels
merge-multiple: true
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
with:
skip-existing: true
packages-dir: wheels
verbose: true

View File

@@ -1,55 +0,0 @@
# Build and publish ruff-api for wasm.
#
# Assumed to run as a subworkflow of .github/workflows/release.yml; specifically, as a publish
# job within `cargo-dist`.
name: "Build and publish wasm"
on:
workflow_dispatch:
workflow_call:
inputs:
plan:
required: true
type: string
env:
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
jobs:
ruff_wasm:
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write
strategy:
matrix:
target: [web, bundler, nodejs]
fail-fast: false
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown
- uses: jetli/wasm-pack-action@v0.4.0
- uses: jetli/wasm-bindgen-action@v0.2.0
- name: "Run wasm-pack build"
run: wasm-pack build --target ${{ matrix.target }} crates/ruff_wasm
- name: "Rename generated package"
run: | # Replace the package name w/ jq
jq '.name="@astral-sh/ruff-wasm-${{ matrix.target }}"' crates/ruff_wasm/pkg/package.json > /tmp/package.json
mv /tmp/package.json crates/ruff_wasm/pkg
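# e.g. for target=web the published package is named "@astral-sh/ruff-wasm-web"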
- run: cp LICENSE crates/ruff_wasm/pkg # wasm-pack does not put the LICENSE file in the pkg
- uses: actions/setup-node@v4
with:
node-version: 18
registry-url: "https://registry.npmjs.org"
- name: "Publish (dry-run)"
if: ${{ inputs.plan == '' || fromJson(inputs.plan).announcement_tag_is_implicit }}
run: npm publish --dry-run crates/ruff_wasm/pkg
- name: "Publish"
if: ${{ inputs.plan != '' && !fromJson(inputs.plan).announcement_tag_is_implicit }}
run: npm publish --provenance --access public crates/ruff_wasm/pkg
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}

536 .github/workflows/release.yaml vendored Normal file
View File

@@ -0,0 +1,536 @@
name: "[ruff] Release"
on:
workflow_dispatch:
inputs:
tag:
description: "The version to tag, without the leading 'v'. If omitted, will initiate a dry run (no uploads)."
type: string
sha:
description: "The full sha of the commit to be released. If omitted, the latest commit on the default branch will be used."
default: ""
type: string
pull_request:
paths:
# When we change pyproject.toml, we want to ensure that the maturin builds still work
- pyproject.toml
# And when we change this workflow itself...
- .github/workflows/release.yaml
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
PACKAGE_NAME: ruff
PYTHON_VERSION: "3.11"
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
jobs:
sdist:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build sdist"
uses: PyO3/maturin-action@v1
with:
command: sdist
args: --out dist
- name: "Test sdist"
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.tar.gz --force-reinstall
ruff --help
python -m ruff --help
- name: "Upload sdist"
uses: actions/upload-artifact@v3
with:
name: wheels
path: dist
macos-x86_64:
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels - x86_64"
uses: PyO3/maturin-action@v1
with:
target: x86_64
args: --release --out dist
- name: "Test wheel - x86_64"
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
ruff --help
python -m ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@v3
with:
name: wheels
path: dist
- name: "Archive binary"
run: |
ARCHIVE_FILE=ruff-x86_64-apple-darwin.tar.gz
tar czvf $ARCHIVE_FILE -C target/x86_64-apple-darwin/release ruff
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
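# a downloader can then verify the archive with, e.g.:
#   shasum -a 256 -c ruff-x86_64-apple-darwin.tar.gz.sha256
# (run from the directory containing both the archive and the checksum file)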
- name: "Upload binary"
uses: actions/upload-artifact@v3
with:
name: binaries
path: |
*.tar.gz
*.sha256
macos-universal:
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels - universal2"
uses: PyO3/maturin-action@v1
with:
args: --release --target universal2-apple-darwin --out dist
- name: "Test wheel - universal2"
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*universal2.whl --force-reinstall
ruff --help
python -m ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@v3
with:
name: wheels
path: dist
- name: "Archive binary"
run: |
ARCHIVE_FILE=ruff-aarch64-apple-darwin.tar.gz
tar czvf $ARCHIVE_FILE -C target/aarch64-apple-darwin/release ruff
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v3
with:
name: binaries
path: |
*.tar.gz
*.sha256
windows:
runs-on: windows-latest
strategy:
matrix:
platform:
- target: x86_64-pc-windows-msvc
arch: x64
- target: i686-pc-windows-msvc
arch: x86
- target: aarch64-pc-windows-msvc
arch: x64
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: ${{ matrix.platform.arch }}
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.platform.target }}
args: --release --out dist
- name: "Test wheel"
if: ${{ !startsWith(matrix.platform.target, 'aarch64') }}
shell: bash
run: |
python -m pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
ruff --help
python -m ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@v3
with:
name: wheels
path: dist
- name: "Archive binary"
shell: bash
run: |
ARCHIVE_FILE=ruff-${{ matrix.platform.target }}.zip
7z a $ARCHIVE_FILE ./target/${{ matrix.platform.target }}/release/ruff.exe
sha256sum $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v3
with:
name: binaries
path: |
*.zip
*.sha256
linux:
runs-on: ubuntu-latest
strategy:
matrix:
target:
- x86_64-unknown-linux-gnu
- i686-unknown-linux-gnu
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.target }}
manylinux: auto
args: --release --out dist
- name: "Test wheel"
if: ${{ startsWith(matrix.target, 'x86_64') }}
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
ruff --help
python -m ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@v3
with:
name: wheels
path: dist
- name: "Archive binary"
run: |
ARCHIVE_FILE=ruff-${{ matrix.target }}.tar.gz
tar czvf $ARCHIVE_FILE -C target/${{ matrix.target }}/release ruff
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v3
with:
name: binaries
path: |
*.tar.gz
*.sha256
linux-cross:
runs-on: ubuntu-latest
strategy:
matrix:
platform:
- target: aarch64-unknown-linux-gnu
arch: aarch64
# see https://github.com/astral-sh/ruff/issues/3791
# and https://github.com/gnzlbg/jemallocator/issues/170#issuecomment-1503228963
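# (aarch64 kernels frequently use 64 KiB pages, i.e. 2^16 bytes, so jemalloc
# must be built with a matching log page size to run on such systems)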
maturin_docker_options: -e JEMALLOC_SYS_WITH_LG_PAGE=16
- target: armv7-unknown-linux-gnueabihf
arch: armv7
- target: s390x-unknown-linux-gnu
arch: s390x
- target: powerpc64le-unknown-linux-gnu
arch: ppc64le
- target: powerpc64-unknown-linux-gnu
arch: ppc64
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.platform.target }}
manylinux: auto
docker-options: ${{ matrix.platform.maturin_docker_options }}
args: --release --out dist
- uses: uraimo/run-on-arch-action@v2
if: matrix.platform.arch != 'ppc64'
name: Test wheel
with:
arch: ${{ matrix.platform.arch }}
distro: ubuntu20.04
githubToken: ${{ github.token }}
install: |
apt-get update
apt-get install -y --no-install-recommends python3 python3-pip
pip3 install -U pip
run: |
pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@v3
with:
name: wheels
path: dist
- name: "Archive binary"
run: |
ARCHIVE_FILE=ruff-${{ matrix.platform.target }}.tar.gz
tar czvf $ARCHIVE_FILE -C target/${{ matrix.platform.target }}/release ruff
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v3
with:
name: binaries
path: |
*.tar.gz
*.sha256
musllinux:
runs-on: ubuntu-latest
strategy:
matrix:
target:
- x86_64-unknown-linux-musl
- i686-unknown-linux-musl
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.target }}
manylinux: musllinux_1_2
args: --release --out dist
- name: "Test wheel"
if: matrix.target == 'x86_64-unknown-linux-musl'
uses: addnab/docker-run-action@v3
with:
image: alpine:latest
options: -v ${{ github.workspace }}:/io -w /io
run: |
apk add py3-pip
pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links /io/dist/ --force-reinstall
ruff --help
python -m ruff --help
- name: "Upload wheels"
uses: actions/upload-artifact@v3
with:
name: wheels
path: dist
- name: "Archive binary"
run: |
ARCHIVE_FILE=ruff-${{ matrix.target }}.tar.gz
tar czvf $ARCHIVE_FILE -C target/${{ matrix.target }}/release ruff
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v3
with:
name: binaries
path: |
*.tar.gz
*.sha256
musllinux-cross:
runs-on: ubuntu-latest
strategy:
matrix:
platform:
- target: aarch64-unknown-linux-musl
arch: aarch64
maturin_docker_options: -e JEMALLOC_SYS_WITH_LG_PAGE=16
- target: armv7-unknown-linux-musleabihf
arch: armv7
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.platform.target }}
manylinux: musllinux_1_2
args: --release --out dist
docker-options: ${{ matrix.platform.maturin_docker_options }}
- uses: uraimo/run-on-arch-action@v2
name: Test wheel
with:
arch: ${{ matrix.platform.arch }}
distro: alpine_latest
githubToken: ${{ github.token }}
install: |
apk add py3-pip
run: |
pip3 install ${{ env.PACKAGE_NAME }} --no-index --find-links dist/ --force-reinstall
ruff check --help
- name: "Upload wheels"
uses: actions/upload-artifact@v3
with:
name: wheels
path: dist
- name: "Archive binary"
run: |
ARCHIVE_FILE=ruff-${{ matrix.platform.target }}.tar.gz
tar czvf $ARCHIVE_FILE -C target/${{ matrix.platform.target }}/release ruff
shasum -a 256 $ARCHIVE_FILE > $ARCHIVE_FILE.sha256
- name: "Upload binary"
uses: actions/upload-artifact@v3
with:
name: binaries
path: |
*.tar.gz
*.sha256
validate-tag:
name: Validate tag
runs-on: ubuntu-latest
# If you don't set an input tag, it's a dry run (no uploads).
if: ${{ inputs.tag }}
steps:
- uses: actions/checkout@v4
with:
ref: main # We checkout the main branch to check for the commit
- name: Check main branch
if: ${{ inputs.sha }}
run: |
# Fetch the main branch since a shallow checkout is used by default
git fetch origin main --unshallow
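# `git branch --contains <sha>` prints matching branches like `* main` or `  main`,
# so the regex below matches `main` only as a whole branch name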
if ! git branch --contains ${{ inputs.sha }} | grep -E '(^|\s)main$'; then
echo "The specified sha is not on the main branch" >&2
exit 1
fi
- name: Check tag consistency
run: |
# Switch to the commit we want to release
git checkout ${{ inputs.sha }}
version=$(grep "version = " pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g')
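# e.g. the line `version = "0.1.6"` in pyproject.toml yields version=0.1.6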
if [ "${{ inputs.tag }}" != "${version}" ]; then
echo "The input tag does not match the version from pyproject.toml:" >&2
echo "${{ inputs.tag }}" >&2
echo "${version}" >&2
exit 1
else
echo "Releasing ${version}"
fi
upload-release:
name: Upload to PyPI
runs-on: ubuntu-latest
needs:
- macos-universal
- macos-x86_64
- windows
- linux
- linux-cross
- musllinux
- musllinux-cross
- validate-tag
# If you don't set an input tag, it's a dry run (no uploads).
if: ${{ inputs.tag }}
environment:
name: release
permissions:
# For PyPI's trusted publishing
id-token: write
steps:
- uses: actions/download-artifact@v3
with:
name: wheels
path: wheels
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
with:
skip-existing: true
packages-dir: wheels
verbose: true
tag-release:
name: Tag release
runs-on: ubuntu-latest
needs: upload-release
# If you don't set an input tag, it's a dry run (no uploads).
if: ${{ inputs.tag }}
permissions:
# For git tag
contents: write
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- name: git tag
run: |
git config user.email "hey@astral.sh"
git config user.name "Ruff Release CI"
git tag -m "v${{ inputs.tag }}" "v${{ inputs.tag }}"
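# e.g. inputs.tag=0.5.0 creates the annotated tag v0.5.0 (pushed below)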
# If there is a duplicate tag, this will fail. The publish-to-PyPI action will have been a no-op
# (due to skip-existing), so this is a non-destructive exit.
git push --tags
publish-release:
name: Publish to GitHub
runs-on: ubuntu-latest
needs: tag-release
# If you don't set an input tag, it's a dry run (no uploads).
if: ${{ inputs.tag }}
permissions:
# For GitHub release publishing
contents: write
steps:
- uses: actions/download-artifact@v3
with:
name: binaries
path: binaries
- name: "Publish to GitHub"
uses: softprops/action-gh-release@v1
with:
draft: true
files: binaries/*
tag_name: v${{ inputs.tag }}
# After the release has been published, we update downstream repositories
# This is separate because, if it fails, the release is still fine; we just need to trigger some workflows manually
update-dependents:
name: Update dependents
runs-on: ubuntu-latest
needs: publish-release
steps:
- name: "Update pre-commit mirror"
uses: actions/github-script@v6
with:
github-token: ${{ secrets.RUFF_PRE_COMMIT_PAT }}
script: |
github.rest.actions.createWorkflowDispatch({
owner: 'astral-sh',
repo: 'ruff-pre-commit',
workflow_id: 'main.yml',
ref: 'main',
})

View File

@@ -1,300 +0,0 @@
# This file was autogenerated by cargo-dist: https://opensource.axo.dev/cargo-dist/
#
# Copyright 2022-2024, axodotdev
# SPDX-License-Identifier: MIT or Apache-2.0
#
# CI that:
#
# * checks for a Git Tag that looks like a release
# * builds artifacts with cargo-dist (archives, installers, hashes)
# * uploads those artifacts to temporary workflow zip
# * on success, uploads the artifacts to a GitHub Release
#
# Note that the GitHub Release will be created with a generated
# title/body based on your changelogs.
name: Release
permissions:
"contents": "write"
# This task will run whenever you trigger workflow_dispatch with a tag that looks like a version
# like "1.0.0", "v0.1.0-prerelease.1", "my-app/0.1.0", "releases/v1.0.0", etc.
# Various formats will be parsed into a VERSION and an optional PACKAGE_NAME, where
# PACKAGE_NAME must be the name of a Cargo package in your workspace, and VERSION
# must be a Cargo-style SemVer Version (must have at least major.minor.patch).
#
# If PACKAGE_NAME is specified, then the announcement will be for that
# package (erroring out if it doesn't have the given version or isn't cargo-dist-able).
#
# If PACKAGE_NAME isn't specified, then the announcement will be for all
# (cargo-dist-able) packages in the workspace with that version (this mode is
# intended for workspaces with only one dist-able package, or with all dist-able
# packages versioned/released in lockstep).
#
# If you push multiple tags at once, separate instances of this workflow will
# spin up, creating an independent announcement for each one. However, GitHub
will hard limit this to 3 tags per commit, as it will assume more tags are a
# mistake.
#
# If there's a prerelease-style suffix to the version, then the release(s)
# will be marked as a prerelease.
on:
workflow_dispatch:
inputs:
tag:
description: Release Tag
required: true
default: dry-run
type: string
jobs:
# Run 'cargo dist plan' (or host) to determine what tasks we need to do
plan:
runs-on: "ubuntu-20.04"
outputs:
val: ${{ steps.plan.outputs.manifest }}
tag: ${{ (inputs.tag != 'dry-run' && inputs.tag) || '' }}
tag-flag: ${{ inputs.tag && inputs.tag != 'dry-run' && format('--tag={0}', inputs.tag) || '' }}
publishing: ${{ inputs.tag && inputs.tag != 'dry-run' }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Install cargo-dist
# we specify bash to get pipefail; it guards against the `curl` command
# failing. Otherwise, `sh` won't catch that `curl` returned non-zero.
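# (GitHub runs `shell: bash` steps with `bash -eo pipefail`, while the default
# shell only sets `-e`, so a failing download piped into `sh` would go unnoticed)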
shell: bash
run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.22.1/cargo-dist-installer.sh | sh"
- name: Cache cargo-dist
uses: actions/upload-artifact@v4
with:
name: cargo-dist-cache
path: ~/.cargo/bin/cargo-dist
# sure would be cool if github gave us proper conditionals...
# so here's a doubly-nested ternary-via-truthiness to try to provide the best possible
# functionality based on whether this is a pull_request, and whether it's from a fork.
# (PRs run on the *source* but secrets are usually on the *target* -- that's *good*
# but also really annoying to build CI around when it needs secrets to work right.)
- id: plan
run: |
cargo dist ${{ (inputs.tag && inputs.tag != 'dry-run' && format('host --steps=create --tag={0}', inputs.tag)) || 'plan' }} --output-format=json > plan-dist-manifest.json
echo "cargo dist ran successfully"
cat plan-dist-manifest.json
echo "manifest=$(jq -c "." plan-dist-manifest.json)" >> "$GITHUB_OUTPUT"
- name: "Upload dist-manifest.json"
uses: actions/upload-artifact@v4
with:
name: artifacts-plan-dist-manifest
path: plan-dist-manifest.json
custom-build-binaries:
needs:
- plan
if: ${{ needs.plan.outputs.publishing == 'true' || fromJson(needs.plan.outputs.val).ci.github.pr_run_mode == 'upload' || inputs.tag == 'dry-run' }}
uses: ./.github/workflows/build-binaries.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
custom-build-docker:
needs:
- plan
if: ${{ needs.plan.outputs.publishing == 'true' || fromJson(needs.plan.outputs.val).ci.github.pr_run_mode == 'upload' || inputs.tag == 'dry-run' }}
uses: ./.github/workflows/build-docker.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
permissions:
"contents": "read"
"packages": "write"
# Build and package all the platform-agnostic(ish) things
build-global-artifacts:
needs:
- plan
- custom-build-binaries
- custom-build-docker
runs-on: "ubuntu-20.04"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BUILD_MANIFEST_NAME: target/distrib/global-dist-manifest.json
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Install cached cargo-dist
uses: actions/download-artifact@v4
with:
name: cargo-dist-cache
path: ~/.cargo/bin/
- run: chmod +x ~/.cargo/bin/cargo-dist
# Get all the local artifacts for the global tasks to use (for e.g. checksums)
- name: Fetch local artifacts
uses: actions/download-artifact@v4
with:
pattern: artifacts-*
path: target/distrib/
merge-multiple: true
- id: cargo-dist
shell: bash
run: |
cargo dist build ${{ needs.plan.outputs.tag-flag }} --output-format=json "--artifacts=global" > dist-manifest.json
echo "cargo dist ran successfully"
# Parse out what we just built and upload it to scratch storage
echo "paths<<EOF" >> "$GITHUB_OUTPUT"
jq --raw-output ".upload_files[]" dist-manifest.json >> "$GITHUB_OUTPUT"
echo "EOF" >> "$GITHUB_OUTPUT"
cp dist-manifest.json "$BUILD_MANIFEST_NAME"
- name: "Upload artifacts"
uses: actions/upload-artifact@v4
with:
name: artifacts-build-global
path: |
${{ steps.cargo-dist.outputs.paths }}
${{ env.BUILD_MANIFEST_NAME }}
# Determines if we should publish/announce
host:
needs:
- plan
- custom-build-binaries
- custom-build-docker
- build-global-artifacts
# Only run if we're "publishing", and only if local and global didn't fail (skipped is fine)
if: ${{ always() && needs.plan.outputs.publishing == 'true' && (needs.build-global-artifacts.result == 'skipped' || needs.build-global-artifacts.result == 'success') && (needs.custom-build-binaries.result == 'skipped' || needs.custom-build-binaries.result == 'success') && (needs.custom-build-docker.result == 'skipped' || needs.custom-build-docker.result == 'success') }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
runs-on: "ubuntu-20.04"
outputs:
val: ${{ steps.host.outputs.manifest }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Install cached cargo-dist
uses: actions/download-artifact@v4
with:
name: cargo-dist-cache
path: ~/.cargo/bin/
- run: chmod +x ~/.cargo/bin/cargo-dist
# Fetch artifacts from scratch-storage
- name: Fetch artifacts
uses: actions/download-artifact@v4
with:
pattern: artifacts-*
path: target/distrib/
merge-multiple: true
# This is a harmless no-op for GitHub Releases; hosting for that happens in "announce"
- id: host
shell: bash
run: |
cargo dist host ${{ needs.plan.outputs.tag-flag }} --steps=upload --steps=release --output-format=json > dist-manifest.json
echo "artifacts uploaded and released successfully"
cat dist-manifest.json
echo "manifest=$(jq -c "." dist-manifest.json)" >> "$GITHUB_OUTPUT"
- name: "Upload dist-manifest.json"
uses: actions/upload-artifact@v4
with:
# Overwrite the previous copy
name: artifacts-dist-manifest
path: dist-manifest.json
custom-publish-pypi:
needs:
- plan
- host
if: ${{ !fromJson(needs.plan.outputs.val).announcement_is_prerelease || fromJson(needs.plan.outputs.val).publish_prereleases }}
uses: ./.github/workflows/publish-pypi.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
# publish jobs get escalated permissions
permissions:
"id-token": "write"
"packages": "write"
custom-publish-wasm:
needs:
- plan
- host
if: ${{ !fromJson(needs.plan.outputs.val).announcement_is_prerelease || fromJson(needs.plan.outputs.val).publish_prereleases }}
uses: ./.github/workflows/publish-wasm.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
# publish jobs get escalated permissions
permissions:
"contents": "read"
"id-token": "write"
"packages": "write"
# Create a GitHub Release while uploading all files to it
announce:
needs:
- plan
- host
- custom-publish-pypi
- custom-publish-wasm
# use "always() && ..." to allow us to wait for all publish jobs while
# still allowing individual publish jobs to skip themselves (for prereleases).
# "host" however must run to completion, no skipping allowed!
if: ${{ always() && needs.host.result == 'success' && (needs.custom-publish-pypi.result == 'skipped' || needs.custom-publish-pypi.result == 'success') && (needs.custom-publish-wasm.result == 'skipped' || needs.custom-publish-wasm.result == 'success') }}
runs-on: "ubuntu-20.04"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
# Create a GitHub Release while uploading all files to it
- name: "Download GitHub Artifacts"
uses: actions/download-artifact@v4
with:
pattern: artifacts-*
path: artifacts
merge-multiple: true
- name: Cleanup
run: |
# Remove the granular manifests
rm -f artifacts/*-dist-manifest.json
- name: Create GitHub Release
env:
PRERELEASE_FLAG: "${{ fromJson(needs.host.outputs.val).announcement_is_prerelease && '--prerelease' || '' }}"
ANNOUNCEMENT_TITLE: "${{ fromJson(needs.host.outputs.val).announcement_title }}"
ANNOUNCEMENT_BODY: "${{ fromJson(needs.host.outputs.val).announcement_github_body }}"
RELEASE_COMMIT: "${{ github.sha }}"
run: |
# Write and read notes from a file to avoid quoting breaking things
echo "$ANNOUNCEMENT_BODY" > $RUNNER_TEMP/notes.txt
gh release create "${{ needs.plan.outputs.tag }}" --target "$RELEASE_COMMIT" $PRERELEASE_FLAG --title "$ANNOUNCEMENT_TITLE" --notes-file "$RUNNER_TEMP/notes.txt" artifacts/*
custom-notify-dependents:
needs:
- plan
- announce
uses: ./.github/workflows/notify-dependents.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
custom-publish-docs:
needs:
- plan
- announce
uses: ./.github/workflows/publish-docs.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit
custom-publish-playground:
needs:
- plan
- announce
uses: ./.github/workflows/publish-playground.yml
with:
plan: ${{ needs.plan.outputs.val }}
secrets: inherit

View File

@@ -1,80 +0,0 @@
name: Sync typeshed
on:
workflow_dispatch:
schedule:
# Run on the 1st and the 15th of every month:
- cron: "0 0 1,15 * *"
env:
FORCE_COLOR: 1
GH_TOKEN: ${{ github.token }}
jobs:
sync:
name: Sync typeshed
runs-on: ubuntu-latest
timeout-minutes: 20
# Don't run the cron job on forks:
if: ${{ github.repository == 'astral-sh/ruff' || github.event_name != 'schedule' }}
permissions:
contents: write
pull-requests: write
steps:
- uses: actions/checkout@v4
name: Checkout Ruff
with:
path: ruff
- uses: actions/checkout@v4
name: Checkout typeshed
with:
repository: python/typeshed
path: typeshed
- name: Setup git
run: |
git config --global user.name typeshedbot
git config --global user.email '<>'
- name: Sync typeshed
id: sync
run: |
rm -rf ruff/crates/red_knot_vendored/vendor/typeshed
mkdir ruff/crates/red_knot_vendored/vendor/typeshed
cp typeshed/README.md ruff/crates/red_knot_vendored/vendor/typeshed
cp typeshed/LICENSE ruff/crates/red_knot_vendored/vendor/typeshed
cp -r typeshed/stdlib ruff/crates/red_knot_vendored/vendor/typeshed/stdlib
rm -rf ruff/crates/red_knot_vendored/vendor/typeshed/stdlib/@tests
git -C typeshed rev-parse HEAD > ruff/crates/red_knot_vendored/vendor/typeshed/source_commit.txt
- name: Commit the changes
id: commit
if: ${{ steps.sync.outcome == 'success' }}
run: |
cd ruff
git checkout -b typeshedbot/sync-typeshed
git add .
git diff --staged --quiet || git commit -m "Sync typeshed. Source commit: https://github.com/python/typeshed/commit/$(git -C ../typeshed rev-parse HEAD)"
- name: Create a PR
if: ${{ steps.sync.outcome == 'success' && steps.commit.outcome == 'success' }}
run: |
cd ruff
git push --force origin typeshedbot/sync-typeshed
gh pr list --repo $GITHUB_REPOSITORY --head typeshedbot/sync-typeshed --json id --jq length | grep 1 && exit 0 # exit if there is existing pr
gh pr create --title "Sync vendored typeshed stubs" --body "Close and reopen this PR to trigger CI" --label "internal"
create-issue-on-failure:
name: Create an issue if the typeshed sync failed
runs-on: ubuntu-latest
needs: [sync]
if: ${{ github.repository == 'astral-sh/ruff' && always() && github.event_name == 'schedule' && needs.sync.result == 'failure' }}
permissions:
issues: write
steps:
- uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
await github.rest.issues.create({
owner: "astral-sh",
repo: "ruff",
title: `Automated typeshed sync failed on ${new Date().toDateString()}`,
body: "Runs are listed here: https://github.com/astral-sh/ruff/actions/workflows/sync_typeshed.yaml",
})

9 .gitignore vendored
View File

@@ -21,14 +21,6 @@ flamegraph.svg
# `CARGO_TARGET_DIR=target-llvm-lines RUSTFLAGS="-Csymbol-mangling-version=v0" cargo llvm-lines -p ruff --lib`
/target*
# samply profiles
profile.json
# tracing-flame traces
tracing.folded
tracing-flamechart.svg
tracing-flamegraph.svg
###
# Rust.gitignore
###
@@ -100,7 +92,6 @@ coverage.xml
.hypothesis/
.pytest_cache/
cover/
repos/
# Translations
*.mo

View File

@@ -14,10 +14,7 @@ MD041: false
# MD013/line-length
MD013: false
# MD014/commands-show-output
MD014: false
# MD024/no-duplicate-heading
MD024:
# Allow when nested under different parents e.g. CHANGELOG.md
siblings_only: true
allow_different_nesting: true

View File

@@ -2,13 +2,9 @@ fail_fast: true
exclude: |
(?x)^(
crates/red_knot_vendored/vendor/.*|
crates/red_knot_workspace/resources/.*|
crates/ruff_linter/resources/.*|
crates/ruff_linter/src/rules/.*/snapshots/.*|
crates/ruff_notebook/resources/.*|
crates/ruff_server/resources/.*|
crates/ruff/resources/.*|
crates/ruff_cli/resources/.*|
crates/ruff_python_formatter/resources/.*|
crates/ruff_python_formatter/tests/snapshots/.*|
crates/ruff_python_resolver/resources/.*|
@@ -17,7 +13,7 @@ exclude: |
repos:
- repo: https://github.com/abravalheri/validate-pyproject
rev: v0.20.2
rev: v0.15
hooks:
- id: validate-pyproject
@@ -28,7 +24,6 @@ repos:
additional_dependencies:
- mdformat-mkdocs
- mdformat-admon
- mdformat-footnote
exclude: |
(?x)^(
docs/formatter/black\.md
@@ -36,7 +31,7 @@ repos:
)$
- repo: https://github.com/igorshubovych/markdownlint-cli
rev: v0.42.0
rev: v0.37.0
hooks:
- id: markdownlint-fix
exclude: |
@@ -46,7 +41,7 @@ repos:
)$
- repo: https://github.com/crate-ci/typos
rev: v1.25.0
rev: v1.16.22
hooks:
- id: typos
@@ -60,17 +55,22 @@ repos:
pass_filenames: false # This makes it a lot faster
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.6.9
rev: v0.1.4
hooks:
- id: ruff-format
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
types_or: [python, pyi]
require_serial: true
exclude: |
(?x)^(
crates/ruff_linter/resources/.*|
crates/ruff_python_formatter/resources/.*
)$
# Prettier
- repo: https://github.com/rbubley/mirrors-prettier
rev: v3.3.3
- repo: https://github.com/pre-commit/mirrors-prettier
rev: v3.0.3
hooks:
- id: prettier
types: [yaml]

View File

@@ -1,2 +0,0 @@
# Auto-generated by `cargo-dist`.
.github/workflows/release.yml

View File

@@ -1,5 +0,0 @@
{
"recommendations": [
"rust-lang.rust-analyzer"
]
}

View File

@@ -1,6 +0,0 @@
{
"rust-analyzer.check.extraArgs": [
"--all-features"
],
"rust-analyzer.check.command": "clippy",
}

View File

@@ -1,138 +1,5 @@
# Breaking Changes
## 0.6.0
- Detect imports in `src` layouts by default for `isort` rules ([#12848](https://github.com/astral-sh/ruff/pull/12848))
- The pytest rules `PT001` and `PT023` now default to omitting the decorator parentheses when there are no arguments ([#12838](https://github.com/astral-sh/ruff/pull/12838)).
- Lint and format Jupyter Notebook by default ([#12878](https://github.com/astral-sh/ruff/pull/12878)).
You can disable specific rules for notebooks using [`per-file-ignores`](https://docs.astral.sh/ruff/settings/#lint_per-file-ignores):
```toml
[tool.ruff.lint.per-file-ignores]
"*.ipynb" = ["E501"] # disable line-too-long in notebooks
```
If you'd prefer to either only lint or only format Jupyter Notebook files, you can use the
section-specific `exclude` option to do so. For example, the following would only lint Jupyter
Notebook files and not format them:
```toml
[tool.ruff.format]
exclude = ["*.ipynb"]
```
And, conversely, the following would only format Jupyter Notebook files and not lint them:
```toml
[tool.ruff.lint]
exclude = ["*.ipynb"]
```
You can completely disable Jupyter Notebook support by updating the [`extend-exclude`](https://docs.astral.sh/ruff/settings/#extend-exclude) setting:
```toml
[tool.ruff]
extend-exclude = ["*.ipynb"]
```
## 0.5.0
- Follow the XDG specification to discover user-level configurations on macOS (same as on other Unix platforms)
- Selecting `ALL` now excludes deprecated rules
- The released archives now include an extra level of nesting, which can be removed with `--strip-components=1` when untarring (see the example after this list).
- The release artifact's file name no longer includes the version tag. This enables users to install via `/latest` URLs on GitHub.
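For example, a minimal sketch of downloading and extracting a binary via a `/latest` URL (the asset name here is illustrative; substitute the archive for your platform):

```shell
curl -LsSf https://github.com/astral-sh/ruff/releases/latest/download/ruff-x86_64-unknown-linux-gnu.tar.gz \
  | tar -xzf - --strip-components=1
```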
## 0.3.0
### Ruff 2024.2 style
The formatter now formats code according to the Ruff 2024.2 style guide. Read the [changelog](./CHANGELOG.md#030) for a detailed list of stabilized style changes.
### `isort`: Use one blank line after imports in typing stub files ([#9971](https://github.com/astral-sh/ruff/pull/9971))
Previously, Ruff used one or two blank lines (or the number configured by `isort.lines-after-imports`) after imports in typing stub files (`.pyi` files).
The [typing style guide for stubs](https://typing.readthedocs.io/en/latest/source/stubs.html#style-guide) recommends using at most 1 blank line for grouping.
As of this release, `isort` now always uses one blank line after imports in stub files, the same as the formatter.
### `build` is no longer excluded by default ([#10093](https://github.com/astral-sh/ruff/pull/10093))
Ruff maintains a list of directories and files that are excluded by default. This list now consists of the following patterns:
- `.bzr`
- `.direnv`
- `.eggs`
- `.git`
- `.git-rewrite`
- `.hg`
- `.ipynb_checkpoints`
- `.mypy_cache`
- `.nox`
- `.pants.d`
- `.pyenv`
- `.pytest_cache`
- `.pytype`
- `.ruff_cache`
- `.svn`
- `.tox`
- `.venv`
- `.vscode`
- `__pypackages__`
- `_build`
- `buck-out`
- `dist`
- `node_modules`
- `site-packages`
- `venv`
Previously, the `build` directory was included in this list. However, `build` is a fairly common directory
name, and excluding it by default caused confusion. Ruff no longer excludes `build` unless it is excluded by a
`.gitignore` file or listed in `extend-exclude`.
### `--format` is no longer a valid `rule` or `linter` command option
Previously, `ruff rule` and `ruff linter` accepted the `--format <FORMAT>` option as an alias for `--output-format`. Ruff no longer
supports this alias. Please use `ruff rule --output-format <FORMAT>` and `ruff linter --output-format <FORMAT>` instead.
## 0.1.9
### `site-packages` is now excluded by default ([#5513](https://github.com/astral-sh/ruff/pull/5513))
Ruff maintains a list of default exclusions, which now consists of the following patterns:
- `.bzr`
- `.direnv`
- `.eggs`
- `.git-rewrite`
- `.git`
- `.hg`
- `.ipynb_checkpoints`
- `.mypy_cache`
- `.nox`
- `.pants.d`
- `.pyenv`
- `.pytest_cache`
- `.pytype`
- `.ruff_cache`
- `.svn`
- `.tox`
- `.venv`
- `.vscode`
- `__pypackages__`
- `_build`
- `buck-out`
- `build`
- `dist`
- `node_modules`
- `site-packages`
- `venv`
Previously, the `site-packages` directory was not excluded by default. While `site-packages` tends
to be excluded anyway by virtue of the `.venv` exclusion, this may not be the case when using Ruff
from VS Code outside a virtual environment.
## 0.1.0
### The deprecated `format` setting has been removed

File diff suppressed because it is too large

View File

@@ -2,20 +2,54 @@
Welcome! We're happy to have you here. Thank you in advance for your contribution to Ruff.
- [The Basics](#the-basics)
- [Prerequisites](#prerequisites)
- [Development](#development)
- [Project Structure](#project-structure)
- [Example: Adding a new lint rule](#example-adding-a-new-lint-rule)
- [Rule naming convention](#rule-naming-convention)
- [Rule testing: fixtures and snapshots](#rule-testing-fixtures-and-snapshots)
- [Example: Adding a new configuration option](#example-adding-a-new-configuration-option)
- [MkDocs](#mkdocs)
- [Release Process](#release-process)
- [Creating a new release](#creating-a-new-release)
- [Ecosystem CI](#ecosystem-ci)
- [Benchmarking and Profiling](#benchmarking-and-profiling)
- [CPython Benchmark](#cpython-benchmark)
- [Microbenchmarks](#microbenchmarks)
- [Benchmark-driven Development](#benchmark-driven-development)
- [PR Summary](#pr-summary)
- [Tips](#tips)
- [Profiling Projects](#profiling-projects)
- [Linux](#linux)
- [Mac](#mac)
- [`cargo dev`](#cargo-dev)
- [Subsystems](#subsystems)
- [Compilation Pipeline](#compilation-pipeline)
## The Basics
Ruff welcomes contributions in the form of pull requests.
Ruff welcomes contributions in the form of Pull Requests.
For small changes (e.g., bug fixes), feel free to submit a PR.
For larger changes (e.g., new lint rules, new functionality, new configuration options), consider
creating an [**issue**](https://github.com/astral-sh/ruff/issues) outlining your proposed change.
You can also join us on [Discord](https://discord.com/invite/astral-sh) to discuss your idea with the
You can also join us on [**Discord**](https://discord.gg/c9MhzV8aU5) to discuss your idea with the
community. We've labeled [beginner-friendly tasks](https://github.com/astral-sh/ruff/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
in the issue tracker, along with [bugs](https://github.com/astral-sh/ruff/issues?q=is%3Aissue+is%3Aopen+label%3Abug)
and [improvements](https://github.com/astral-sh/ruff/issues?q=is%3Aissue+is%3Aopen+label%3Aaccepted)
that are ready for contributions.
If you're looking for a place to start, we recommend implementing a new lint rule (see:
[_Adding a new lint rule_](#example-adding-a-new-lint-rule), which will allow you to learn from and
pattern-match against the examples in the existing codebase. Many lint rules are inspired by
existing Python plugins, which can be used as a reference implementation.
As a concrete example: consider taking on one of the rules from the [`flake8-pyi`](https://github.com/astral-sh/ruff/issues/848)
plugin, and looking to the originating [Python source](https://github.com/PyCQA/flake8-pyi) for
guidance.
If you have suggestions on how we might improve the contributing documentation, [let us know](https://github.com/astral-sh/ruff/discussions/5693)!
### Prerequisites
@@ -29,33 +63,25 @@ You'll also need [Insta](https://insta.rs/docs/) to update snapshot tests:
cargo install cargo-insta
```
You'll need [uv](https://docs.astral.sh/uv/getting-started/installation/) (or `pipx` and `pip`) to
run Python utility commands.
and pre-commit to run some validation checks:
```shell
pipx install pre-commit # or `pip install pre-commit` if you have a virtualenv
```
You can optionally install pre-commit hooks to automatically run the validation checks
when making a commit:
```shell
uv tool install pre-commit
pre-commit install
```
We recommend [nextest](https://nexte.st/) to run Ruff's test suite (via `cargo nextest run`),
though it's not strictly necessary:
```shell
cargo install cargo-nextest --locked
```
Throughout this guide, any usages of `cargo test` can be replaced with `cargo nextest run`,
if you choose to install `nextest`.
### Development
After cloning the repository, run Ruff locally from the repository root with:
```shell
cargo run -p ruff -- check /path/to/file.py --no-cache
cargo run -p ruff_cli -- check /path/to/file.py --no-cache
```
Prior to opening a pull request, ensure that your code has been auto-formatted,
@@ -64,14 +90,12 @@ and that it passes both the lint and test validation checks:
```shell
cargo clippy --workspace --all-targets --all-features -- -D warnings # Rust linting
RUFF_UPDATE_SCHEMA=1 cargo test # Rust testing and updating ruff.schema.json
uvx pre-commit run --all-files --show-diff-on-failure # Rust and Python formatting, Markdown and Python linting, etc.
pre-commit run --all-files --show-diff-on-failure # Rust and Python formatting, Markdown and Python linting, etc.
```
These checks will run on GitHub Actions when you open your pull request, but running them locally
These checks will run on GitHub Actions when you open your Pull Request, but running them locally
will save you time and expedite the merge process.
If you're using VS Code, you can also install the recommended [rust-analyzer](https://marketplace.visualstudio.com/items?itemName=rust-lang.rust-analyzer) extension to get these checks while editing.
Note that many code changes also require updating the snapshot tests, which is done interactively
after running `cargo test` like so:
@@ -79,14 +103,7 @@ after running `cargo test` like so:
cargo insta review
```
If your pull request relates to a specific lint rule, include the category and rule code in the
title, as in the following examples:
- \[`flake8-bugbear`\] Avoid false positive for usage after `continue` (`B031`)
- \[`flake8-simplify`\] Detect implicit `else` cases in `needless-bool` (`SIM103`)
- \[`pycodestyle`\] Implement `redundant-backslash` (`E502`)
Your pull request will be reviewed by a maintainer, which may involve a few rounds of iteration
Your Pull Request will be reviewed by a maintainer, which may involve a few rounds of iteration
prior to merging.
### Project Structure
@@ -94,8 +111,8 @@ prior to merging.
Ruff is structured as a monorepo with a [flat crate structure](https://matklad.github.io/2021/08/22/large-rust-workspaces.html),
such that all crates are contained in a flat `crates` directory.
The vast majority of the code, including all lint rules, lives in the `ruff_linter` crate (located
at `crates/ruff_linter`). As a contributor, that's the crate that'll be most relevant to you.
The vast majority of the code, including all lint rules, lives in the `ruff` crate (located at
`crates/ruff_linter`). As a contributor, that's the crate that'll be most relevant to you.
At the time of writing, the repository includes the following crates:
@@ -103,7 +120,7 @@ At the time of writing, the repository includes the following crates:
If you're working on a rule, this is the crate for you.
- `crates/ruff_benchmark`: binary crate for running micro-benchmarks.
- `crates/ruff_cache`: library crate for caching lint results.
- `crates/ruff`: binary crate containing Ruff's command-line interface.
- `crates/ruff_cli`: binary crate containing Ruff's command-line interface.
- `crates/ruff_dev`: binary crate containing utilities used in the development of Ruff itself (e.g.,
`cargo dev generate-all`), see the [`cargo dev`](#cargo-dev) section below.
- `crates/ruff_diagnostics`: library crate for the rule-independent abstractions in the lint
@@ -168,14 +185,11 @@ and calling out to lint rule analyzer functions as it goes.
If you need to inspect the AST, you can run `cargo dev print-ast` with a Python file. Grep
for the `Diagnostic::new` invocations to understand how other, similar rules are implemented.
Once you're satisfied with your code, add tests for your rule
(see: [rule testing](#rule-testing-fixtures-and-snapshots)), and regenerate the documentation and
associated assets (like our JSON Schema) with `cargo dev generate-all`.
Once you're satisfied with your code, add tests for your rule. See [rule testing](#rule-testing-fixtures-and-snapshots)
for more details.
Finally, submit a pull request, and include the category, rule name, and rule code in the title, as
in:
> \[`pycodestyle`\] Implement `redundant-backslash` (`E502`)
Finally, regenerate the documentation and other generated assets (like our JSON Schema) with:
`cargo dev generate-all`.
#### Rule naming convention
@@ -217,7 +231,7 @@ Once you've completed the code for the rule itself, you can define tests with th
For example, if you're adding a new rule named `E402`, you would run:
```shell
cargo run -p ruff -- check crates/ruff_linter/resources/test/fixtures/pycodestyle/E402.py --no-cache --preview --select E402
cargo run -p ruff_cli -- check crates/ruff_linter/resources/test/fixtures/pycodestyle/E402.py --no-cache --select E402
```
**Note:** Only a subset of rules are enabled by default. When testing a new rule, ensure that
@@ -238,7 +252,7 @@ Once you've completed the code for the rule itself, you can define tests with th
Ruff's user-facing settings live in a few different places.
First, the command-line options are defined via the `Args` struct in `crates/ruff/src/args.rs`.
First, the command-line options are defined via the `Args` struct in `crates/ruff_cli/src/args.rs`.
Second, the `pyproject.toml` options are defined in `crates/ruff_workspace/src/options.rs` (via the
`Options` struct), `crates/ruff_workspace/src/configuration.rs` (via the `Configuration` struct),
@@ -249,7 +263,7 @@ These represent, respectively: the schema used to parse the `pyproject.toml` fil
intermediate representation; and the final, internal representation used to power Ruff.
To add a new configuration option, you'll likely want to modify these latter few files (along with
`args.rs`, if appropriate). If you want to pattern-match against an existing example, grep for
`arg.rs`, if appropriate). If you want to pattern-match against an existing example, grep for
`dummy_variable_rgx`, which defines a regular expression to match against acceptable unused
variables (e.g., `_`).
@@ -265,24 +279,30 @@ To preview any changes to the documentation locally:
1. Install the [Rust toolchain](https://www.rust-lang.org/tools/install).
1. Install MkDocs and Material for MkDocs with:
```shell
pip install -r docs/requirements.txt
```
1. Generate the MkDocs site with:
```shell
uv run --no-project --isolated --with-requirements docs/requirements.txt scripts/generate_mkdocs.py
python scripts/generate_mkdocs.py
```
1. Run the development server with:
```shell
# For contributors.
uvx --with-requirements docs/requirements.txt -- mkdocs serve -f mkdocs.public.yml
mkdocs serve -f mkdocs.generated.yml
# For members of the Astral org, which has access to MkDocs Insiders via sponsorship.
uvx --with-requirements docs/requirements-insiders.txt -- mkdocs serve -f mkdocs.insiders.yml
mkdocs serve -f mkdocs.insiders.yml
```
The documentation should then be available locally at
[http://127.0.0.1:8000/ruff/](http://127.0.0.1:8000/ruff/).
[http://127.0.0.1:8000/docs/](http://127.0.0.1:8000/docs/).
## Release Process
@@ -295,64 +315,37 @@ even patch releases may contain [non-backwards-compatible changes](https://semve
### Creating a new release
1. Install `uv`: `curl -LsSf https://astral.sh/uv/install.sh | sh`
We use an experimental in-house tool for managing releases.
1. Run `./scripts/release.sh`; this command will:
- Generate a temporary virtual environment with `rooster`
1. Install `rooster`: `pip install git+https://github.com/zanieb/rooster@main`
1. Run `rooster release`; this command will:
- Generate a changelog entry in `CHANGELOG.md`
- Update versions in `pyproject.toml` and `Cargo.toml`
- Update references to versions in the `README.md` and documentation
- Display contributors for the release
1. The changelog should then be editorialized for consistency
- Often labels will be missing from pull requests, so they will need to be manually organized into the proper section
- Changes should be edited to be user-facing descriptions, avoiding internal details
1. Highlight any breaking changes in `BREAKING_CHANGES.md`
1. Run `cargo check`. This should update the lock file with new versions.
1. Create a pull request with the changelog and version updates
1. Merge the PR
1. Run the [release workflow](https://github.com/astral-sh/ruff/actions/workflows/release.yml) with:
- The new version number (without starting `v`)
1. Run the release workflow with the version number (without starting `v`) as input. Make sure
main has your merged PR as last commit
1. The release workflow will do the following:
1. Build all the assets. If this fails (even though we tested in step 4), we haven't tagged or
uploaded anything, so you can restart after pushing a fix. If you just need to rerun the build,
make sure you're [re-running all the failed
jobs](https://docs.github.com/en/actions/managing-workflow-runs/re-running-workflows-and-jobs#re-running-failed-jobs-in-a-workflow) and not just a single failed job.
uploaded anything, you can restart after pushing a fix.
1. Upload to PyPI.
1. Create and push the Git tag (as extracted from `pyproject.toml`). We create the Git tag only
after building the wheels and uploading to PyPI, since we can't delete or modify the tag ([#4468](https://github.com/astral-sh/ruff/issues/4468)).
after building the wheels and uploading to PyPI, since we can't delete or modify the tag ([#4468](https://github.com/charliermarsh/ruff/issues/4468)).
1. Attach artifacts to draft GitHub release
1. Trigger downstream repositories. This can fail non-catastrophically, as we can run any
downstream jobs manually if needed.
1. Verify the GitHub release:
1. The Changelog should match the content of `CHANGELOG.md`
1. Append the contributors from the `scripts/release.sh` script
1. If needed, [update the schemastore](https://github.com/astral-sh/ruff/blob/main/scripts/update_schemastore.py).
1. One can determine if an update is needed when
`git diff old-version-tag new-version-tag -- ruff.schema.json` returns a non-empty diff.
1. Once run successfully, you should follow the link in the output to create a PR.
1. If needed, update the [`ruff-lsp`](https://github.com/astral-sh/ruff-lsp) and
[`ruff-vscode`](https://github.com/astral-sh/ruff-vscode) repositories and follow
the release instructions in those repositories. `ruff-lsp` should always be updated
before `ruff-vscode`.
This step is generally not required for a patch release, but should always be done
for a minor release.
1. Publish the GitHub release
1. Open the draft release in the GitHub release section
1. Copy the changelog for the release into the GitHub release
- See previous releases for formatting of section headers
1. Generate the contributor list with `rooster contributors` and add to the release notes
1. If needed, [update the schemastore](https://github.com/charliermarsh/ruff/blob/main/scripts/update_schemastore.py)
1. If needed, update the `ruff-lsp` and `ruff-vscode` repositories.
## Ecosystem CI
@@ -360,8 +353,9 @@ GitHub Actions will run your changes against a number of real-world projects fro
report on any linter or formatter differences. You can also run those checks locally via:
```shell
uvx --from ./python/ruff-ecosystem ruff-ecosystem check ruff "./target/debug/ruff"
uvx --from ./python/ruff-ecosystem ruff-ecosystem format ruff "./target/debug/ruff"
pip install -e ./python/ruff-ecosystem
ruff-ecosystem check ruff "./target/debug/ruff"
ruff-ecosystem format ruff "./target/debug/ruff"
```
See the [ruff-ecosystem package](https://github.com/astral-sh/ruff/tree/main/python/ruff-ecosystem) for more details.
@@ -371,14 +365,9 @@ See the [ruff-ecosystem package](https://github.com/astral-sh/ruff/tree/main/pyt
We have several ways of benchmarking and profiling Ruff:
- Our main performance benchmark comparing Ruff with other tools on the CPython codebase
- Microbenchmarks which run the linter or the formatter on individual files. These run on pull requests.
- Microbenchmarks which the linter or the formatter on individual files. There run on pull requests.
- Profiling the linter on either the microbenchmarks or entire projects
> **Note**
> When running benchmarks, ensure that your CPU is otherwise idle (e.g., close any background
> applications, like web browsers). You may also want to switch your CPU to a "performance"
> mode, if it exists, especially when benchmarking short-lived processes.
### CPython Benchmark
First, clone [CPython](https://github.com/python/cpython). It's a large and diverse Python codebase,
@@ -388,18 +377,12 @@ which makes it a good target for benchmarking.
git clone --branch 3.10 https://github.com/python/cpython.git crates/ruff_linter/resources/test/cpython
```
Install `hyperfine`:
```shell
cargo install hyperfine
```
To benchmark the release build:
```shell
cargo build --release && hyperfine --warmup 10 \
"./target/release/ruff check ./crates/ruff_linter/resources/test/cpython/ --no-cache -e" \
"./target/release/ruff check ./crates/ruff_linter/resources/test/cpython/ -e"
"./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache -e" \
"./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ -e"
Benchmark 1: ./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache
Time (mean ± σ): 293.8 ms ± 3.2 ms [User: 2384.6 ms, System: 90.3 ms]
@@ -418,7 +401,7 @@ To benchmark against the ecosystem's existing tools:
```shell
hyperfine --ignore-failure --warmup 5 \
"./target/release/ruff check ./crates/ruff_linter/resources/test/cpython/ --no-cache" \
"./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache" \
"pyflakes crates/ruff_linter/resources/test/cpython" \
"autoflake --recursive --expand-star-imports --remove-all-unused-imports --remove-unused-variables --remove-duplicate-keys resources/test/cpython" \
"pycodestyle crates/ruff_linter/resources/test/cpython" \
@@ -464,7 +447,7 @@ To benchmark a subset of rules, e.g. `LineTooLong` and `DocLineTooLong`:
```shell
cargo build --release && hyperfine --warmup 10 \
"./target/release/ruff check ./crates/ruff_linter/resources/test/cpython/ --no-cache -e --select W505,E501"
"./target/release/ruff ./crates/ruff_linter/resources/test/cpython/ --no-cache -e --select W505,E501"
```
You can run `poetry install` from `./scripts/benchmarks` to create a working environment for the
@@ -521,8 +504,6 @@ You can run the benchmarks with
cargo benchmark
```
`cargo benchmark` is an alias for `cargo bench -p ruff_benchmark --bench linter --bench formatter --`
#### Benchmark-driven Development
Ruff uses [Criterion.rs](https://bheisler.github.io/criterion.rs/book/) for benchmarks. You can use
@@ -532,10 +513,10 @@ if the benchmark improved/regressed compared to that baseline.
```shell
# Run once on your "baseline" code
cargo bench -p ruff_benchmark -- --save-baseline=main
cargo benchmark --save-baseline=main
# Then iterate with
cargo bench -p ruff_benchmark -- --baseline=main
cargo benchmark --baseline=main
```
#### PR Summary
@@ -545,10 +526,10 @@ This is useful to illustrate the improvements of a PR.
```shell
# On main
cargo bench -p ruff_benchmark -- --save-baseline=main
cargo benchmark --save-baseline=main
# After applying your changes
cargo bench -p ruff_benchmark -- --save-baseline=pr
cargo benchmark --save-baseline=pr
critcmp main pr
```
@@ -561,10 +542,10 @@ cargo install critcmp
#### Tips
- Use `cargo bench -p ruff_benchmark <filter>` to only run specific benchmarks. For example: `cargo bench -p ruff_benchmark lexer`
to only run the lexer benchmarks.
- Use `cargo bench -p ruff_benchmark -- --quiet` for a more cleaned up output (without statistical relevance)
- Use `cargo bench -p ruff_benchmark -- --quick` to get faster results (more prone to noise)
- Use `cargo benchmark <filter>` to only run specific benchmarks. For example: `cargo benchmark linter/pydantic`
to only run the pydantic tests.
- Use `cargo benchmark --quiet` for a more cleaned up output (without statistical relevance)
- Use `cargo benchmark --quick` to get faster results (more prone to noise)
### Profiling Projects
@@ -575,10 +556,10 @@ examples.
#### Linux
Install `perf` and build `ruff_benchmark` with the `profiling` profile and then run it with perf
Install `perf` and build `ruff_benchmark` with the `release-debug` profile and then run it with perf
```shell
cargo bench -p ruff_benchmark --no-run --profile=profiling && perf record --call-graph dwarf -F 9999 cargo bench -p ruff_benchmark --profile=profiling -- --profile-time=1
cargo bench -p ruff_benchmark --no-run --profile=release-debug && perf record --call-graph dwarf -F 9999 cargo bench -p ruff_benchmark --profile=release-debug -- --profile-time=1
```
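For a quick interactive look at the recording before building a flamegraph, you can inspect the resulting `perf.data` directly (standard `perf` usage, not specific to Ruff):

```shell
perf report --hierarchy
```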
You can also use the `ruff_dev` launcher to run `ruff check` multiple times on a repository to
@@ -586,8 +567,8 @@ gather enough samples for a good flamegraph (change the 999, the sample rate, an
of checks, to your liking)
```shell
cargo build --bin ruff_dev --profile=profiling
perf record -g -F 999 target/profiling/ruff_dev repeat --repeat 30 --exit-zero --no-cache path/to/cpython > /dev/null
cargo build --bin ruff_dev --profile=release-debug
perf record -g -F 999 target/release-debug/ruff_dev repeat --repeat 30 --exit-zero --no-cache path/to/cpython > /dev/null
```
Then convert the recorded profile
@@ -617,7 +598,7 @@ cargo install cargo-instruments
Then run the profiler with
```shell
cargo instruments -t time --bench linter --profile profiling -p ruff_benchmark -- --profile-time=1
cargo instruments -t time --bench linter --profile release-debug -p ruff_benchmark -- --profile-time=1
```
- `-t`: Specifies what to profile. Useful options are `time` to profile the wall time and `alloc`
@@ -631,11 +612,11 @@ Otherwise, follow the instructions from the linux section.
`cargo dev` is a shortcut for `cargo run --package ruff_dev --bin ruff_dev`. You can run some useful
utils with it:
- `cargo dev print-ast <file>`: Print the AST of a python file using Ruff's
[Python parser](https://github.com/astral-sh/ruff/tree/main/crates/ruff_python_parser).
For `if True: pass # comment`, you can see the syntax tree, the byte offsets for start and
stop of each node and also how the `:` token, the comment and whitespace are not represented
anymore:
- `cargo dev print-ast <file>`: Print the AST of a python file using the
[RustPython parser](https://github.com/astral-sh/RustPython-Parser/tree/main/parser) that is
mainly used in Ruff. For `if True: pass # comment`, you can see the syntax tree, the byte offsets
for start and stop of each node and also how the `:` token, the comment and whitespace are not
represented anymore:
```text
[
@@ -808,8 +789,8 @@ To understand Ruff's import categorization system, we first need to define two c
"project root".)
- "Package root": The top-most directory defining the Python package that includes a given Python
file. To find the package root for a given Python file, traverse up its parent directories until
you reach a parent directory that doesn't contain an `__init__.py` file (and isn't in a subtree
marked as a [namespace package](https://docs.astral.sh/ruff/settings/#namespace-packages)); take the directory
you reach a parent directory that doesn't contain an `__init__.py` file (and isn't marked as
a [namespace package](https://docs.astral.sh/ruff/settings/#namespace-packages)); take the directory
just before that, i.e., the first directory in the package.
For example, given:
@@ -898,11 +879,15 @@ There are three ways in which an import can be categorized as "first-party":
package (e.g., `from foo import bar` or `import foo.bar`), they'll be classified as first-party
automatically. This check is as simple as comparing the first segment of the current file's
module path to the first segment of the import.
1. **Source roots**: Ruff supports a [`src`](https://docs.astral.sh/ruff/settings/#src) setting, which
1. **Source roots**: Ruff supports a `[src](https://docs.astral.sh/ruff/settings/#src)` setting, which
sets the directories to scan when identifying first-party imports. The algorithm is
straightforward: given an import, like `import foo`, iterate over the directories enumerated in
the `src` setting and, for each directory, check for the existence of a subdirectory `foo` or a
file `foo.py`.
By default, `src` is set to the project root, along with the `"src"` subdirectory in the project root.
This ensures that Ruff supports both flat and "src" layouts out of the box.
By default, `src` is set to the project root. In the above example, we'd want to set
`src = ["./src"]` to ensure that we locate `./my_project/src/foo` and thus categorize `import foo`
as first-party in `baz.py`. In practice, for this limited example, setting `src = ["./src"]` is
unnecessary, as all imports within `./my_project/src/foo` would be categorized as first-party via
the same-package heuristic; but if your project contains multiple packages, you'll want to set `src`
explicitly.
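In shell terms, that existence check is roughly the following (an illustrative analogy, not the actual implementation):

```shell
# For `import foo` with src = ["src"], Ruff asks: does either of these exist?
test -d src/foo || test -f src/foo.py
```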

2554 Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -4,7 +4,7 @@ resolver = "2"
[workspace.package]
edition = "2021"
rust-version = "1.76"
rust-version = "1.71"
homepage = "https://docs.astral.sh/ruff"
documentation = "https://docs.astral.sh/ruff"
repository = "https://github.com/astral-sh/ruff"
@@ -12,213 +12,51 @@ authors = ["Charlie Marsh <charlie.r.marsh@gmail.com>"]
license = "MIT"
[workspace.dependencies]
ruff = { path = "crates/ruff" }
ruff_cache = { path = "crates/ruff_cache" }
ruff_db = { path = "crates/ruff_db", default-features = false }
ruff_diagnostics = { path = "crates/ruff_diagnostics" }
ruff_formatter = { path = "crates/ruff_formatter" }
ruff_graph = { path = "crates/ruff_graph" }
ruff_index = { path = "crates/ruff_index" }
ruff_linter = { path = "crates/ruff_linter" }
ruff_macros = { path = "crates/ruff_macros" }
ruff_notebook = { path = "crates/ruff_notebook" }
ruff_python_ast = { path = "crates/ruff_python_ast" }
ruff_python_codegen = { path = "crates/ruff_python_codegen" }
ruff_python_formatter = { path = "crates/ruff_python_formatter" }
ruff_python_index = { path = "crates/ruff_python_index" }
ruff_python_literal = { path = "crates/ruff_python_literal" }
ruff_python_parser = { path = "crates/ruff_python_parser" }
ruff_python_semantic = { path = "crates/ruff_python_semantic" }
ruff_python_stdlib = { path = "crates/ruff_python_stdlib" }
ruff_python_trivia = { path = "crates/ruff_python_trivia" }
ruff_server = { path = "crates/ruff_server" }
ruff_source_file = { path = "crates/ruff_source_file" }
ruff_text_size = { path = "crates/ruff_text_size" }
red_knot_vendored = { path = "crates/red_knot_vendored" }
ruff_workspace = { path = "crates/ruff_workspace" }
red_knot_python_semantic = { path = "crates/red_knot_python_semantic" }
red_knot_server = { path = "crates/red_knot_server" }
red_knot_test = { path = "crates/red_knot_test" }
red_knot_workspace = { path = "crates/red_knot_workspace", default-features = false }
aho-corasick = { version = "1.1.3" }
annotate-snippets = { version = "0.9.2", features = ["color"] }
anyhow = { version = "1.0.80" }
assert_fs = { version = "1.1.0" }
argfile = { version = "0.2.0" }
bincode = { version = "1.3.3" }
bitflags = { version = "2.5.0" }
bstr = { version = "1.9.1" }
cachedir = { version = "0.3.1" }
camino = { version = "1.1.7" }
chrono = { version = "0.4.35", default-features = false, features = ["clock"] }
clap = { version = "4.5.3", features = ["derive"] }
clap_complete_command = { version = "0.6.0" }
clearscreen = { version = "3.0.0" }
codspeed-criterion-compat = { version = "2.6.0", default-features = false }
colored = { version = "2.1.0" }
console_error_panic_hook = { version = "0.1.7" }
console_log = { version = "1.0.0" }
countme = { version = "3.0.1" }
compact_str = "0.8.0"
criterion = { version = "0.5.1", default-features = false }
crossbeam = { version = "0.8.4" }
dashmap = { version = "6.0.1" }
drop_bomb = { version = "0.1.5" }
env_logger = { version = "0.11.0" }
etcetera = { version = "0.8.0" }
fern = { version = "0.6.1" }
filetime = { version = "0.2.23" }
anyhow = { version = "1.0.69" }
bitflags = { version = "2.4.1" }
chrono = { version = "0.4.31", default-features = false, features = ["clock"] }
clap = { version = "4.4.7", features = ["derive"] }
colored = { version = "2.0.0" }
filetime = { version = "0.2.20" }
glob = { version = "0.3.1" }
globset = { version = "0.4.14" }
globwalk = { version = "0.9.1" }
hashbrown = { version = "0.15.0", default-features = false, features = [
"raw-entry",
"inline-more",
] }
ignore = { version = "0.4.22" }
imara-diff = { version = "0.1.5" }
imperative = { version = "1.0.4" }
indicatif = { version = "0.17.8" }
indoc = { version = "2.0.4" }
insta = { version = "1.35.1" }
insta-cmd = { version = "0.6.0" }
is-macro = { version = "0.3.5" }
is-wsl = { version = "0.4.0" }
itertools = { version = "0.13.0" }
js-sys = { version = "0.3.69" }
jod-thread = { version = "0.1.2" }
libc = { version = "0.2.153" }
globset = { version = "0.4.10" }
ignore = { version = "0.4.20" }
insta = { version = "1.34.0", feature = ["filters", "glob"] }
is-macro = { version = "0.3.0" }
itertools = { version = "0.11.0" }
libcst = { version = "1.1.0", default-features = false }
log = { version = "0.4.17" }
lsp-server = { version = "0.7.6" }
lsp-types = { git = "https://github.com/astral-sh/lsp-types.git", rev = "3512a9f", features = [
"proposed",
] }
matchit = { version = "0.8.1" }
memchr = { version = "2.7.1" }
mimalloc = { version = "0.1.39" }
natord = { version = "1.0.9" }
notify = { version = "6.1.1" }
once_cell = { version = "1.19.0" }
ordermap = { version = "0.5.0" }
memchr = { version = "2.6.4" }
once_cell = { version = "1.17.1" }
path-absolutize = { version = "3.1.1" }
path-slash = { version = "0.2.1" }
pathdiff = { version = "0.2.1" }
pep440_rs = { version = "0.6.0", features = ["serde"] }
pretty_assertions = "1.3.0"
proc-macro2 = { version = "1.0.79" }
pyproject-toml = { version = "0.9.0" }
quick-junit = { version = "0.5.0" }
proc-macro2 = { version = "1.0.69" }
quote = { version = "1.0.23" }
rand = { version = "0.8.5" }
rayon = { version = "1.10.0" }
regex = { version = "1.10.2" }
rstest = { version = "0.22.0", default-features = false }
rustc-hash = { version = "2.0.0" }
salsa = { git = "https://github.com/salsa-rs/salsa.git", rev = "b14be5c0392f4c55eca60b92e457a35549372382" }
schemars = { version = "0.8.16" }
seahash = { version = "4.1.0" }
serde = { version = "1.0.197", features = ["derive"] }
serde-wasm-bindgen = { version = "0.6.4" }
serde_json = { version = "1.0.113" }
serde_test = { version = "1.0.152" }
serde_with = { version = "3.6.0", default-features = false, features = [
"macros",
] }
rustc-hash = { version = "1.1.0" }
schemars = { version = "0.8.15" }
serde = { version = "1.0.190", features = ["derive"] }
serde_json = { version = "1.0.108" }
shellexpand = { version = "3.0.0" }
similar = { version = "2.4.0", features = ["inline"] }
smallvec = { version = "1.13.2" }
similar = { version = "2.3.0", features = ["inline"] }
smallvec = { version = "1.11.1" }
static_assertions = "1.1.0"
strum = { version = "0.26.0", features = ["strum_macros"] }
strum_macros = { version = "0.26.0" }
syn = { version = "2.0.55" }
tempfile = { version = "3.9.0" }
test-case = { version = "3.3.1" }
thiserror = { version = "1.0.58" }
tikv-jemallocator = { version = "0.6.0" }
toml = { version = "0.8.11" }
strum = { version = "0.25.0", features = ["strum_macros"] }
strum_macros = { version = "0.25.3" }
syn = { version = "2.0.39" }
test-case = { version = "3.2.1" }
thiserror = { version = "1.0.50" }
toml = { version = "0.7.8" }
tracing = { version = "0.1.40" }
tracing-flame = { version = "0.2.0" }
tracing-indicatif = { version = "0.3.6" }
tracing-subscriber = { version = "0.3.18", default-features = false, features = [
"env-filter",
"fmt",
] }
tracing-tree = { version = "0.4.0" }
typed-arena = { version = "2.0.2" }
unic-ucd-category = { version = "0.9" }
tracing-indicatif = { version = "0.3.4" }
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
unicode-ident = { version = "1.0.12" }
unicode_names2 = { version = "1.2.0" }
unicode-width = { version = "0.1.11" }
unicode_names2 = { version = "1.2.2" }
unicode-normalization = { version = "0.1.23" }
ureq = { version = "2.9.6" }
url = { version = "2.5.0" }
uuid = { version = "1.6.1", features = [
"v4",
"fast-rng",
"macro-diagnostics",
"js",
] }
walkdir = { version = "2.3.2" }
wasm-bindgen = { version = "0.2.92" }
wasm-bindgen-test = { version = "0.3.42" }
wild = { version = "2" }
zip = { version = "0.6.6", default-features = false }
[workspace.lints.rust]
unsafe_code = "warn"
unreachable_pub = "warn"
unexpected_cfgs = { level = "warn", check-cfg = [
"cfg(fuzzing)",
"cfg(codspeed)",
] }
[workspace.lints.clippy]
pedantic = { level = "warn", priority = -2 }
# Allowed pedantic lints
char_lit_as_u8 = "allow"
collapsible_else_if = "allow"
collapsible_if = "allow"
implicit_hasher = "allow"
map_unwrap_or = "allow"
match_same_arms = "allow"
missing_errors_doc = "allow"
missing_panics_doc = "allow"
module_name_repetitions = "allow"
must_use_candidate = "allow"
similar_names = "allow"
too_many_lines = "allow"
# To allow `#[allow(clippy::all)]` in `crates/ruff_python_parser/src/python.rs`.
needless_raw_string_hashes = "allow"
# Disallowed restriction lints
print_stdout = "warn"
print_stderr = "warn"
dbg_macro = "warn"
empty_drop = "warn"
empty_structs_with_brackets = "warn"
exit = "warn"
get_unwrap = "warn"
rc_buffer = "warn"
rc_mutex = "warn"
rest_pat_in_fully_bound_structs = "warn"
uuid = { version = "1.5.0", features = ["v4", "fast-rng", "macro-diagnostics", "js"] }
wsl = { version = "0.1.0" }
[profile.release]
# Note that we set these explicitly, and these values
# were chosen based on a trade-off between compile times
# and runtime performance[1].
#
# [1]: https://github.com/astral-sh/ruff/pull/9031
lto = "thin"
codegen-units = 16
# Some crates don't change as much but benefit more from
# more expensive optimization passes, so we selectively
# decrease codegen-units in some cases.
[profile.release.package.ruff_python_parser]
codegen-units = 1
[profile.release.package.ruff_python_ast]
lto = "fat"
codegen-units = 1
[profile.dev.package.insta]
@@ -232,73 +70,8 @@ opt-level = 3
[profile.dev.package.ruff_python_parser]
opt-level = 1
# Use the `--profile profiling` flag to show symbols in release mode.
# e.g. `cargo build --profile profiling`
[profile.profiling]
# Use the `--profile release-debug` flag to show symbols in release mode.
# e.g. `cargo build --profile release-debug`
[profile.release-debug]
inherits = "release"
debug = 1
# The profile that 'cargo dist' will build with.
[profile.dist]
inherits = "release"
# Config for 'cargo dist'
[workspace.metadata.dist]
# The preferred cargo-dist version to use in CI (Cargo.toml SemVer syntax)
cargo-dist-version = "0.22.1"
# CI backends to support
ci = "github"
# The installers to generate for each app
installers = ["shell", "powershell"]
# The archive format to use for windows builds (defaults .zip)
windows-archive = ".zip"
# The archive format to use for non-windows builds (defaults .tar.xz)
unix-archive = ".tar.gz"
# Target platforms to build apps for (Rust target-triple syntax)
targets = [
"aarch64-apple-darwin",
"aarch64-pc-windows-msvc",
"aarch64-unknown-linux-gnu",
"aarch64-unknown-linux-musl",
"arm-unknown-linux-musleabihf",
"armv7-unknown-linux-gnueabihf",
"armv7-unknown-linux-musleabihf",
"i686-pc-windows-msvc",
"i686-unknown-linux-gnu",
"i686-unknown-linux-musl",
"powerpc64-unknown-linux-gnu",
"powerpc64le-unknown-linux-gnu",
"s390x-unknown-linux-gnu",
"x86_64-apple-darwin",
"x86_64-pc-windows-msvc",
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
]
# Whether to auto-include files like READMEs, LICENSEs, and CHANGELOGs (default true)
auto-includes = false
# Whether cargo-dist should create a GitHub Release or use an existing draft
create-release = true
# Which actions to run on pull requests
pr-run-mode = "skip"
# Whether CI should trigger releases with dispatches instead of tag pushes
dispatch-releases = true
# Which phase cargo-dist should use to create the GitHub release
github-release = "announce"
# Whether CI should include auto-generated code to build local artifacts
build-local-artifacts = false
# Local artifacts jobs to run in CI
local-artifacts-jobs = ["./build-binaries", "./build-docker"]
# Publish jobs to run in CI
publish-jobs = ["./publish-pypi", "./publish-wasm"]
# Post-announce jobs to run in CI
post-announce-jobs = [
"./notify-dependents",
"./publish-docs",
"./publish-playground",
]
# Custom permissions for GitHub Jobs
github-custom-job-permissions = { "build-docker" = { packages = "write", contents = "read" }, "publish-wasm" = { contents = "read", id-token = "write", packages = "write" } }
# Whether to install an updater program
install-updater = false
# Path that installers should place binaries in
install-path = "CARGO_HOME"

View File

@@ -1,38 +0,0 @@
FROM --platform=$BUILDPLATFORM ubuntu as build
ENV HOME="/root"
WORKDIR $HOME
RUN apt update && apt install -y build-essential curl python3-venv
# Setup zig as cross compiling linker
RUN python3 -m venv $HOME/.venv
RUN .venv/bin/pip install cargo-zigbuild
ENV PATH="$HOME/.venv/bin:$PATH"
# Install rust
ARG TARGETPLATFORM
RUN case "$TARGETPLATFORM" in \
"linux/arm64") echo "aarch64-unknown-linux-musl" > rust_target.txt ;; \
"linux/amd64") echo "x86_64-unknown-linux-musl" > rust_target.txt ;; \
*) exit 1 ;; \
esac
# Update rustup whenever we bump the rust version
COPY rust-toolchain.toml rust-toolchain.toml
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --target $(cat rust_target.txt) --profile minimal --default-toolchain none
ENV PATH="$HOME/.cargo/bin:$PATH"
# Installs the correct toolchain version from rust-toolchain.toml and then the musl target
RUN rustup target add $(cat rust_target.txt)
# Build
COPY crates crates
COPY Cargo.toml Cargo.toml
COPY Cargo.lock Cargo.lock
RUN cargo zigbuild --bin ruff --target $(cat rust_target.txt) --release
RUN cp target/$(cat rust_target.txt)/release/ruff /ruff
# TODO: Optimize binary size, with a version that also works when cross compiling
# RUN strip --strip-all /ruff
FROM scratch
COPY --from=build /ruff /ruff
WORKDIR /io
ENTRYPOINT ["/ruff"]

25 LICENSE
View File

@@ -1371,28 +1371,3 @@ are:
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
- pydoclint, licensed as follows:
"""
MIT License
Copyright (c) 2023 jsh9
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""

145 README.md
View File

@@ -4,12 +4,11 @@
[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)
[![image](https://img.shields.io/pypi/v/ruff.svg)](https://pypi.python.org/pypi/ruff)
[![image](https://img.shields.io/pypi/l/ruff.svg)](https://github.com/astral-sh/ruff/blob/main/LICENSE)
[![image](https://img.shields.io/pypi/l/ruff.svg)](https://pypi.python.org/pypi/ruff)
[![image](https://img.shields.io/pypi/pyversions/ruff.svg)](https://pypi.python.org/pypi/ruff)
[![Actions status](https://github.com/astral-sh/ruff/workflows/CI/badge.svg)](https://github.com/astral-sh/ruff/actions)
[![Discord](https://img.shields.io/badge/Discord-%235865F2.svg?logo=discord&logoColor=white)](https://discord.com/invite/astral-sh)
[**Docs**](https://docs.astral.sh/ruff/) | [**Playground**](https://play.ruff.rs/)
[**Discord**](https://discord.gg/c9MhzV8aU5) | [**Docs**](https://docs.astral.sh/ruff/) | [**Playground**](https://play.ruff.rs/)
An extremely fast Python linter and code formatter, written in Rust.
@@ -28,15 +27,15 @@ An extremely fast Python linter and code formatter, written in Rust.
- ⚡️ 10-100x faster than existing linters (like Flake8) and formatters (like Black)
- 🐍 Installable via `pip`
- 🛠️ `pyproject.toml` support
- 🤝 Python 3.13 compatibility
- ⚖️ Drop-in parity with [Flake8](https://docs.astral.sh/ruff/faq/#how-does-ruffs-linter-compare-to-flake8), isort, and [Black](https://docs.astral.sh/ruff/faq/#how-does-ruffs-formatter-compare-to-black)
- 🤝 Python 3.12 compatibility
- ⚖️ Drop-in parity with [Flake8](https://docs.astral.sh/ruff/faq/#how-does-ruff-compare-to-flake8), isort, and Black
- 📦 Built-in caching, to avoid re-analyzing unchanged files
- 🔧 Fix support, for automatic error correction (e.g., automatically remove unused imports)
- 📏 Over [800 built-in rules](https://docs.astral.sh/ruff/rules/), with native re-implementations
- 📏 Over [700 built-in rules](https://docs.astral.sh/ruff/rules/), with native re-implementations
of popular Flake8 plugins, like flake8-bugbear
- ⌨️ First-party [editor integrations](https://docs.astral.sh/ruff/integrations/) for
[VS Code](https://github.com/astral-sh/ruff-vscode) and [more](https://docs.astral.sh/ruff/editors/setup)
- 🌎 Monorepo-friendly, with [hierarchical and cascading configuration](https://docs.astral.sh/ruff/configuration/#config-file-discovery)
[VS Code](https://github.com/astral-sh/ruff-vscode) and [more](https://github.com/astral-sh/ruff-lsp)
- 🌎 Monorepo-friendly, with [hierarchical and cascading configuration](https://docs.astral.sh/ruff/configuration/#pyprojecttoml-discovery)
Ruff aims to be orders of magnitude faster than alternative tools while integrating more
functionality behind a single, common interface.
@@ -50,13 +49,12 @@ times faster than any individual tool.
Ruff is extremely actively developed and used in major open-source projects like:
- [Apache Airflow](https://github.com/apache/airflow)
- [Apache Superset](https://github.com/apache/superset)
- [FastAPI](https://github.com/tiangolo/fastapi)
- [Hugging Face](https://github.com/huggingface/transformers)
- [Pandas](https://github.com/pandas-dev/pandas)
- [SciPy](https://github.com/scipy/scipy)
...and [many more](#whos-using-ruff).
...and many more.
Ruff is backed by [Astral](https://astral.sh). Read the [launch post](https://astral.sh/blog/announcing-astral-the-company-behind-ruff),
or the original [project announcement](https://notes.crmarsh.com/python-tooling-could-be-much-much-faster).
@@ -110,7 +108,7 @@ For more, see the [documentation](https://docs.astral.sh/ruff/).
1. [Who's Using Ruff?](#whos-using-ruff)
1. [License](#license)
## Getting Started<a id="getting-started"></a>
## Getting Started
For more, see the [documentation](https://docs.astral.sh/ruff/).
@@ -119,25 +117,7 @@ For more, see the [documentation](https://docs.astral.sh/ruff/).
Ruff is available as [`ruff`](https://pypi.org/project/ruff/) on PyPI:
```shell
# With pip.
pip install ruff
# With pipx.
pipx install ruff
```
Starting with version `0.5.0`, Ruff can be installed with our standalone installers:
```shell
# On macOS and Linux.
curl -LsSf https://astral.sh/ruff/install.sh | sh
# On Windows.
powershell -c "irm https://astral.sh/ruff/install.ps1 | iex"
# For a specific version.
curl -LsSf https://astral.sh/ruff/0.6.9/install.sh | sh
powershell -c "irm https://astral.sh/ruff/0.6.9/install.ps1 | iex"
```
You can also install Ruff via [Homebrew](https://formulae.brew.sh/formula/ruff), [Conda](https://anaconda.org/conda-forge/ruff),
@@ -148,7 +128,7 @@ and with [a variety of other package managers](https://docs.astral.sh/ruff/insta
To run Ruff as a linter, try any of the following:
```shell
ruff check # Lint all files in the current directory (and any subdirectories).
ruff check . # Lint all files in the current directory (and any subdirectories).
ruff check path/to/code/ # Lint all files in `/path/to/code` (and any subdirectories).
ruff check path/to/code/*.py # Lint all `.py` files in `/path/to/code`.
ruff check path/to/code/to/file.py # Lint `file.py`.
@@ -158,7 +138,7 @@ ruff check @arguments.txt # Lint using an input file, treating its con
Or, to run Ruff as a formatter:
```shell
ruff format # Format all files in the current directory (and any subdirectories).
ruff format . # Format all files in the current directory (and any subdirectories).
ruff format path/to/code/ # Format all files in `/path/to/code` (and any subdirectories).
ruff format path/to/code/*.py # Format all `.py` files in `/path/to/code`.
ruff format path/to/code/to/file.py # Format `file.py`.
@@ -170,7 +150,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com/) hook via [`ruff
```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.6.9
rev: v0.1.5
hooks:
# Run the linter.
- id: ruff
@@ -179,10 +159,11 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com/) hook via [`ruff
- id: ruff-format
```
Ruff can also be used as a [VS Code extension](https://github.com/astral-sh/ruff-vscode) or with [various other editors](https://docs.astral.sh/ruff/editors/setup).
Ruff can also be used as a [VS Code extension](https://github.com/astral-sh/ruff-vscode) or
alongside any other editor through the [Ruff LSP](https://github.com/astral-sh/ruff-lsp).
Ruff can also be used as a [GitHub Action](https://github.com/features/actions) via
[`ruff-action`](https://github.com/astral-sh/ruff-action):
[`ruff-action`](https://github.com/chartboost/ruff-action):
```yaml
name: Ruff
@@ -191,19 +172,20 @@ jobs:
ruff:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: astral-sh/ruff-action@v1
- uses: actions/checkout@v3
- uses: chartboost/ruff-action@v1
```
### Configuration<a id="configuration"></a>
### Configuration
Ruff can be configured through a `pyproject.toml`, `ruff.toml`, or `.ruff.toml` file (see:
[_Configuration_](https://docs.astral.sh/ruff/configuration/), or [_Settings_](https://docs.astral.sh/ruff/settings/)
for a complete list of all configuration options).
If left unspecified, Ruff's default configuration is equivalent to the following `ruff.toml` file:
If left unspecified, Ruff's default configuration is equivalent to:
```toml
[tool.ruff]
# Exclude a variety of commonly ignored directories.
exclude = [
".bzr",
@@ -212,25 +194,20 @@ exclude = [
".git",
".git-rewrite",
".hg",
".ipynb_checkpoints",
".mypy_cache",
".nox",
".pants.d",
".pyenv",
".pytest_cache",
".pytype",
".ruff_cache",
".svn",
".tox",
".venv",
".vscode",
"__pypackages__",
"_build",
"buck-out",
"build",
"dist",
"node_modules",
"site-packages",
"venv",
]
@@ -241,7 +218,7 @@ indent-width = 4
# Assume Python 3.8
target-version = "py38"
[lint]
[tool.ruff.lint]
# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
select = ["E4", "E7", "E9", "F"]
ignore = []
@@ -253,7 +230,7 @@ unfixable = []
# Allow unused variables when underscore-prefixed.
dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
[format]
[tool.ruff.format]
# Like Black, use double quotes for strings.
quote-style = "double"
@@ -267,35 +244,21 @@ skip-magic-trailing-comma = false
line-ending = "auto"
```
Note that, in a `pyproject.toml`, each section header should be prefixed with `tool.ruff`. For
example, `[lint]` should be replaced with `[tool.ruff.lint]`.
Some configuration options can be provided via dedicated command-line arguments, such as those
related to rule enablement and disablement, file discovery, and logging level:
Some configuration options can be provided via the command-line, such as those related to
rule enablement and disablement, file discovery, and logging level:
```shell
ruff check --select F401 --select F403 --quiet
ruff check path/to/code/ --select F401 --select F403 --quiet
```
The remaining configuration options can be provided through a catch-all `--config` argument:
```shell
ruff check --config "lint.per-file-ignores = {'some_file.py' = ['F841']}"
```
To opt in to the latest lint rules, formatter style changes, interface updates, and more, enable
[preview mode](https://docs.astral.sh/ruff/rules/) by setting `preview = true` in your configuration
file or passing `--preview` on the command line. Preview mode enables a collection of unstable
features that may change prior to stabilization.
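For example, to try preview rules on a one-off run without changing your configuration file:

```shell
ruff check --preview
```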
See `ruff help` for more on Ruff's top-level commands, or `ruff help check` and `ruff help format`
for more on the linting and formatting commands, respectively.
## Rules<a id="rules"></a>
## Rules
<!-- Begin section: Rules -->
**Ruff supports over 800 lint rules**, many of which are inspired by popular tools like Flake8,
**Ruff supports over 700 lint rules**, many of which are inspired by popular tools like Flake8,
isort, pyupgrade, and others. Regardless of the rule's origin, Ruff re-implements every rule in
Rust as a first-party feature.
@@ -351,6 +314,7 @@ quality tools, including:
- [flake8-super](https://pypi.org/project/flake8-super/)
- [flake8-tidy-imports](https://pypi.org/project/flake8-tidy-imports/)
- [flake8-todos](https://pypi.org/project/flake8-todos/)
- [flake8-trio](https://pypi.org/project/flake8-trio/)
- [flake8-type-checking](https://pypi.org/project/flake8-type-checking/)
- [flake8-use-pathlib](https://pypi.org/project/flake8-use-pathlib/)
- [flynt](https://pypi.org/project/flynt/) ([#2102](https://github.com/astral-sh/ruff/issues/2102))
@@ -367,21 +331,21 @@ quality tools, including:
For a complete enumeration of the supported rules, see [_Rules_](https://docs.astral.sh/ruff/rules/).
## Contributing<a id="contributing"></a>
## Contributing
Contributions are welcome and highly appreciated. To get started, check out the
[**contributing guidelines**](https://docs.astral.sh/ruff/contributing/).
You can also join us on [**Discord**](https://discord.com/invite/astral-sh).
You can also join us on [**Discord**](https://discord.gg/c9MhzV8aU5).
## Support<a id="support"></a>
## Support
Having trouble? Check out the existing issues on [**GitHub**](https://github.com/astral-sh/ruff/issues),
or feel free to [**open a new one**](https://github.com/astral-sh/ruff/issues/new).
You can also ask for help on [**Discord**](https://discord.com/invite/astral-sh).
You can also ask for help on [**Discord**](https://discord.gg/c9MhzV8aU5).
## Acknowledgements<a id="acknowledgements"></a>
## Acknowledgements
Ruff's linter draws on both the APIs and implementation details of many other
tools in the Python ecosystem, especially [Flake8](https://github.com/PyCQA/flake8), [Pyflakes](https://github.com/PyCQA/pyflakes),
@@ -405,43 +369,35 @@ Ruff is the beneficiary of a large number of [contributors](https://github.com/a
Ruff is released under the MIT license.
## Who's Using Ruff?<a id="whos-using-ruff"></a>
## Who's Using Ruff?
Ruff is used by a number of major open-source projects and companies, including:
- [Albumentations](https://github.com/albumentations-team/albumentations)
- Amazon ([AWS SAM](https://github.com/aws/serverless-application-model))
- Anthropic ([Python SDK](https://github.com/anthropics/anthropic-sdk-python))
- [Apache Airflow](https://github.com/apache/airflow)
- AstraZeneca ([Magnus](https://github.com/AstraZeneca/magnus-core))
- [Babel](https://github.com/python-babel/babel)
- Benchling ([Refac](https://github.com/benchling/refac))
- [Babel](https://github.com/python-babel/babel)
- [Bokeh](https://github.com/bokeh/bokeh)
- [Cryptography (PyCA)](https://github.com/pyca/cryptography)
- CERN ([Indico](https://getindico.io/))
- [DVC](https://github.com/iterative/dvc)
- [Dagger](https://github.com/dagger/dagger)
- [Dagster](https://github.com/dagster-io/dagster)
- Databricks ([MLflow](https://github.com/mlflow/mlflow))
- [Dify](https://github.com/langgenius/dify)
- [FastAPI](https://github.com/tiangolo/fastapi)
- [Godot](https://github.com/godotengine/godot)
- [Gradio](https://github.com/gradio-app/gradio)
- [Great Expectations](https://github.com/great-expectations/great_expectations)
- [HTTPX](https://github.com/encode/httpx)
- [Hatch](https://github.com/pypa/hatch)
- [Home Assistant](https://github.com/home-assistant/core)
- Hugging Face ([Transformers](https://github.com/huggingface/transformers),
[Datasets](https://github.com/huggingface/datasets),
[Diffusers](https://github.com/huggingface/diffusers))
- IBM ([Qiskit](https://github.com/Qiskit/qiskit))
- [Hatch](https://github.com/pypa/hatch)
- [Home Assistant](https://github.com/home-assistant/core)
- ING Bank ([popmon](https://github.com/ing-bank/popmon), [probatus](https://github.com/ing-bank/probatus))
- [Ibis](https://github.com/ibis-project/ibis)
- [ivy](https://github.com/unifyai/ivy)
- [Jupyter](https://github.com/jupyter-server/jupyter_server)
- [Kraken Tech](https://kraken.tech/)
- [LangChain](https://github.com/hwchase17/langchain)
- [Litestar](https://litestar.dev/)
- [LlamaIndex](https://github.com/jerryjliu/llama_index)
- Matrix ([Synapse](https://github.com/matrix-org/synapse))
- [MegaLinter](https://github.com/oxsecurity/megalinter)
@@ -449,18 +405,14 @@ Ruff is used by a number of major open-source projects and companies, including:
- Microsoft ([Semantic Kernel](https://github.com/microsoft/semantic-kernel),
[ONNX Runtime](https://github.com/microsoft/onnxruntime),
[LightGBM](https://github.com/microsoft/LightGBM))
- Modern Treasury ([Python SDK](https://github.com/Modern-Treasury/modern-treasury-python))
- Modern Treasury ([Python SDK](https://github.com/Modern-Treasury/modern-treasury-python-sdk))
- Mozilla ([Firefox](https://github.com/mozilla/gecko-dev))
- [Mypy](https://github.com/python/mypy)
- [Nautobot](https://github.com/nautobot/nautobot)
- Netflix ([Dispatch](https://github.com/Netflix/dispatch))
- [Neon](https://github.com/neondatabase/neon)
- [Nokia](https://nokia.com/)
- [NoneBot](https://github.com/nonebot/nonebot2)
- [NumPyro](https://github.com/pyro-ppl/numpyro)
- [ONNX](https://github.com/onnx/onnx)
- [OpenBB](https://github.com/OpenBB-finance/OpenBBTerminal)
- [Open Wine Components](https://github.com/Open-Wine-Components/umu-launcher)
- [PDM](https://github.com/pdm-project/pdm)
- [PaddlePaddle](https://github.com/PaddlePaddle/Paddle)
- [Pandas](https://github.com/pandas-dev/pandas)
@@ -470,25 +422,20 @@ Ruff is used by a number of major open-source projects and companies, including:
- [PostHog](https://github.com/PostHog/posthog)
- Prefect ([Python SDK](https://github.com/PrefectHQ/prefect), [Marvin](https://github.com/PrefectHQ/marvin))
- [PyInstaller](https://github.com/pyinstaller/pyinstaller)
- [PyMC](https://github.com/pymc-devs/pymc/)
- [PyMC-Marketing](https://github.com/pymc-labs/pymc-marketing)
- [pytest](https://github.com/pytest-dev/pytest)
- [PyTorch](https://github.com/pytorch/pytorch)
- [Pydantic](https://github.com/pydantic/pydantic)
- [Pylint](https://github.com/PyCQA/pylint)
- [PyVista](https://github.com/pyvista/pyvista)
- [PyMC-Marketing](https://github.com/pymc-labs/pymc-marketing)
- [Reflex](https://github.com/reflex-dev/reflex)
- [River](https://github.com/online-ml/river)
- [Rippling](https://rippling.com)
- [Robyn](https://github.com/sansyrox/robyn)
- [Saleor](https://github.com/saleor/saleor)
- Scale AI ([Launch SDK](https://github.com/scaleapi/launch-python-client))
- [SciPy](https://github.com/scipy/scipy)
- Snowflake ([SnowCLI](https://github.com/Snowflake-Labs/snowcli))
- [Saleor](https://github.com/saleor/saleor)
- [SciPy](https://github.com/scipy/scipy)
- [Sphinx](https://github.com/sphinx-doc/sphinx)
- [Stable Baselines3](https://github.com/DLR-RM/stable-baselines3)
- [Starlette](https://github.com/encode/starlette)
- [Streamlit](https://github.com/streamlit/streamlit)
- [Litestar](https://litestar.dev/)
- [The Algorithms](https://github.com/TheAlgorithms/Python)
- [Vega-Altair](https://github.com/altair-viz/altair)
- WordPress ([Openverse](https://github.com/WordPress/openverse))
@@ -504,7 +451,7 @@ Ruff is used by a number of major open-source projects and companies, including:
### Show Your Support
If you're using Ruff, consider adding the Ruff badge to your project's `README.md`:
If you're using Ruff, consider adding the Ruff badge to project's `README.md`:
```md
[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)
@@ -524,12 +471,12 @@ If you're using Ruff, consider adding the Ruff badge to your project's `README.m
<a href="https://github.com/astral-sh/ruff"><img src="https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json" alt="Ruff" style="max-width:100%;"></a>
```
## License<a id="license"></a>
## License
This repository is licensed under the [MIT License](https://github.com/astral-sh/ruff/blob/main/LICENSE)
MIT
<div align="center">
<a target="_blank" href="https://astral.sh" style="background:none">
<img src="https://raw.githubusercontent.com/astral-sh/ruff/main/assets/svg/Astral.svg" alt="Made by Astral">
<img src="https://raw.githubusercontent.com/astral-sh/ruff/main/assets/svg/Astral.svg">
</a>
</div>

View File

@@ -1,21 +1,11 @@
[files]
# https://github.com/crate-ci/typos/issues/868
extend-exclude = ["crates/red_knot_vendored/vendor/**/*", "**/resources/**/*", "**/snapshots/**/*"]
extend-exclude = ["**/resources/**/*", "**/snapshots/**/*"]
[default.extend-words]
"arange" = "arange" # e.g. `numpy.arange`
hel = "hel"
whos = "whos"
spawnve = "spawnve"
ned = "ned"
pn = "pn" # `import panel as pn` is a thing
poit = "poit"
BA = "BA" # acronym for "Bad Allowed", used in testing.
jod = "jod" # e.g., `jod-thread`
[default]
extend-ignore-re = [
# Line ignore with trailing "spellchecker:disable-line"
"(?Rm)^.*#\\s*spellchecker:disable-line$",
"LICENSEs",
]

View File

@@ -1,21 +1,7 @@
doc-valid-idents = [
"..",
"CodeQL",
"FastAPI",
"IPython",
"LangChain",
"LibCST",
"McCabe",
"NumPy",
"SCREAMING_SNAKE_CASE",
"SQLAlchemy",
"StackOverflow",
"PyCharm",
]
ignore-interior-mutability = [
# Interned is read-only. The wrapped `Rc` never gets updated.
"ruff_formatter::format_element::Interned",
# The expression is read-only.
"ruff_python_ast::hashable::HashableExpr",
"CodeQL",
"IPython",
"NumPy",
"..",
]

View File

@@ -0,0 +1,36 @@
[package]
name = "flake8-to-ruff"
version = "0.1.5"
description = """
Convert Flake8 configuration files to Ruff configuration files.
"""
authors = { workspace = true }
edition = { workspace = true }
rust-version = { workspace = true }
homepage = { workspace = true }
documentation = { workspace = true }
repository = { workspace = true }
license = { workspace = true }
[dependencies]
ruff_linter = { path = "../ruff_linter", default-features = false }
ruff_workspace = { path = "../ruff_workspace" }
anyhow = { workspace = true }
clap = { workspace = true }
colored = { workspace = true }
configparser = { version = "3.0.2" }
itertools = { workspace = true }
log = { workspace = true }
once_cell = { workspace = true }
pep440_rs = { version = "0.3.12", features = ["serde"] }
regex = { workspace = true }
rustc-hash = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
strum = { workspace = true }
strum_macros = { workspace = true }
toml = { workspace = true }
[dev-dependencies]
pretty_assertions = "1.3.0"

View File

@@ -0,0 +1,99 @@
# flake8-to-ruff
Convert existing Flake8 configuration files (`setup.cfg`, `tox.ini`, or `.flake8`) for use with
[Ruff](https://github.com/astral-sh/ruff).
Generates a Ruff-compatible `pyproject.toml` section.
## Installation and Usage
### Installation
Available as [`flake8-to-ruff`](https://pypi.org/project/flake8-to-ruff/) on PyPI:
```shell
pip install flake8-to-ruff
```
### Usage
To run `flake8-to-ruff`:
```shell
flake8-to-ruff path/to/setup.cfg
flake8-to-ruff path/to/tox.ini
flake8-to-ruff path/to/.flake8
```
`flake8-to-ruff` will print the relevant `pyproject.toml` sections to standard output, like so:
```toml
[tool.ruff]
exclude = [
'.svn',
'CVS',
'.bzr',
'.hg',
'.git',
'__pycache__',
'.tox',
'.idea',
'.mypy_cache',
'.venv',
'node_modules',
'_state_machine.py',
'test_fstring.py',
'bad_coding2.py',
'badsyntax_*.py',
]
select = [
'A',
'E',
'F',
'Q',
]
ignore = []
[tool.ruff.flake8-quotes]
inline-quotes = 'single'
[tool.ruff.pep8-naming]
ignore-names = [
'foo',
'bar',
]
```
### Plugins
`flake8-to-ruff` will attempt to infer any activated plugins based on the settings provided in your
configuration file.
For example, if your `.flake8` file includes a `docstring-convention` property, `flake8-to-ruff`
will enable the appropriate [`flake8-docstrings`](https://pypi.org/project/flake8-docstrings/)
checks.
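For instance, given a minimal (hypothetical) configuration like the one below, `flake8-to-ruff` would infer the `flake8-docstrings` plugin from the `docstring-convention` key:

```shell
cat > .flake8 <<'EOF'
[flake8]
docstring-convention = numpy
EOF
flake8-to-ruff .flake8
```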
Alternatively, you can manually specify plugins on the command-line:
```shell
flake8-to-ruff path/to/.flake8 --plugin flake8-builtins --plugin flake8-quotes
```
## Limitations
1. Ruff only supports a subset of the Flake8 configuration options. `flake8-to-ruff` will warn on and
ignore unsupported options in the `.flake8` file (or equivalent). (Similarly, Ruff has a few
configuration options that don't exist in Flake8.)
1. Ruff will omit any rule codes that are unimplemented or unsupported by Ruff, including rule
codes from unsupported plugins. (See the
[documentation](https://docs.astral.sh/ruff/faq/#how-does-ruff-compare-to-flake8) for the complete
list of supported plugins.)
## License
MIT
## Contributing
Contributions are welcome and hugely appreciated. To get started, check out the
[contributing guidelines](https://github.com/astral-sh/ruff/blob/main/CONTRIBUTING.md).

View File

@@ -0,0 +1,65 @@
[build-system]
requires = [
# The minimum setuptools version is specific to the PEP 517 backend,
# and may be stricter than the version required in `setup.cfg`
"setuptools>=40.6.0,!=60.9.0",
"wheel",
# Must be kept in sync with the `install_requirements` in `setup.cfg`
"cffi>=1.12; platform_python_implementation != 'PyPy'",
"setuptools-rust>=0.11.4",
]
build-backend = "setuptools.build_meta"
[tool.black]
line-length = 79
target-version = ["py36"]
[tool.pytest.ini_options]
addopts = "-r s --capture=no --strict-markers --benchmark-disable"
markers = [
"skip_fips: this test is not executed in FIPS mode",
"supported: parametrized test requiring only_if and skip_message",
]
[tool.mypy]
show_error_codes = true
check_untyped_defs = true
no_implicit_reexport = true
warn_redundant_casts = true
warn_unused_ignores = true
warn_unused_configs = true
strict_equality = true
[[tool.mypy.overrides]]
module = [
"pretend"
]
ignore_missing_imports = true
[tool.coverage.run]
branch = true
relative_files = true
source = [
"cryptography",
"tests/",
]
[tool.coverage.paths]
source = [
"src/cryptography",
"*.tox/*/lib*/python*/site-packages/cryptography",
"*.tox\\*\\Lib\\site-packages\\cryptography",
"*.tox/pypy/site-packages/cryptography",
]
tests =[
"tests/",
"*tests\\",
]
[tool.coverage.report]
exclude_lines = [
"@abc.abstractmethod",
"@abc.abstractproperty",
"@typing.overload",
"if typing.TYPE_CHECKING",
]

View File

@@ -0,0 +1,91 @@
[metadata]
name = cryptography
version = attr: cryptography.__version__
description = cryptography is a package which provides cryptographic recipes and primitives to Python developers.
long_description = file: README.rst
long_description_content_type = text/x-rst
license = BSD-3-Clause OR Apache-2.0
url = https://github.com/pyca/cryptography
author = The Python Cryptographic Authority and individual contributors
author_email = cryptography-dev@python.org
project_urls =
Documentation=https://cryptography.io/
Source=https://github.com/pyca/cryptography/
Issues=https://github.com/pyca/cryptography/issues
Changelog=https://cryptography.io/en/latest/changelog/
classifiers =
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
License :: OSI Approved :: Apache Software License
License :: OSI Approved :: BSD License
Natural Language :: English
Operating System :: MacOS :: MacOS X
Operating System :: POSIX
Operating System :: POSIX :: BSD
Operating System :: POSIX :: Linux
Operating System :: Microsoft :: Windows
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: 3.11
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
Topic :: Security :: Cryptography
[options]
python_requires = >=3.6
include_package_data = True
zip_safe = False
package_dir =
=src
packages = find:
# `install_requires` must be kept in sync with `pyproject.toml`
install_requires =
cffi >=1.12
[options.packages.find]
where = src
exclude =
_cffi_src
_cffi_src.*
[options.extras_require]
test =
pytest>=6.2.0
pytest-benchmark
pytest-cov
pytest-subtests
pytest-xdist
pretend
iso8601
pytz
hypothesis>=1.11.4,!=3.79.2
docs =
sphinx >= 1.6.5,!=1.8.0,!=3.1.0,!=3.1.1,!=5.2.0,!=5.2.0.post0
sphinx_rtd_theme
docstest =
pyenchant >= 1.6.11
twine >= 1.12.0
sphinxcontrib-spelling >= 4.0.1
sdist =
setuptools_rust >= 0.11.4
pep8test =
black
flake8
flake8-import-order
pep8-naming
# This extra is for OpenSSH private keys that use bcrypt KDF
# Versions: v3.1.3 - ignore_few_rounds, v3.1.5 - abi3
ssh =
bcrypt >= 3.1.5
[flake8]
ignore = E203,E211,W503,W504,N818
exclude = .tox,*.egg,.git,_build,.hypothesis
select = E,W,F,N,I
application-import-names = cryptography,cryptography_vectors,tests

View File

@@ -0,0 +1,19 @@
[flake8]
# Ignore style and complexity
# E: style errors
# W: style warnings
# C: complexity
# D: docstring warnings (unused pydocstyle extension)
# F841: local variable assigned but never used
ignore = E, C, W, D, F841
builtins = c, get_config
exclude =
.cache,
.github,
docs,
jupyterhub/alembic*,
onbuild,
scripts,
share,
tools,
setup.py

View File

@@ -0,0 +1,43 @@
[flake8]
# Exclude the grpc generated code
exclude = ./manim/grpc/gen/*
max-complexity = 15
max-line-length = 88
statistics = True
# Prevents some flake8-rst-docstrings errors
rst-roles = attr,class,func,meth,mod,obj,ref,doc,exc
rst-directives = manim, SEEALSO, seealso
docstring-convention=numpy
select = A,A00,B,B9,C4,C90,D,E,F,F,PT,RST,SIM,W
# General Compatibility
extend-ignore = E203, W503, D202, D212, D213, D404
# Misc
F401, F403, F405, F841, E501, E731, E402, F811, F821,
# Plug-in: flake8-builtins
A001, A002, A003,
# Plug-in: flake8-bugbear
B006, B007, B008, B009, B010, B903, B950,
# Plug-in: flake8-simplify
SIM105, SIM106, SIM119,
# Plug-in: flake8-comprehensions
C901
# Plug-in: flake8-pytest-style
PT001, PT004, PT006, PT011, PT018, PT022, PT023,
# Plug-in: flake8-docstrings
D100, D101, D102, D103, D104, D105, D106, D107,
D200, D202, D204, D205, D209,
D301,
D400, D401, D402, D403, D405, D406, D407, D409, D411, D412, D414,
# Plug-in: flake8-rst-docstrings
RST201, RST203, RST210, RST212, RST213, RST215,
RST301, RST303,

View File

@@ -0,0 +1,36 @@
[flake8]
min_python_version = 3.7.0
max-line-length = 88
ban-relative-imports = true
# flake8-use-fstring: https://github.com/MichaelKim0407/flake8-use-fstring#--percent-greedy-and---format-greedy
format-greedy = 1
inline-quotes = double
enable-extensions = TC, TC1
type-checking-strict = true
eradicate-whitelist-extend = ^-.*;
extend-ignore =
# E203: Whitespace before ':' (pycqa/pycodestyle#373)
E203,
# SIM106: Handle error-cases first
SIM106,
# ANN101: Missing type annotation for self in method
ANN101,
# ANN102: Missing type annotation for cls in classmethod
ANN102,
# PIE781: assign-and-return
PIE781,
# PIE798 no-unnecessary-class: Consider using a module for namespacing instead
PIE798,
per-file-ignores =
# TC002: Move third-party import '...' into a type-checking block
__init__.py:TC002,
# ANN201: Missing return type annotation for public function
tests/test_*:ANN201
tests/**/test_*:ANN201
extend-exclude =
# Frozen and not subject to change in this repo:
get-poetry.py,
install-poetry.py,
# External to the project's coding standards:
tests/fixtures/*,
tests/**/fixtures/*,

View File

@@ -0,0 +1,19 @@
[flake8]
max-line-length=120
docstring-convention=all
import-order-style=pycharm
application_import_names=bot,tests
exclude=.cache,.venv,.git,constants.py
extend-ignore=
B311,W503,E226,S311,T000,E731
# Missing Docstrings
D100,D104,D105,D107,
# Docstring Whitespace
D203,D212,D214,D215,
# Docstring Quotes
D301,D302,
# Docstring Content
D400,D401,D402,D404,D405,D406,D407,D408,D409,D410,D411,D412,D413,D414,D416,D417
# Type Annotations
ANN002,ANN003,ANN101,ANN102,ANN204,ANN206,ANN401
per-file-ignores=tests/*:D,ANN

View File

@@ -0,0 +1,6 @@
[flake8]
ignore = E203, E501, W503
per-file-ignores =
requests/__init__.py:E402, F401
requests/compat.py:E402, F401
tests/compat.py:F401

View File

@@ -0,0 +1,34 @@
[project]
name = "flake8-to-ruff"
keywords = ["automation", "flake8", "pycodestyle", "pyflakes", "pylint", "clippy"]
classifiers = [
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Quality Assurance",
]
author = "Charlie Marsh"
author_email = "charlie.r.marsh@gmail.com"
description = "Convert existing Flake8 configuration to Ruff."
requires-python = ">=3.7"
[project.urls]
repository = "https://github.com/astral-sh/ruff#subdirectory=crates/flake8_to_ruff"
[build-system]
requires = ["maturin>=1.0,<2.0"]
build-backend = "maturin"
[tool.maturin]
bindings = "bin"
strip = true

View File

@@ -0,0 +1,13 @@
//! Extract Black configuration settings from a pyproject.toml.
use ruff_linter::line_width::LineLength;
use ruff_linter::settings::types::PythonVersion;
use serde::{Deserialize, Serialize};
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
pub(crate) struct Black {
#[serde(alias = "line-length", alias = "line_length")]
pub(crate) line_length: Option<LineLength>,
#[serde(alias = "target-version", alias = "target_version")]
pub(crate) target_version: Option<Vec<PythonVersion>>,
}
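For reference, this struct deserializes a `[tool.black]` table like the one in the `cryptography` fixture above, e.g. (illustrative values only):

```shell
cat >> pyproject.toml <<'EOF'
[tool.black]
line-length = 79
target-version = ["py36"]
EOF
```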

View File

@@ -0,0 +1,687 @@
use std::collections::{HashMap, HashSet};
use std::str::FromStr;
use itertools::Itertools;
use ruff_linter::line_width::LineLength;
use ruff_linter::registry::Linter;
use ruff_linter::rule_selector::RuleSelector;
use ruff_linter::rules::flake8_pytest_style::types::{
ParametrizeNameType, ParametrizeValuesRowType, ParametrizeValuesType,
};
use ruff_linter::rules::flake8_quotes::settings::Quote;
use ruff_linter::rules::flake8_tidy_imports::settings::Strictness;
use ruff_linter::rules::pydocstyle::settings::Convention;
use ruff_linter::settings::types::PythonVersion;
use ruff_linter::settings::DEFAULT_SELECTORS;
use ruff_linter::warn_user;
use ruff_workspace::options::{
Flake8AnnotationsOptions, Flake8BugbearOptions, Flake8BuiltinsOptions, Flake8ErrMsgOptions,
Flake8PytestStyleOptions, Flake8QuotesOptions, Flake8TidyImportsOptions, LintCommonOptions,
LintOptions, McCabeOptions, Options, Pep8NamingOptions, PydocstyleOptions,
};
use ruff_workspace::pyproject::Pyproject;
use super::external_config::ExternalConfig;
use super::plugin::Plugin;
use super::{parser, plugin};
pub(crate) fn convert(
config: &HashMap<String, HashMap<String, Option<String>>>,
external_config: &ExternalConfig,
plugins: Option<Vec<Plugin>>,
) -> Pyproject {
// Extract the Flake8 section.
let flake8 = config
.get("flake8")
.expect("Unable to find flake8 section in INI file");
// Extract all referenced rule code prefixes, to power plugin inference.
let mut referenced_codes: HashSet<RuleSelector> = HashSet::default();
for (key, value) in flake8 {
if let Some(value) = value {
match key.as_str() {
"select" | "ignore" | "extend-select" | "extend_select" | "extend-ignore"
| "extend_ignore" => {
referenced_codes.extend(parser::parse_prefix_codes(value.as_ref()));
}
"per-file-ignores" | "per_file_ignores" => {
if let Ok(per_file_ignores) =
parser::parse_files_to_codes_mapping(value.as_ref())
{
for (_, codes) in parser::collect_per_file_ignores(per_file_ignores) {
referenced_codes.extend(codes);
}
}
}
_ => {}
}
}
}
// Infer plugins, if not provided.
let plugins = plugins.unwrap_or_else(|| {
let from_options = plugin::infer_plugins_from_options(flake8);
if !from_options.is_empty() {
#[allow(clippy::print_stderr)]
{
eprintln!("Inferred plugins from settings: {from_options:#?}");
}
}
let from_codes = plugin::infer_plugins_from_codes(&referenced_codes);
if !from_codes.is_empty() {
#[allow(clippy::print_stderr)]
{
eprintln!("Inferred plugins from referenced codes: {from_codes:#?}");
}
}
from_options.into_iter().chain(from_codes).collect()
});
// Check if the user has specified a `select`. If not, we'll add our own
// default `select`, and populate it based on user plugins.
let mut select = flake8
.get("select")
.and_then(|value| {
value
.as_ref()
.map(|value| HashSet::from_iter(parser::parse_prefix_codes(value)))
})
.unwrap_or_else(|| resolve_select(&plugins));
let mut ignore: HashSet<RuleSelector> = flake8
.get("ignore")
.and_then(|value| {
value
.as_ref()
.map(|value| HashSet::from_iter(parser::parse_prefix_codes(value)))
})
.unwrap_or_default();
// Parse each supported option.
let mut options = Options::default();
let mut lint_options = LintCommonOptions::default();
let mut flake8_annotations = Flake8AnnotationsOptions::default();
let mut flake8_bugbear = Flake8BugbearOptions::default();
let mut flake8_builtins = Flake8BuiltinsOptions::default();
let mut flake8_errmsg = Flake8ErrMsgOptions::default();
let mut flake8_pytest_style = Flake8PytestStyleOptions::default();
let mut flake8_quotes = Flake8QuotesOptions::default();
let mut flake8_tidy_imports = Flake8TidyImportsOptions::default();
let mut mccabe = McCabeOptions::default();
let mut pep8_naming = Pep8NamingOptions::default();
let mut pydocstyle = PydocstyleOptions::default();
for (key, value) in flake8 {
if let Some(value) = value {
match key.as_str() {
// flake8
"builtins" => {
options.builtins = Some(parser::parse_strings(value.as_ref()));
}
"max-line-length" | "max_line_length" => match LineLength::from_str(value) {
Ok(line_length) => options.line_length = Some(line_length),
Err(e) => {
warn_user!("Unable to parse '{key}' property: {e}");
}
},
"select" => {
// No-op (handled above).
select.extend(parser::parse_prefix_codes(value.as_ref()));
}
"ignore" => {
// No-op (handled above).
}
"extend-select" | "extend_select" => {
// Unlike Flake8, use a single explicit `select`.
select.extend(parser::parse_prefix_codes(value.as_ref()));
}
"extend-ignore" | "extend_ignore" => {
// Unlike Flake8, use a single explicit `ignore`.
ignore.extend(parser::parse_prefix_codes(value.as_ref()));
}
"exclude" => {
options.exclude = Some(parser::parse_strings(value.as_ref()));
}
"extend-exclude" | "extend_exclude" => {
options.extend_exclude = Some(parser::parse_strings(value.as_ref()));
}
"per-file-ignores" | "per_file_ignores" => {
match parser::parse_files_to_codes_mapping(value.as_ref()) {
Ok(per_file_ignores) => {
lint_options.per_file_ignores =
Some(parser::collect_per_file_ignores(per_file_ignores));
}
Err(e) => {
warn_user!("Unable to parse '{key}' property: {e}");
}
}
}
// flake8-bugbear
"extend-immutable-calls" | "extend_immutable_calls" => {
flake8_bugbear.extend_immutable_calls =
Some(parser::parse_strings(value.as_ref()));
}
// flake8-builtins
"builtins-ignorelist" | "builtins_ignorelist" => {
flake8_builtins.builtins_ignorelist =
Some(parser::parse_strings(value.as_ref()));
}
// flake8-annotations
"suppress-none-returning" | "suppress_none_returning" => {
match parser::parse_bool(value.as_ref()) {
Ok(bool) => flake8_annotations.suppress_none_returning = Some(bool),
Err(e) => {
warn_user!("Unable to parse '{key}' property: {e}");
}
}
}
"suppress-dummy-args" | "suppress_dummy_args" => {
match parser::parse_bool(value.as_ref()) {
Ok(bool) => flake8_annotations.suppress_dummy_args = Some(bool),
Err(e) => {
warn_user!("Unable to parse '{key}' property: {e}");
}
}
}
"mypy-init-return" | "mypy_init_return" => {
match parser::parse_bool(value.as_ref()) {
Ok(bool) => flake8_annotations.mypy_init_return = Some(bool),
Err(e) => {
warn_user!("Unable to parse '{key}' property: {e}");
}
}
}
"allow-star-arg-any" | "allow_star_arg_any" => {
match parser::parse_bool(value.as_ref()) {
Ok(bool) => flake8_annotations.allow_star_arg_any = Some(bool),
Err(e) => {
warn_user!("Unable to parse '{key}' property: {e}");
}
}
}
// flake8-quotes
"quotes" | "inline-quotes" | "inline_quotes" => match value.trim() {
"'" | "single" => flake8_quotes.inline_quotes = Some(Quote::Single),
"\"" | "double" => flake8_quotes.inline_quotes = Some(Quote::Double),
_ => {
warn_user!("Unexpected '{key}' value: {value}");
}
},
"multiline-quotes" | "multiline_quotes" => match value.trim() {
"'" | "single" => flake8_quotes.multiline_quotes = Some(Quote::Single),
"\"" | "double" => flake8_quotes.multiline_quotes = Some(Quote::Double),
_ => {
warn_user!("Unexpected '{key}' value: {value}");
}
},
"docstring-quotes" | "docstring_quotes" => match value.trim() {
"'" | "single" => flake8_quotes.docstring_quotes = Some(Quote::Single),
"\"" | "double" => flake8_quotes.docstring_quotes = Some(Quote::Double),
_ => {
warn_user!("Unexpected '{key}' value: {value}");
}
},
"avoid-escape" | "avoid_escape" => match parser::parse_bool(value.as_ref()) {
Ok(bool) => flake8_quotes.avoid_escape = Some(bool),
Err(e) => {
warn_user!("Unable to parse '{key}' property: {e}");
}
},
// pep8-naming
"ignore-names" | "ignore_names" => {
pep8_naming.ignore_names = Some(parser::parse_strings(value.as_ref()));
}
"classmethod-decorators" | "classmethod_decorators" => {
pep8_naming.classmethod_decorators =
Some(parser::parse_strings(value.as_ref()));
}
"staticmethod-decorators" | "staticmethod_decorators" => {
pep8_naming.staticmethod_decorators =
Some(parser::parse_strings(value.as_ref()));
}
// flake8-tidy-imports
"ban-relative-imports" | "ban_relative_imports" => match value.trim() {
"true" => flake8_tidy_imports.ban_relative_imports = Some(Strictness::All),
"parents" => {
flake8_tidy_imports.ban_relative_imports = Some(Strictness::Parents);
}
_ => {
warn_user!("Unexpected '{key}' value: {value}");
}
},
// flake8-docstrings
"docstring-convention" => match value.trim() {
"google" => pydocstyle.convention = Some(Convention::Google),
"numpy" => pydocstyle.convention = Some(Convention::Numpy),
"pep257" => pydocstyle.convention = Some(Convention::Pep257),
"all" => pydocstyle.convention = None,
_ => {
warn_user!("Unexpected '{key}' value: {value}");
}
},
// mccabe
"max-complexity" | "max_complexity" => match value.parse::<usize>() {
Ok(max_complexity) => mccabe.max_complexity = Some(max_complexity),
Err(e) => {
warn_user!("Unable to parse '{key}' property: {e}");
}
},
// flake8-errmsg
"errmsg-max-string-length" | "errmsg_max_string_length" => {
match value.parse::<usize>() {
Ok(max_string_length) => {
flake8_errmsg.max_string_length = Some(max_string_length);
}
Err(e) => {
warn_user!("Unable to parse '{key}' property: {e}");
}
}
}
// flake8-pytest-style
"pytest-fixture-no-parentheses" | "pytest_fixture_no_parentheses " => {
match parser::parse_bool(value.as_ref()) {
Ok(bool) => flake8_pytest_style.fixture_parentheses = Some(!bool),
Err(e) => {
warn_user!("Unable to parse '{key}' property: {e}");
}
}
}
"pytest-parametrize-names-type" | "pytest_parametrize_names_type" => {
match value.trim() {
"csv" => {
flake8_pytest_style.parametrize_names_type =
Some(ParametrizeNameType::Csv);
}
"tuple" => {
flake8_pytest_style.parametrize_names_type =
Some(ParametrizeNameType::Tuple);
}
"list" => {
flake8_pytest_style.parametrize_names_type =
Some(ParametrizeNameType::List);
}
_ => {
warn_user!("Unexpected '{key}' value: {value}");
}
}
}
"pytest-parametrize-values-type" | "pytest_parametrize_values_type" => {
match value.trim() {
"tuple" => {
flake8_pytest_style.parametrize_values_type =
Some(ParametrizeValuesType::Tuple);
}
"list" => {
flake8_pytest_style.parametrize_values_type =
Some(ParametrizeValuesType::List);
}
_ => {
warn_user!("Unexpected '{key}' value: {value}");
}
}
}
"pytest-parametrize-values-row-type" | "pytest_parametrize_values_row_type" => {
match value.trim() {
"tuple" => {
flake8_pytest_style.parametrize_values_row_type =
Some(ParametrizeValuesRowType::Tuple);
}
"list" => {
flake8_pytest_style.parametrize_values_row_type =
Some(ParametrizeValuesRowType::List);
}
_ => {
warn_user!("Unexpected '{key}' value: {value}");
}
}
}
"pytest-raises-require-match-for" | "pytest_raises_require_match_for" => {
flake8_pytest_style.raises_require_match_for =
Some(parser::parse_strings(value.as_ref()));
}
"pytest-mark-no-parentheses" | "pytest_mark_no_parentheses" => {
match parser::parse_bool(value.as_ref()) {
Ok(bool) => flake8_pytest_style.mark_parentheses = Some(!bool),
Err(e) => {
warn_user!("Unable to parse '{key}' property: {e}");
}
}
}
// Unknown
_ => {
warn_user!("Skipping unsupported property: {}", key);
}
}
}
}
// Deduplicate and sort.
lint_options.select = Some(
select
.into_iter()
.sorted_by_key(RuleSelector::prefix_and_code)
.collect(),
);
lint_options.ignore = Some(
ignore
.into_iter()
.sorted_by_key(RuleSelector::prefix_and_code)
.collect(),
);
if flake8_annotations != Flake8AnnotationsOptions::default() {
lint_options.flake8_annotations = Some(flake8_annotations);
}
if flake8_bugbear != Flake8BugbearOptions::default() {
lint_options.flake8_bugbear = Some(flake8_bugbear);
}
if flake8_builtins != Flake8BuiltinsOptions::default() {
lint_options.flake8_builtins = Some(flake8_builtins);
}
if flake8_errmsg != Flake8ErrMsgOptions::default() {
lint_options.flake8_errmsg = Some(flake8_errmsg);
}
if flake8_pytest_style != Flake8PytestStyleOptions::default() {
lint_options.flake8_pytest_style = Some(flake8_pytest_style);
}
if flake8_quotes != Flake8QuotesOptions::default() {
lint_options.flake8_quotes = Some(flake8_quotes);
}
if flake8_tidy_imports != Flake8TidyImportsOptions::default() {
lint_options.flake8_tidy_imports = Some(flake8_tidy_imports);
}
if mccabe != McCabeOptions::default() {
lint_options.mccabe = Some(mccabe);
}
if pep8_naming != Pep8NamingOptions::default() {
lint_options.pep8_naming = Some(pep8_naming);
}
if pydocstyle != PydocstyleOptions::default() {
lint_options.pydocstyle = Some(pydocstyle);
}
// Extract any settings from the existing `pyproject.toml`.
if let Some(black) = &external_config.black {
if let Some(line_length) = &black.line_length {
options.line_length = Some(*line_length);
}
if let Some(target_version) = &black.target_version {
if let Some(target_version) = target_version.iter().min() {
options.target_version = Some(*target_version);
}
}
}
if let Some(isort) = &external_config.isort {
if let Some(src_paths) = &isort.src_paths {
match options.src.as_mut() {
Some(src) => {
src.extend_from_slice(src_paths);
}
None => {
options.src = Some(src_paths.clone());
}
}
}
}
if let Some(project) = &external_config.project {
if let Some(requires_python) = &project.requires_python {
if options.target_version.is_none() {
options.target_version =
PythonVersion::get_minimum_supported_version(requires_python);
}
}
}
if lint_options != LintCommonOptions::default() {
options.lint = Some(LintOptions {
common: lint_options,
..LintOptions::default()
});
}
// Create the pyproject.toml.
Pyproject::new(options)
}
/// Resolve the set of enabled `RuleSelector` values for the given
/// plugins.
fn resolve_select(plugins: &[Plugin]) -> HashSet<RuleSelector> {
let mut select: HashSet<_> = DEFAULT_SELECTORS.iter().cloned().collect();
select.extend(plugins.iter().map(|p| Linter::from(p).into()));
select
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use std::str::FromStr;
use anyhow::Result;
use itertools::Itertools;
use pep440_rs::VersionSpecifiers;
use pretty_assertions::assert_eq;
use ruff_linter::line_width::LineLength;
use ruff_linter::registry::Linter;
use ruff_linter::rule_selector::RuleSelector;
use ruff_linter::rules::flake8_quotes;
use ruff_linter::rules::pydocstyle::settings::Convention;
use ruff_linter::settings::types::PythonVersion;
use ruff_workspace::options::{
Flake8QuotesOptions, LintCommonOptions, LintOptions, Options, PydocstyleOptions,
};
use ruff_workspace::pyproject::Pyproject;
use crate::converter::DEFAULT_SELECTORS;
use crate::pep621::Project;
use crate::ExternalConfig;
use super::super::plugin::Plugin;
use super::convert;
fn lint_default_options(plugins: impl IntoIterator<Item = RuleSelector>) -> LintCommonOptions {
LintCommonOptions {
ignore: Some(vec![]),
select: Some(
DEFAULT_SELECTORS
.iter()
.cloned()
.chain(plugins)
.sorted_by_key(RuleSelector::prefix_and_code)
.collect(),
),
..LintCommonOptions::default()
}
}
#[test]
fn it_converts_empty() {
let actual = convert(
&HashMap::from([("flake8".to_string(), HashMap::default())]),
&ExternalConfig::default(),
None,
);
let expected = Pyproject::new(Options {
lint: Some(LintOptions {
common: lint_default_options([]),
..LintOptions::default()
}),
..Options::default()
});
assert_eq!(actual, expected);
}
#[test]
fn it_converts_dashes() {
let actual = convert(
&HashMap::from([(
"flake8".to_string(),
HashMap::from([("max-line-length".to_string(), Some("100".to_string()))]),
)]),
&ExternalConfig::default(),
Some(vec![]),
);
let expected = Pyproject::new(Options {
line_length: Some(LineLength::try_from(100).unwrap()),
lint: Some(LintOptions {
common: lint_default_options([]),
..LintOptions::default()
}),
..Options::default()
});
assert_eq!(actual, expected);
}
#[test]
fn it_converts_underscores() {
let actual = convert(
&HashMap::from([(
"flake8".to_string(),
HashMap::from([("max_line_length".to_string(), Some("100".to_string()))]),
)]),
&ExternalConfig::default(),
Some(vec![]),
);
let expected = Pyproject::new(Options {
line_length: Some(LineLength::try_from(100).unwrap()),
lint: Some(LintOptions {
common: lint_default_options([]),
..LintOptions::default()
}),
..Options::default()
});
assert_eq!(actual, expected);
}
#[test]
fn it_ignores_parse_errors() {
let actual = convert(
&HashMap::from([(
"flake8".to_string(),
HashMap::from([("max_line_length".to_string(), Some("abc".to_string()))]),
)]),
&ExternalConfig::default(),
Some(vec![]),
);
let expected = Pyproject::new(Options {
lint: Some(LintOptions {
common: lint_default_options([]),
..LintOptions::default()
}),
..Options::default()
});
assert_eq!(actual, expected);
}
#[test]
fn it_converts_plugin_options() {
let actual = convert(
&HashMap::from([(
"flake8".to_string(),
HashMap::from([("inline-quotes".to_string(), Some("single".to_string()))]),
)]),
&ExternalConfig::default(),
Some(vec![]),
);
let expected = Pyproject::new(Options {
lint: Some(LintOptions {
common: LintCommonOptions {
flake8_quotes: Some(Flake8QuotesOptions {
inline_quotes: Some(flake8_quotes::settings::Quote::Single),
multiline_quotes: None,
docstring_quotes: None,
avoid_escape: None,
}),
..lint_default_options([])
},
..LintOptions::default()
}),
..Options::default()
});
assert_eq!(actual, expected);
}
#[test]
fn it_converts_docstring_conventions() {
let actual = convert(
&HashMap::from([(
"flake8".to_string(),
HashMap::from([(
"docstring-convention".to_string(),
Some("numpy".to_string()),
)]),
)]),
&ExternalConfig::default(),
Some(vec![Plugin::Flake8Docstrings]),
);
let expected = Pyproject::new(Options {
lint: Some(LintOptions {
common: LintCommonOptions {
pydocstyle: Some(PydocstyleOptions {
convention: Some(Convention::Numpy),
ignore_decorators: None,
property_decorators: None,
}),
..lint_default_options([Linter::Pydocstyle.into()])
},
..LintOptions::default()
}),
..Options::default()
});
assert_eq!(actual, expected);
}
#[test]
fn it_infers_plugins_if_omitted() {
let actual = convert(
&HashMap::from([(
"flake8".to_string(),
HashMap::from([("inline-quotes".to_string(), Some("single".to_string()))]),
)]),
&ExternalConfig::default(),
None,
);
let expected = Pyproject::new(Options {
lint: Some(LintOptions {
common: LintCommonOptions {
flake8_quotes: Some(Flake8QuotesOptions {
inline_quotes: Some(flake8_quotes::settings::Quote::Single),
multiline_quotes: None,
docstring_quotes: None,
avoid_escape: None,
}),
..lint_default_options([Linter::Flake8Quotes.into()])
},
..LintOptions::default()
}),
..Options::default()
});
assert_eq!(actual, expected);
}
#[test]
fn it_converts_project_requires_python() -> Result<()> {
let actual = convert(
&HashMap::from([("flake8".to_string(), HashMap::default())]),
&ExternalConfig {
project: Some(&Project {
requires_python: Some(VersionSpecifiers::from_str(">=3.8.16, <3.11")?),
}),
..ExternalConfig::default()
},
Some(vec![]),
);
let expected = Pyproject::new(Options {
target_version: Some(PythonVersion::Py38),
lint: Some(LintOptions {
common: lint_default_options([]),
..LintOptions::default()
}),
..Options::default()
});
assert_eq!(actual, expected);
Ok(())
}
}

View File

@@ -0,0 +1,10 @@
use super::black::Black;
use super::isort::Isort;
use super::pep621::Project;
#[derive(Default)]
pub(crate) struct ExternalConfig<'a> {
pub(crate) black: Option<&'a Black>,
pub(crate) isort: Option<&'a Isort>,
pub(crate) project: Option<&'a Project>,
}

View File

@@ -0,0 +1,10 @@
//! Extract isort configuration settings from a pyproject.toml.
use serde::{Deserialize, Serialize};
/// The [isort configuration](https://pycqa.github.io/isort/docs/configuration/config_files.html).
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
pub(crate) struct Isort {
#[serde(alias = "src-paths", alias = "src_paths")]
pub(crate) src_paths: Option<Vec<String>>,
}

View File

@@ -0,0 +1,80 @@
//! Utility to generate Ruff's `pyproject.toml` section from a Flake8 INI file.
mod black;
mod converter;
mod external_config;
mod isort;
mod parser;
mod pep621;
mod plugin;
mod pyproject;
use std::path::PathBuf;
use anyhow::Result;
use clap::Parser;
use configparser::ini::Ini;
use crate::converter::convert;
use crate::external_config::ExternalConfig;
use crate::plugin::Plugin;
use crate::pyproject::parse;
use ruff_linter::logging::{set_up_logging, LogLevel};
#[derive(Parser)]
#[command(
about = "Convert existing Flake8 configuration to Ruff.",
long_about = None
)]
struct Args {
/// Path to the Flake8 configuration file (e.g., `setup.cfg`, `tox.ini`, or
/// `.flake8`).
#[arg(required = true)]
file: PathBuf,
/// Optional path to a `pyproject.toml` file, used to ensure compatibility
/// with Black.
#[arg(long)]
pyproject: Option<PathBuf>,
/// List of plugins to enable.
#[arg(long, value_delimiter = ',')]
plugin: Option<Vec<Plugin>>,
}
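// Example invocation (the binary name is illustrative; the flags correspond to
// the arguments defined above):
//
//     flake8_to_ruff setup.cfg --pyproject pyproject.toml --plugin flake8-docstrings,flake8-quotes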
fn main() -> Result<()> {
set_up_logging(&LogLevel::Default)?;
let args = Args::parse();
// Read the INI file.
let mut ini = Ini::new_cs();
ini.set_multiline(true);
let config = ini.load(args.file).map_err(|msg| anyhow::anyhow!(msg))?;
// Read the pyproject.toml file.
let pyproject = args.pyproject.map(parse).transpose()?;
let external_config = pyproject
.as_ref()
.and_then(|pyproject| pyproject.tool.as_ref())
.map(|tool| ExternalConfig {
black: tool.black.as_ref(),
isort: tool.isort.as_ref(),
..Default::default()
})
.unwrap_or_default();
let external_config = ExternalConfig {
project: pyproject
.as_ref()
.and_then(|pyproject| pyproject.project.as_ref()),
..external_config
};
// Create Ruff's pyproject.toml section.
let pyproject = convert(&config, &external_config, args.plugin);
#[allow(clippy::print_stdout)]
{
println!("{}", toml::to_string_pretty(&pyproject)?);
}
Ok(())
}

View File

@@ -0,0 +1,391 @@
use std::str::FromStr;
use anyhow::{bail, Result};
use once_cell::sync::Lazy;
use regex::Regex;
use rustc_hash::FxHashMap;
use ruff_linter::settings::types::PatternPrefixPair;
use ruff_linter::{warn_user, RuleSelector};
static COMMA_SEPARATED_LIST_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"[,\s]").unwrap());
/// Parse a comma-separated list of `RuleSelector` values (e.g.,
/// "F401,E501").
pub(crate) fn parse_prefix_codes(value: &str) -> Vec<RuleSelector> {
let mut codes: Vec<RuleSelector> = vec![];
for code in COMMA_SEPARATED_LIST_RE.split(value) {
let code = code.trim();
if code.is_empty() {
continue;
}
if let Ok(code) = RuleSelector::from_str(code) {
codes.push(code);
} else {
warn_user!("Unsupported prefix code: {code}");
}
}
codes
}
/// Parse a comma-separated list of strings (e.g., "__init__.py,__main__.py").
pub(crate) fn parse_strings(value: &str) -> Vec<String> {
COMMA_SEPARATED_LIST_RE
.split(value)
.map(str::trim)
.filter(|part| !part.is_empty())
.map(String::from)
.collect()
}
/// Parse a boolean.
pub(crate) fn parse_bool(value: &str) -> Result<bool> {
match value.trim() {
"true" => Ok(true),
"false" => Ok(false),
_ => bail!("Unexpected boolean value: {value}"),
}
}
#[derive(Debug)]
struct Token {
token_name: TokenType,
src: String,
}
#[derive(Debug, Copy, Clone)]
enum TokenType {
Code,
File,
Colon,
Comma,
Ws,
Eof,
}
struct State {
seen_sep: bool,
seen_colon: bool,
filenames: Vec<String>,
codes: Vec<String>,
}
impl State {
const fn new() -> Self {
Self {
seen_sep: true,
seen_colon: false,
filenames: vec![],
codes: vec![],
}
}
/// Generate the list of `PatternPrefixPair` structs for the current
/// state.
fn parse(&self) -> Vec<PatternPrefixPair> {
let mut codes: Vec<PatternPrefixPair> = vec![];
for code in &self.codes {
if let Ok(code) = RuleSelector::from_str(code) {
for filename in &self.filenames {
codes.push(PatternPrefixPair {
pattern: filename.clone(),
prefix: code.clone(),
});
}
} else {
warn_user!("Unsupported prefix code: {code}");
}
}
codes
}
}
/// Tokenize the raw 'files-to-codes' mapping.
fn tokenize_files_to_codes_mapping(value: &str) -> Vec<Token> {
let mut tokens = vec![];
let mut i = 0;
while i < value.len() {
for (token_re, token_name) in [
(
Regex::new(r"([A-Z]+[0-9]*)(?:$|\s|,)").unwrap(),
TokenType::Code,
),
(Regex::new(r"([^\s:,]+)").unwrap(), TokenType::File),
(Regex::new(r"(\s*:\s*)").unwrap(), TokenType::Colon),
(Regex::new(r"(\s*,\s*)").unwrap(), TokenType::Comma),
(Regex::new(r"(\s+)").unwrap(), TokenType::Ws),
] {
if let Some(cap) = token_re.captures(&value[i..]) {
let mat = cap.get(1).unwrap();
if mat.start() == 0 {
tokens.push(Token {
token_name,
src: mat.as_str().trim().to_string(),
});
i += mat.end();
break;
}
}
}
}
tokens.push(Token {
token_name: TokenType::Eof,
src: String::new(),
});
tokens
}
/// Parse a 'files-to-codes' mapping, mimicking Flake8's internal logic.
/// See: <https://github.com/PyCQA/flake8/blob/7dfe99616fc2f07c0017df2ba5fa884158f3ea8a/src/flake8/utils.py#L45>
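///
/// For example, the value `"a.py: F401 b.py,c.py: E501,W291"` (illustrative
/// filenames, in the same format as the tests below) maps `a.py` to `F401`,
/// and both `b.py` and `c.py` to `E501` and `W291`.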
pub(crate) fn parse_files_to_codes_mapping(value: &str) -> Result<Vec<PatternPrefixPair>> {
if value.trim().is_empty() {
return Ok(vec![]);
}
let mut codes: Vec<PatternPrefixPair> = vec![];
let mut state = State::new();
for token in tokenize_files_to_codes_mapping(value) {
if matches!(token.token_name, TokenType::Comma | TokenType::Ws) {
state.seen_sep = true;
} else if !state.seen_colon {
if matches!(token.token_name, TokenType::Colon) {
state.seen_colon = true;
state.seen_sep = true;
} else if state.seen_sep && matches!(token.token_name, TokenType::File) {
state.filenames.push(token.src);
state.seen_sep = false;
} else {
bail!("Unexpected token: {:?}", token.token_name);
}
} else {
if matches!(token.token_name, TokenType::Eof) {
codes.extend(state.parse());
state = State::new();
} else if state.seen_sep && matches!(token.token_name, TokenType::Code) {
state.codes.push(token.src);
state.seen_sep = false;
} else if state.seen_sep && matches!(token.token_name, TokenType::File) {
codes.extend(state.parse());
state = State::new();
state.filenames.push(token.src);
state.seen_sep = false;
} else {
bail!("Unexpected token: {:?}", token.token_name);
}
}
}
Ok(codes)
}
/// Collect a list of `PatternPrefixPair` structs as an `FxHashMap` from pattern to codes.
pub(crate) fn collect_per_file_ignores(
pairs: Vec<PatternPrefixPair>,
) -> FxHashMap<String, Vec<RuleSelector>> {
let mut per_file_ignores: FxHashMap<String, Vec<RuleSelector>> = FxHashMap::default();
for pair in pairs {
per_file_ignores
.entry(pair.pattern)
.or_default()
.push(pair.prefix);
}
per_file_ignores
}
#[cfg(test)]
mod tests {
use anyhow::Result;
use ruff_linter::codes;
use ruff_linter::registry::Linter;
use ruff_linter::settings::types::PatternPrefixPair;
use ruff_linter::RuleSelector;
use super::{parse_files_to_codes_mapping, parse_prefix_codes, parse_strings};
#[test]
fn it_parses_prefix_codes() {
let actual = parse_prefix_codes("");
let expected: Vec<RuleSelector> = vec![];
assert_eq!(actual, expected);
let actual = parse_prefix_codes(" ");
let expected: Vec<RuleSelector> = vec![];
assert_eq!(actual, expected);
let actual = parse_prefix_codes("F401");
let expected = vec![codes::Pyflakes::_401.into()];
assert_eq!(actual, expected);
let actual = parse_prefix_codes("F401,");
let expected = vec![codes::Pyflakes::_401.into()];
assert_eq!(actual, expected);
let actual = parse_prefix_codes("F401,E501");
let expected = vec![
codes::Pyflakes::_401.into(),
codes::Pycodestyle::E501.into(),
];
assert_eq!(actual, expected);
let actual = parse_prefix_codes("F401, E501");
let expected = vec![
codes::Pyflakes::_401.into(),
codes::Pycodestyle::E501.into(),
];
assert_eq!(actual, expected);
}
#[test]
fn it_parses_strings() {
let actual = parse_strings("");
let expected: Vec<String> = vec![];
assert_eq!(actual, expected);
let actual = parse_strings(" ");
let expected: Vec<String> = vec![];
assert_eq!(actual, expected);
let actual = parse_strings("__init__.py");
let expected = vec!["__init__.py".to_string()];
assert_eq!(actual, expected);
let actual = parse_strings("__init__.py,");
let expected = vec!["__init__.py".to_string()];
assert_eq!(actual, expected);
let actual = parse_strings("__init__.py,__main__.py");
let expected = vec!["__init__.py".to_string(), "__main__.py".to_string()];
assert_eq!(actual, expected);
let actual = parse_strings("__init__.py, __main__.py");
let expected = vec!["__init__.py".to_string(), "__main__.py".to_string()];
assert_eq!(actual, expected);
}
#[test]
fn it_parse_files_to_codes_mapping() -> Result<()> {
let actual = parse_files_to_codes_mapping("")?;
let expected: Vec<PatternPrefixPair> = vec![];
assert_eq!(actual, expected);
let actual = parse_files_to_codes_mapping(" ")?;
let expected: Vec<PatternPrefixPair> = vec![];
assert_eq!(actual, expected);
// Ex) locust
let actual = parse_files_to_codes_mapping(
"per-file-ignores =
locust/test/*: F841
examples/*: F841
*.pyi: E302,E704"
.strip_prefix("per-file-ignores =")
.unwrap(),
)?;
let expected: Vec<PatternPrefixPair> = vec![
PatternPrefixPair {
pattern: "locust/test/*".to_string(),
prefix: codes::Pyflakes::_841.into(),
},
PatternPrefixPair {
pattern: "examples/*".to_string(),
prefix: codes::Pyflakes::_841.into(),
},
];
assert_eq!(actual, expected);
// Ex) celery
let actual = parse_files_to_codes_mapping(
"per-file-ignores =
t/*,setup.py,examples/*,docs/*,extra/*:
D,"
.strip_prefix("per-file-ignores =")
.unwrap(),
)?;
let expected: Vec<PatternPrefixPair> = vec![
PatternPrefixPair {
pattern: "t/*".to_string(),
prefix: Linter::Pydocstyle.into(),
},
PatternPrefixPair {
pattern: "setup.py".to_string(),
prefix: Linter::Pydocstyle.into(),
},
PatternPrefixPair {
pattern: "examples/*".to_string(),
prefix: Linter::Pydocstyle.into(),
},
PatternPrefixPair {
pattern: "docs/*".to_string(),
prefix: Linter::Pydocstyle.into(),
},
PatternPrefixPair {
pattern: "extra/*".to_string(),
prefix: Linter::Pydocstyle.into(),
},
];
assert_eq!(actual, expected);
// Ex) scrapy
let actual = parse_files_to_codes_mapping(
"per-file-ignores =
scrapy/__init__.py:E402
scrapy/core/downloader/handlers/http.py:F401
scrapy/http/__init__.py:F401
scrapy/linkextractors/__init__.py:E402,F401
scrapy/selector/__init__.py:F401
scrapy/spiders/__init__.py:E402,F401
scrapy/utils/url.py:F403,F405
tests/test_loader.py:E741"
.strip_prefix("per-file-ignores =")
.unwrap(),
)?;
let expected: Vec<PatternPrefixPair> = vec![
PatternPrefixPair {
pattern: "scrapy/__init__.py".to_string(),
prefix: codes::Pycodestyle::E402.into(),
},
PatternPrefixPair {
pattern: "scrapy/core/downloader/handlers/http.py".to_string(),
prefix: codes::Pyflakes::_401.into(),
},
PatternPrefixPair {
pattern: "scrapy/http/__init__.py".to_string(),
prefix: codes::Pyflakes::_401.into(),
},
PatternPrefixPair {
pattern: "scrapy/linkextractors/__init__.py".to_string(),
prefix: codes::Pycodestyle::E402.into(),
},
PatternPrefixPair {
pattern: "scrapy/linkextractors/__init__.py".to_string(),
prefix: codes::Pyflakes::_401.into(),
},
PatternPrefixPair {
pattern: "scrapy/selector/__init__.py".to_string(),
prefix: codes::Pyflakes::_401.into(),
},
PatternPrefixPair {
pattern: "scrapy/spiders/__init__.py".to_string(),
prefix: codes::Pycodestyle::E402.into(),
},
PatternPrefixPair {
pattern: "scrapy/spiders/__init__.py".to_string(),
prefix: codes::Pyflakes::_401.into(),
},
PatternPrefixPair {
pattern: "scrapy/utils/url.py".to_string(),
prefix: codes::Pyflakes::_403.into(),
},
PatternPrefixPair {
pattern: "scrapy/utils/url.py".to_string(),
prefix: codes::Pyflakes::_405.into(),
},
PatternPrefixPair {
pattern: "tests/test_loader.py".to_string(),
prefix: codes::Pycodestyle::E741.into(),
},
];
assert_eq!(actual, expected);
Ok(())
}
}

View File

@@ -0,0 +1,10 @@
//! Extract PEP 621 configuration settings from a pyproject.toml.
use pep440_rs::VersionSpecifiers;
use serde::{Deserialize, Serialize};
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
pub(crate) struct Project {
#[serde(alias = "requires-python", alias = "requires_python")]
pub(crate) requires_python: Option<VersionSpecifiers>,
}

View File

@@ -0,0 +1,368 @@
use std::collections::{BTreeSet, HashMap, HashSet};
use std::fmt;
use std::str::FromStr;
use anyhow::anyhow;
use ruff_linter::registry::Linter;
use ruff_linter::rule_selector::PreviewOptions;
use ruff_linter::RuleSelector;
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
pub enum Plugin {
Flake82020,
Flake8Annotations,
Flake8Bandit,
Flake8BlindExcept,
Flake8BooleanTrap,
Flake8Bugbear,
Flake8Builtins,
Flake8Commas,
Flake8Comprehensions,
Flake8Datetimez,
Flake8Debugger,
Flake8Docstrings,
Flake8Eradicate,
Flake8ErrMsg,
Flake8Executable,
Flake8ImplicitStrConcat,
Flake8ImportConventions,
Flake8NoPep420,
Flake8Pie,
Flake8Print,
Flake8PytestStyle,
Flake8Quotes,
Flake8Return,
Flake8Simplify,
Flake8TidyImports,
Flake8TypeChecking,
Flake8UnusedArguments,
Flake8UsePathlib,
McCabe,
PEP8Naming,
PandasVet,
Pyupgrade,
Tryceratops,
}
impl FromStr for Plugin {
type Err = anyhow::Error;
fn from_str(string: &str) -> Result<Self, Self::Err> {
match string {
"flake8-2020" => Ok(Plugin::Flake82020),
"flake8-annotations" => Ok(Plugin::Flake8Annotations),
"flake8-bandit" => Ok(Plugin::Flake8Bandit),
"flake8-blind-except" => Ok(Plugin::Flake8BlindExcept),
"flake8-boolean-trap" => Ok(Plugin::Flake8BooleanTrap),
"flake8-bugbear" => Ok(Plugin::Flake8Bugbear),
"flake8-builtins" => Ok(Plugin::Flake8Builtins),
"flake8-commas" => Ok(Plugin::Flake8Commas),
"flake8-comprehensions" => Ok(Plugin::Flake8Comprehensions),
"flake8-datetimez" => Ok(Plugin::Flake8Datetimez),
"flake8-debugger" => Ok(Plugin::Flake8Debugger),
"flake8-docstrings" => Ok(Plugin::Flake8Docstrings),
"flake8-eradicate" => Ok(Plugin::Flake8Eradicate),
"flake8-errmsg" => Ok(Plugin::Flake8ErrMsg),
"flake8-executable" => Ok(Plugin::Flake8Executable),
"flake8-implicit-str-concat" => Ok(Plugin::Flake8ImplicitStrConcat),
"flake8-import-conventions" => Ok(Plugin::Flake8ImportConventions),
"flake8-no-pep420" => Ok(Plugin::Flake8NoPep420),
"flake8-pie" => Ok(Plugin::Flake8Pie),
"flake8-print" => Ok(Plugin::Flake8Print),
"flake8-pytest-style" => Ok(Plugin::Flake8PytestStyle),
"flake8-quotes" => Ok(Plugin::Flake8Quotes),
"flake8-return" => Ok(Plugin::Flake8Return),
"flake8-simplify" => Ok(Plugin::Flake8Simplify),
"flake8-tidy-imports" => Ok(Plugin::Flake8TidyImports),
"flake8-type-checking" => Ok(Plugin::Flake8TypeChecking),
"flake8-unused-arguments" => Ok(Plugin::Flake8UnusedArguments),
"flake8-use-pathlib" => Ok(Plugin::Flake8UsePathlib),
"mccabe" => Ok(Plugin::McCabe),
"pep8-naming" => Ok(Plugin::PEP8Naming),
"pandas-vet" => Ok(Plugin::PandasVet),
"pyupgrade" => Ok(Plugin::Pyupgrade),
"tryceratops" => Ok(Plugin::Tryceratops),
_ => Err(anyhow!("Unknown plugin: {string}")),
}
}
}
impl fmt::Debug for Plugin {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}",
match self {
Plugin::Flake82020 => "flake8-2020",
Plugin::Flake8Annotations => "flake8-annotations",
Plugin::Flake8Bandit => "flake8-bandit",
Plugin::Flake8BlindExcept => "flake8-blind-except",
Plugin::Flake8BooleanTrap => "flake8-boolean-trap",
Plugin::Flake8Bugbear => "flake8-bugbear",
Plugin::Flake8Builtins => "flake8-builtins",
Plugin::Flake8Commas => "flake8-commas",
Plugin::Flake8Comprehensions => "flake8-comprehensions",
Plugin::Flake8Datetimez => "flake8-datetimez",
Plugin::Flake8Debugger => "flake8-debugger",
Plugin::Flake8Docstrings => "flake8-docstrings",
Plugin::Flake8Eradicate => "flake8-eradicate",
Plugin::Flake8ErrMsg => "flake8-errmsg",
Plugin::Flake8Executable => "flake8-executable",
Plugin::Flake8ImplicitStrConcat => "flake8-implicit-str-concat",
Plugin::Flake8ImportConventions => "flake8-import-conventions",
Plugin::Flake8NoPep420 => "flake8-no-pep420",
Plugin::Flake8Pie => "flake8-pie",
Plugin::Flake8Print => "flake8-print",
Plugin::Flake8PytestStyle => "flake8-pytest-style",
Plugin::Flake8Quotes => "flake8-quotes",
Plugin::Flake8Return => "flake8-return",
Plugin::Flake8Simplify => "flake8-simplify",
Plugin::Flake8TidyImports => "flake8-tidy-imports",
Plugin::Flake8TypeChecking => "flake8-type-checking",
Plugin::Flake8UnusedArguments => "flake8-unused-arguments",
Plugin::Flake8UsePathlib => "flake8-use-pathlib",
Plugin::McCabe => "mccabe",
Plugin::PEP8Naming => "pep8-naming",
Plugin::PandasVet => "pandas-vet",
Plugin::Pyupgrade => "pyupgrade",
Plugin::Tryceratops => "tryceratops",
}
)
}
}
impl From<&Plugin> for Linter {
fn from(plugin: &Plugin) -> Self {
match plugin {
Plugin::Flake82020 => Linter::Flake82020,
Plugin::Flake8Annotations => Linter::Flake8Annotations,
Plugin::Flake8Bandit => Linter::Flake8Bandit,
Plugin::Flake8BlindExcept => Linter::Flake8BlindExcept,
Plugin::Flake8BooleanTrap => Linter::Flake8BooleanTrap,
Plugin::Flake8Bugbear => Linter::Flake8Bugbear,
Plugin::Flake8Builtins => Linter::Flake8Builtins,
Plugin::Flake8Commas => Linter::Flake8Commas,
Plugin::Flake8Comprehensions => Linter::Flake8Comprehensions,
Plugin::Flake8Datetimez => Linter::Flake8Datetimez,
Plugin::Flake8Debugger => Linter::Flake8Debugger,
Plugin::Flake8Docstrings => Linter::Pydocstyle,
Plugin::Flake8Eradicate => Linter::Eradicate,
Plugin::Flake8ErrMsg => Linter::Flake8ErrMsg,
Plugin::Flake8Executable => Linter::Flake8Executable,
Plugin::Flake8ImplicitStrConcat => Linter::Flake8ImplicitStrConcat,
Plugin::Flake8ImportConventions => Linter::Flake8ImportConventions,
Plugin::Flake8NoPep420 => Linter::Flake8NoPep420,
Plugin::Flake8Pie => Linter::Flake8Pie,
Plugin::Flake8Print => Linter::Flake8Print,
Plugin::Flake8PytestStyle => Linter::Flake8PytestStyle,
Plugin::Flake8Quotes => Linter::Flake8Quotes,
Plugin::Flake8Return => Linter::Flake8Return,
Plugin::Flake8Simplify => Linter::Flake8Simplify,
Plugin::Flake8TidyImports => Linter::Flake8TidyImports,
Plugin::Flake8TypeChecking => Linter::Flake8TypeChecking,
Plugin::Flake8UnusedArguments => Linter::Flake8UnusedArguments,
Plugin::Flake8UsePathlib => Linter::Flake8UsePathlib,
Plugin::McCabe => Linter::McCabe,
Plugin::PEP8Naming => Linter::PEP8Naming,
Plugin::PandasVet => Linter::PandasVet,
Plugin::Pyupgrade => Linter::Pyupgrade,
Plugin::Tryceratops => Linter::Tryceratops,
}
}
}
/// Infer the enabled plugins based on user-provided options.
///
/// For example, if the user specified a `mypy-init-return` setting, we should
/// infer that `flake8-annotations` is active.
pub(crate) fn infer_plugins_from_options(flake8: &HashMap<String, Option<String>>) -> Vec<Plugin> {
let mut plugins = BTreeSet::new();
for key in flake8.keys() {
match key.as_str() {
// flake8-annotations
"suppress-none-returning" | "suppress_none_returning" => {
plugins.insert(Plugin::Flake8Annotations);
}
"suppress-dummy-args" | "suppress_dummy_args" => {
plugins.insert(Plugin::Flake8Annotations);
}
"allow-untyped-defs" | "allow_untyped_defs" => {
plugins.insert(Plugin::Flake8Annotations);
}
"allow-untyped-nested" | "allow_untyped_nested" => {
plugins.insert(Plugin::Flake8Annotations);
}
"mypy-init-return" | "mypy_init_return" => {
plugins.insert(Plugin::Flake8Annotations);
}
"dispatch-decorators" | "dispatch_decorators" => {
plugins.insert(Plugin::Flake8Annotations);
}
"overload-decorators" | "overload_decorators" => {
plugins.insert(Plugin::Flake8Annotations);
}
"allow-star-arg-any" | "allow_star_arg_any" => {
plugins.insert(Plugin::Flake8Annotations);
}
// flake8-bugbear
"extend-immutable-calls" | "extend_immutable_calls" => {
plugins.insert(Plugin::Flake8Bugbear);
}
// flake8-builtins
"builtins-ignorelist" | "builtins_ignorelist" => {
plugins.insert(Plugin::Flake8Builtins);
}
// flake8-docstrings
"docstring-convention" | "docstring_convention" => {
plugins.insert(Plugin::Flake8Docstrings);
}
// flake8-eradicate
"eradicate-aggressive" | "eradicate_aggressive" => {
plugins.insert(Plugin::Flake8Eradicate);
}
"eradicate-whitelist" | "eradicate_whitelist" => {
plugins.insert(Plugin::Flake8Eradicate);
}
"eradicate-whitelist-extend" | "eradicate_whitelist_extend" => {
plugins.insert(Plugin::Flake8Eradicate);
}
// flake8-pytest-style
"pytest-fixture-no-parentheses" | "pytest_fixture_no_parentheses " => {
plugins.insert(Plugin::Flake8PytestStyle);
}
"pytest-parametrize-names-type" | "pytest_parametrize_names_type" => {
plugins.insert(Plugin::Flake8PytestStyle);
}
"pytest-parametrize-values-type" | "pytest_parametrize_values_type" => {
plugins.insert(Plugin::Flake8PytestStyle);
}
"pytest-parametrize-values-row-type" | "pytest_parametrize_values_row_type" => {
plugins.insert(Plugin::Flake8PytestStyle);
}
"pytest-raises-require-match-for" | "pytest_raises_require_match_for" => {
plugins.insert(Plugin::Flake8PytestStyle);
}
"pytest-mark-no-parentheses" | "pytest_mark_no_parentheses" => {
plugins.insert(Plugin::Flake8PytestStyle);
}
// flake8-quotes
"quotes" | "inline-quotes" | "inline_quotes" => {
plugins.insert(Plugin::Flake8Quotes);
}
"multiline-quotes" | "multiline_quotes" => {
plugins.insert(Plugin::Flake8Quotes);
}
"docstring-quotes" | "docstring_quotes" => {
plugins.insert(Plugin::Flake8Quotes);
}
"avoid-escape" | "avoid_escape" => {
plugins.insert(Plugin::Flake8Quotes);
}
// flake8-tidy-imports
"ban-relative-imports" | "ban_relative_imports" => {
plugins.insert(Plugin::Flake8TidyImports);
}
"banned-modules" | "banned_modules" => {
plugins.insert(Plugin::Flake8TidyImports);
}
// mccabe
"max-complexity" | "max_complexity" => {
plugins.insert(Plugin::McCabe);
}
// pep8-naming
"ignore-names" | "ignore_names" => {
plugins.insert(Plugin::PEP8Naming);
}
"classmethod-decorators" | "classmethod_decorators" => {
plugins.insert(Plugin::PEP8Naming);
}
"staticmethod-decorators" | "staticmethod_decorators" => {
plugins.insert(Plugin::PEP8Naming);
}
"max-string-length" | "max_string_length" => {
plugins.insert(Plugin::Flake8ErrMsg);
}
_ => {}
}
}
Vec::from_iter(plugins)
}
/// Infer the enabled plugins based on the referenced prefixes.
///
/// For example, if the user ignores `ANN101`, we should infer that
/// `flake8-annotations` is active.
pub(crate) fn infer_plugins_from_codes(selectors: &HashSet<RuleSelector>) -> Vec<Plugin> {
// Ignore cases in which we've knowingly changed rule prefixes.
[
Plugin::Flake82020,
Plugin::Flake8Annotations,
Plugin::Flake8Bandit,
// Plugin::Flake8BlindExcept,
Plugin::Flake8BooleanTrap,
Plugin::Flake8Bugbear,
Plugin::Flake8Builtins,
// Plugin::Flake8Commas,
Plugin::Flake8Comprehensions,
Plugin::Flake8Datetimez,
Plugin::Flake8Debugger,
Plugin::Flake8Docstrings,
// Plugin::Flake8Eradicate,
Plugin::Flake8ErrMsg,
Plugin::Flake8Executable,
Plugin::Flake8ImplicitStrConcat,
// Plugin::Flake8ImportConventions,
Plugin::Flake8NoPep420,
Plugin::Flake8Pie,
Plugin::Flake8Print,
Plugin::Flake8PytestStyle,
Plugin::Flake8Quotes,
Plugin::Flake8Return,
Plugin::Flake8Simplify,
// Plugin::Flake8TidyImports,
// Plugin::Flake8TypeChecking,
Plugin::Flake8UnusedArguments,
// Plugin::Flake8UsePathlib,
Plugin::McCabe,
Plugin::PEP8Naming,
Plugin::PandasVet,
Plugin::Tryceratops,
]
.into_iter()
.filter(|plugin| {
for selector in selectors {
if selector
.rules(&PreviewOptions::default())
.any(|rule| Linter::from(plugin).rules().any(|r| r == rule))
{
return true;
}
}
false
})
.collect()
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use super::{infer_plugins_from_options, Plugin};
#[test]
fn it_infers_plugins() {
let actual = infer_plugins_from_options(&HashMap::from([(
"inline-quotes".to_string(),
Some("single".to_string()),
)]));
let expected = vec![Plugin::Flake8Quotes];
assert_eq!(actual, expected);
let actual = infer_plugins_from_options(&HashMap::from([(
"staticmethod-decorators".to_string(),
Some("[]".to_string()),
)]));
let expected = vec![Plugin::PEP8Naming];
assert_eq!(actual, expected);
}
}

View File

@@ -0,0 +1,26 @@
use std::path::Path;
use anyhow::Result;
use serde::{Deserialize, Serialize};
use super::black::Black;
use super::isort::Isort;
use super::pep621::Project;
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
pub(crate) struct Tools {
pub(crate) black: Option<Black>,
pub(crate) isort: Option<Isort>,
}
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
pub(crate) struct Pyproject {
pub(crate) tool: Option<Tools>,
pub(crate) project: Option<Project>,
}
pub(crate) fn parse<P: AsRef<Path>>(path: P) -> Result<Pyproject> {
let contents = std::fs::read_to_string(path)?;
let pyproject = toml::from_str::<Pyproject>(&contents)?;
Ok(pyproject)
}

View File

@@ -1,39 +0,0 @@
[package]
name = "red_knot"
version = "0.0.0"
edition.workspace = true
rust-version.workspace = true
homepage.workspace = true
documentation.workspace = true
repository.workspace = true
authors.workspace = true
license.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
red_knot_python_semantic = { workspace = true }
red_knot_workspace = { workspace = true, features = ["zstd"] }
red_knot_server = { workspace = true }
ruff_db = { workspace = true, features = ["os", "cache"] }
anyhow = { workspace = true }
chrono = { workspace = true }
clap = { workspace = true, features = ["wrap_help"] }
colored = { workspace = true }
countme = { workspace = true, features = ["enable"] }
crossbeam = { workspace = true }
ctrlc = { version = "3.4.4" }
rayon = { workspace = true }
salsa = { workspace = true }
tracing = { workspace = true, features = ["release_max_level_debug"] }
tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] }
tracing-flame = { workspace = true }
tracing-tree = { workspace = true }
[dev-dependencies]
filetime = { workspace = true }
tempfile = { workspace = true }
[lints]
workspace = true

Binary file not shown.


View File

@@ -1,128 +0,0 @@
# Tracing
Traces are a useful tool to narrow down the location of a bug or, at least, to understand why the compiler is doing a particular thing.
Note that tracing messages with severity `debug` or greater are user-facing, so they should be phrased accordingly.
Tracing spans are only shown when using `-vvv`.
## Verbosity levels
The CLI supports different verbosity levels.
- default: Only show errors and warnings.
- `-v` activates `info!`: Shows generally useful information such as paths of configuration files, the detected platform, etc. This isn't a lot of messages; it's something you might activate in CI by default, much like `cargo build` shows you which packages are fresh.
- `-vv` activates `debug!` and timestamps: This should be enough information to get to the bottom of bug reports. When you're processing many packages or files, you'll get pages and pages of output, but each line is linked to a specific action or state change.
- `-vvv` activates `trace!` (only in debug builds) and shows tracing spans: At this level, you're logging everything. Most of it is wasted output, it's really slow, and we dump e.g. the entire resolution graph. It's only useful to developers, and you almost certainly want to use `RED_KNOT_LOG` to filter it down to the area you're investigating.
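For instance, using the `red_knot` binary (invoked the same way as in the profiling section below):

```bash
# Generally useful information only:
red_knot -v
# Debug messages plus timestamps:
red_knot -vv
# Everything, including tracing spans (debug builds only):
red_knot -vvv
```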
## Better logging with `RED_KNOT_LOG` and `RAYON_NUM_THREADS`
By default, the CLI shows messages from the `ruff` and `red_knot` crates. Tracing messages from other crates are not shown.
The `RED_KNOT_LOG` environment variable allows you to customize which messages are shown by specifying one
or more [filter directives](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives).
The `RAYON_NUM_THREADS` environment variable, meanwhile, can be used to control the level of concurrency red-knot uses.
By default, red-knot will attempt to parallelize its work so that multiple files are checked simultaneously,
but this can result in confusing log output in which messages from different threads are interleaved.
To switch off concurrency entirely and have more readable logs, use `RAYON_NUM_THREADS=1`.
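For example, to combine readable single-threaded logs with debug-level messages from all crates:

```bash
RAYON_NUM_THREADS=1 RED_KNOT_LOG=debug
```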
### Examples
#### Show all debug messages
Shows debug messages from all crates.
```bash
RED_KNOT_LOG=debug
```
#### Show salsa query execution messages
Show the salsa `execute: my_query` messages in addition to all red knot messages.
```bash
RED_KNOT_LOG=ruff=trace,red_knot=trace,salsa=info
```
#### Show typing traces
Only show traces for the `red_knot_python_semantic::types` module.
```bash
RED_KNOT_LOG="red_knot_python_semantic::types"
```
Note: Ensure that you use `-vvv` to see tracing spans.
#### Show messages for a single file
Shows all messages that are inside a span for a specific file.
```bash
RED_KNOT_LOG=red_knot[{file=/home/micha/astral/test/x.py}]=trace
```
**Note**: Tracing still shows all spans because tracing can't know at the time of entering the span
whether one of its children has the file `x.py`.
**Note**: Salsa currently logs entire memoized values (in our case, the source text and parsed AST).
This very quickly leads to extremely long output.
## Tracing and Salsa
Be mindful about using `tracing` in Salsa queries, especially with `warn` or `error`, because it isn't guaranteed
that the query will execute after restoring from a persistent cache, in which case the user won't see the message.
For example, don't use `tracing` to show the user a message when generating a lint violation fails,
because the message would only be shown the first time the file is linted, but not on subsequent analysis
runs or when restoring from a persistent cache. This can be confusing for users because they
don't understand why a specific lint violation isn't raised. Instead, change your
query to return the failure as part of the query's result or use a Salsa accumulator.
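Here is a minimal, Salsa-free sketch of the first option; `LintResult` and `lint_file` are illustrative names, not Red Knot APIs:

```rust
/// Result of a (hypothetical) lint query. Failures are part of the value,
/// so they survive caching and can be reported on every run.
#[derive(Debug, Default)]
struct LintResult {
    violations: Vec<String>,
    /// Failures encountered while generating violations.
    errors: Vec<String>,
}

fn lint_file(source: &str) -> LintResult {
    let mut result = LintResult::default();
    if source.contains("TODO") {
        result.violations.push("found TODO comment".to_string());
    }
    if !source.is_ascii() {
        // Don't `tracing::warn!` here: the message would only appear the
        // first time the query executes. Recording the failure in the
        // result keeps it visible on cached runs too.
        result
            .errors
            .push("non-ASCII source; some rules were skipped".to_string());
    }
    result
}
```

An accumulator achieves the same visibility when the failure doesn't fit naturally into the query's return type.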
## Tracing in tests
You can use `ruff_db::testing::setup_logging` or `ruff_db::testing::setup_logging_with_filter` to set up logging in tests.
```rust
use ruff_db::testing::setup_logging;
#[test]
fn test() {
let _logging = setup_logging();
tracing::info!("This message will be printed to stderr");
}
```
Note: Most test runners capture stderr and only show its output when a test fails.
Note also that `setup_logging` only sets up logging for the current thread because [`set_global_default`](https://docs.rs/tracing/latest/tracing/subscriber/fn.set_global_default.html) can only be
called **once**.
## Release builds
`trace!` events are removed in release builds because the CLI enables tracing's `release_max_level_debug` feature, which statically caps release builds at `debug!`.
## Profiling
Red Knot writes a folded stack trace named `tracing.folded` to the current directory when the environment variable `RED_KNOT_LOG_PROFILE` is set to `1` or `true`.
```bash
RED_KNOT_LOG_PROFILE=1 red_knot -- --current-directory=../test -vvv
```
You can convert the textual representation into a visual one using `inferno`.
```shell
cargo install inferno
```
```shell
# flamegraph
cat tracing.folded | inferno-flamegraph > tracing-flamegraph.svg
# flamechart
cat tracing.folded | inferno-flamegraph --flamechart > tracing-flamechart.svg
```
![Example flamegraph](./tracing-flamegraph.png)
See [`tracing-flame`](https://crates.io/crates/tracing-flame) for more details.

View File

@@ -1,254 +0,0 @@
//! Sets up logging for Red Knot
use anyhow::Context;
use colored::Colorize;
use std::fmt;
use std::fs::File;
use std::io::BufWriter;
use tracing::{Event, Subscriber};
use tracing_subscriber::filter::LevelFilter;
use tracing_subscriber::fmt::format::Writer;
use tracing_subscriber::fmt::{FmtContext, FormatEvent, FormatFields};
use tracing_subscriber::registry::LookupSpan;
use tracing_subscriber::EnvFilter;
/// Logging flags to `#[command(flatten)]` into your CLI
#[derive(clap::Args, Debug, Clone, Default)]
#[command(about = None, long_about = None)]
pub(crate) struct Verbosity {
#[arg(
long,
short = 'v',
help = "Use verbose output (or `-vv` and `-vvv` for more verbose output)",
action = clap::ArgAction::Count,
global = true,
)]
verbose: u8,
}
impl Verbosity {
/// Returns the verbosity level based on the number of `-v` flags.
///
/// Returns `None` if the user did not specify any verbosity flags.
pub(crate) fn level(&self) -> VerbosityLevel {
match self.verbose {
0 => VerbosityLevel::Default,
1 => VerbosityLevel::Verbose,
2 => VerbosityLevel::ExtraVerbose,
_ => VerbosityLevel::Trace,
}
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub(crate) enum VerbosityLevel {
/// Default output level. Only shows Ruff and Red Knot events up to the [`WARN`](tracing::Level::WARN) level.
Default,
/// Enables verbose output. Emits Ruff and Red Knot events up to the [`INFO`](tracing::Level::INFO) level.
/// Corresponds to `-v`.
Verbose,
/// Enables a more verbose tracing format and emits Ruff and Red Knot events up to the [`DEBUG`](tracing::Level::DEBUG) level.
/// Corresponds to `-vv`.
ExtraVerbose,
/// Enables all tracing events and uses a tree-like output format. Corresponds to `-vvv`.
Trace,
}
impl VerbosityLevel {
const fn level_filter(self) -> LevelFilter {
match self {
VerbosityLevel::Default => LevelFilter::WARN,
VerbosityLevel::Verbose => LevelFilter::INFO,
VerbosityLevel::ExtraVerbose => LevelFilter::DEBUG,
VerbosityLevel::Trace => LevelFilter::TRACE,
}
}
pub(crate) const fn is_trace(self) -> bool {
matches!(self, VerbosityLevel::Trace)
}
pub(crate) const fn is_extra_verbose(self) -> bool {
matches!(self, VerbosityLevel::ExtraVerbose)
}
}
pub(crate) fn setup_tracing(level: VerbosityLevel) -> anyhow::Result<TracingGuard> {
use tracing_subscriber::prelude::*;
// The `RED_KNOT_LOG` environment variable overrides the default log level.
let filter = if let Ok(log_env_variable) = std::env::var("RED_KNOT_LOG") {
EnvFilter::builder()
.parse(log_env_variable)
.context("Failed to parse directives specified in RED_KNOT_LOG environment variable.")?
} else {
match level {
VerbosityLevel::Default => {
// Show warning traces
EnvFilter::default().add_directive(LevelFilter::WARN.into())
}
level => {
let level_filter = level.level_filter();
// Show info|debug|trace events, but allow `RED_KNOT_LOG` to override
let filter = EnvFilter::default().add_directive(
format!("red_knot={level_filter}")
.parse()
.expect("Hardcoded directive to be valid"),
);
filter.add_directive(
format!("ruff={level_filter}")
.parse()
.expect("Hardcoded directive to be valid"),
)
}
}
};
let (profiling_layer, guard) = setup_profile();
let registry = tracing_subscriber::registry()
.with(filter)
.with(profiling_layer);
if level.is_trace() {
let subscriber = registry.with(
tracing_tree::HierarchicalLayer::default()
.with_indent_lines(true)
.with_indent_amount(2)
.with_bracketed_fields(true)
.with_thread_ids(true)
.with_targets(true)
.with_writer(std::io::stderr)
.with_timer(tracing_tree::time::Uptime::default()),
);
subscriber.init();
} else {
let subscriber = registry.with(
tracing_subscriber::fmt::layer()
.event_format(RedKnotFormat {
display_level: true,
display_timestamp: level.is_extra_verbose(),
show_spans: false,
})
.with_writer(std::io::stderr),
);
subscriber.init();
}
Ok(TracingGuard {
_flame_guard: guard,
})
}
#[allow(clippy::type_complexity)]
fn setup_profile<S>() -> (
Option<tracing_flame::FlameLayer<S, BufWriter<File>>>,
Option<tracing_flame::FlushGuard<BufWriter<File>>>,
)
where
S: Subscriber + for<'span> LookupSpan<'span>,
{
if let Ok("1" | "true") = std::env::var("RED_KNOT_LOG_PROFILE").as_deref() {
let (layer, guard) = tracing_flame::FlameLayer::with_file("tracing.folded")
.expect("Flame layer to be created");
(Some(layer), Some(guard))
} else {
(None, None)
}
}
pub(crate) struct TracingGuard {
_flame_guard: Option<tracing_flame::FlushGuard<BufWriter<File>>>,
}
struct RedKnotFormat {
display_timestamp: bool,
display_level: bool,
show_spans: bool,
}
/// See <https://docs.rs/tracing-subscriber/0.3.18/src/tracing_subscriber/fmt/format/mod.rs.html#1026-1156>
impl<S, N> FormatEvent<S, N> for RedKnotFormat
where
S: Subscriber + for<'a> LookupSpan<'a>,
N: for<'a> FormatFields<'a> + 'static,
{
fn format_event(
&self,
ctx: &FmtContext<'_, S, N>,
mut writer: Writer<'_>,
event: &Event<'_>,
) -> fmt::Result {
let meta = event.metadata();
let ansi = writer.has_ansi_escapes();
if self.display_timestamp {
let timestamp = chrono::Local::now()
.format("%Y-%m-%d %H:%M:%S.%f")
.to_string();
if ansi {
write!(writer, "{} ", timestamp.dimmed())?;
} else {
write!(writer, "{timestamp} ")?;
}
}
if self.display_level {
let level = meta.level();
// Same colors as tracing
if ansi {
let formatted_level = level.to_string();
match *level {
tracing::Level::TRACE => {
write!(writer, "{} ", formatted_level.purple().bold())?;
}
tracing::Level::DEBUG => write!(writer, "{} ", formatted_level.blue().bold())?,
tracing::Level::INFO => write!(writer, "{} ", formatted_level.green().bold())?,
tracing::Level::WARN => write!(writer, "{} ", formatted_level.yellow().bold())?,
tracing::Level::ERROR => write!(writer, "{} ", formatted_level.red().bold())?,
}
} else {
write!(writer, "{level} ")?;
}
}
if self.show_spans {
let span = event.parent();
let mut seen = false;
let span = span
.and_then(|id| ctx.span(id))
.or_else(|| ctx.lookup_current());
let scope = span.into_iter().flat_map(|span| span.scope().from_root());
for span in scope {
seen = true;
if ansi {
write!(writer, "{}:", span.metadata().name().bold())?;
} else {
write!(writer, "{}:", span.metadata().name())?;
}
}
if seen {
writer.write_char(' ')?;
}
}
ctx.field_format().format_fields(writer.by_ref(), event)?;
writeln!(writer)
}
}

View File

@@ -1,384 +0,0 @@
use std::process::{ExitCode, Termination};
use std::sync::Mutex;
use anyhow::{anyhow, Context};
use clap::Parser;
use colored::Colorize;
use crossbeam::channel as crossbeam_channel;
use salsa::plumbing::ZalsaDatabase;
use red_knot_python_semantic::SitePackages;
use red_knot_server::run_server;
use red_knot_workspace::db::RootDatabase;
use red_knot_workspace::watch;
use red_knot_workspace::watch::WorkspaceWatcher;
use red_knot_workspace::workspace::settings::Configuration;
use red_knot_workspace::workspace::WorkspaceMetadata;
use ruff_db::system::{OsSystem, System, SystemPath, SystemPathBuf};
use target_version::TargetVersion;
use crate::logging::{setup_tracing, Verbosity};
mod logging;
mod target_version;
mod verbosity;
#[derive(Debug, Parser)]
#[command(
author,
name = "red-knot",
about = "An extremely fast Python type checker."
)]
#[command(version)]
struct Args {
#[command(subcommand)]
pub(crate) command: Option<Command>,
#[arg(
long,
help = "Changes the current working directory.",
long_help = "Changes the current working directory before any specified operations. This affects the workspace and configuration discovery.",
value_name = "PATH"
)]
current_directory: Option<SystemPathBuf>,
#[arg(
long,
help = "Path to the virtual environment the project uses",
long_help = "\
Path to the virtual environment the project uses. \
If provided, red-knot will use the `site-packages` directory of this virtual environment \
to resolve type information for the project's third-party dependencies.",
value_name = "PATH"
)]
venv_path: Option<SystemPathBuf>,
#[arg(
long,
value_name = "DIRECTORY",
help = "Custom directory to use for stdlib typeshed stubs"
)]
custom_typeshed_dir: Option<SystemPathBuf>,
#[arg(
long,
value_name = "PATH",
help = "Additional path to use as a module-resolution source (can be passed multiple times)"
)]
extra_search_path: Option<Vec<SystemPathBuf>>,
#[arg(
long,
help = "Python version to assume when resolving types",
value_name = "VERSION"
)]
target_version: Option<TargetVersion>,
#[clap(flatten)]
verbosity: Verbosity,
#[arg(
long,
help = "Run in watch mode by re-running whenever files change",
short = 'W'
)]
watch: bool,
}
impl Args {
fn to_configuration(&self, cli_cwd: &SystemPath) -> Configuration {
let mut configuration = Configuration::default();
if let Some(target_version) = self.target_version {
configuration.target_version = Some(target_version.into());
}
if let Some(venv_path) = &self.venv_path {
configuration.search_paths.site_packages = Some(SitePackages::Derived {
venv_path: SystemPath::absolute(venv_path, cli_cwd),
});
}
if let Some(custom_typeshed_dir) = &self.custom_typeshed_dir {
configuration.search_paths.custom_typeshed =
Some(SystemPath::absolute(custom_typeshed_dir, cli_cwd));
}
if let Some(extra_search_paths) = &self.extra_search_path {
configuration.search_paths.extra_paths = extra_search_paths
.iter()
.map(|path| Some(SystemPath::absolute(path, cli_cwd)))
.collect();
}
configuration
}
}
#[derive(Debug, clap::Subcommand)]
pub enum Command {
/// Start the language server
Server,
}
#[allow(clippy::print_stdout, clippy::unnecessary_wraps, clippy::print_stderr)]
pub fn main() -> ExitStatus {
run().unwrap_or_else(|error| {
use std::io::Write;
// Use `writeln` instead of `eprintln` to avoid panicking when the stderr pipe is broken.
let mut stderr = std::io::stderr().lock();
// This communicates that this isn't a linter error but Red Knot itself hard-errored for
// some reason (e.g. failed to resolve the configuration)
writeln!(stderr, "{}", "Red Knot failed".red().bold()).ok();
// Currently we generally only see one error, but e.g. with IO errors when resolving
// the configuration it is helpful to chain errors ("resolving configuration failed" ->
// "failed to read file: subdir/pyproject.toml")
for cause in error.chain() {
writeln!(stderr, " {} {cause}", "Cause:".bold()).ok();
}
ExitStatus::Error
})
}
fn run() -> anyhow::Result<ExitStatus> {
let args = Args::parse_from(std::env::args().collect::<Vec<_>>());
if matches!(args.command, Some(Command::Server)) {
return run_server().map(|()| ExitStatus::Success);
}
let verbosity = args.verbosity.level();
countme::enable(verbosity.is_trace());
let _guard = setup_tracing(verbosity)?;
// The base path that all CLI arguments are relative to.
let cli_base_path = {
let cwd = std::env::current_dir().context("Failed to get the current working directory")?;
SystemPathBuf::from_path_buf(cwd)
.map_err(|path| {
anyhow!(
"The current working directory `{}` contains non-Unicode characters. Red Knot only supports Unicode paths.",
path.display()
)
})?
};
let cwd = args
.current_directory
.as_ref()
.map(|cwd| {
if cwd.as_std_path().is_dir() {
Ok(SystemPath::absolute(cwd, &cli_base_path))
} else {
Err(anyhow!(
"Provided current-directory path `{cwd}` is not a directory"
))
}
})
.transpose()?
.unwrap_or_else(|| cli_base_path.clone());
let system = OsSystem::new(cwd.clone());
let cli_configuration = args.to_configuration(&cwd);
let workspace_metadata = WorkspaceMetadata::from_path(
system.current_directory(),
&system,
Some(cli_configuration.clone()),
)?;
// TODO: Use the `program_settings` to compute the key for the database's persistent
// cache and load the cache if it exists.
let mut db = RootDatabase::new(workspace_metadata, system)?;
let (main_loop, main_loop_cancellation_token) = MainLoop::new(cli_configuration);
// Listen to Ctrl+C and abort the watch mode.
let main_loop_cancellation_token = Mutex::new(Some(main_loop_cancellation_token));
ctrlc::set_handler(move || {
let mut lock = main_loop_cancellation_token.lock().unwrap();
if let Some(token) = lock.take() {
token.stop();
}
})?;
let exit_status = if args.watch {
main_loop.watch(&mut db)?
} else {
main_loop.run(&mut db)
};
tracing::trace!("Counts for entire CLI run:\n{}", countme::get_all());
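// Skip the database's destructor: the process is about to exit, so running
// teardown here would be wasted work (the OS reclaims the memory anyway).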
std::mem::forget(db);
Ok(exit_status)
}
#[derive(Copy, Clone)]
pub enum ExitStatus {
/// Checking was successful and there were no errors.
Success = 0,
/// Checking was successful but there were errors.
Failure = 1,
/// Checking failed.
Error = 2,
}
impl Termination for ExitStatus {
fn report(self) -> ExitCode {
ExitCode::from(self as u8)
}
}
struct MainLoop {
/// Sender that can be used to send messages to the main loop.
sender: crossbeam_channel::Sender<MainLoopMessage>,
/// Receiver for the messages sent **to** the main loop.
receiver: crossbeam_channel::Receiver<MainLoopMessage>,
/// The file system watcher, if running in watch mode.
watcher: Option<WorkspaceWatcher>,
cli_configuration: Configuration,
}
impl MainLoop {
fn new(cli_configuration: Configuration) -> (Self, MainLoopCancellationToken) {
let (sender, receiver) = crossbeam_channel::bounded(10);
(
Self {
sender: sender.clone(),
receiver,
watcher: None,
cli_configuration,
},
MainLoopCancellationToken { sender },
)
}
fn watch(mut self, db: &mut RootDatabase) -> anyhow::Result<ExitStatus> {
tracing::debug!("Starting watch mode");
let sender = self.sender.clone();
let watcher = watch::directory_watcher(move |event| {
sender.send(MainLoopMessage::ApplyChanges(event)).unwrap();
})?;
self.watcher = Some(WorkspaceWatcher::new(watcher, db));
self.run(db);
Ok(ExitStatus::Success)
}
fn run(mut self, db: &mut RootDatabase) -> ExitStatus {
// Schedule the first check; the result is handled in `main_loop`.
self.sender.send(MainLoopMessage::CheckWorkspace).unwrap();
let result = self.main_loop(db);
tracing::debug!("Exiting main loop");
result
}
fn main_loop(&mut self, db: &mut RootDatabase) -> ExitStatus {
tracing::debug!("Starting main loop");
let mut revision = 0u64;
while let Ok(message) = self.receiver.recv() {
match message {
MainLoopMessage::CheckWorkspace => {
let db = db.snapshot();
let sender = self.sender.clone();
// Spawn a new task that checks the workspace. This needs to be done in a separate thread
// to prevent blocking the main loop here.
rayon::spawn(move || {
if let Ok(result) = db.check() {
// Send the result back to the main loop for printing.
sender
.send(MainLoopMessage::CheckCompleted { result, revision })
.unwrap();
}
});
}
MainLoopMessage::CheckCompleted {
result,
revision: check_revision,
} => {
let has_diagnostics = !result.is_empty();
if check_revision == revision {
for diagnostic in result {
tracing::error!("{}", diagnostic);
}
} else {
tracing::debug!(
"Discarding check result for outdated revision: current: {revision}, result revision: {check_revision}"
);
}
if self.watcher.is_none() {
return if has_diagnostics {
ExitStatus::Failure
} else {
ExitStatus::Success
};
}
tracing::trace!("Counts after last check:\n{}", countme::get_all());
}
MainLoopMessage::ApplyChanges(changes) => {
revision += 1;
// Automatically cancels any pending queries and waits for them to complete.
db.apply_changes(changes, Some(&self.cli_configuration));
if let Some(watcher) = self.watcher.as_mut() {
watcher.update(db);
}
self.sender.send(MainLoopMessage::CheckWorkspace).unwrap();
}
MainLoopMessage::Exit => {
// Cancel any pending queries and wait for them to complete.
// TODO: Don't use Salsa internal APIs
// [Zulip-Thread](https://salsa.zulipchat.com/#narrow/stream/333573-salsa-3.2E0/topic/Expose.20an.20API.20to.20cancel.20other.20queries)
let _ = db.zalsa_mut();
return ExitStatus::Success;
}
}
tracing::debug!("Waiting for next main loop message.");
}
ExitStatus::Success
}
}
#[derive(Debug)]
struct MainLoopCancellationToken {
sender: crossbeam_channel::Sender<MainLoopMessage>,
}
impl MainLoopCancellationToken {
fn stop(self) {
self.sender.send(MainLoopMessage::Exit).unwrap();
}
}
/// Message sent from the orchestrator to the main loop.
#[derive(Debug)]
enum MainLoopMessage {
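/// Request to (re-)check the entire workspace.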
CheckWorkspace,
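/// A workspace check finished; `revision` identifies the content revision it ran against.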
CheckCompleted { result: Vec<String>, revision: u64 },
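/// File system changes that should be applied to the database.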
ApplyChanges(Vec<watch::ChangeEvent>),
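/// Request to exit the main loop.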
Exit,
}

View File

@@ -1,48 +0,0 @@
/// Enumeration of all supported Python versions
///
/// TODO: unify with the `PythonVersion` enum in the linter/formatter crates?
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq, PartialOrd, Ord, Default, clap::ValueEnum)]
pub enum TargetVersion {
Py37,
#[default]
Py38,
Py39,
Py310,
Py311,
Py312,
Py313,
}
impl TargetVersion {
const fn as_str(self) -> &'static str {
match self {
Self::Py37 => "py37",
Self::Py38 => "py38",
Self::Py39 => "py39",
Self::Py310 => "py310",
Self::Py311 => "py311",
Self::Py312 => "py312",
Self::Py313 => "py313",
}
}
}
impl std::fmt::Display for TargetVersion {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(self.as_str())
}
}
impl From<TargetVersion> for red_knot_python_semantic::PythonVersion {
fn from(value: TargetVersion) -> Self {
match value {
TargetVersion::Py37 => Self::PY37,
TargetVersion::Py38 => Self::PY38,
TargetVersion::Py39 => Self::PY39,
TargetVersion::Py310 => Self::PY310,
TargetVersion::Py311 => Self::PY311,
TargetVersion::Py312 => Self::PY312,
TargetVersion::Py313 => Self::PY313,
}
}
}

View File

@@ -1 +0,0 @@

File diff suppressed because it is too large.

View File

@@ -1,52 +0,0 @@
[package]
name = "red_knot_python_semantic"
version = "0.0.0"
publish = false
authors = { workspace = true }
edition = { workspace = true }
rust-version = { workspace = true }
homepage = { workspace = true }
documentation = { workspace = true }
repository = { workspace = true }
license = { workspace = true }
[dependencies]
ruff_db = { workspace = true }
ruff_index = { workspace = true }
ruff_python_ast = { workspace = true }
ruff_python_stdlib = { workspace = true }
ruff_source_file = { workspace = true }
ruff_text_size = { workspace = true }
ruff_python_literal = { workspace = true }
anyhow = { workspace = true }
bitflags = { workspace = true }
camino = { workspace = true }
compact_str = { workspace = true }
countme = { workspace = true }
itertools = { workspace = true }
ordermap = { workspace = true }
salsa = { workspace = true }
thiserror = { workspace = true }
tracing = { workspace = true }
rustc-hash = { workspace = true }
hashbrown = { workspace = true }
smallvec = { workspace = true }
static_assertions = { workspace = true }
test-case = { workspace = true }
memchr = { workspace = true }
[dev-dependencies]
ruff_db = { workspace = true, features = ["os", "testing"] }
ruff_python_parser = { workspace = true }
red_knot_test = { workspace = true }
red_knot_vendored = { workspace = true }
anyhow = { workspace = true }
insta = { workspace = true }
rstest = { workspace = true }
tempfile = { workspace = true }
[lints]
workspace = true

View File

@@ -1,4 +0,0 @@
/// Rebuild the crate if a test file is added to or removed from `resources/mdtest`.
pub fn main() {
println!("cargo:rerun-if-changed=resources/mdtest");
}

View File

@@ -1,4 +0,0 @@
Markdown files within the `mdtest/` subdirectory are tests of type inference and type checking;
they are executed by the `tests/mdtest.rs` integration test.
See `crates/red_knot_test/README.md` for documentation of this test format.
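As a rough sketch, each test embeds Python code blocks whose inline assertions are checked
when the test runs (hypothetical minimal example):

```py
x = 1
reveal_type(x)  # revealed: Literal[1]
```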

View File

@@ -1,25 +0,0 @@
# Assignment with annotations
## Annotation only transparent to local inference
```py
x = 1
x: int
y = x
reveal_type(y) # revealed: Literal[1]
```
## Violates own annotation
```py
x: int = 'foo' # error: [invalid-assignment] "Object of type `Literal["foo"]` is not assignable to `int`"
```
## Violates previous annotation
```py
x: int
x = 'foo' # error: [invalid-assignment] "Object of type `Literal["foo"]` is not assignable to `int`"
```

View File

@@ -1,9 +0,0 @@
# Multi-target assignment
## Basic
```py
x = y = 1
reveal_type(x) # revealed: Literal[1]
reveal_type(y) # revealed: Literal[1]
```

View File

@@ -1,24 +0,0 @@
# Unbound
## Unbound
```py
x = foo
foo = 1
reveal_type(x) # revealed: Unbound
```
## Unbound class variable
Name lookups within a class scope fall back to globals, but lookups of class attributes don't.
```py
x = 1
class C:
y = x
if flag:
x = 2
reveal_type(C.x) # revealed: Literal[2]
reveal_type(C.y) # revealed: Literal[1]
```

View File

@@ -1,17 +0,0 @@
# Walrus operator
## Basic
```py
x = (y := 1) + 1
reveal_type(x) # revealed: Literal[2]
reveal_type(y) # revealed: Literal[1]
```
## Walrus self-addition
```py
x = 0
(x := x + 1)
reveal_type(x) # revealed: Literal[1]
```

View File

@@ -1,15 +0,0 @@
# Class attributes
## Union of attributes
```py
if flag:
class C:
x = 1
else:
class C:
x = 2
y = C.x
reveal_type(y) # revealed: Literal[1, 2]
```

View File

@@ -1,36 +0,0 @@
# Binary operations on integers
## Basic Arithmetic
```py
a = 2 + 1
b = a - 4
c = a * b
d = c // 3
e = c / 3
f = 5 % 3
reveal_type(a) # revealed: Literal[3]
reveal_type(b) # revealed: Literal[-1]
reveal_type(c) # revealed: Literal[-3]
reveal_type(d) # revealed: Literal[-1]
reveal_type(e) # revealed: float
reveal_type(f) # revealed: Literal[2]
```
## Division by Zero
```py
# TODO: `a` should be `int` and `e` should be `float` once we support inference.
a = 1 / 0 # error: "Cannot divide object of type `Literal[1]` by zero"
b = 2 // 0 # error: "Cannot floor divide object of type `Literal[2]` by zero"
c = 3 % 0 # error: "Cannot reduce object of type `Literal[3]` modulo zero"
d = int() / 0 # error: "Cannot divide object of type `int` by zero"
e = 1.0 / 0 # error: "Cannot divide object of type `float` by zero"
reveal_type(a) # revealed: float
reveal_type(b) # revealed: int
reveal_type(c) # revealed: int
reveal_type(d) # revealed: @Todo
reveal_type(e) # revealed: @Todo
```

View File

@@ -1,21 +0,0 @@
# Callable instance
## Dunder call
```py
class Multiplier:
def __init__(self, factor: float):
self.factor = factor
def __call__(self, number: float) -> float:
return number * self.factor
a = Multiplier(2.0)(3.0)
class Unit: ...
b = Unit()(3.0) # error: "Object of type `Unit` is not callable"
reveal_type(a) # revealed: float
reveal_type(b) # revealed: Unknown
```

View File

@@ -1,8 +0,0 @@
# Constructor
```py
class Foo: ...
x = Foo()
reveal_type(x) # revealed: Foo
```

View File

@@ -1,51 +0,0 @@
# Call expression
## Simple
```py
def get_int() -> int:
return 42
x = get_int()
reveal_type(x) # revealed: int
```
## Async
```py
async def get_int_async() -> int:
return 42
x = get_int_async()
# TODO: we don't yet support `types.CoroutineType`, should be generic `Coroutine[Any, Any, int]`
reveal_type(x) # revealed: @Todo
```
## Decorated
```py
from typing import Callable
def foo() -> int:
return 42
def decorator(func) -> Callable[[], int]:
return foo
@decorator
def bar() -> str:
return 'bar'
x = bar()
# TODO: should reveal `int`, as the decorator replaces `bar` with `foo`
reveal_type(x) # revealed: @Todo
```
## Invalid callable
```py
nonsense = 123
x = nonsense() # error: "Object of type `Literal[123]` is not callable"
```

View File

@@ -1,74 +0,0 @@
# Unions in calls
## Union of return types
```py
if flag:
def f() -> int:
return 1
else:
def f() -> str:
return 'foo'
x = f()
reveal_type(x) # revealed: int | str
```
## Calling with an unknown union
```py
from nonexistent import f # error: [unresolved-import] "Cannot resolve import `nonexistent`"
if flag:
def f() -> int:
return 1
x = f()
reveal_type(x) # revealed: Unknown | int
```
## Non-callable elements in a union
Calling a union with a non-callable element should emit a diagnostic.
```py
if flag:
f = 1
else:
def f() -> int:
return 1
x = f() # error: "Object of type `Literal[1] | Literal[f]` is not callable (due to union element `Literal[1]`)"
reveal_type(x) # revealed: Unknown | int
```
## Multiple non-callable elements in a union
Calling a union with multiple non-callable elements should mention all of them in the diagnostic.
```py
if flag:
f = 1
elif flag2:
f = 'foo'
else:
def f() -> int:
return 1
x = f() # error: "Object of type `Literal[1] | Literal["foo"] | Literal[f]` is not callable (due to union elements Literal[1], Literal["foo"])"
reveal_type(x) # revealed: Unknown | int
```
## All non-callable union elements
Calling a union with no callable elements can emit a simpler diagnostic.
```py
if flag:
f = 1
else:
f = 'foo'
x = f() # error: "Object of type `Literal[1] | Literal["foo"]` is not callable"
reveal_type(x) # revealed: Unknown
```

View File

@@ -1,43 +0,0 @@
# Comparison: Byte literals
These tests assert that we infer precise `Literal` types for comparisons between objects
inferred as having `Literal` bytes types:
```py
reveal_type(b"abc" == b"abc") # revealed: Literal[True]
reveal_type(b"abc" == b"ab") # revealed: Literal[False]
reveal_type(b"abc" != b"abc") # revealed: Literal[False]
reveal_type(b"abc" != b"ab") # revealed: Literal[True]
reveal_type(b"abc" < b"abd") # revealed: Literal[True]
reveal_type(b"abc" < b"abb") # revealed: Literal[False]
reveal_type(b"abc" <= b"abc") # revealed: Literal[True]
reveal_type(b"abc" <= b"abb") # revealed: Literal[False]
reveal_type(b"abc" > b"abd") # revealed: Literal[False]
reveal_type(b"abc" > b"abb") # revealed: Literal[True]
reveal_type(b"abc" >= b"abc") # revealed: Literal[True]
reveal_type(b"abc" >= b"abd") # revealed: Literal[False]
reveal_type(b"" in b"") # revealed: Literal[True]
reveal_type(b"" in b"abc") # revealed: Literal[True]
reveal_type(b"abc" in b"") # revealed: Literal[False]
reveal_type(b"ab" in b"abc") # revealed: Literal[True]
reveal_type(b"abc" in b"abc") # revealed: Literal[True]
reveal_type(b"d" in b"abc") # revealed: Literal[False]
reveal_type(b"ac" in b"abc") # revealed: Literal[False]
reveal_type(b"\x81\x82" in b"\x80\x81\x82") # revealed: Literal[True]
reveal_type(b"\x82\x83" in b"\x80\x81\x82") # revealed: Literal[False]
reveal_type(b"ab" not in b"abc") # revealed: Literal[False]
reveal_type(b"ac" not in b"abc") # revealed: Literal[True]
reveal_type(b"abc" is b"abc") # revealed: bool
reveal_type(b"abc" is b"ab") # revealed: Literal[False]
reveal_type(b"abc" is not b"abc") # revealed: bool
reveal_type(b"abc" is not b"ab") # revealed: Literal[True]
```

View File

@@ -1,41 +0,0 @@
# Comparing integers
## Integer literals
```py
a = 1 == 1 == True
b = 1 == 1 == 2 == 4
c = False < True <= 2 < 3 != 6
d = 1 < 1
e = 1 > 1
f = 1 is 1
g = 1 is not 1
h = 1 is 2
i = 1 is not 7
j = 1 <= "" and 0 < 1
reveal_type(a) # revealed: Literal[True]
reveal_type(b) # revealed: Literal[False]
reveal_type(c) # revealed: Literal[True]
reveal_type(d) # revealed: Literal[False]
reveal_type(e) # revealed: Literal[False]
reveal_type(f) # revealed: bool
reveal_type(g) # revealed: bool
reveal_type(h) # revealed: Literal[False]
reveal_type(i) # revealed: Literal[True]
reveal_type(j) # revealed: @Todo | Literal[True]
```
## Integer instance
```py
# TODO: implement lookup of `__eq__` on typeshed `int` stub.
def int_instance() -> int: ...
a = 1 == int_instance()
b = 9 < int_instance()
c = int_instance() < int_instance()
reveal_type(a) # revealed: @Todo
reveal_type(b) # revealed: bool
reveal_type(c) # revealed: bool
```

View File

@@ -1,37 +0,0 @@
# Non boolean returns
Walking through examples:
- `a = A() < B() < C()`
    1. `A() < B() and B() < C()` - split into N comparisons
    1. `A()` and `B()` - evaluate outcome types
    1. `bool` and `bool` - evaluate truthiness
    1. `A | B` - union of "first true" types
- `b = 0 < 1 < A() < 3`
    1. `0 < 1 and 1 < A() and A() < 3` - split into N comparisons
    1. `True` and `bool` and `A` - evaluate outcome types
    1. `True` and `bool` and `bool` - evaluate truthiness
    1. `bool | A` - union of "true" types
- `c = 10 < 0 < A() < B() < C()` - short-circuits to `False`
```py
from __future__ import annotations
class A:
def __lt__(self, other) -> A: ...
class B:
def __lt__(self, other) -> B: ...
class C:
def __lt__(self, other) -> C: ...
a = A() < B() < C()
b = 0 < 1 < A() < 3
c = 10 < 0 < A() < B() < C()
reveal_type(a) # revealed: A | B
reveal_type(b) # revealed: bool | A
reveal_type(c) # revealed: Literal[False]
```

View File

@@ -1,29 +0,0 @@
# Comparing strings
## String literals
```py
def str_instance() -> str: ...
a = "abc" == "abc"
b = "ab_cd" <= "ab_ce"
c = "abc" in "ab cd"
d = "" not in "hello"
e = "--" is "--"
f = "A" is "B"
g = "--" is not "--"
h = "A" is not "B"
i = str_instance() < "..."
# ensure we're not comparing the interned salsa symbols, which compare by order of declaration.
j = "ab" < "ab_cd"
reveal_type(a) # revealed: Literal[True]
reveal_type(b) # revealed: Literal[True]
reveal_type(c) # revealed: Literal[False]
reveal_type(d) # revealed: Literal[False]
reveal_type(e) # revealed: bool
reveal_type(f) # revealed: Literal[False]
reveal_type(g) # revealed: bool
reveal_type(h) # revealed: Literal[True]
reveal_type(i) # revealed: bool
reveal_type(j) # revealed: Literal[True]
```

View File

@@ -1,205 +0,0 @@
# Comparison - Tuples
## Heterogeneous
For tuples like `tuple[int, str, Literal[1]]`
### Value Comparisons
"Value Comparisons" refers to the operators: `==`, `!=`, `<`, `<=`, `>`, `>=`
#### Results without Ambiguity
Cases where the result can be definitively inferred as a `BooleanLiteral`.
```py
a = (1, "test", (3, 13), True)
b = (1, "test", (3, 14), False)
reveal_type(a == a) # revealed: Literal[True]
reveal_type(a != a) # revealed: Literal[False]
reveal_type(a < a) # revealed: Literal[False]
reveal_type(a <= a) # revealed: Literal[True]
reveal_type(a > a) # revealed: Literal[False]
reveal_type(a >= a) # revealed: Literal[True]
reveal_type(a == b) # revealed: Literal[False]
reveal_type(a != b) # revealed: Literal[True]
reveal_type(a < b) # revealed: Literal[True]
reveal_type(a <= b) # revealed: Literal[True]
reveal_type(a > b) # revealed: Literal[False]
reveal_type(a >= b) # revealed: Literal[False]
```
Even when tuples have different lengths, comparisons are still resolved lexicographically; a tuple that is a strict prefix of another compares less than it.
```py path=different_length.py
a = (1, 2, 3)
b = (1, 2, 3, 4)
reveal_type(a == b) # revealed: Literal[False]
reveal_type(a != b) # revealed: Literal[True]
reveal_type(a < b) # revealed: Literal[True]
reveal_type(a <= b) # revealed: Literal[True]
reveal_type(a > b) # revealed: Literal[False]
reveal_type(a >= b) # revealed: Literal[False]
c = ("a", "b", "c", "d")
d = ("a", "b", "c")
reveal_type(c == d) # revealed: Literal[False]
reveal_type(c != d) # revealed: Literal[True]
reveal_type(c < d) # revealed: Literal[False]
reveal_type(c <= d) # revealed: Literal[False]
reveal_type(c > d) # revealed: Literal[True]
reveal_type(c >= d) # revealed: Literal[True]
```
#### Results with Ambiguity
```py
def bool_instance() -> bool: ...
def int_instance() -> int: ...
a = (bool_instance(),)
b = (int_instance(),)
# TODO: All @Todo should be `bool`
reveal_type(a == a) # revealed: @Todo
reveal_type(a != a) # revealed: @Todo
reveal_type(a < a) # revealed: @Todo
reveal_type(a <= a) # revealed: @Todo
reveal_type(a > a) # revealed: @Todo
reveal_type(a >= a) # revealed: @Todo
reveal_type(a == b) # revealed: @Todo
reveal_type(a != b) # revealed: @Todo
reveal_type(a < b) # revealed: @Todo
reveal_type(a <= b) # revealed: @Todo
reveal_type(a > b) # revealed: @Todo
reveal_type(a >= b) # revealed: @Todo
```
#### Comparison Unsupported
If two tuples contain types that do not support comparison, the result may be `Unknown`.
However, `==` and `!=` are exceptions and can still provide definite results.
```py
a = (1, 2)
b = (1, "hello")
# TODO: should be Literal[False]
reveal_type(a == b) # revealed: @Todo
# TODO: should be Literal[True]
reveal_type(a != b) # revealed: @Todo
# TODO: should be Unknown and add more informative diagnostics
reveal_type(a < b) # revealed: @Todo
reveal_type(a <= b) # revealed: @Todo
reveal_type(a > b) # revealed: @Todo
reveal_type(a >= b) # revealed: @Todo
```
However, if the lexicographic comparison completes without reaching a point where `str`
and `int` are compared, Python will still produce a result based on the prior elements.
```py path=short_circuit.py
a = (1, 2)
b = (999999, "hello")
reveal_type(a == b) # revealed: Literal[False]
reveal_type(a != b) # revealed: Literal[True]
reveal_type(a < b) # revealed: Literal[True]
reveal_type(a <= b) # revealed: Literal[True]
reveal_type(a > b) # revealed: Literal[False]
reveal_type(a >= b) # revealed: Literal[False]
```
#### Matryoshka Tuples
```py
a = (1, True, "Hello")
b = (a, a, a)
c = (b, b, b)
reveal_type(c == c) # revealed: Literal[True]
reveal_type(c != c) # revealed: Literal[False]
reveal_type(c < c) # revealed: Literal[False]
reveal_type(c <= c) # revealed: Literal[True]
reveal_type(c > c) # revealed: Literal[False]
reveal_type(c >= c) # revealed: Literal[True]
```
#### Non Boolean Rich Comparisons
```py
class A():
def __eq__(self, o) -> str: ...
def __ne__(self, o) -> int: ...
def __lt__(self, o) -> float: ...
def __le__(self, o) -> object: ...
def __gt__(self, o) -> tuple: ...
def __ge__(self, o) -> list: ...
a = (A(), A())
# TODO: All @Todo should be bool
reveal_type(a == a) # revealed: @Todo
reveal_type(a != a) # revealed: @Todo
reveal_type(a < a) # revealed: @Todo
reveal_type(a <= a) # revealed: @Todo
reveal_type(a > a) # revealed: @Todo
reveal_type(a >= a) # revealed: @Todo
```
### Membership Test Comparisons
"Membership Test Comparisons" refers to the operators `in` and `not in`.
```py
def int_instance() -> int: ...
a = (1, 2)
b = ((3, 4), (1, 2))
c = ((1, 2, 3), (4, 5, 6))
d = ((int_instance(), int_instance()), (int_instance(), int_instance()))
reveal_type(a in b) # revealed: Literal[True]
reveal_type(a not in b) # revealed: Literal[False]
reveal_type(a in c) # revealed: Literal[False]
reveal_type(a not in c) # revealed: Literal[True]
# TODO: All @Todo should be bool
reveal_type(a in d) # revealed: @Todo
reveal_type(a not in d) # revealed: @Todo
```
### Identity Comparisons
"Identity Comparisons" refers to `is` and `is not`.
```py
a = (1, 2)
b = ("a", "b")
c = (1, 2, 3)
reveal_type(a is (1, 2)) # revealed: bool
reveal_type(a is not (1, 2)) # revealed: bool
# TODO: Update to Literal[False] once str == int comparison is implemented
reveal_type(a is b) # revealed: @Todo
# TODO: Update to Literal[True] once str == int comparison is implemented
reveal_type(a is not b) # revealed: @Todo
reveal_type(a is c) # revealed: Literal[False]
reveal_type(a is not c) # revealed: Literal[True]
```
## Homogeneous
For tuples like `tuple[int, ...]`, `tuple[Any, ...]`
// TODO
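As a rough sketch of what this section might eventually cover (hypothetical example;
homogeneous-tuple inference is not yet implemented, so no `revealed:` assertions are given):

```py
def f(a: tuple[int, ...], b: tuple[int, ...]):
    # TODO: once supported, both comparisons should infer `bool`
    a == b
    a < b
```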

View File

@@ -1,15 +0,0 @@
# Unsupported operators
```py
a = 1 in 7 # error: "Operator `in` is not supported for types `Literal[1]` and `Literal[7]`"
b = 0 not in 10 # error: "Operator `not in` is not supported for types `Literal[0]` and `Literal[10]`"
c = object() < 5 # error: "Operator `<` is not supported for types `object` and `Literal[5]`"
# TODO should error, need to check if __lt__ signature is valid for right operand
d = 5 < object()
reveal_type(a) # revealed: bool
reveal_type(b) # revealed: bool
reveal_type(c) # revealed: Unknown
# TODO: should be `Unknown`
reveal_type(d) # revealed: bool
```

View File

@@ -1,35 +0,0 @@
# If expressions
## Simple if-expression
```py
x = 1 if flag else 2
reveal_type(x) # revealed: Literal[1, 2]
```
## If-expression with walrus operator
```py
y = 0
z = 0
x = (y := 1) if flag else (z := 2)
a = y
b = z
reveal_type(x) # revealed: Literal[1, 2]
reveal_type(a) # revealed: Literal[0, 1]
reveal_type(b) # revealed: Literal[0, 2]
```
## Nested if-expression
```py
x = 1 if flag else 2 if flag2 else 3
reveal_type(x) # revealed: Literal[1, 2, 3]
```
## None
```py
x = 1 if flag else None
reveal_type(x) # revealed: Literal[1] | None
```

View File

@@ -1,98 +0,0 @@
# If statements
## Simple if
```py
y = 1
y = 2
if flag:
y = 3
x = y
reveal_type(x) # revealed: Literal[2, 3]
```
## Simple if-elif-else
```py
y = 1
y = 2
if flag:
y = 3
elif flag2:
y = 4
else:
r = y
y = 5
s = y
x = y
reveal_type(x) # revealed: Literal[3, 4, 5]
reveal_type(r) # revealed: Unbound | Literal[2]
reveal_type(s) # revealed: Unbound | Literal[5]
```
## Single symbol across if-elif-else
```py
if flag:
y = 1
elif flag2:
y = 2
else:
y = 3
reveal_type(y) # revealed: Literal[1, 2, 3]
```
## if-elif-else without else assignment
```py
y = 0
if flag:
y = 1
elif flag2:
y = 2
else:
pass
reveal_type(y) # revealed: Literal[0, 1, 2]
```
## if-elif-else with intervening assignment
```py
y = 0
if flag:
y = 1
z = 3
elif flag2:
y = 2
else:
pass
reveal_type(y) # revealed: Literal[0, 1, 2]
```
## Nested if statement
```py
y = 0
if flag:
if flag2:
y = 1
reveal_type(y) # revealed: Literal[0, 1]
```
## if-elif without else
```py
y = 1
y = 2
if flag:
y = 3
elif flag2:
y = 4
x = y
reveal_type(x) # revealed: Literal[2, 3, 4]
```

View File

@@ -1,39 +0,0 @@
# Pattern matching
## With wildcard
```py
match 0:
case 1:
y = 2
case _:
y = 3
reveal_type(y) # revealed: Literal[2, 3]
```
## Without wildcard
```py
match 0:
case 1:
y = 2
case 2:
y = 3
reveal_type(y) # revealed: Unbound | Literal[2, 3]
```
## Basic match
```py
y = 1
y = 2
match 0:
case 1:
y = 3
case 2:
y = 4
reveal_type(y) # revealed: Literal[2, 3, 4]
```

View File

@@ -1,39 +0,0 @@
# Errors while declaring
## Violates previous assignment
```py
x = 1
x: str # error: [invalid-declaration] "Cannot declare type `str` for inferred type `Literal[1]`"
```
## Incompatible declarations
```py
if flag:
x: str
else:
x: int
x = 1 # error: [conflicting-declarations] "Conflicting declared types for `x`: str, int"
```
## Partial declarations
```py
if flag:
x: int
x = 1 # error: [conflicting-declarations] "Conflicting declared types for `x`: Unknown, int"
```
## Incompatible declarations with bad assignment
```py
if flag:
x: str
else:
x: int
# error: [conflicting-declarations]
# error: [invalid-assignment]
x = b'foo'
```

View File

@@ -1,55 +0,0 @@
# Exception Handling
## Single Exception
```py
import re
try:
x
except NameError as e:
reveal_type(e) # revealed: NameError
except re.error as f:
reveal_type(f) # revealed: error
```
## Unknown type in except handler does not cause spurious diagnostic
```py
from nonexistent_module import foo # error: [unresolved-import]
try:
x
except foo as e:
reveal_type(foo) # revealed: Unknown
reveal_type(e) # revealed: Unknown
```
## Multiple Exceptions in a Tuple
```py
EXCEPTIONS = (AttributeError, TypeError)
try:
x
except (RuntimeError, OSError) as e:
reveal_type(e) # revealed: RuntimeError | OSError
except EXCEPTIONS as f:
reveal_type(f) # revealed: AttributeError | TypeError
```
## Dynamic exception types
```py
def foo(x: type[AttributeError], y: tuple[type[OSError], type[RuntimeError]], z: tuple[type[BaseException], ...]):
try:
w
except x as e:
# TODO: should be `AttributeError`
reveal_type(e) # revealed: @Todo
except y as f:
# TODO: should be `OSError | RuntimeError`
reveal_type(f) # revealed: @Todo
except z as g:
# TODO: should be `BaseException`
reveal_type(g) # revealed: @Todo
```

View File

@@ -1,641 +0,0 @@
# Control flow for exception handlers
These tests assert that we understand the possible "definition states" (which
symbols might or might not be defined) in the various branches of a
`try`/`except`/`else`/`finally` block.
For a full writeup on the semantics of exception handlers,
see [this document][1].
The tests throughout this Markdown document use functions with names starting
with `could_raise_*` to mark definitions that might or might not succeed
(as the function could raise an exception). A type checker must assume that any
arbitrary function call could raise an exception in Python; this is just a
naming convention used in these tests for clarity, and to future-proof the
tests against possible future improvements whereby certain statements or
expressions could potentially be inferred as being incapable of causing an
exception to be raised.
## A single bare `except`
Consider the following `try`/`except` block, with a single bare `except:`.
There are different types for the variable `x` in the two branches of this
block, and we can't determine which branch might have been taken from the
perspective of code following this block. The inferred type after the block's
conclusion is therefore the union of the type at the end of the `try` suite
(`str`) and the type at the end of the `except` suite (`Literal[2]`).
*Within* the `except` suite, we must infer a union of all possible "definition
states" we could have been in at any point during the `try` suite. This is
because control flow could have jumped to the `except` suite without any of the
`try`-suite definitions successfully completing, with only *some* of the
`try`-suite definitions successfully completing, or indeed with *all* of them
successfully completing. The type of `x` at the beginning of the `except` suite
in this example is therefore `Literal[1] | str`, taking into account that we
might have jumped to the `except` suite before the
`x = could_raise_returns_str()` redefinition, but we *also* could have jumped
to the `except` suite *after* that redefinition.
```py path=union_type_inferred.py
def could_raise_returns_str() -> str:
return 'foo'
x = 1
try:
reveal_type(x) # revealed: Literal[1]
x = could_raise_returns_str()
reveal_type(x) # revealed: str
except:
reveal_type(x) # revealed: Literal[1] | str
x = 2
reveal_type(x) # revealed: Literal[2]
reveal_type(x) # revealed: str | Literal[2]
```
If `x` has the same type at the end of both branches, however, the branches
unify and `x` is not inferred as having a union type following the
`try`/`except` block:
```py path=branches_unify_to_non_union_type.py
def could_raise_returns_str() -> str:
return 'foo'
x = 1
try:
x = could_raise_returns_str()
except:
x = could_raise_returns_str()
reveal_type(x) # revealed: str
```
## A non-bare `except`
For simple `try`/`except` blocks, an `except TypeError:` handler has the same
control flow semantics as an `except:` handler. An `except TypeError:` handler
will not catch *all* exceptions: if this is the only handler, it opens up the
possibility that an exception might occur that would not be handled. However,
as described in [the document on exception-handling semantics][1], that would
lead to termination of the scope. It's therefore irrelevant to consider this
possibility when it comes to control-flow analysis.
```py
def could_raise_returns_str() -> str:
return 'foo'
x = 1
try:
reveal_type(x) # revealed: Literal[1]
x = could_raise_returns_str()
reveal_type(x) # revealed: str
except TypeError:
reveal_type(x) # revealed: Literal[1] | str
x = 2
reveal_type(x) # revealed: Literal[2]
reveal_type(x) # revealed: str | Literal[2]
```
## Multiple `except` branches
If control flow reaches the final `reveal_type` call in this example,
either the `try`-block suite of statements was executed in its entirety,
or exactly one `except` suite was executed in its entirety.
The inferred type of `x` at this point is the union of the types at the end of
the three suites:
- At the end of `try`, `type(x) == str`
- At the end of `except TypeError`, `x == 2`
- At the end of `except ValueError`, `x == 3`
```py
def could_raise_returns_str() -> str:
return 'foo'
x = 1
try:
reveal_type(x) # revealed: Literal[1]
x = could_raise_returns_str()
reveal_type(x) # revealed: str
except TypeError:
reveal_type(x) # revealed: Literal[1] | str
x = 2
reveal_type(x) # revealed: Literal[2]
except ValueError:
reveal_type(x) # revealed: Literal[1] | str
x = 3
reveal_type(x) # revealed: Literal[3]
reveal_type(x) # revealed: str | Literal[2, 3]
```
## Exception handlers with `else` branches (but no `finally`)
If we reach the `reveal_type` call at the end of this scope,
either the `try` and `else` suites were both executed in their entireties,
or the `except` suite was executed in its entirety. The type of `x` at this
point is the union of the type at the end of the `else` suite and the type at
the end of the `except` suite:
- At the end of `else`, `x == 3`
- At the end of `except`, `x == 2`
```py path=single_except.py
def could_raise_returns_str() -> str:
return 'foo'
x = 1
try:
reveal_type(x) # revealed: Literal[1]
x = could_raise_returns_str()
reveal_type(x) # revealed: str
except TypeError:
reveal_type(x) # revealed: Literal[1] | str
x = 2
reveal_type(x) # revealed: Literal[2]
else:
reveal_type(x) # revealed: str
x = 3
reveal_type(x) # revealed: Literal[3]
reveal_type(x) # revealed: Literal[2, 3]
```
For a block that has multiple `except` branches and an `else` branch, the same
principle applies. In order to reach the final `reveal_type` call,
either exactly one of the `except` suites must have been executed in its
entirety, or the `try` suite and the `else` suite must both have been executed
in their entireties:
```py
def could_raise_returns_str() -> str:
return 'foo'
x = 1
try:
reveal_type(x) # revealed: Literal[1]
x = could_raise_returns_str()
reveal_type(x) # revealed: str
except TypeError:
reveal_type(x) # revealed: Literal[1] | str
x = 2
reveal_type(x) # revealed: Literal[2]
except ValueError:
reveal_type(x) # revealed: Literal[1] | str
x = 3
reveal_type(x) # revealed: Literal[3]
else:
reveal_type(x) # revealed: str
x = 4
reveal_type(x) # revealed: Literal[4]
reveal_type(x) # revealed: Literal[2, 3, 4]
```
## Exception handlers with `finally` branches (but no `except` branches)
A `finally` suite is *always* executed. As such, if we reach the `reveal_type`
call at the end of this example, we know that `x` *must* have been reassigned
to `2` during the `finally` suite. The type of `x` at the end of the example is
therefore `Literal[2]`:
```py path=redef_in_finally.py
def could_raise_returns_str() -> str:
return 'foo'
x = 1
try:
reveal_type(x) # revealed: Literal[1]
x = could_raise_returns_str()
reveal_type(x) # revealed: str
finally:
x = 2
reveal_type(x) # revealed: Literal[2]
reveal_type(x) # revealed: Literal[2]
```
If `x` was *not* redefined in the `finally` suite, however, things are somewhat
more complicated. If we reach the final `reveal_type` call, we know that the
`try`-block suite ran to completion, unlike when we are merely visiting the
`finally` suite. This means that there are fewer possible states at this point
than there were when we were inside the `finally` block.
(Our current model does *not* correctly infer the types *inside* `finally`
suites, however; this is still a TODO item for us.)
```py path=no_redef_in_finally.py
def could_raise_returns_str() -> str:
return 'foo'
x = 1
try:
reveal_type(x) # revealed: Literal[1]
x = could_raise_returns_str()
reveal_type(x) # revealed: str
finally:
# TODO: should be Literal[1] | str
reveal_type(x) # revealed: str
reveal_type(x) # revealed: str
```
## Combining an `except` branch with a `finally` branch
As previously stated, we do not yet have accurate inference for types *inside*
`finally` suites. When we do, however, we will have to take account of the
following possibilities inside `finally` suites:
- The `try` suite could have run to completion
- Or we could have jumped from halfway through the `try` suite to an `except`
suite, and the `except` suite ran to completion
- Or we could have jumped from halfway through the `try` suite straight to the
`finally` suite due to an unhandled exception
- Or we could have jumped from halfway through the `try` suite to an
`except` suite, only for an exception raised in the `except` suite to cause
us to jump to the `finally` suite before the `except` suite ran to completion
```py path=redef_in_finally.py
def could_raise_returns_str() -> str:
return 'foo'
def could_raise_returns_bytes() -> bytes:
return b'foo'
def could_raise_returns_bool() -> bool:
return True
x = 1
try:
reveal_type(x) # revealed: Literal[1]
x = could_raise_returns_str()
reveal_type(x) # revealed: str
except TypeError:
reveal_type(x) # revealed: Literal[1] | str
x = could_raise_returns_bytes()
reveal_type(x) # revealed: bytes
x = could_raise_returns_bool()
reveal_type(x) # revealed: bool
finally:
# TODO: should be `Literal[1] | str | bytes | bool`
reveal_type(x) # revealed: str | bool
x = 2
reveal_type(x) # revealed: Literal[2]
reveal_type(x) # revealed: Literal[2]
```
Now for an example without a redefinition in the `finally` suite.
As before, there *should* be fewer possibilities after completion of the
`finally` suite than there were during the `finally` suite itself.
(In some control-flow possibilities, some exceptions were merely *suspended*
during the `finally` suite; these lead to the scope's termination following the
conclusion of the `finally` suite.)
```py path=no_redef_in_finally.py
def could_raise_returns_str() -> str:
return 'foo'
def could_raise_returns_bytes() -> bytes:
return b'foo'
def could_raise_returns_bool() -> bool:
return True
x = 1
try:
reveal_type(x) # revealed: Literal[1]
x = could_raise_returns_str()
reveal_type(x) # revealed: str
except TypeError:
reveal_type(x) # revealed: Literal[1] | str
x = could_raise_returns_bytes()
reveal_type(x) # revealed: bytes
x = could_raise_returns_bool()
reveal_type(x) # revealed: bool
finally:
# TODO: should be `Literal[1] | str | bytes | bool`
reveal_type(x) # revealed: str | bool
reveal_type(x) # revealed: str | bool
```
An example with multiple `except` branches and a `finally` branch:
```py path=multiple_except_branches.py
def could_raise_returns_str() -> str:
return 'foo'
def could_raise_returns_bytes() -> bytes:
return b'foo'
def could_raise_returns_bool() -> bool:
return True
def could_raise_returns_memoryview() -> memoryview:
return memoryview(b"")
def could_raise_returns_float() -> float:
return 3.14
x = 1
try:
reveal_type(x) # revealed: Literal[1]
x = could_raise_returns_str()
reveal_type(x) # revealed: str
except TypeError:
reveal_type(x) # revealed: Literal[1] | str
x = could_raise_returns_bytes()
reveal_type(x) # revealed: bytes
x = could_raise_returns_bool()
reveal_type(x) # revealed: bool
except ValueError:
reveal_type(x) # revealed: Literal[1] | str
x = could_raise_returns_memoryview()
reveal_type(x) # revealed: memoryview
x = could_raise_returns_float()
reveal_type(x) # revealed: float
finally:
# TODO: should be `Literal[1] | str | bytes | bool | memoryview | float`
reveal_type(x) # revealed: str | bool | float
reveal_type(x) # revealed: str | bool | float
```
## Combining `except`, `else` and `finally` branches
If the exception handler has an `else` branch, we must also take into account
the possibility that control flow could have jumped to the `finally` suite from
partway through the `else` suite due to an exception raised *there*.
```py path=single_except_branch.py
def could_raise_returns_str() -> str:
return 'foo'
def could_raise_returns_bytes() -> bytes:
return b'foo'
def could_raise_returns_bool() -> bool:
return True
def could_raise_returns_memoryview() -> memoryview:
return memoryview(b"")
def could_raise_returns_float() -> float:
return 3.14
x = 1
try:
reveal_type(x) # revealed: Literal[1]
x = could_raise_returns_str()
reveal_type(x) # revealed: str
except TypeError:
reveal_type(x) # revealed: Literal[1] | str
x = could_raise_returns_bytes()
reveal_type(x) # revealed: bytes
x = could_raise_returns_bool()
reveal_type(x) # revealed: bool
else:
reveal_type(x) # revealed: str
x = could_raise_returns_memoryview()
reveal_type(x) # revealed: memoryview
x = could_raise_returns_float()
reveal_type(x) # revealed: float
finally:
# TODO: should be `Literal[1] | str | bytes | bool | memoryview | float`
reveal_type(x) # revealed: bool | float
reveal_type(x) # revealed: bool | float
```
The same again, this time with multiple `except` branches:
```py path=multiple_except_branches.py
def could_raise_returns_str() -> str:
return 'foo'
def could_raise_returns_bytes() -> bytes:
return b'foo'
def could_raise_returns_bool() -> bool:
return True
def could_raise_returns_memoryview() -> memoryview:
return memoryview(b"")
def could_raise_returns_float() -> float:
return 3.14
def could_raise_returns_range() -> range:
return range(42)
def could_raise_returns_slice() -> slice:
return slice(None)
x = 1
try:
reveal_type(x) # revealed: Literal[1]
x = could_raise_returns_str()
reveal_type(x) # revealed: str
except TypeError:
reveal_type(x) # revealed: Literal[1] | str
x = could_raise_returns_bytes()
reveal_type(x) # revealed: bytes
x = could_raise_returns_bool()
reveal_type(x) # revealed: bool
except ValueError:
reveal_type(x) # revealed: Literal[1] | str
x = could_raise_returns_memoryview()
reveal_type(x) # revealed: memoryview
x = could_raise_returns_float()
reveal_type(x) # revealed: float
else:
reveal_type(x) # revealed: str
x = could_raise_returns_range()
reveal_type(x) # revealed: range
x = could_raise_returns_slice()
reveal_type(x) # revealed: slice
finally:
# TODO: should be `Literal[1] | str | bytes | bool | memoryview | float | range | slice`
reveal_type(x) # revealed: bool | float | slice
reveal_type(x) # revealed: bool | float | slice
```
## Nested `try`/`except` blocks
Determining that an exception handler always suppresses all exceptions would require
more advanced analysis than we are currently capable of. This
is partly because it is possible for statements in `except`, `else` and
`finally` suites to raise exceptions as well as statements in `try` suites.
This means that if an exception handler is nested inside the `try` statement of
an enclosing exception handler, it should (at least for now) be treated the
same as any other node: as a suite containing statements that could possibly
raise exceptions, which would lead to control flow jumping out of that suite
prior to the suite running to completion.
```py
def could_raise_returns_str() -> str:
return 'foo'
def could_raise_returns_bytes() -> bytes:
return b'foo'
def could_raise_returns_bool() -> bool:
return True
def could_raise_returns_memoryview() -> memoryview:
return memoryview(b"")
def could_raise_returns_float() -> float:
return 3.14
def could_raise_returns_range() -> range:
return range(42)
def could_raise_returns_slice() -> slice:
return slice(None)
def could_raise_returns_complex() -> complex:
return 3j
def could_raise_returns_bytearray() -> bytearray:
return bytearray()
class Foo: ...
class Bar: ...
def could_raise_returns_Foo() -> Foo:
return Foo()
def could_raise_returns_Bar() -> Bar:
return Bar()
x = 1
try:
try:
reveal_type(x) # revealed: Literal[1]
x = could_raise_returns_str()
reveal_type(x) # revealed: str
except TypeError:
reveal_type(x) # revealed: Literal[1] | str
x = could_raise_returns_bytes()
reveal_type(x) # revealed: bytes
x = could_raise_returns_bool()
reveal_type(x) # revealed: bool
except ValueError:
reveal_type(x) # revealed: Literal[1] | str
x = could_raise_returns_memoryview()
reveal_type(x) # revealed: memoryview
x = could_raise_returns_float()
reveal_type(x) # revealed: float
else:
reveal_type(x) # revealed: str
x = could_raise_returns_range()
reveal_type(x) # revealed: range
x = could_raise_returns_slice()
reveal_type(x) # revealed: slice
finally:
# TODO: should be `Literal[1] | str | bytes | bool | memoryview | float | range | slice`
reveal_type(x) # revealed: bool | float | slice
x = 2
reveal_type(x) # revealed: Literal[2]
reveal_type(x) # revealed: Literal[2]
except:
reveal_type(x) # revealed: Literal[1, 2] | str | bytes | bool | memoryview | float | range | slice
x = could_raise_returns_complex()
reveal_type(x) # revealed: complex
x = could_raise_returns_bytearray()
reveal_type(x) # revealed: bytearray
else:
reveal_type(x) # revealed: Literal[2]
x = could_raise_returns_Foo()
reveal_type(x) # revealed: Foo
x = could_raise_returns_Bar()
reveal_type(x) # revealed: Bar
finally:
# TODO: should be `Literal[1, 2] | str | bytes | bool | memoryview | float | range | slice | complex | bytearray | Foo | Bar`
reveal_type(x) # revealed: bytearray | Bar
# Either one `except` branch or the `else`
# must have been taken and completed to get here:
reveal_type(x) # revealed: bytearray | Bar
```
## Nested scopes inside `try` blocks
Shadowing a variable in an inner scope has no effect on type inference of the
variable by that name in the outer scope:
```py
def could_raise_returns_str() -> str:
return 'foo'
def could_raise_returns_bytes() -> bytes:
return b'foo'
def could_raise_returns_range() -> range:
return range(42)
def could_raise_returns_bytearray() -> bytearray:
return bytearray()
def could_raise_returns_float() -> float:
return 3.14
x = 1
try:
def foo(param=could_raise_returns_str()):
x = could_raise_returns_str()
try:
reveal_type(x) # revealed: str
x = could_raise_returns_bytes()
reveal_type(x) # revealed: bytes
except:
reveal_type(x) # revealed: str | bytes
x = could_raise_returns_bytearray()
reveal_type(x) # revealed: bytearray
x = could_raise_returns_float()
reveal_type(x) # revealed: float
finally:
# TODO: should be `str | bytes | bytearray | float`
reveal_type(x) # revealed: bytes | float
reveal_type(x) # revealed: bytes | float
x = foo
reveal_type(x) # revealed: Literal[foo]
except:
reveal_type(x) # revealed: Literal[1] | Literal[foo]
class Bar:
x = could_raise_returns_range()
reveal_type(x) # revealed: range
x = Bar
reveal_type(x) # revealed: Literal[Bar]
finally:
# TODO: should be `Literal[1] | Literal[foo] | Literal[Bar]`
reveal_type(x) # revealed: Literal[foo] | Literal[Bar]
reveal_type(x) # revealed: Literal[foo] | Literal[Bar]
```
[1]: https://astral-sh.notion.site/Exception-handler-control-flow-11348797e1ca80bb8ce1e9aedbbe439d

View File

@@ -1,30 +0,0 @@
# Except star
## Except\* with BaseException
```py
try:
x
except* BaseException as e:
reveal_type(e) # revealed: BaseExceptionGroup
```
## Except\* with specific exception
```py
try:
x
except* OSError as e:
# TODO(Alex): more precise would be `ExceptionGroup[OSError]`
reveal_type(e) # revealed: BaseExceptionGroup
```
## Except\* with multiple exceptions
```py
try:
x
except* (TypeError, AttributeError) as e:
# TODO(Alex): more precise would be `ExceptionGroup[TypeError | AttributeError]`.
reveal_type(e) # revealed: BaseExceptionGroup
```

View File

@@ -1,151 +0,0 @@
# Expressions
## OR
```py
def foo() -> str:
pass
a = True or False
b = 'x' or 'y' or 'z'
c = '' or 'y' or 'z'
d = False or 'z'
e = False or True
f = False or False
g = foo() or False
h = foo() or True
reveal_type(a) # revealed: Literal[True]
reveal_type(b) # revealed: Literal["x"]
reveal_type(c) # revealed: Literal["y"]
reveal_type(d) # revealed: Literal["z"]
reveal_type(e) # revealed: Literal[True]
reveal_type(f) # revealed: Literal[False]
reveal_type(g) # revealed: str | Literal[False]
reveal_type(h) # revealed: str | Literal[True]
```
## AND
```py
def foo() -> str:
pass
a = True and False
b = False and True
c = foo() and False
d = foo() and True
e = 'x' and 'y' and 'z'
f = 'x' and 'y' and ''
g = '' and 'y'
reveal_type(a) # revealed: Literal[False]
reveal_type(b) # revealed: Literal[False]
reveal_type(c) # revealed: str | Literal[False]
reveal_type(d) # revealed: str | Literal[True]
reveal_type(e) # revealed: Literal["z"]
reveal_type(f) # revealed: Literal[""]
reveal_type(g) # revealed: Literal[""]
```
## Simple function calls to bool
```py
def returns_bool() -> bool:
return True
if returns_bool():
x = True
else:
x = False
reveal_type(x) # revealed: bool
```
## Complex
```py
def foo() -> str:
pass
a = "x" and "y" or "z"
b = "x" or "y" and "z"
c = "" and "y" or "z"
d = "" or "y" and "z"
e = "x" and "y" or ""
f = "x" or "y" and ""
reveal_type(a) # revealed: Literal["y"]
reveal_type(b) # revealed: Literal["x"]
reveal_type(c) # revealed: Literal["z"]
reveal_type(d) # revealed: Literal["z"]
reveal_type(e) # revealed: Literal["y"]
reveal_type(f) # revealed: Literal["x"]
```
## `bool()` function
### Evaluates to builtin
```py path=a.py
redefined_builtin_bool = bool
def my_bool(x) -> bool: pass
```
```py
from a import redefined_builtin_bool, my_bool
a = redefined_builtin_bool(0)
b = my_bool(0)
reveal_type(a) # revealed: Literal[False]
reveal_type(b) # revealed: bool
```
### Truthy values
```py
a = bool(1)
b = bool((0,))
c = bool("NON EMPTY")
d = bool(True)
def foo(): pass
e = bool(foo)
reveal_type(a) # revealed: Literal[True]
reveal_type(b) # revealed: Literal[True]
reveal_type(c) # revealed: Literal[True]
reveal_type(d) # revealed: Literal[True]
reveal_type(e) # revealed: Literal[True]
```
### Falsy values
```py
a = bool(0)
b = bool(())
c = bool(None)
d = bool("")
e = bool(False)
f = bool()
reveal_type(a) # revealed: Literal[False]
reveal_type(b) # revealed: Literal[False]
reveal_type(c) # revealed: Literal[False]
reveal_type(d) # revealed: Literal[False]
reveal_type(e) # revealed: Literal[False]
reveal_type(f) # revealed: Literal[False]
```
### Ambiguous values
```py
a = bool([])
b = bool({})
c = bool(set())
reveal_type(a) # revealed: bool
reveal_type(b) # revealed: bool
reveal_type(c) # revealed: bool
```

View File

@@ -1,23 +0,0 @@
# Structures
## Class import following
```py
from b import C as D; E = D
reveal_type(E) # revealed: Literal[C]
```
```py path=b.py
class C: pass
```
## Module member resolution
```py
import b; D = b.C
reveal_type(D) # revealed: Literal[C]
```
```py path=b.py
class C: pass
```

View File

@@ -1,6 +0,0 @@
# Importing builtin module
```py
import builtins; x = builtins.copyright
reveal_type(x) # revealed: Literal[copyright]
```

View File

@@ -1,75 +0,0 @@
# Conditional imports
## Maybe unbound
```py path=maybe_unbound.py
if flag:
y = 3
x = y
reveal_type(x) # revealed: Unbound | Literal[3]
reveal_type(y) # revealed: Unbound | Literal[3]
```
```py
from maybe_unbound import x, y
reveal_type(x) # revealed: Literal[3]
reveal_type(y) # revealed: Literal[3]
```
## Maybe unbound annotated
```py path=maybe_unbound_annotated.py
if flag:
y: int = 3
x = y
reveal_type(x) # revealed: Unbound | Literal[3]
reveal_type(y) # revealed: Unbound | Literal[3]
```
Importing an annotated name prefers the declared type over the inferred type:
```py
from maybe_unbound_annotated import x, y
reveal_type(x) # revealed: Literal[3]
reveal_type(y) # revealed: int
```
## Reimport
```py path=c.py
def f(): ...
```
```py path=b.py
if flag:
from c import f
else:
def f(): ...
```
```py
from b import f
# TODO: We should disambiguate in such cases, showing `Literal[b.f, c.f]`.
reveal_type(f) # revealed: Literal[f, f]
```
## Reimport with stub declaration
When we have a declared type in one path and only an inferred-from-definition type in the other, we
should still be able to unify those:
```py path=c.pyi
x: int
```
```py path=b.py
if flag:
from c import x
else:
x = 1
```
```py
from b import x
reveal_type(x) # revealed: int
```

View File

@@ -1,52 +0,0 @@
# Unresolved Imports
## Unresolved import statement
```py
import bar # error: "Cannot resolve import `bar`"
reveal_type(bar) # revealed: Unknown
```
## Unresolved import from statement
```py
from bar import baz # error: "Cannot resolve import `bar`"
reveal_type(baz) # revealed: Unknown
```
## Unresolved import from resolved module
```py path=a.py
```
```py
from a import thing # error: "Module `a` has no member `thing`"
reveal_type(thing) # revealed: Unknown
```
## Resolved import of symbol from unresolved import
```py path=a.py
import foo as foo # error: "Cannot resolve import `foo`"
reveal_type(foo) # revealed: Unknown
```
Importing the unresolved import into a second file should not trigger an additional "unresolved
import" violation:
```py
from a import foo
reveal_type(foo) # revealed: Unknown
```
## No implicit shadowing
```py path=b.py
x: int
```
```py
from b import x
x = 'foo' # error: [invalid-assignment] "Object of type `Literal["foo"]"
```

View File

@@ -1,133 +0,0 @@
# Relative
## Non-existent
```py path=package/__init__.py
```
```py path=package/bar.py
from .foo import X # error: [unresolved-import]
reveal_type(X) # revealed: Unknown
```
## Simple
```py path=package/__init__.py
```
```py path=package/foo.py
X = 42
```
```py path=package/bar.py
from .foo import X
reveal_type(X) # revealed: Literal[42]
```
## Dotted
```py path=package/__init__.py
```
```py path=package/foo/bar/baz.py
X = 42
```
```py path=package/bar.py
from .foo.bar.baz import X
reveal_type(X) # revealed: Literal[42]
```
## Bare to package
```py path=package/__init__.py
X = 42
```
```py path=package/bar.py
from . import X
reveal_type(X) # revealed: Literal[42]
```
## Non-existent + bare to package
```py path=package/bar.py
from . import X # error: [unresolved-import]
reveal_type(X) # revealed: Unknown
```
## Dunder init
```py path=package/__init__.py
from .foo import X
reveal_type(X) # revealed: Literal[42]
```
```py path=package/foo.py
X = 42
```
## Non-existent + dunder init
```py path=package/__init__.py
from .foo import X # error: [unresolved-import]
reveal_type(X) # revealed: Unknown
```
## Long relative import
```py path=package/__init__.py
```
```py path=package/foo.py
X = 42
```
```py path=package/subpackage/subsubpackage/bar.py
from ...foo import X
reveal_type(X) # revealed: Literal[42]
```
## Unbound symbol
```py path=package/__init__.py
```
```py path=package/foo.py
x
```
```py path=package/bar.py
from .foo import x # error: [unresolved-import]
reveal_type(x) # revealed: Unknown
```
## Bare to module
```py path=package/__init__.py
```
```py path=package/foo.py
X = 42
```
```py path=package/bar.py
# TODO: support submodule imports
from . import foo # error: [unresolved-import]
y = foo.X
# TODO: should be `Literal[42]`
reveal_type(y) # revealed: Unknown
```
## Non-existent + bare to module
```py path=package/__init__.py
```
```py path=package/bar.py
# TODO: support submodule imports
from . import foo # error: [unresolved-import]
reveal_type(foo) # revealed: Unknown
```

Some files were not shown because too many files have changed in this diff.