Compare commits

..

50 Commits

Author SHA1 Message Date
Micha Reiser
ae39ce56c0 Bump version to 0.6.8 (#13522) 2024-09-26 14:09:03 +02:00
Micha Reiser
ff2d214e11 Don't skip over imports and other nodes containing nested statements in import collector (#13521) 2024-09-26 11:57:05 +00:00
Micha Reiser
9442cd8fae Parenthesize match..case if guards (#13513) 2024-09-26 06:44:33 +00:00
Micha Reiser
8012707348 Align formatting of patterns in match-cases with expression formatting in clause headers (#13510) 2024-09-26 08:35:22 +02:00
Charlie Marsh
d7ffe46054 Disable the typeset plugin (#13517)
## Summary

There seems to be a bad interaction between enabling anchorlinks and the
`typeset` plugin. I think the former is more important than the
latter... so disabling the latter for now.

## Test Plan

Before:

![Screenshot 2024-09-25 at 7 53
21 PM](https://github.com/user-attachments/assets/bf7c70bb-19ab-4ece-9709-4c297f8ba67b)

After:

![Screenshot 2024-09-25 at 7 53
12 PM](https://github.com/user-attachments/assets/e767a575-1664-4288-aecb-82e8b1b1a7bd)
2024-09-25 23:58:35 +00:00
haarisr
7c83af419c red-knot: Implement the not operator for all Type variants (#13432)
Signed-off-by: haaris <haarisrahman@gmail.com>
Co-authored-by: Carl Meyer <carl@oddbird.net>
2024-09-25 13:44:19 -07:00
Zanie Blue
bbb044ebda Detect tuples bound to variadic positional arguments i.e. *args (#13512)
In https://github.com/astral-sh/ruff/pull/13503, we added supported for
detecting variadic keyword arguments as dictionaries, here we use the
same strategy for detecting variadic positional arguments as tuples.
2024-09-25 10:03:25 -05:00
Zanie Blue
481065238b Avoid UP028 false negatives with non-reference shadowed bindings of loop variables (#13504)
Closes https://github.com/astral-sh/ruff/issues/13266

Avoids false negatives for shadowed bindings that aren't actually
references to the loop variable. There are some shadowed bindings we
need to support still, e.g., `del` requires the loop variable to exist.
2024-09-25 10:03:09 -05:00
Zanie Blue
11f06e0d55 Detect SIM910 when using variadic keyword arguments, i.e., **kwargs (#13503)
Closes https://github.com/astral-sh/ruff/issues/13493
2024-09-25 10:02:59 -05:00
Dylan
f27a8b8c7a [internal] ComparableExpr (f)strings and bytes made invariant under concatenation (#13301) 2024-09-25 16:58:57 +02:00
Vince van Noort
ca0ae0a484 [pylint] Implement boolean-chained-comparison (R1716) (#13435)
Co-authored-by: Micha Reiser <micha@reiser.io>
2024-09-25 09:14:12 +00:00
TomerBin
be1d5e3368 [red-knot] Add Type::bool and boolean expression inference (#13449) 2024-09-25 00:02:26 +00:00
Simon Brugman
03503f7f56 C401 message missing closing parenthesis (#13498) 2024-09-24 14:55:32 +02:00
Charlie Marsh
ff4b6d11fa Detect basic wildcard imports in ruff analyze graph (#13486)
## Summary

I guess we can just ignore the `*` entirely for now? This will add the
`__init__.py` for anything that's importing a package.
2024-09-23 18:09:00 -04:00
Charlie Marsh
96e7f3f96f Exit gracefully on broken pipe errors (#13485)
## Summary

Closes https://github.com/astral-sh/ruff/issues/13483.

Closes https://github.com/astral-sh/ruff/issues/13442.

## Test Plan

```
❯ cargo run analyze graph ../django | head -n 10
   Compiling ruff v0.6.7 (/Users/crmarsh/workspace/ruff/crates/ruff)
    Finished `dev` profile [unoptimized + debuginfo] target(s) in 0.63s
     Running `target/debug/ruff analyze graph ../django`
warning: `ruff analyze graph` is experimental and may change without warning
{
  "/Users/crmarsh/workspace/django/django/__init__.py": [
    "/Users/crmarsh/workspace/django/django/apps/__init__.py",
    "/Users/crmarsh/workspace/django/django/conf/__init__.py",
    "/Users/crmarsh/workspace/django/django/urls/__init__.py",
    "/Users/crmarsh/workspace/django/django/utils/log.py",
    "/Users/crmarsh/workspace/django/django/utils/version.py"
  ],
  "/Users/crmarsh/workspace/django/django/__main__.py": [
    "/Users/crmarsh/workspace/django/django/core/management/__init__.py"
```
2024-09-23 13:48:43 +00:00
Charlie Marsh
90dc7438ee Avoid panic when analyze graph hits broken pipe (#13484)
## Summary

I think we should also make the change that @BurntSushi recommended in
the linked issue, but this gets rid of the panic.

See: https://github.com/astral-sh/ruff/issues/13483

See: https://github.com/astral-sh/ruff/issues/13442

## Test Plan

```
warning: `ruff analyze graph` is experimental and may change without warning
{
  "/Users/crmarsh/workspace/django/django/__init__.py": [
    "/Users/crmarsh/workspace/django/django/apps/__init__.py",
    "/Users/crmarsh/workspace/django/django/conf/__init__.py",
    "/Users/crmarsh/workspace/django/django/urls/__init__.py",
    "/Users/crmarsh/workspace/django/django/utils/log.py",
    "/Users/crmarsh/workspace/django/django/utils/version.py"
  ],
  "/Users/crmarsh/workspace/django/django/__main__.py": [
    "/Users/crmarsh/workspace/django/django/core/management/__init__.py"
ruff failed
  Cause: Broken pipe (os error 32)
```
2024-09-23 09:43:09 -04:00
Micha Reiser
3e99ab141c Update Salsa (#13480) 2024-09-23 14:04:04 +02:00
renovate[bot]
115745a8ac Update dependency monaco-editor to ^0.52.0 (#13475)
This PR contains the following updates:

| Package | Change | Age | Adoption | Passing | Confidence |
|---|---|---|---|---|---|
| [monaco-editor](https://redirect.github.com/microsoft/monaco-editor) |
[`^0.51.0` ->
`^0.52.0`](https://renovatebot.com/diffs/npm/monaco-editor/0.51.0/0.52.0)
|
[![age](https://developer.mend.io/api/mc/badges/age/npm/monaco-editor/0.52.0?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
[![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/monaco-editor/0.52.0?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
[![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/monaco-editor/0.51.0/0.52.0?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|
[![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/monaco-editor/0.51.0/0.52.0?slim=true)](https://docs.renovatebot.com/merge-confidence/)
|

---

### Release Notes

<details>
<summary>microsoft/monaco-editor (monaco-editor)</summary>

###
[`v0.52.0`](https://redirect.github.com/microsoft/monaco-editor/blob/HEAD/CHANGELOG.md#0520)

[Compare
Source](https://redirect.github.com/microsoft/monaco-editor/compare/v0.51.0...v0.52.0)

-   Comment added inside of `IModelContentChangedEvent`

</details>

---

### Configuration

📅 **Schedule**: Branch creation - "before 4am on Monday" (UTC),
Automerge - At any time (no schedule defined).

🚦 **Automerge**: Disabled by config. Please merge this manually once you
are satisfied.

♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the
rebase/retry checkbox.

🔕 **Ignore**: Close this PR and you won't be reminded about this update
again.

---

- [ ] <!-- rebase-check -->If you want to rebase/retry this PR, check
this box

---

This PR was generated by [Mend Renovate](https://mend.io/renovate/).
View the [repository job
log](https://developer.mend.io/github/astral-sh/ruff).

<!--renovate-debug:eyJjcmVhdGVkSW5WZXIiOiIzOC44MC4wIiwidXBkYXRlZEluVmVyIjoiMzguODAuMCIsInRhcmdldEJyYW5jaCI6Im1haW4iLCJsYWJlbHMiOlsiaW50ZXJuYWwiXX0=-->

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2024-09-23 09:55:12 +02:00
renovate[bot]
8bb59d7216 Update Rust crate unicode_names2 to v1.3.0 (#13474)
This PR contains the following updates:

| Package | Type | Update | Change |
|---|---|---|---|
| [unicode_names2](https://redirect.github.com/progval/unicode_names2) |
workspace.dependencies | minor | `1.2.2` -> `1.3.0` |

---

### Configuration

📅 **Schedule**: Branch creation - "before 4am on Monday" (UTC),
Automerge - At any time (no schedule defined).

🚦 **Automerge**: Disabled by config. Please merge this manually once you
are satisfied.

♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the
rebase/retry checkbox.

🔕 **Ignore**: Close this PR and you won't be reminded about this update
again.

---

- [ ] <!-- rebase-check -->If you want to rebase/retry this PR, check
this box

---

This PR was generated by [Mend Renovate](https://mend.io/renovate/).
View the [repository job
log](https://developer.mend.io/github/astral-sh/ruff).

<!--renovate-debug:eyJjcmVhdGVkSW5WZXIiOiIzOC44MC4wIiwidXBkYXRlZEluVmVyIjoiMzguODAuMCIsInRhcmdldEJyYW5jaCI6Im1haW4iLCJsYWJlbHMiOlsiaW50ZXJuYWwiXX0=-->

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2024-09-23 09:54:44 +02:00
renovate[bot]
47aac060de Update Rust crate insta to v1.40.0 (#13472)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2024-09-23 09:40:02 +02:00
Steve C
7c55330534 Fix formatting for analyze direction values (#13476) 2024-09-23 09:18:28 +02:00
renovate[bot]
047d77c60b Update pre-commit dependencies (#13467) 2024-09-22 22:54:34 -04:00
renovate[bot]
18fddd458a Update dependency eslint to v8.57.1 (#13465) 2024-09-22 22:54:14 -04:00
Charlie Marsh
db76000521 Use anchorlinks rather than permalinks (#13471)
## Summary

See: https://github.com/astral-sh/uv/pull/7626
2024-09-23 02:44:45 +00:00
renovate[bot]
a2ed1e1cd1 Update Rust crate thiserror to v1.0.64 (#13462) 2024-09-22 22:32:45 -04:00
renovate[bot]
7457679582 Update Rust crate dashmap to v6.1.0 (#13470) 2024-09-22 22:32:26 -04:00
renovate[bot]
1d352872ba Update Rust crate codspeed-criterion-compat to v2.7.2 (#13469) 2024-09-22 22:32:20 -04:00
renovate[bot]
c8b905bc96 Update NPM Development dependencies (#13468) 2024-09-22 22:32:11 -04:00
renovate[bot]
5b593d0397 Update dependency ruff to v0.6.7 (#13466) 2024-09-22 22:32:02 -04:00
renovate[bot]
c5c5acda23 Update Rust crate unicode-normalization to v0.1.24 (#13464) 2024-09-22 22:31:53 -04:00
renovate[bot]
26747aae75 Update Rust crate unicode-ident to v1.0.13 (#13463) 2024-09-22 22:31:47 -04:00
renovate[bot]
85b825a2a1 Update Rust crate syn to v2.0.77 (#13461) 2024-09-22 22:31:40 -04:00
renovate[bot]
9e764ef6d0 Update Rust crate serde_json to v1.0.128 (#13460) 2024-09-23 02:03:47 +00:00
renovate[bot]
0e325a53ef Update Rust crate serde to v1.0.210 (#13459) 2024-09-23 02:03:15 +00:00
renovate[bot]
2a136cfb57 Update Rust crate pretty_assertions to v1.4.1 (#13458) 2024-09-23 02:02:12 +00:00
renovate[bot]
7749164d4a Update Rust crate ordermap to v0.5.3 (#13457) 2024-09-23 02:01:44 +00:00
renovate[bot]
da50e14524 Update Rust crate lsp-server to v0.7.7 (#13456) 2024-09-23 02:00:23 +00:00
renovate[bot]
1886b731a5 Update Rust crate ignore to v0.4.23 (#13455) 2024-09-22 22:00:06 -04:00
renovate[bot]
364eddc95a Update Rust crate globset to v0.4.15 (#13454) 2024-09-22 22:00:01 -04:00
renovate[bot]
48fb340e3b Update Rust crate filetime to v0.2.25 (#13453) 2024-09-22 21:59:50 -04:00
renovate[bot]
71bb4d3bdc Update Rust crate clap to v4.5.18 (#13452) 2024-09-22 21:59:44 -04:00
renovate[bot]
5c20f570d0 Update Rust crate anyhow to v1.0.89 (#13451) 2024-09-23 01:58:14 +00:00
Charlie Marsh
7441da287f Skip traversal for non-compound statements (#13441)
## Summary

None of these can contain imports.
2024-09-21 20:47:30 +00:00
Charlie Marsh
c2a5179d75 Reuse BTreeSets in module resolver (#13440)
## Summary

For dependencies, there's no reason to re-allocate here, since we know
the paths are unique.
2024-09-21 20:14:32 +00:00
Charlie Marsh
17c4690b5e Bump version to v0.6.7 (#13439) 2024-09-21 13:16:36 -04:00
Charlie Marsh
f06d44e6e5 Use forget for module resolver database (#13438)
## Summary

A tiny bit faster and the `red-knot` CLI does the same thing.
2024-09-21 17:00:02 +00:00
Micha Reiser
653c09001a Use an empty vendored file system in Ruff (#13436)
## Summary

This PR removes the typeshed stubs from the vendored file system
shipped with ruff
and instead ships an empty "typeshed".

Making the typeshed files optional required extracting the typeshed files
into a new `ruff_vendored` crate. I do like this even if all our builds
always include typeshed because it means `red_knot_python_semantic`
contains less code that needs compiling.

This also allows us to use deflate because the compression algorithm
doesn't matter for an archive containing a single, empty file.

## Test Plan

`cargo test`

I verified with ` cargo tree -f "{p} {f}" -p <package> ` that:

* red_knot_wasm: enables `deflate` compression
* red_knot: enables `zstd` compression
* `ruff`: uses stored


I'm not quite sure how to build the binary that maturin builds but
comparing the release artifact size with `strip = true` shows a `1.5MB`
size reduction

---------

Co-authored-by: Charlie Marsh <charlie.r.marsh@gmail.com>
2024-09-21 16:31:42 +00:00
Micha Reiser
8921fbb54c vendored_typeshed_versions should use db.vendored (#13434) 2024-09-21 16:35:06 +02:00
Charlie Marsh
3018303c87 Avoid parsing with Salsa (#13437)
## Summary

For reasons I haven't investigated, this speeds up the resolver about 2x
(from 6.404s to 3.612s on an extremely large codebase).

## Test Plan

\cc @BurntSushi 

```
[andrew@duff rippling]$ time ruff analyze graph --preview > /dev/null

real    3.274
user    16.039
sys     7.609
maxmem  11631 MB
faults  0
[andrew@duff rippling]$ time ruff-patch analyze graph --preview > /dev/null

real    1.841
user    14.625
sys     3.639
maxmem  7173 MB
faults  0
[andrew@duff rippling]$ time ruff-patch2 analyze graph --preview > /dev/null

real    2.087
user    15.333
sys     4.869
maxmem  8642 MB
faults  0
```

Where that's `main`, then (`ruff-patch`) using the version with no
`File`, no `SemanticModel`, then (`ruff-patch2`) using `File`.
2024-09-21 13:52:16 +00:00
haarisr
6c303b2445 red-knot: Add not unary operator for boolean literals (#13422)
## Summary

Contributes to #12701

## Test Plan

Added test for boolean literals

Signed-off-by: haaris <haarisrahman@gmail.com>
2024-09-20 15:24:38 -07:00
661 changed files with 3948 additions and 700 deletions

View File

@@ -37,13 +37,13 @@ jobs:
- name: Sync typeshed
id: sync
run: |
rm -rf ruff/crates/red_knot_python_semantic/vendor/typeshed
mkdir ruff/crates/red_knot_python_semantic/vendor/typeshed
cp typeshed/README.md ruff/crates/red_knot_python_semantic/vendor/typeshed
cp typeshed/LICENSE ruff/crates/red_knot_python_semantic/vendor/typeshed
cp -r typeshed/stdlib ruff/crates/red_knot_python_semantic/vendor/typeshed/stdlib
rm -rf ruff/crates/red_knot_python_semantic/vendor/typeshed/stdlib/@tests
git -C typeshed rev-parse HEAD > ruff/crates/red_knot_python_semantic/vendor/typeshed/source_commit.txt
rm -rf ruff/crates/ruff_vendored/vendor/typeshed
mkdir ruff/crates/ruff_vendored/vendor/typeshed
cp typeshed/README.md ruff/crates/ruff_vendored/vendor/typeshed
cp typeshed/LICENSE ruff/crates/ruff_vendored/vendor/typeshed
cp -r typeshed/stdlib ruff/crates/ruff_vendored/vendor/typeshed/stdlib
rm -rf ruff/crates/ruff_vendored/vendor/typeshed/stdlib/@tests
git -C typeshed rev-parse HEAD > ruff/crates/ruff_vendored/vendor/typeshed/source_commit.txt
- name: Commit the changes
id: commit
if: ${{ steps.sync.outcome == 'success' }}

View File

@@ -2,7 +2,7 @@ fail_fast: true
exclude: |
(?x)^(
crates/red_knot_python_semantic/vendor/.*|
crates/ruff_vendored/vendor/.*|
crates/red_knot_workspace/resources/.*|
crates/ruff_linter/resources/.*|
crates/ruff_linter/src/rules/.*/snapshots/.*|
@@ -45,7 +45,7 @@ repos:
)$
- repo: https://github.com/crate-ci/typos
rev: v1.24.5
rev: v1.24.6
hooks:
- id: typos
@@ -59,7 +59,7 @@ repos:
pass_filenames: false # This makes it a lot faster
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.6.5
rev: v0.6.7
hooks:
- id: ruff-format
- id: ruff

View File

@@ -1,5 +1,51 @@
# Changelog
## 0.6.8
### Preview features
- Remove unnecessary parentheses around `match case` clauses ([#13510](https://github.com/astral-sh/ruff/pull/13510))
- Parenthesize overlong `if` guards in `match..case` clauses ([#13513](https://github.com/astral-sh/ruff/pull/13513))
- Detect basic wildcard imports in `ruff analyze graph` ([#13486](https://github.com/astral-sh/ruff/pull/13486))
- \[`pylint`\] Implement `boolean-chained-comparison` (`R1716`) ([#13435](https://github.com/astral-sh/ruff/pull/13435))
### Rule changes
- \[`flake8-simplify`\] Detect `SIM910` when using variadic keyword arguments, i.e., `**kwargs` ([#13503](https://github.com/astral-sh/ruff/pull/13503))
- \[`pyupgrade`\] Avoid false negatives with non-reference shadowed bindings of loop variables (`UP028`) ([#13504](https://github.com/astral-sh/ruff/pull/13504))
### Bug fixes
- Detect tuples bound to variadic positional arguments i.e. `*args` ([#13512](https://github.com/astral-sh/ruff/pull/13512))
- Exit gracefully on broken pipe errors ([#13485](https://github.com/astral-sh/ruff/pull/13485))
- Avoid panic when analyze graph hits broken pipe ([#13484](https://github.com/astral-sh/ruff/pull/13484))
### Performance
- Reuse `BTreeSets` in module resolver ([#13440](https://github.com/astral-sh/ruff/pull/13440))
- Skip traversal for non-compound statements ([#13441](https://github.com/astral-sh/ruff/pull/13441))
## 0.6.7
### Preview features
- Add Python version support to ruff analyze CLI ([#13426](https://github.com/astral-sh/ruff/pull/13426))
- Add `exclude` support to `ruff analyze` ([#13425](https://github.com/astral-sh/ruff/pull/13425))
- Fix parentheses around return type annotations ([#13381](https://github.com/astral-sh/ruff/pull/13381))
### Rule changes
- \[`pycodestyle`\] Fix: Don't autofix if the first line ends in a question mark? (D400) ([#13399](https://github.com/astral-sh/ruff/pull/13399))
### Bug fixes
- Respect `lint.exclude` in ruff check `--add-noqa` ([#13427](https://github.com/astral-sh/ruff/pull/13427))
### Performance
- Avoid tracking module resolver files in Salsa ([#13437](https://github.com/astral-sh/ruff/pull/13437))
- Use `forget` for module resolver database ([#13438](https://github.com/astral-sh/ruff/pull/13438))
## 0.6.6
### Preview features

140
Cargo.lock generated
View File

@@ -129,9 +129,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.86"
version = "1.0.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6"
[[package]]
name = "append-only-vec"
@@ -353,9 +353,9 @@ dependencies = [
[[package]]
name = "clap"
version = "4.5.16"
version = "4.5.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019"
checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3"
dependencies = [
"clap_builder",
"clap_derive",
@@ -363,9 +363,9 @@ dependencies = [
[[package]]
name = "clap_builder"
version = "4.5.15"
version = "4.5.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6"
checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b"
dependencies = [
"anstream",
"anstyle",
@@ -406,9 +406,9 @@ dependencies = [
[[package]]
name = "clap_derive"
version = "4.5.13"
version = "4.5.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0"
checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab"
dependencies = [
"heck",
"proc-macro2",
@@ -437,9 +437,9 @@ dependencies = [
[[package]]
name = "codspeed"
version = "2.6.0"
version = "2.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a104ac948e0188b921eb3fcbdd55dcf62e542df4c7ab7e660623f6288302089"
checksum = "450a0e9df9df1c154156f4344f99d8f6f6e69d0fc4de96ef6e2e68b2ec3bce97"
dependencies = [
"colored",
"libc",
@@ -448,9 +448,9 @@ dependencies = [
[[package]]
name = "codspeed-criterion-compat"
version = "2.6.0"
version = "2.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "722c36bdc62d9436d027256ce2627af81ac7a596dfc7d13d849d0d212448d7fe"
checksum = "8eb1a6cb9c20e177fde58cdef97c1c7c9264eb1424fe45c4fccedc2fb078a569"
dependencies = [
"codspeed",
"colored",
@@ -722,9 +722,9 @@ dependencies = [
[[package]]
name = "dashmap"
version = "6.0.1"
version = "6.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "804c8821570c3f8b70230c2ba75ffa5c0f9a4189b9a432b6656c536712acae28"
checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf"
dependencies = [
"cfg-if",
"crossbeam-utils",
@@ -894,9 +894,9 @@ dependencies = [
[[package]]
name = "filetime"
version = "0.2.24"
version = "0.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf401df4a4e3872c4fe8151134cf483738e74b67fc934d6532c882b3d24a4550"
checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586"
dependencies = [
"cfg-if",
"libc",
@@ -987,9 +987,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "globset"
version = "0.4.14"
version = "0.4.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57da3b9b5b85bd66f31093f8c408b90a74431672542466497dcbdfdc02034be1"
checksum = "15f1ce686646e7f1e19bf7d5533fe443a45dbfb990e00629110797578b42fb19"
dependencies = [
"aho-corasick",
"bstr",
@@ -1106,9 +1106,9 @@ dependencies = [
[[package]]
name = "ignore"
version = "0.4.22"
version = "0.4.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b46810df39e66e925525d6e38ce1e7f6e1d208f72dc39757880fcb66e2c58af1"
checksum = "6d89fd380afde86567dfba715db065673989d6253f42b88179abd3eae47bda4b"
dependencies = [
"crossbeam-deque",
"globset",
@@ -1142,9 +1142,9 @@ dependencies = [
[[package]]
name = "indexmap"
version = "2.4.0"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c"
checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5"
dependencies = [
"equivalent",
"hashbrown",
@@ -1193,9 +1193,9 @@ dependencies = [
[[package]]
name = "insta"
version = "1.39.0"
version = "1.40.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "810ae6042d48e2c9e9215043563a58a80b877bc863228a74cf10c49d4620a6f5"
checksum = "6593a41c7a73841868772495db7dc1e8ecab43bb5c0b6da2059246c4b506ab60"
dependencies = [
"console",
"globset",
@@ -1427,9 +1427,9 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
[[package]]
name = "lsp-server"
version = "0.7.6"
version = "0.7.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "248f65b78f6db5d8e1b1604b4098a28b43d21a8eb1deeca22b1c421b276c7095"
checksum = "550446e84739dcaf6d48a4a093973850669e13e8a34d8f8d64851041be267cd9"
dependencies = [
"crossbeam-channel",
"log",
@@ -1644,9 +1644,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
[[package]]
name = "ordermap"
version = "0.5.2"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61d7d835be600a7ac71b24e39c92fe6fad9e818b3c71bfc379e3ba65e327d77f"
checksum = "31f2bd7b03bf2c767e1bb7b91505dbe022833776e60480275e6f2fb0db0c7503"
dependencies = [
"indexmap",
]
@@ -1934,9 +1934,9 @@ dependencies = [
[[package]]
name = "pretty_assertions"
version = "1.4.0"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66"
checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d"
dependencies = [
"diff",
"yansi",
@@ -2083,9 +2083,7 @@ dependencies = [
"countme",
"hashbrown",
"insta",
"once_cell",
"ordermap",
"path-slash",
"ruff_db",
"ruff_index",
"ruff_python_ast",
@@ -2094,6 +2092,7 @@ dependencies = [
"ruff_python_stdlib",
"ruff_source_file",
"ruff_text_size",
"ruff_vendored",
"rustc-hash 2.0.0",
"salsa",
"smallvec",
@@ -2102,8 +2101,6 @@ dependencies = [
"test-case",
"thiserror",
"tracing",
"walkdir",
"zip",
]
[[package]]
@@ -2159,6 +2156,7 @@ dependencies = [
"ruff_db",
"ruff_python_ast",
"ruff_text_size",
"ruff_vendored",
"rustc-hash 2.0.0",
"salsa",
"tempfile",
@@ -2255,7 +2253,7 @@ dependencies = [
[[package]]
name = "ruff"
version = "0.6.6"
version = "0.6.8"
dependencies = [
"anyhow",
"argfile",
@@ -2353,7 +2351,7 @@ version = "0.0.0"
dependencies = [
"camino",
"countme",
"dashmap 6.0.1",
"dashmap 6.1.0",
"filetime",
"ignore",
"insta",
@@ -2450,15 +2448,18 @@ version = "0.1.0"
dependencies = [
"anyhow",
"clap",
"once_cell",
"red_knot_python_semantic",
"ruff_cache",
"ruff_db",
"ruff_linter",
"ruff_macros",
"ruff_python_ast",
"ruff_python_parser",
"salsa",
"schemars",
"serde",
"zip",
]
[[package]]
@@ -2471,7 +2472,7 @@ dependencies = [
[[package]]
name = "ruff_linter"
version = "0.6.6"
version = "0.6.8"
dependencies = [
"aho-corasick",
"annotate-snippets 0.9.2",
@@ -2789,9 +2790,20 @@ dependencies = [
"static_assertions",
]
[[package]]
name = "ruff_vendored"
version = "0.0.0"
dependencies = [
"once_cell",
"path-slash",
"ruff_db",
"walkdir",
"zip",
]
[[package]]
name = "ruff_wasm"
version = "0.6.6"
version = "0.6.8"
dependencies = [
"console_error_panic_hook",
"console_log",
@@ -2932,12 +2944,12 @@ checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1"
[[package]]
name = "salsa"
version = "0.18.0"
source = "git+https://github.com/salsa-rs/salsa.git?rev=f608ff8b24f07706492027199f51132244034f29#f608ff8b24f07706492027199f51132244034f29"
source = "git+https://github.com/salsa-rs/salsa.git?rev=4a7c955255e707e64e43f3ce5eabb771ae067768#4a7c955255e707e64e43f3ce5eabb771ae067768"
dependencies = [
"append-only-vec",
"arc-swap",
"crossbeam",
"dashmap 6.0.1",
"dashmap 6.1.0",
"hashlink",
"indexmap",
"lazy_static",
@@ -2952,12 +2964,12 @@ dependencies = [
[[package]]
name = "salsa-macro-rules"
version = "0.1.0"
source = "git+https://github.com/salsa-rs/salsa.git?rev=f608ff8b24f07706492027199f51132244034f29#f608ff8b24f07706492027199f51132244034f29"
source = "git+https://github.com/salsa-rs/salsa.git?rev=4a7c955255e707e64e43f3ce5eabb771ae067768#4a7c955255e707e64e43f3ce5eabb771ae067768"
[[package]]
name = "salsa-macros"
version = "0.18.0"
source = "git+https://github.com/salsa-rs/salsa.git?rev=f608ff8b24f07706492027199f51132244034f29#f608ff8b24f07706492027199f51132244034f29"
source = "git+https://github.com/salsa-rs/salsa.git?rev=4a7c955255e707e64e43f3ce5eabb771ae067768#4a7c955255e707e64e43f3ce5eabb771ae067768"
dependencies = [
"heck",
"proc-macro2",
@@ -3019,9 +3031,9 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b"
[[package]]
name = "serde"
version = "1.0.209"
version = "1.0.210"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09"
checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a"
dependencies = [
"serde_derive",
]
@@ -3039,9 +3051,9 @@ dependencies = [
[[package]]
name = "serde_derive"
version = "1.0.209"
version = "1.0.210"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170"
checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f"
dependencies = [
"proc-macro2",
"quote",
@@ -3061,9 +3073,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.127"
version = "1.0.128"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad"
checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8"
dependencies = [
"itoa",
"memchr",
@@ -3233,9 +3245,9 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
[[package]]
name = "syn"
version = "2.0.76"
version = "2.0.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525"
checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed"
dependencies = [
"proc-macro2",
"quote",
@@ -3330,18 +3342,18 @@ dependencies = [
[[package]]
name = "thiserror"
version = "1.0.63"
version = "1.0.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724"
checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.63"
version = "1.0.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261"
checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3"
dependencies = [
"proc-macro2",
"quote",
@@ -3602,15 +3614,15 @@ checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75"
[[package]]
name = "unicode-ident"
version = "1.0.12"
version = "1.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe"
[[package]]
name = "unicode-normalization"
version = "0.1.23"
version = "0.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5"
checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956"
dependencies = [
"tinyvec",
]
@@ -3623,9 +3635,9 @@ checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d"
[[package]]
name = "unicode_names2"
version = "1.2.2"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "addeebf294df7922a1164f729fb27ebbbcea99cc32b3bf08afab62757f707677"
checksum = "d1673eca9782c84de5f81b82e4109dcfb3611c8ba0d52930ec4a9478f547b2dd"
dependencies = [
"phf",
"unicode_names2_generator",
@@ -3633,9 +3645,9 @@ dependencies = [
[[package]]
name = "unicode_names2_generator"
version = "1.2.2"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f444b8bba042fe3c1251ffaca35c603f2dc2ccc08d595c65a8c4f76f3e8426c0"
checksum = "b91e5b84611016120197efd7dc93ef76774f4e084cd73c9fb3ea4a86c570c56e"
dependencies = [
"getopts",
"log",
@@ -4121,9 +4133,9 @@ checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904"
[[package]]
name = "yansi"
version = "0.5.1"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"
checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049"
[[package]]
name = "yansi-term"

View File

@@ -14,7 +14,7 @@ license = "MIT"
[workspace.dependencies]
ruff = { path = "crates/ruff" }
ruff_cache = { path = "crates/ruff_cache" }
ruff_db = { path = "crates/ruff_db" }
ruff_db = { path = "crates/ruff_db", default-features = false }
ruff_diagnostics = { path = "crates/ruff_diagnostics" }
ruff_formatter = { path = "crates/ruff_formatter" }
ruff_graph = { path = "crates/ruff_graph" }
@@ -34,11 +34,12 @@ ruff_python_trivia = { path = "crates/ruff_python_trivia" }
ruff_server = { path = "crates/ruff_server" }
ruff_source_file = { path = "crates/ruff_source_file" }
ruff_text_size = { path = "crates/ruff_text_size" }
ruff_vendored = { path = "crates/ruff_vendored" }
ruff_workspace = { path = "crates/ruff_workspace" }
red_knot_python_semantic = { path = "crates/red_knot_python_semantic" }
red_knot_server = { path = "crates/red_knot_server" }
red_knot_workspace = { path = "crates/red_knot_workspace" }
red_knot_workspace = { path = "crates/red_knot_workspace", default-features = false }
aho-corasick = { version = "1.1.3" }
annotate-snippets = { version = "0.9.2", features = ["color"] }
@@ -111,7 +112,7 @@ rand = { version = "0.8.5" }
rayon = { version = "1.10.0" }
regex = { version = "1.10.2" }
rustc-hash = { version = "2.0.0" }
salsa = { git = "https://github.com/salsa-rs/salsa.git", rev = "f608ff8b24f07706492027199f51132244034f29" }
salsa = { git = "https://github.com/salsa-rs/salsa.git", rev = "4a7c955255e707e64e43f3ce5eabb771ae067768" }
schemars = { version = "0.8.16" }
seahash = { version = "4.1.0" }
serde = { version = "1.0.197", features = ["derive"] }

View File

@@ -136,8 +136,8 @@ curl -LsSf https://astral.sh/ruff/install.sh | sh
powershell -c "irm https://astral.sh/ruff/install.ps1 | iex"
# For a specific version.
curl -LsSf https://astral.sh/ruff/0.6.6/install.sh | sh
powershell -c "irm https://astral.sh/ruff/0.6.6/install.ps1 | iex"
curl -LsSf https://astral.sh/ruff/0.6.8/install.sh | sh
powershell -c "irm https://astral.sh/ruff/0.6.8/install.ps1 | iex"
```
You can also install Ruff via [Homebrew](https://formulae.brew.sh/formula/ruff), [Conda](https://anaconda.org/conda-forge/ruff),
@@ -170,7 +170,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com/) hook via [`ruff
```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.6.6
rev: v0.6.8
hooks:
# Run the linter.
- id: ruff

View File

@@ -13,9 +13,8 @@ license.workspace = true
[dependencies]
red_knot_python_semantic = { workspace = true }
red_knot_workspace = { workspace = true }
red_knot_workspace = { workspace = true, features = ["zstd"] }
red_knot_server = { workspace = true }
ruff_db = { workspace = true, features = ["os", "cache"] }
anyhow = { workspace = true }

View File

@@ -24,7 +24,6 @@ bitflags = { workspace = true }
camino = { workspace = true }
compact_str = { workspace = true }
countme = { workspace = true }
once_cell = { workspace = true }
ordermap = { workspace = true }
salsa = { workspace = true }
thiserror = { workspace = true }
@@ -35,25 +34,14 @@ smallvec = { workspace = true }
static_assertions = { workspace = true }
test-case = { workspace = true }
[build-dependencies]
path-slash = { workspace = true }
walkdir = { workspace = true }
[target.'cfg(not(target_arch = "powerpc64"))'.build-dependencies]
zip = { workspace = true, features = ["deflate", "zstd"] }
[target.'cfg(target_arch = "powerpc64")'.build-dependencies]
zip = { workspace = true, features = ["deflate"] }
[dev-dependencies]
ruff_db = { workspace = true, features = ["os", "testing"] }
ruff_python_parser = { workspace = true }
ruff_vendored = { workspace = true }
anyhow = { workspace = true }
insta = { workspace = true }
tempfile = { workspace = true }
walkdir = { workspace = true }
zip = { workspace = true }
[lints]
workspace = true

View File

@@ -11,7 +11,6 @@ pub trait Db: SourceDb + Upcast<dyn SourceDb> {
pub(crate) mod tests {
use std::sync::Arc;
use crate::module_resolver::vendored_typeshed_stubs;
use ruff_db::files::{File, Files};
use ruff_db::system::{DbWithTestSystem, System, TestSystem};
use ruff_db::vendored::VendoredFileSystem;
@@ -33,7 +32,7 @@ pub(crate) mod tests {
Self {
storage: salsa::Storage::default(),
system: TestSystem::default(),
vendored: vendored_typeshed_stubs().clone(),
vendored: ruff_vendored::file_system().clone(),
events: std::sync::Arc::default(),
files: Files::default(),
}

View File

@@ -4,9 +4,7 @@ use rustc_hash::FxHasher;
pub use db::Db;
pub use module_name::ModuleName;
pub use module_resolver::{
resolve_module, system_module_search_paths, vendored_typeshed_stubs, Module,
};
pub use module_resolver::{resolve_module, system_module_search_paths, Module};
pub use program::{Program, ProgramSettings, SearchPathSettings, SitePackages};
pub use python_version::PythonVersion;
pub use semantic_model::{HasTy, SemanticModel};

View File

@@ -4,7 +4,6 @@ pub use module::Module;
pub use resolver::resolve_module;
pub(crate) use resolver::{file_to_module, SearchPaths};
use ruff_db::system::SystemPath;
pub use typeshed::vendored_typeshed_stubs;
use crate::module_resolver::resolver::search_paths;
use crate::Db;

View File

@@ -1,20 +1,21 @@
use rustc_hash::{FxBuildHasher, FxHashSet};
use std::borrow::Cow;
use std::iter::FusedIterator;
use std::ops::Deref;
use rustc_hash::{FxBuildHasher, FxHashSet};
use ruff_db::files::{File, FilePath, FileRootKind};
use ruff_db::system::{DirectoryEntry, System, SystemPath, SystemPathBuf};
use ruff_db::vendored::{VendoredFileSystem, VendoredPath};
use super::module::{Module, ModuleKind};
use super::path::{ModulePath, SearchPath, SearchPathValidationError};
use crate::db::Db;
use crate::module_name::ModuleName;
use crate::module_resolver::typeshed::{vendored_typeshed_versions, TypeshedVersions};
use crate::site_packages::VirtualEnvironment;
use crate::{Program, PythonVersion, SearchPathSettings, SitePackages};
use super::module::{Module, ModuleKind};
use super::path::{ModulePath, SearchPath, SearchPathValidationError};
/// Resolves a module name to a module.
pub fn resolve_module(db: &dyn Db, module_name: ModuleName) -> Option<Module> {
let interned_name = ModuleNameIngredient::new(db, module_name);
@@ -136,7 +137,7 @@ pub(crate) struct SearchPaths {
/// for the first `site-packages` path
site_packages: Vec<SearchPath>,
typeshed_versions: ResolvedTypeshedVersions,
typeshed_versions: TypeshedVersions,
}
impl SearchPaths {
@@ -202,11 +203,11 @@ impl SearchPaths {
let search_path = SearchPath::custom_stdlib(db, &custom_typeshed)?;
(ResolvedTypeshedVersions::Custom(parsed), search_path)
(parsed, search_path)
} else {
tracing::debug!("Using vendored stdlib");
(
ResolvedTypeshedVersions::Vendored(vendored_typeshed_versions()),
vendored_typeshed_versions(db),
SearchPath::vendored_stdlib(),
)
};
@@ -279,23 +280,6 @@ impl SearchPaths {
}
}
#[derive(Debug, PartialEq, Eq)]
enum ResolvedTypeshedVersions {
Vendored(&'static TypeshedVersions),
Custom(TypeshedVersions),
}
impl Deref for ResolvedTypeshedVersions {
type Target = TypeshedVersions;
fn deref(&self) -> &Self::Target {
match self {
ResolvedTypeshedVersions::Vendored(versions) => versions,
ResolvedTypeshedVersions::Custom(versions) => versions,
}
}
}
/// Collect all dynamic search paths. For each `site-packages` path:
/// - Collect that `site-packages` path
/// - Collect any search paths listed in `.pth` files in that `site-packages` directory

View File

@@ -4,25 +4,19 @@ use std::num::{NonZeroU16, NonZeroUsize};
use std::ops::{RangeFrom, RangeInclusive};
use std::str::FromStr;
use once_cell::sync::Lazy;
use rustc_hash::FxHashMap;
use super::vendored::vendored_typeshed_stubs;
use crate::db::Db;
use crate::module_name::ModuleName;
use crate::{Program, PythonVersion};
static VENDORED_VERSIONS: Lazy<TypeshedVersions> = Lazy::new(|| {
pub(in crate::module_resolver) fn vendored_typeshed_versions(db: &dyn Db) -> TypeshedVersions {
TypeshedVersions::from_str(
&vendored_typeshed_stubs()
&db.vendored()
.read_to_string("stdlib/VERSIONS")
.unwrap(),
.expect("The vendored typeshed stubs should contain a VERSIONS file"),
)
.unwrap()
});
pub(crate) fn vendored_typeshed_versions() -> &'static TypeshedVersions {
&VENDORED_VERSIONS
.expect("The VERSIONS file in the vendored typeshed stubs should be well-formed")
}
pub(crate) fn typeshed_versions(db: &dyn Db) -> &TypeshedVersions {
@@ -332,6 +326,8 @@ mod tests {
use insta::assert_snapshot;
use crate::db::tests::TestDb;
use super::*;
const TYPESHED_STDLIB_DIR: &str = "stdlib";
@@ -353,12 +349,9 @@ mod tests {
#[test]
fn can_parse_vendored_versions_file() {
let versions_data = include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/vendor/typeshed/stdlib/VERSIONS"
));
let db = TestDb::new();
let versions = TypeshedVersions::from_str(versions_data).unwrap();
let versions = vendored_typeshed_versions(&db);
assert!(versions.len() > 100);
assert!(versions.len() < 1000);
@@ -395,9 +388,10 @@ mod tests {
#[test]
fn typeshed_versions_consistent_with_vendored_stubs() {
const VERSIONS_DATA: &str = include_str!("../../../vendor/typeshed/stdlib/VERSIONS");
let vendored_typeshed_dir = Path::new("vendor/typeshed").canonicalize().unwrap();
let vendored_typeshed_versions = TypeshedVersions::from_str(VERSIONS_DATA).unwrap();
let db = TestDb::new();
let vendored_typeshed_versions = vendored_typeshed_versions(&db);
let vendored_typeshed_dir =
Path::new(env!("CARGO_MANIFEST_DIR")).join("../ruff_vendored/vendor/typeshed");
let mut empty_iterator = true;

View File

@@ -1,8 +0,0 @@
pub use self::vendored::vendored_typeshed_stubs;
pub(super) use self::versions::{
typeshed_versions, vendored_typeshed_versions, TypeshedVersions, TypeshedVersionsParseError,
TypeshedVersionsQueryResult,
};
mod vendored;
mod versions;

View File

@@ -521,6 +521,54 @@ impl<'db> Type<'db> {
}
}
/// Resolves the boolean value of a type.
///
/// This is used to determine the value that would be returned
/// when `bool(x)` is called on an object `x`.
fn bool(&self, db: &'db dyn Db) -> Truthiness {
match self {
Type::Any | Type::Never | Type::Unknown | Type::Unbound => Truthiness::Ambiguous,
Type::None => Truthiness::AlwaysFalse,
Type::Function(_) | Type::RevealTypeFunction(_) => Truthiness::AlwaysTrue,
Type::Module(_) => Truthiness::AlwaysTrue,
Type::Class(_) => {
// TODO: lookup `__bool__` and `__len__` methods on the class's metaclass
// More info in https://docs.python.org/3/library/stdtypes.html#truth-value-testing
Truthiness::Ambiguous
}
Type::Instance(_) => {
// TODO: lookup `__bool__` and `__len__` methods on the instance's class
// More info in https://docs.python.org/3/library/stdtypes.html#truth-value-testing
Truthiness::Ambiguous
}
Type::Union(union) => {
let union_elements = union.elements(db);
let first_element_truthiness = union_elements[0].bool(db);
if first_element_truthiness.is_ambiguous() {
return Truthiness::Ambiguous;
}
if !union_elements
.iter()
.skip(1)
.all(|element| element.bool(db) == first_element_truthiness)
{
return Truthiness::Ambiguous;
}
first_element_truthiness
}
Type::Intersection(_) => {
// TODO
Truthiness::Ambiguous
}
Type::IntLiteral(num) => Truthiness::from(*num != 0),
Type::BooleanLiteral(bool) => Truthiness::from(*bool),
Type::StringLiteral(str) => Truthiness::from(!str.value(db).is_empty()),
Type::LiteralString => Truthiness::Ambiguous,
Type::BytesLiteral(bytes) => Truthiness::from(!bytes.value(db).is_empty()),
Type::Tuple(items) => Truthiness::from(!items.elements(db).is_empty()),
}
}
/// Return the type resulting from calling an object of this type.
///
/// Returns `None` if `self` is not a callable type.
@@ -701,12 +749,12 @@ enum CallOutcome<'db> {
impl<'db> CallOutcome<'db> {
/// Create a new `CallOutcome::Callable` with given return type.
fn callable(return_ty: Type<'db>) -> CallOutcome {
fn callable(return_ty: Type<'db>) -> CallOutcome<'db> {
CallOutcome::Callable { return_ty }
}
/// Create a new `CallOutcome::NotCallable` with given not-callable type.
fn not_callable(not_callable_ty: Type<'db>) -> CallOutcome {
fn not_callable(not_callable_ty: Type<'db>) -> CallOutcome<'db> {
CallOutcome::NotCallable { not_callable_ty }
}
@@ -719,7 +767,10 @@ impl<'db> CallOutcome<'db> {
}
/// Create a new `CallOutcome::Union` with given wrapped outcomes.
fn union(called_ty: Type<'db>, outcomes: impl Into<Box<[CallOutcome<'db>]>>) -> CallOutcome {
fn union(
called_ty: Type<'db>,
outcomes: impl Into<Box<[CallOutcome<'db>]>>,
) -> CallOutcome<'db> {
CallOutcome::Union {
called_ty,
outcomes: outcomes.into(),
@@ -870,6 +921,48 @@ impl<'db> IterationOutcome<'db> {
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum Truthiness {
/// For an object `x`, `bool(x)` will always return `True`
AlwaysTrue,
/// For an object `x`, `bool(x)` will always return `False`
AlwaysFalse,
/// For an object `x`, `bool(x)` could return either `True` or `False`
Ambiguous,
}
impl Truthiness {
const fn is_ambiguous(self) -> bool {
matches!(self, Truthiness::Ambiguous)
}
const fn negate(self) -> Self {
match self {
Self::AlwaysTrue => Self::AlwaysFalse,
Self::AlwaysFalse => Self::AlwaysTrue,
Self::Ambiguous => Self::Ambiguous,
}
}
fn into_type(self, db: &dyn Db) -> Type {
match self {
Self::AlwaysTrue => Type::BooleanLiteral(true),
Self::AlwaysFalse => Type::BooleanLiteral(false),
Self::Ambiguous => builtins_symbol_ty(db, "bool").to_instance(db),
}
}
}
impl From<bool> for Truthiness {
fn from(value: bool) -> Self {
if value {
Truthiness::AlwaysTrue
} else {
Truthiness::AlwaysFalse
}
}
}
#[salsa::interned]
pub struct FunctionType<'db> {
/// name of the function at definition
@@ -1072,7 +1165,10 @@ pub struct TupleType<'db> {
#[cfg(test)]
mod tests {
use super::{builtins_symbol_ty, BytesLiteralType, StringLiteralType, Type, UnionType};
use super::{
builtins_symbol_ty, BytesLiteralType, StringLiteralType, Truthiness, TupleType, Type,
UnionType,
};
use crate::db::tests::TestDb;
use crate::program::{Program, SearchPathSettings};
use crate::python_version::PythonVersion;
@@ -1113,6 +1209,7 @@ mod tests {
BytesLiteral(&'static str),
BuiltinInstance(&'static str),
Union(Vec<Ty>),
Tuple(Vec<Ty>),
}
impl Ty {
@@ -1133,6 +1230,10 @@ mod tests {
Ty::Union(tys) => {
UnionType::from_elements(db, tys.into_iter().map(|ty| ty.into_type(db)))
}
Ty::Tuple(tys) => {
let elements = tys.into_iter().map(|ty| ty.into_type(db)).collect();
Type::Tuple(TupleType::new(db, elements))
}
}
}
}
@@ -1202,4 +1303,32 @@ mod tests {
assert!(from.into_type(&db).is_equivalent_to(&db, to.into_type(&db)));
}
#[test_case(Ty::IntLiteral(1); "is_int_literal_truthy")]
#[test_case(Ty::IntLiteral(-1))]
#[test_case(Ty::StringLiteral("foo"))]
#[test_case(Ty::Tuple(vec![Ty::IntLiteral(0)]))]
#[test_case(Ty::Union(vec![Ty::IntLiteral(1), Ty::IntLiteral(2)]))]
fn is_truthy(ty: Ty) {
let db = setup_db();
assert_eq!(ty.into_type(&db).bool(&db), Truthiness::AlwaysTrue);
}
#[test_case(Ty::Tuple(vec![]))]
#[test_case(Ty::IntLiteral(0))]
#[test_case(Ty::StringLiteral(""))]
#[test_case(Ty::Union(vec![Ty::IntLiteral(0), Ty::IntLiteral(0)]))]
fn is_falsy(ty: Ty) {
let db = setup_db();
assert_eq!(ty.into_type(&db).bool(&db), Truthiness::AlwaysFalse);
}
#[test_case(Ty::BuiltinInstance("str"))]
#[test_case(Ty::Union(vec![Ty::IntLiteral(1), Ty::IntLiteral(0)]))]
#[test_case(Ty::Union(vec![Ty::BuiltinInstance("str"), Ty::IntLiteral(0)]))]
#[test_case(Ty::Union(vec![Ty::BuiltinInstance("str"), Ty::IntLiteral(1)]))]
fn boolean_value_is_unknown(ty: Ty) {
let db = setup_db();
assert_eq!(ty.into_type(&db).bool(&db), Truthiness::Ambiguous);
}
}

View File

@@ -28,14 +28,13 @@
//! definitions once the rest of the types in the scope have been inferred.
use std::num::NonZeroU32;
use rustc_hash::FxHashMap;
use salsa;
use salsa::plumbing::AsId;
use ruff_db::files::File;
use ruff_db::parsed::parsed_module;
use ruff_python_ast::{self as ast, AnyNodeRef, ExprContext, UnaryOp};
use ruff_text_size::Ranged;
use rustc_hash::FxHashMap;
use salsa;
use salsa::plumbing::AsId;
use crate::module_name::ModuleName;
use crate::module_resolver::{file_to_module, resolve_module};
@@ -52,7 +51,7 @@ use crate::types::diagnostic::{TypeCheckDiagnostic, TypeCheckDiagnostics};
use crate::types::{
bindings_ty, builtins_symbol_ty, declarations_ty, global_symbol_ty, symbol_ty,
typing_extensions_symbol_ty, BytesLiteralType, ClassType, FunctionType, StringLiteralType,
TupleType, Type, TypeArrayDisplay, UnionType,
Truthiness, TupleType, Type, TypeArrayDisplay, UnionType,
};
use crate::Db;
@@ -2211,6 +2210,7 @@ impl<'db> TypeInferenceBuilder<'db> {
match (op, self.infer_expression(operand)) {
(UnaryOp::USub, Type::IntLiteral(value)) => Type::IntLiteral(-value),
(UnaryOp::Not, ty) => ty.bool(self.db).negate().into_type(self.db),
_ => Type::Unknown, // TODO other unary op types
}
}
@@ -2317,16 +2317,35 @@ impl<'db> TypeInferenceBuilder<'db> {
fn infer_boolean_expression(&mut self, bool_op: &ast::ExprBoolOp) -> Type<'db> {
let ast::ExprBoolOp {
range: _,
op: _,
op,
values,
} = bool_op;
for value in values {
self.infer_expression(value);
}
// TODO resolve bool op
Type::Unknown
let mut done = false;
UnionType::from_elements(
self.db,
values.iter().enumerate().map(|(i, value)| {
// We need to infer the type of every expression (that's an invariant maintained by
// type inference), even if we can short-circuit boolean evaluation of some of
// those types.
let value_ty = self.infer_expression(value);
if done {
Type::Never
} else {
let is_last = i == values.len() - 1;
match (value_ty.bool(self.db), is_last, op) {
(Truthiness::Ambiguous, _, _) => value_ty,
(Truthiness::AlwaysTrue, false, ast::BoolOp::And) => Type::Never,
(Truthiness::AlwaysFalse, false, ast::BoolOp::Or) => Type::Never,
(Truthiness::AlwaysFalse, _, ast::BoolOp::And)
| (Truthiness::AlwaysTrue, _, ast::BoolOp::Or) => {
done = true;
value_ty
}
(_, true, _) => value_ty,
}
}
}),
)
}
fn infer_compare_expression(&mut self, compare: &ast::ExprCompare) -> Type<'db> {
@@ -3142,6 +3161,241 @@ mod tests {
Ok(())
}
#[test]
fn not_none_literal() -> anyhow::Result<()> {
let mut db = setup_db();
db.write_file(
"src/a.py",
r#"
a = not None
b = not not None
"#,
)?;
assert_public_ty(&db, "src/a.py", "a", "Literal[True]");
assert_public_ty(&db, "src/a.py", "b", "Literal[False]");
Ok(())
}
#[test]
fn not_function() -> anyhow::Result<()> {
let mut db = setup_db();
db.write_file(
"src/a.py",
r#"
from typing import reveal_type
def f():
return 1
a = not f
b = not reveal_type
"#,
)?;
assert_public_ty(&db, "src/a.py", "a", "Literal[False]");
// TODO Unknown should not be part of the type of typing.reveal_type
// assert_public_ty(&db, "src/a.py", "b", "Literal[False]");
Ok(())
}
#[test]
fn not_module() -> anyhow::Result<()> {
let mut db = setup_db();
db.write_files([
(
"src/a.py",
"import b; import warnings;
x = not b;
z = not warnings",
),
("src/b.py", "y = 1"),
])?;
assert_public_ty(&db, "src/a.py", "x", "Literal[False]");
assert_public_ty(&db, "src/a.py", "z", "Literal[False]");
Ok(())
}
#[test]
fn not_union() -> anyhow::Result<()> {
let mut db = setup_db();
db.write_file(
"src/a.py",
r#"
if flag:
p = 1
q = 3.3
r = "hello"
s = "world"
t = 0
else:
p = "hello"
q = 4
r = ""
s = 0
t = ""
a = not p
b = not q
c = not r
d = not s
e = not t
"#,
)?;
assert_public_ty(&db, "src/a.py", "a", "Literal[False]");
assert_public_ty(&db, "src/a.py", "b", "bool");
assert_public_ty(&db, "src/a.py", "c", "bool");
assert_public_ty(&db, "src/a.py", "d", "bool");
assert_public_ty(&db, "src/a.py", "e", "Literal[True]");
Ok(())
}
#[test]
fn not_integer_literal() -> anyhow::Result<()> {
let mut db = setup_db();
db.write_file(
"src/a.py",
r#"
a = not 1
b = not 1234567890987654321
e = not 0
x = not -1
y = not -1234567890987654321
z = not --987
"#,
)?;
assert_public_ty(&db, "src/a.py", "a", "Literal[False]");
assert_public_ty(&db, "src/a.py", "b", "Literal[False]");
assert_public_ty(&db, "src/a.py", "e", "Literal[True]");
assert_public_ty(&db, "src/a.py", "x", "Literal[False]");
assert_public_ty(&db, "src/a.py", "y", "Literal[False]");
assert_public_ty(&db, "src/a.py", "z", "Literal[False]");
Ok(())
}
#[test]
fn not_boolean_literal() -> anyhow::Result<()> {
let mut db = setup_db();
db.write_file(
"src/a.py",
r#"
w = True
x = False
y = not w
z = not x
"#,
)?;
assert_public_ty(&db, "src/a.py", "w", "Literal[True]");
assert_public_ty(&db, "src/a.py", "x", "Literal[False]");
assert_public_ty(&db, "src/a.py", "y", "Literal[False]");
assert_public_ty(&db, "src/a.py", "z", "Literal[True]");
Ok(())
}
#[test]
fn not_string_literal() -> anyhow::Result<()> {
let mut db = setup_db();
db.write_file(
"src/a.py",
r#"
a = not "hello"
b = not ""
c = not "0"
d = not "hello" + "world"
"#,
)?;
assert_public_ty(&db, "src/a.py", "a", "Literal[False]");
assert_public_ty(&db, "src/a.py", "b", "Literal[True]");
assert_public_ty(&db, "src/a.py", "c", "Literal[False]");
assert_public_ty(&db, "src/a.py", "d", "Literal[False]");
Ok(())
}
#[test]
fn not_literal_string() -> anyhow::Result<()> {
let mut db = setup_db();
let content = format!(
r#"
v = not "{y}"
w = not 10*"{y}"
x = not "{y}"*10
z = not 0*"{y}"
u = not (-100)*"{y}"
"#,
y = "a".repeat(TypeInferenceBuilder::MAX_STRING_LITERAL_SIZE + 1),
);
db.write_dedented("src/a.py", &content)?;
assert_public_ty(&db, "src/a.py", "v", "bool");
assert_public_ty(&db, "src/a.py", "w", "bool");
assert_public_ty(&db, "src/a.py", "x", "bool");
assert_public_ty(&db, "src/a.py", "z", "Literal[True]");
assert_public_ty(&db, "src/a.py", "u", "Literal[True]");
Ok(())
}
#[test]
fn not_bytes_literal() -> anyhow::Result<()> {
let mut db = setup_db();
db.write_file(
"src/a.py",
r#"
a = not b"hello"
b = not b""
c = not b"0"
d = not b"hello" + b"world"
"#,
)?;
assert_public_ty(&db, "src/a.py", "a", "Literal[False]");
assert_public_ty(&db, "src/a.py", "b", "Literal[True]");
assert_public_ty(&db, "src/a.py", "c", "Literal[False]");
assert_public_ty(&db, "src/a.py", "d", "Literal[False]");
Ok(())
}
#[test]
fn not_tuple() -> anyhow::Result<()> {
let mut db = setup_db();
db.write_file(
"src/a.py",
r#"
a = not (1,)
b = not (1, 2)
c = not (1, 2, 3)
d = not ()
e = not ("hello",)
f = not (1, "hello")
"#,
)?;
assert_public_ty(&db, "src/a.py", "a", "Literal[False]");
assert_public_ty(&db, "src/a.py", "b", "Literal[False]");
assert_public_ty(&db, "src/a.py", "c", "Literal[False]");
assert_public_ty(&db, "src/a.py", "d", "Literal[True]");
assert_public_ty(&db, "src/a.py", "e", "Literal[False]");
assert_public_ty(&db, "src/a.py", "f", "Literal[False]");
Ok(())
}
#[test]
fn string_type() -> anyhow::Result<()> {
let mut db = setup_db();
@@ -6025,4 +6279,96 @@ mod tests {
);
Ok(())
}
#[test]
fn boolean_or_expression() -> anyhow::Result<()> {
let mut db = setup_db();
db.write_dedented(
"/src/a.py",
"
def foo() -> str:
pass
a = True or False
b = 'x' or 'y' or 'z'
c = '' or 'y' or 'z'
d = False or 'z'
e = False or True
f = False or False
g = foo() or False
h = foo() or True
",
)?;
assert_public_ty(&db, "/src/a.py", "a", "Literal[True]");
assert_public_ty(&db, "/src/a.py", "b", r#"Literal["x"]"#);
assert_public_ty(&db, "/src/a.py", "c", r#"Literal["y"]"#);
assert_public_ty(&db, "/src/a.py", "d", r#"Literal["z"]"#);
assert_public_ty(&db, "/src/a.py", "e", "Literal[True]");
assert_public_ty(&db, "/src/a.py", "f", "Literal[False]");
assert_public_ty(&db, "/src/a.py", "g", "str | Literal[False]");
assert_public_ty(&db, "/src/a.py", "h", "str | Literal[True]");
Ok(())
}
#[test]
fn boolean_and_expression() -> anyhow::Result<()> {
let mut db = setup_db();
db.write_dedented(
"/src/a.py",
"
def foo() -> str:
pass
a = True and False
b = False and True
c = foo() and False
d = foo() and True
e = 'x' and 'y' and 'z'
f = 'x' and 'y' and ''
g = '' and 'y'
",
)?;
assert_public_ty(&db, "/src/a.py", "a", "Literal[False]");
assert_public_ty(&db, "/src/a.py", "b", "Literal[False]");
assert_public_ty(&db, "/src/a.py", "c", "str | Literal[False]");
assert_public_ty(&db, "/src/a.py", "d", "str | Literal[True]");
assert_public_ty(&db, "/src/a.py", "e", r#"Literal["z"]"#);
assert_public_ty(&db, "/src/a.py", "f", r#"Literal[""]"#);
assert_public_ty(&db, "/src/a.py", "g", r#"Literal[""]"#);
Ok(())
}
#[test]
fn boolean_complex_expression() -> anyhow::Result<()> {
let mut db = setup_db();
db.write_dedented(
"/src/a.py",
r#"
def foo() -> str:
pass
a = "x" and "y" or "z"
b = "x" or "y" and "z"
c = "" and "y" or "z"
d = "" or "y" and "z"
e = "x" and "y" or ""
f = "x" or "y" and ""
"#,
)?;
assert_public_ty(&db, "/src/a.py", "a", r#"Literal["y"]"#);
assert_public_ty(&db, "/src/a.py", "b", r#"Literal["x"]"#);
assert_public_ty(&db, "/src/a.py", "c", r#"Literal["z"]"#);
assert_public_ty(&db, "/src/a.py", "d", r#"Literal["z"]"#);
assert_public_ty(&db, "/src/a.py", "e", r#"Literal["y"]"#);
assert_public_ty(&db, "/src/a.py", "f", r#"Literal["x"]"#);
Ok(())
}
}

View File

@@ -20,9 +20,9 @@ default = ["console_error_panic_hook"]
[dependencies]
red_knot_python_semantic = { workspace = true }
red_knot_workspace = { workspace = true }
red_knot_workspace = { workspace = true, default-features = false, features = ["deflate"] }
ruff_db = { workspace = true }
ruff_db = { workspace = true, features = [] }
ruff_notebook = { workspace = true }
console_error_panic_hook = { workspace = true, optional = true }

View File

@@ -18,6 +18,7 @@ ruff_cache = { workspace = true }
ruff_db = { workspace = true, features = ["os", "cache"] }
ruff_python_ast = { workspace = true }
ruff_text_size = { workspace = true }
ruff_vendored = { workspace = true }
anyhow = { workspace = true }
crossbeam = { workspace = true }
@@ -31,5 +32,10 @@ tracing = { workspace = true }
ruff_db = { workspace = true, features = ["testing"] }
tempfile = { workspace = true }
[features]
default = ["zstd"]
zstd = ["ruff_vendored/zstd"]
deflate = ["ruff_vendored/deflate"]
[lints]
workspace = true

View File

@@ -4,7 +4,7 @@ use std::sync::Arc;
use salsa::plumbing::ZalsaDatabase;
use salsa::{Cancelled, Event};
use red_knot_python_semantic::{vendored_typeshed_stubs, Db as SemanticDb, Program};
use red_knot_python_semantic::{Db as SemanticDb, Program};
use ruff_db::files::{File, Files};
use ruff_db::system::System;
use ruff_db::vendored::VendoredFileSystem;
@@ -124,7 +124,7 @@ impl SemanticDb for RootDatabase {
#[salsa::db]
impl SourceDb for RootDatabase {
fn vendored(&self) -> &VendoredFileSystem {
vendored_typeshed_stubs()
ruff_vendored::file_system()
}
fn system(&self) -> &dyn System {
@@ -161,7 +161,7 @@ pub(crate) mod tests {
use salsa::Event;
use red_knot_python_semantic::{vendored_typeshed_stubs, Db as SemanticDb};
use red_knot_python_semantic::Db as SemanticDb;
use ruff_db::files::Files;
use ruff_db::system::{DbWithTestSystem, System, TestSystem};
use ruff_db::vendored::VendoredFileSystem;
@@ -183,7 +183,7 @@ pub(crate) mod tests {
Self {
storage: salsa::Storage::default(),
system: TestSystem::default(),
vendored: vendored_typeshed_stubs().clone(),
vendored: ruff_vendored::file_system().clone(),
files: Files::default(),
events: Arc::default(),
}

View File

@@ -1,6 +1,6 @@
[package]
name = "ruff"
version = "0.6.6"
version = "0.6.8"
publish = true
authors = { workspace = true }
edition = { workspace = true }
@@ -14,7 +14,7 @@ default-run = "ruff"
[dependencies]
ruff_cache = { workspace = true }
ruff_db = { workspace = true }
ruff_db = { workspace = true, default-features = false, features = ["os"] }
ruff_diagnostics = { workspace = true }
ruff_graph = { workspace = true, features = ["serde", "clap"] }
ruff_linter = { workspace = true, features = ["clap"] }

View File

@@ -10,6 +10,7 @@ use ruff_linter::{warn_user, warn_user_once};
use ruff_python_ast::{PySourceType, SourceType};
use ruff_workspace::resolver::{match_exclusion, python_files_in_path, ResolvedFile};
use rustc_hash::FxHashMap;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
@@ -67,111 +68,120 @@ pub(crate) fn analyze_graph(
.into(),
)?;
// Create a cache for resolved globs.
let glob_resolver = Arc::new(Mutex::new(GlobResolver::default()));
let imports = {
// Create a cache for resolved globs.
let glob_resolver = Arc::new(Mutex::new(GlobResolver::default()));
// Collect and resolve the imports for each file.
let result = Arc::new(Mutex::new(Vec::new()));
let inner_result = Arc::clone(&result);
// Collect and resolve the imports for each file.
let result = Arc::new(Mutex::new(Vec::new()));
let inner_result = Arc::clone(&result);
let db = db.snapshot();
rayon::scope(move |scope| {
for resolved_file in paths {
let Ok(resolved_file) = resolved_file else {
continue;
};
rayon::scope(move |scope| {
for resolved_file in paths {
let Ok(resolved_file) = resolved_file else {
continue;
};
let path = resolved_file.path();
let package = path
.parent()
.and_then(|parent| package_roots.get(parent))
.and_then(Clone::clone);
let path = resolved_file.path();
let package = path
.parent()
.and_then(|parent| package_roots.get(parent))
.and_then(Clone::clone);
// Resolve the per-file settings.
let settings = resolver.resolve(path);
let string_imports = settings.analyze.detect_string_imports;
let include_dependencies = settings.analyze.include_dependencies.get(path).cloned();
// Resolve the per-file settings.
let settings = resolver.resolve(path);
let string_imports = settings.analyze.detect_string_imports;
let include_dependencies = settings.analyze.include_dependencies.get(path).cloned();
// Skip excluded files.
if (settings.file_resolver.force_exclude || !resolved_file.is_root())
&& match_exclusion(
resolved_file.path(),
resolved_file.file_name(),
&settings.analyze.exclude,
)
{
continue;
}
// Ignore non-Python files.
let source_type = match settings.analyze.extension.get(path) {
None => match SourceType::from(&path) {
SourceType::Python(source_type) => source_type,
SourceType::Toml(_) => {
debug!("Ignoring TOML file: {}", path.display());
continue;
}
},
Some(language) => PySourceType::from(language),
};
if matches!(source_type, PySourceType::Ipynb) {
debug!("Ignoring Jupyter notebook: {}", path.display());
continue;
}
// Convert to system paths.
let Ok(package) = package.map(SystemPathBuf::from_path_buf).transpose() else {
warn!("Failed to convert package to system path");
continue;
};
let Ok(path) = SystemPathBuf::from_path_buf(resolved_file.into_path()) else {
warn!("Failed to convert path to system path");
continue;
};
let db = db.snapshot();
let glob_resolver = glob_resolver.clone();
let root = root.clone();
let result = inner_result.clone();
scope.spawn(move |_| {
// Identify any imports via static analysis.
let mut imports =
ModuleImports::detect(&db, &path, package.as_deref(), string_imports)
.unwrap_or_else(|err| {
warn!("Failed to generate import map for {path}: {err}");
ModuleImports::default()
});
debug!("Discovered {} imports for {}", imports.len(), path);
// Append any imports that were statically defined in the configuration.
if let Some((root, globs)) = include_dependencies {
let mut glob_resolver = glob_resolver.lock().unwrap();
imports.extend(glob_resolver.resolve(root, globs));
// Skip excluded files.
if (settings.file_resolver.force_exclude || !resolved_file.is_root())
&& match_exclusion(
resolved_file.path(),
resolved_file.file_name(),
&settings.analyze.exclude,
)
{
continue;
}
// Convert the path (and imports) to be relative to the working directory.
let path = path
.strip_prefix(&root)
.map(SystemPath::to_path_buf)
.unwrap_or(path);
let imports = imports.relative_to(&root);
// Ignore non-Python files.
let source_type = match settings.analyze.extension.get(path) {
None => match SourceType::from(&path) {
SourceType::Python(source_type) => source_type,
SourceType::Toml(_) => {
debug!("Ignoring TOML file: {}", path.display());
continue;
}
},
Some(language) => PySourceType::from(language),
};
if matches!(source_type, PySourceType::Ipynb) {
debug!("Ignoring Jupyter notebook: {}", path.display());
continue;
}
result.lock().unwrap().push((path, imports));
});
}
});
// Convert to system paths.
let Ok(package) = package.map(SystemPathBuf::from_path_buf).transpose() else {
warn!("Failed to convert package to system path");
continue;
};
let Ok(path) = SystemPathBuf::from_path_buf(resolved_file.into_path()) else {
warn!("Failed to convert path to system path");
continue;
};
// Collect the results.
let imports = Arc::into_inner(result).unwrap().into_inner()?;
let db = db.snapshot();
let glob_resolver = glob_resolver.clone();
let root = root.clone();
let result = inner_result.clone();
scope.spawn(move |_| {
// Identify any imports via static analysis.
let mut imports =
ModuleImports::detect(&db, &path, package.as_deref(), string_imports)
.unwrap_or_else(|err| {
warn!("Failed to generate import map for {path}: {err}");
ModuleImports::default()
});
debug!("Discovered {} imports for {}", imports.len(), path);
// Append any imports that were statically defined in the configuration.
if let Some((root, globs)) = include_dependencies {
let mut glob_resolver = glob_resolver.lock().unwrap();
imports.extend(glob_resolver.resolve(root, globs));
}
// Convert the path (and imports) to be relative to the working directory.
let path = path
.strip_prefix(&root)
.map(SystemPath::to_path_buf)
.unwrap_or(path);
let imports = imports.relative_to(&root);
result.lock().unwrap().push((path, imports));
});
}
});
// Collect the results.
Arc::into_inner(result).unwrap().into_inner()?
};
// Generate the import map.
let import_map = match args.direction {
Direction::Dependencies => ImportMap::from_iter(imports),
Direction::Dependents => ImportMap::reverse(imports),
Direction::Dependencies => ImportMap::dependencies(imports),
Direction::Dependents => ImportMap::dependents(imports),
};
// Print to JSON.
println!("{}", serde_json::to_string_pretty(&import_map)?);
writeln!(
std::io::stdout(),
"{}",
serde_json::to_string_pretty(&import_map)?
)?;
std::mem::forget(db);
Ok(ExitStatus::Success)
}

View File

@@ -3,6 +3,7 @@ use std::process::ExitCode;
use clap::{Parser, Subcommand};
use colored::Colorize;
use log::error;
use std::io::Write;
use ruff::args::{Args, Command};
use ruff::{run, ExitStatus};
@@ -86,7 +87,16 @@ pub fn main() -> ExitCode {
Ok(code) => code.into(),
Err(err) => {
{
use std::io::Write;
// Exit "gracefully" on broken pipe errors.
//
// See: https://github.com/BurntSushi/ripgrep/blob/bf63fe8f258afc09bae6caa48f0ae35eaf115005/crates/core/main.rs#L47C1-L61C14
for cause in err.chain() {
if let Some(ioerr) = cause.downcast_ref::<std::io::Error>() {
if ioerr.kind() == std::io::ErrorKind::BrokenPipe {
return ExitCode::from(0);
}
}
}
// Use `writeln` instead of `eprintln` to avoid panicking when the stderr pipe is broken.
let mut stderr = std::io::stderr().lock();

View File

@@ -305,3 +305,120 @@ fn exclude() -> Result<()> {
Ok(())
}
// Verifies that wildcard imports (`from x import *`) are resolved to the module they
// pull from, and that each hop of a chained wildcard re-export maps only one level deep
// (a -> b, b -> c, c -> utils, utils -> utils.helpers), not transitively flattened.
#[test]
fn wildcard() -> Result<()> {
// Lay out a small on-disk package with wildcard imports at several levels.
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
from ruff.b import *
"#})?;
root.child("ruff")
.child("b.py")
.write_str(indoc::indoc! {r#"
from ruff import c
"#})?;
root.child("ruff")
.child("c.py")
.write_str(indoc::indoc! {r#"
from ruff.utils import *
"#})?;
root.child("ruff")
.child("utils")
.child("__init__.py")
.write_str("from .helpers import *")?;
root.child("ruff")
.child("utils")
.child("helpers.py")
.write_str("")?;
// Snapshot the JSON import map emitted on stdout; INSTA_FILTERS normalizes
// platform-specific output (e.g. path separators) before comparison.
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [
"ruff/c.py"
],
"ruff/c.py": [
"ruff/utils/__init__.py"
],
"ruff/utils/__init__.py": [
"ruff/utils/helpers.py"
],
"ruff/utils/helpers.py": []
}
----- stderr -----
"###);
});
Ok(())
}
// Verifies that imports nested inside compound statements (`match` arms, `try`/`except`
// handlers) are still collected, and that multiple imports from one file all appear.
#[test]
fn nested_imports() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff").child("__init__.py").write_str("")?;
// `a.py` imports only inside a `match` case.
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
match x:
    case 1:
        import ruff.b
"#})?;
// `b.py` imports inside both the `try` body and the `except` handler.
root.child("ruff")
.child("b.py")
.write_str(indoc::indoc! {r#"
try:
    import ruff.c
except ImportError as e:
    import ruff.d
"#})?;
root.child("ruff")
.child("c.py")
.write_str(indoc::indoc! {r#"def c(): ..."#})?;
root.child("ruff")
.child("d.py")
.write_str(indoc::indoc! {r#"def d(): ..."#})?;
// Snapshot the JSON import map; both nested imports of `b.py` must be reported.
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r#"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [
"ruff/c.py",
"ruff/d.py"
],
"ruff/c.py": [],
"ruff/d.py": []
}
----- stderr -----
"#);
});
Ok(())
}

View File

@@ -33,14 +33,9 @@ tracing = { workspace = true }
tracing-subscriber = { workspace = true, optional = true }
tracing-tree = { workspace = true, optional = true }
rustc-hash = { workspace = true }
zip = { workspace = true }
[target.'cfg(not(any(target_arch = "wasm32", target_arch = "powerpc64")))'.dependencies]
zip = { workspace = true, features = ["zstd"] }
[target.'cfg(any(target_arch = "wasm32", target_arch = "powerpc64"))'.dependencies]
zip = { workspace = true, features = ["deflate"] }
[target.'cfg(target_arch = "wasm32")'.dependencies]
[target.'cfg(target_arch="wasm32")'.dependencies]
web-time = { version = "1.1.0" }
[dev-dependencies]
@@ -48,6 +43,7 @@ insta = { workspace = true }
tempfile = { workspace = true }
[features]
default = ["os"]
cache = ["ruff_cache"]
os = ["ignore"]
serde = ["dep:serde", "camino/serde1"]

View File

@@ -503,7 +503,8 @@ mod tests {
use crate::files::{system_path_to_file, vendored_path_to_file, FileError};
use crate::system::DbWithTestSystem;
use crate::tests::TestDb;
use crate::vendored::tests::VendoredFileSystemBuilder;
use crate::vendored::VendoredFileSystemBuilder;
use zip::CompressionMethod;
#[test]
fn system_existing_file() -> crate::system::Result<()> {
@@ -548,7 +549,7 @@ mod tests {
fn stubbed_vendored_file() -> crate::system::Result<()> {
let mut db = TestDb::new();
let mut vendored_builder = VendoredFileSystemBuilder::new();
let mut vendored_builder = VendoredFileSystemBuilder::new(CompressionMethod::Stored);
vendored_builder
.add_file("test.pyi", "def foo() -> str")
.unwrap();

View File

@@ -79,8 +79,9 @@ mod tests {
use crate::parsed::parsed_module;
use crate::system::{DbWithTestSystem, SystemPath, SystemVirtualPath};
use crate::tests::TestDb;
use crate::vendored::{tests::VendoredFileSystemBuilder, VendoredPath};
use crate::vendored::{VendoredFileSystemBuilder, VendoredPath};
use crate::Db;
use zip::CompressionMethod;
#[test]
fn python_file() -> crate::system::Result<()> {
@@ -150,7 +151,7 @@ mod tests {
fn vendored_file() {
let mut db = TestDb::new();
let mut vendored_builder = VendoredFileSystemBuilder::new();
let mut vendored_builder = VendoredFileSystemBuilder::new(CompressionMethod::Stored);
vendored_builder
.add_file(
"path.pyi",

View File

@@ -1,12 +1,13 @@
use std::borrow::Cow;
use std::collections::BTreeMap;
use std::fmt::{self, Debug};
use std::io::{self, Read};
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex, MutexGuard};
use zip::{read::ZipFile, ZipArchive, ZipWriter};
use crate::file_revision::FileRevision;
use zip::result::ZipResult;
use zip::write::FileOptions;
use zip::{read::ZipFile, CompressionMethod, ZipArchive, ZipWriter};
pub use self::path::{VendoredPath, VendoredPathBuf};
@@ -177,7 +178,6 @@ struct ZipFileDebugInfo {
crc32_hash: u32,
compressed_size: u64,
uncompressed_size: u64,
compression_method: zip::CompressionMethod,
kind: FileType,
}
@@ -187,7 +187,6 @@ impl<'a> From<ZipFile<'a>> for ZipFileDebugInfo {
crc32_hash: value.crc32(),
compressed_size: value.compressed_size(),
uncompressed_size: value.size(),
compression_method: value.compression(),
kind: if value.is_dir() {
FileType::Directory
} else {
@@ -341,69 +340,61 @@ impl<'a> From<&'a VendoredPath> for NormalizedVendoredPath<'a> {
}
}
/// Builds an in-memory zip archive that backs a `VendoredFileSystem`.
pub struct VendoredFileSystemBuilder {
// Zip writer targeting an in-memory buffer; consumed by `finish`.
writer: ZipWriter<io::Cursor<Vec<u8>>>,
// Compression applied to every file/directory entry added to the archive.
compression_method: CompressionMethod,
}
impl VendoredFileSystemBuilder {
/// Creates a builder that writes to a fresh in-memory buffer, applying
/// `compression_method` to every entry.
pub fn new(compression_method: CompressionMethod) -> Self {
let buffer = io::Cursor::new(Vec::new());
Self {
writer: ZipWriter::new(buffer),
compression_method,
}
}
/// Adds a file entry at `path` with the given UTF-8 `content`.
pub fn add_file(
&mut self,
path: impl AsRef<VendoredPath>,
content: &str,
) -> std::io::Result<()> {
self.writer
.start_file(path.as_ref().as_str(), self.options())?;
self.writer.write_all(content.as_bytes())
}
/// Adds an (empty) directory entry at `path`.
pub fn add_directory(&mut self, path: impl AsRef<VendoredPath>) -> ZipResult<()> {
self.writer
.add_directory(path.as_ref().as_str(), self.options())
}
/// Finalizes the archive and wraps the resulting bytes in a `VendoredFileSystem`.
pub fn finish(mut self) -> Result<VendoredFileSystem> {
let buffer = self.writer.finish()?;
VendoredFileSystem::new(buffer.into_inner())
}
// Shared per-entry options: the configured compression plus 0o644 unix permissions.
fn options(&self) -> FileOptions {
FileOptions::default()
.compression_method(self.compression_method)
.unix_permissions(0o644)
}
}
#[cfg(test)]
pub(crate) mod tests {
use std::io::Write;
use insta::assert_snapshot;
use zip::result::ZipResult;
use zip::write::FileOptions;
use zip::{CompressionMethod, ZipWriter};
use super::*;
const FUNCTOOLS_CONTENTS: &str = "def update_wrapper(): ...";
const ASYNCIO_TASKS_CONTENTS: &str = "class Task: ...";
pub struct VendoredFileSystemBuilder {
writer: ZipWriter<io::Cursor<Vec<u8>>>,
}
impl Default for VendoredFileSystemBuilder {
fn default() -> Self {
Self::new()
}
}
impl VendoredFileSystemBuilder {
pub fn new() -> Self {
let buffer = io::Cursor::new(Vec::new());
Self {
writer: ZipWriter::new(buffer),
}
}
pub fn add_file(
&mut self,
path: impl AsRef<VendoredPath>,
content: &str,
) -> std::io::Result<()> {
self.writer
.start_file(path.as_ref().as_str(), Self::options())?;
self.writer.write_all(content.as_bytes())
}
pub fn add_directory(&mut self, path: impl AsRef<VendoredPath>) -> ZipResult<()> {
self.writer
.add_directory(path.as_ref().as_str(), Self::options())
}
pub fn finish(mut self) -> Result<VendoredFileSystem> {
let buffer = self.writer.finish()?;
VendoredFileSystem::new(buffer.into_inner())
}
fn options() -> FileOptions {
FileOptions::default()
.compression_method(CompressionMethod::Zstd)
.unix_permissions(0o644)
}
}
fn mock_typeshed() -> VendoredFileSystem {
let mut builder = VendoredFileSystemBuilder::new();
let mut builder = VendoredFileSystemBuilder::new(CompressionMethod::Stored);
builder.add_directory("stdlib/").unwrap();
builder
@@ -441,28 +432,24 @@ pub(crate) mod tests {
crc32_hash: 0,
compressed_size: 0,
uncompressed_size: 0,
compression_method: Stored,
kind: Directory,
},
"stdlib/asyncio/": ZipFileDebugInfo {
crc32_hash: 0,
compressed_size: 0,
uncompressed_size: 0,
compression_method: Stored,
kind: Directory,
},
"stdlib/asyncio/tasks.pyi": ZipFileDebugInfo {
crc32_hash: 2826547428,
compressed_size: 24,
compressed_size: 15,
uncompressed_size: 15,
compression_method: Zstd,
kind: File,
},
"stdlib/functools.pyi": ZipFileDebugInfo {
crc32_hash: 1099005079,
compressed_size: 34,
compressed_size: 25,
uncompressed_size: 25,
compression_method: Zstd,
kind: File,
},
},

View File

@@ -16,12 +16,15 @@ ruff_db = { workspace = true, features = ["os", "serde"] }
ruff_linter = { workspace = true }
ruff_macros = { workspace = true }
ruff_python_ast = { workspace = true }
ruff_python_parser = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true, optional = true }
once_cell = { workspace = true }
salsa = { workspace = true }
schemars = { workspace = true, optional = true }
serde = { workspace = true, optional = true }
zip = { workspace = true, features = [] }
[lints]
workspace = true

View File

@@ -1,6 +1,8 @@
use red_knot_python_semantic::ModuleName;
use ruff_python_ast::visitor::source_order::{walk_body, walk_expr, walk_stmt, SourceOrderVisitor};
use ruff_python_ast::{self as ast, Expr, ModModule, Stmt};
use ruff_python_ast::visitor::source_order::{
walk_expr, walk_module, walk_stmt, SourceOrderVisitor,
};
use ruff_python_ast::{self as ast, Expr, Mod, Stmt};
/// Collect all imports for a given Python file.
#[derive(Default, Debug)]
@@ -23,8 +25,8 @@ impl<'a> Collector<'a> {
}
#[must_use]
pub(crate) fn collect(mut self, module: &ModModule) -> Vec<CollectedImport> {
walk_body(&mut self, &module.body);
pub(crate) fn collect(mut self, module: &Mod) -> Vec<CollectedImport> {
walk_module(&mut self, module);
self.imports
}
}
@@ -66,8 +68,10 @@ impl<'ast> SourceOrderVisitor<'ast> for Collector<'_> {
components.extend(module.split('.'));
}
// Add the alias name.
components.push(alias.name.as_str());
// Add the alias name, unless it's a wildcard import.
if alias.name.as_str() != "*" {
components.push(alias.name.as_str());
}
if let Some(module_name) = ModuleName::from_components(components) {
self.imports.push(CollectedImport::ImportFrom(module_name));
@@ -81,9 +85,38 @@ impl<'ast> SourceOrderVisitor<'ast> for Collector<'_> {
}
}
}
_ => {
Stmt::FunctionDef(_)
| Stmt::ClassDef(_)
| Stmt::While(_)
| Stmt::If(_)
| Stmt::With(_)
| Stmt::Match(_)
| Stmt::Try(_)
| Stmt::For(_) => {
// Always traverse into compound statements.
walk_stmt(self, stmt);
}
Stmt::Return(_)
| Stmt::Delete(_)
| Stmt::Assign(_)
| Stmt::AugAssign(_)
| Stmt::AnnAssign(_)
| Stmt::TypeAlias(_)
| Stmt::Raise(_)
| Stmt::Assert(_)
| Stmt::Global(_)
| Stmt::Nonlocal(_)
| Stmt::Expr(_)
| Stmt::Pass(_)
| Stmt::Break(_)
| Stmt::Continue(_)
| Stmt::IpyEscapeCommand(_) => {
// Only traverse simple statements when string imports is enabled.
if self.string_imports {
walk_stmt(self, stmt);
}
}
}
}

View File

@@ -1,17 +1,25 @@
use anyhow::Result;
use zip::CompressionMethod;
use red_knot_python_semantic::{Db, Program, ProgramSettings, PythonVersion, SearchPathSettings};
use ruff_db::files::{File, Files};
use ruff_db::system::{OsSystem, System, SystemPathBuf};
use ruff_db::vendored::VendoredFileSystem;
use ruff_db::vendored::{VendoredFileSystem, VendoredFileSystemBuilder};
use ruff_db::{Db as SourceDb, Upcast};
static EMPTY_VENDORED: once_cell::sync::Lazy<VendoredFileSystem> =
once_cell::sync::Lazy::new(|| {
let mut builder = VendoredFileSystemBuilder::new(CompressionMethod::Stored);
builder.add_file("stdlib/VERSIONS", "\n").unwrap();
builder.finish().unwrap()
});
#[salsa::db]
#[derive(Default)]
pub struct ModuleDb {
storage: salsa::Storage<Self>,
files: Files,
system: OsSystem,
vendored: VendoredFileSystem,
}
impl ModuleDb {
@@ -26,12 +34,10 @@ impl ModuleDb {
.next()
.ok_or_else(|| anyhow::anyhow!("No source roots provided"))?;
let mut search_paths = SearchPathSettings::new(src_root.to_path_buf());
let mut search_paths = SearchPathSettings::new(src_root);
// Add the remaining source roots as extra paths.
for src_root in src_roots {
search_paths.extra_paths.push(src_root.to_path_buf());
}
search_paths.extra_paths.extend(src_roots);
search_paths
};
@@ -54,7 +60,6 @@ impl ModuleDb {
Self {
storage: self.storage.clone(),
system: self.system.clone(),
vendored: self.vendored.clone(),
files: self.files.snapshot(),
}
}
@@ -72,7 +77,7 @@ impl Upcast<dyn SourceDb> for ModuleDb {
#[salsa::db]
impl SourceDb for ModuleDb {
fn vendored(&self) -> &VendoredFileSystem {
&self.vendored
&EMPTY_VENDORED
}
fn system(&self) -> &dyn System {

View File

@@ -1,15 +1,15 @@
use std::collections::{BTreeMap, BTreeSet};
use anyhow::Result;
use ruff_db::system::{SystemPath, SystemPathBuf};
use ruff_python_ast::helpers::to_module_path;
use ruff_python_parser::{parse, Mode};
use crate::collector::Collector;
pub use crate::db::ModuleDb;
use crate::resolver::Resolver;
pub use crate::settings::{AnalyzeSettings, Direction};
use anyhow::Result;
use red_knot_python_semantic::SemanticModel;
use ruff_db::files::system_path_to_file;
use ruff_db::parsed::parsed_module;
use ruff_db::system::{SystemPath, SystemPathBuf};
use ruff_python_ast::helpers::to_module_path;
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, BTreeSet};
mod collector;
mod db;
@@ -17,7 +17,7 @@ mod resolver;
mod settings;
#[derive(Debug, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct ModuleImports(BTreeSet<SystemPathBuf>);
impl ModuleImports {
@@ -29,11 +29,11 @@ impl ModuleImports {
string_imports: bool,
) -> Result<Self> {
// Read and parse the source code.
let file = system_path_to_file(db, path)?;
let parsed = parsed_module(db, file);
let source = std::fs::read_to_string(path)?;
let parsed = parse(&source, Mode::Module)?;
let module_path =
package.and_then(|package| to_module_path(package.as_std_path(), path.as_std_path()));
let model = SemanticModel::new(db, file);
// Collect the imports.
let imports =
@@ -42,7 +42,7 @@ impl ModuleImports {
// Resolve the imports.
let mut resolved_imports = ModuleImports::default();
for import in imports {
let Some(resolved) = Resolver::new(&model).resolve(import) else {
let Some(resolved) = Resolver::new(db).resolve(import) else {
continue;
};
let Some(path) = resolved.as_system_path() else {
@@ -92,18 +92,25 @@ impl ModuleImports {
}
#[derive(Debug, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct ImportMap(BTreeMap<SystemPathBuf, ModuleImports>);
impl ImportMap {
/// Insert a module's imports into the map.
pub fn insert(&mut self, path: SystemPathBuf, imports: ModuleImports) {
self.0.insert(path, imports);
/// Create an [`ImportMap`] of file to its dependencies.
///
/// Assumes that the input is a collection of unique file paths and their imports.
pub fn dependencies(imports: impl IntoIterator<Item = (SystemPathBuf, ModuleImports)>) -> Self {
let mut map = ImportMap::default();
for (path, imports) in imports {
map.0.insert(path, imports);
}
map
}
/// Reverse the [`ImportMap`], e.g., to convert from dependencies to dependents.
#[must_use]
pub fn reverse(imports: impl IntoIterator<Item = (SystemPathBuf, ModuleImports)>) -> Self {
/// Create an [`ImportMap`] of file to its dependents.
///
/// Assumes that the input is a collection of unique file paths and their imports.
pub fn dependents(imports: impl IntoIterator<Item = (SystemPathBuf, ModuleImports)>) -> Self {
let mut reverse = ImportMap::default();
for (path, imports) in imports {
for import in imports.0 {
@@ -114,13 +121,3 @@ impl ImportMap {
reverse
}
}
impl FromIterator<(SystemPathBuf, ModuleImports)> for ImportMap {
fn from_iter<I: IntoIterator<Item = (SystemPathBuf, ModuleImports)>>(iter: I) -> Self {
let mut map = ImportMap::default();
for (path, imports) in iter {
// Merge (rather than overwrite) so that repeated paths accumulate all
// of their imports into a single entry.
map.0.entry(path).or_default().0.extend(imports.0);
}
map
}
}

View File

@@ -1,37 +1,36 @@
use red_knot_python_semantic::SemanticModel;
use red_knot_python_semantic::resolve_module;
use ruff_db::files::FilePath;
use crate::collector::CollectedImport;
use crate::ModuleDb;
/// Collect all imports for a given Python file.
pub(crate) struct Resolver<'a> {
semantic: &'a SemanticModel<'a>,
db: &'a ModuleDb,
}
impl<'a> Resolver<'a> {
/// Initialize a [`Resolver`] with a given [`SemanticModel`].
pub(crate) fn new(semantic: &'a SemanticModel<'a>) -> Self {
Self { semantic }
/// Initialize a [`Resolver`] with a given [`ModuleDb`].
pub(crate) fn new(db: &'a ModuleDb) -> Self {
Self { db }
}
/// Resolve the [`CollectedImport`] into a [`FilePath`].
pub(crate) fn resolve(&self, import: CollectedImport) -> Option<&'a FilePath> {
match import {
CollectedImport::Import(import) => self
.semantic
.resolve_module(import)
.map(|module| module.file().path(self.semantic.db())),
CollectedImport::Import(import) => {
resolve_module(self.db, import).map(|module| module.file().path(self.db))
}
CollectedImport::ImportFrom(import) => {
// Attempt to resolve the member (e.g., given `from foo import bar`, look for `foo.bar`).
let parent = import.parent();
self.semantic
.resolve_module(import)
.map(|module| module.file().path(self.semantic.db()))
resolve_module(self.db, import)
.map(|module| module.file().path(self.db))
.or_else(|| {
// Attempt to resolve the module (e.g., given `from foo import bar`, look for `foo`).
self.semantic
.resolve_module(parent?)
.map(|module| module.file().path(self.semantic.db()))
resolve_module(self.db, parent?).map(|module| module.file().path(self.db))
})
}
}

View File

@@ -1,6 +1,6 @@
[package]
name = "ruff_linter"
version = "0.6.6"
version = "0.6.8"
publish = false
authors = { workspace = true }
edition = { workspace = true }

View File

@@ -33,3 +33,19 @@ age = ages.get("Cat", None)
# OK
ages = ["Tom", "Maria", "Dog"]
age = ages.get("Cat", None)
# SIM910
def foo(**kwargs):
a = kwargs.get('a', None)
# SIM910
def foo(some_dict: dict):
a = some_dict.get('a', None)
# OK
def foo(some_other: object):
a = some_other.get('a', None)
# OK
def foo(some_other):
a = some_other.get('a', None)

View File

@@ -0,0 +1,120 @@
# ------------------
# less than examples
# ------------------
a = int(input())
b = int(input())
c = int(input())
if a < b and b < c: # [boolean-chained-comparison]
pass
a = int(input())
b = int(input())
c = int(input())
if a < b and b <= c: # [boolean-chained-comparison]
pass
a = int(input())
b = int(input())
c = int(input())
if a <= b and b < c: # [boolean-chained-comparison]
pass
a = int(input())
b = int(input())
c = int(input())
if a <= b and b <= c: # [boolean-chained-comparison]
pass
# ---------------------
# greater than examples
# ---------------------
a = int(input())
b = int(input())
c = int(input())
if a > b and b > c: # [boolean-chained-comparison]
pass
a = int(input())
b = int(input())
c = int(input())
if a >= b and b > c: # [boolean-chained-comparison]
pass
a = int(input())
b = int(input())
c = int(input())
if a > b and b >= c: # [boolean-chained-comparison]
pass
a = int(input())
b = int(input())
c = int(input())
if a >= b and b >= c: # [boolean-chained-comparison]
pass
# -----------------------
# multiple fixes examples
# -----------------------
a = int(input())
b = int(input())
c = int(input())
d = int(input())
if a < b and b < c and c < d: # [boolean-chained-comparison]
pass
a = int(input())
b = int(input())
c = int(input())
d = int(input())
e = int(input())
if a < b and b < c and c < d and d < e: # [boolean-chained-comparison]
pass
# ------------
# bad examples
# ------------
a = int(input())
b = int(input())
c = int(input())
if a > b or b > c:
pass
a = int(input())
b = int(input())
c = int(input())
if a > b and b in (1, 2):
pass
a = int(input())
b = int(input())
c = int(input())
if a < b and b > c:
pass
a = int(input())
b = int(input())
c = int(input())
if a < b and b >= c:
pass
a = int(input())
b = int(input())
c = int(input())
if a <= b and b > c:
pass
a = int(input())
b = int(input())
c = int(input())
if a <= b and b >= c:
pass
a = int(input())
b = int(input())
c = int(input())
if a > b and b < c:
pass

View File

@@ -81,3 +81,85 @@ def _serve_method(fn):
.markup(highlight=args.region)
):
yield h
# UP028: The later loop variable is not a reference to the earlier loop variable
def f():
for x in (1, 2, 3):
yield x
# Shadowing with another loop
for x in (1, 2, 3):
yield x
# UP028: The exception binding is not a reference to the loop variable
def f():
for x in (1, 2, 3):
yield x
# Shadowing with an `except`
try:
pass
except Exception as x:
pass
# UP028: The context binding is not a reference to the loop variable
def f():
for x in (1, 2, 3):
yield x
# Shadowing with `with`
with contextlib.nullcontext() as x:
pass
# UP028: The type annotation binding is not a reference to the loop variable
def f():
for x in (1, 2, 3):
yield x
# Shadowing with a type annotation
x: int
# OK: The `del` statement requires the loop variable to exist
def f():
for x in (1, 2, 3):
yield x
# Shadowing with `del`
del x
# UP028: The exception bindings are not a reference to the loop variable
def f():
for x in (1, 2, 3):
yield x
# Shadowing with multiple `except` blocks
try:
pass
except Exception as x:
pass
try:
pass
except Exception as x:
pass
# OK: The `del` statement requires the loop variable to exist
def f():
for x in (1, 2, 3):
yield x
# Shadowing with multiple `del` statements
del x
del x
# OK: The `print` call requires the loop variable to exist
def f():
for x in (1, 2, 3):
yield x
# Shadowing with a reference and non-reference binding
print(x)
try:
pass
except Exception as x:
pass

View File

@@ -1537,6 +1537,9 @@ pub(crate) fn expression(expr: &Expr, checker: &mut Checker) {
}
}
Expr::BoolOp(bool_op) => {
if checker.enabled(Rule::BooleanChainedComparison) {
pylint::rules::boolean_chained_comparison(checker, bool_op);
}
if checker.enabled(Rule::MultipleStartsEndsWith) {
flake8_pie::rules::multiple_starts_ends_with(checker, expr);
}

View File

@@ -257,6 +257,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Pylint, "R1714") => (RuleGroup::Stable, rules::pylint::rules::RepeatedEqualityComparison),
(Pylint, "R1722") => (RuleGroup::Stable, rules::pylint::rules::SysExitAlias),
(Pylint, "R1730") => (RuleGroup::Stable, rules::pylint::rules::IfStmtMinMax),
(Pylint, "R1716") => (RuleGroup::Preview, rules::pylint::rules::BooleanChainedComparison),
(Pylint, "R1733") => (RuleGroup::Preview, rules::pylint::rules::UnnecessaryDictIndexLookup),
(Pylint, "R1736") => (RuleGroup::Stable, rules::pylint::rules::UnnecessaryListIndexLookup),
(Pylint, "R2004") => (RuleGroup::Stable, rules::pylint::rules::MagicValueComparison),

View File

@@ -47,7 +47,7 @@ impl AlwaysFixableViolation for UnnecessaryGeneratorSet {
#[derive_message_formats]
fn message(&self) -> String {
if self.short_circuit {
format!("Unnecessary generator (rewrite using `set()`")
format!("Unnecessary generator (rewrite using `set()`)")
} else {
format!("Unnecessary generator (rewrite as a `set` comprehension)")
}

View File

@@ -103,7 +103,7 @@ C401.py:12:17: C401 [*] Unnecessary generator (rewrite as a `set` comprehension)
14 14 |
15 15 | # Short-circuit case, combine with C416 and should produce x = set(range(3))
C401.py:16:5: C401 [*] Unnecessary generator (rewrite using `set()`
C401.py:16:5: C401 [*] Unnecessary generator (rewrite using `set()`)
|
15 | # Short-circuit case, combine with C416 and should produce x = set(range(3))
16 | x = set(x for x in range(3))
@@ -123,7 +123,7 @@ C401.py:16:5: C401 [*] Unnecessary generator (rewrite using `set()`
18 18 | x for x in range(3)
19 19 | )
C401.py:17:5: C401 [*] Unnecessary generator (rewrite using `set()`
C401.py:17:5: C401 [*] Unnecessary generator (rewrite using `set()`)
|
15 | # Short-circuit case, combine with C416 and should produce x = set(range(3))
16 | x = set(x for x in range(3))
@@ -149,7 +149,7 @@ C401.py:17:5: C401 [*] Unnecessary generator (rewrite using `set()`
21 19 | print(f"{set(a for a in 'abc') - set(a for a in 'ab')}")
22 20 | print(f"{ set(a for a in 'abc') - set(a for a in 'ab') }")
C401.py:20:16: C401 [*] Unnecessary generator (rewrite using `set()`
C401.py:20:16: C401 [*] Unnecessary generator (rewrite using `set()`)
|
18 | x for x in range(3)
19 | )
@@ -170,7 +170,7 @@ C401.py:20:16: C401 [*] Unnecessary generator (rewrite using `set()`
22 22 | print(f"{ set(a for a in 'abc') - set(a for a in 'ab') }")
23 23 |
C401.py:21:10: C401 [*] Unnecessary generator (rewrite using `set()`
C401.py:21:10: C401 [*] Unnecessary generator (rewrite using `set()`)
|
19 | )
20 | print(f"Hello {set(a for a in range(3))} World")
@@ -190,7 +190,7 @@ C401.py:21:10: C401 [*] Unnecessary generator (rewrite using `set()`
23 23 |
24 24 |
C401.py:21:34: C401 [*] Unnecessary generator (rewrite using `set()`
C401.py:21:34: C401 [*] Unnecessary generator (rewrite using `set()`)
|
19 | )
20 | print(f"Hello {set(a for a in range(3))} World")
@@ -210,7 +210,7 @@ C401.py:21:34: C401 [*] Unnecessary generator (rewrite using `set()`
23 23 |
24 24 |
C401.py:22:11: C401 [*] Unnecessary generator (rewrite using `set()`
C401.py:22:11: C401 [*] Unnecessary generator (rewrite using `set()`)
|
20 | print(f"Hello {set(a for a in range(3))} World")
21 | print(f"{set(a for a in 'abc') - set(a for a in 'ab')}")
@@ -229,7 +229,7 @@ C401.py:22:11: C401 [*] Unnecessary generator (rewrite using `set()`
24 24 |
25 25 | # Not built-in set.
C401.py:22:35: C401 [*] Unnecessary generator (rewrite using `set()`
C401.py:22:35: C401 [*] Unnecessary generator (rewrite using `set()`)
|
20 | print(f"Hello {set(a for a in range(3))} World")
21 | print(f"{set(a for a in 'abc') - set(a for a in 'ab')}")

View File

@@ -119,4 +119,44 @@ SIM910.py:31:7: SIM910 [*] Use `ages.get("Cat")` instead of `ages.get("Cat", Non
33 33 | # OK
34 34 | ages = ["Tom", "Maria", "Dog"]
SIM910.py:39:9: SIM910 [*] Use `kwargs.get('a')` instead of `kwargs.get('a', None)`
|
37 | # SIM910
38 | def foo(**kwargs):
39 | a = kwargs.get('a', None)
| ^^^^^^^^^^^^^^^^^^^^^ SIM910
40 |
41 | # SIM910
|
= help: Replace `kwargs.get('a', None)` with `kwargs.get('a')`
Safe fix
36 36 |
37 37 | # SIM910
38 38 | def foo(**kwargs):
39 |- a = kwargs.get('a', None)
39 |+ a = kwargs.get('a')
40 40 |
41 41 | # SIM910
42 42 | def foo(some_dict: dict):
SIM910.py:43:9: SIM910 [*] Use `some_dict.get('a')` instead of `some_dict.get('a', None)`
|
41 | # SIM910
42 | def foo(some_dict: dict):
43 | a = some_dict.get('a', None)
| ^^^^^^^^^^^^^^^^^^^^^^^^ SIM910
44 |
45 | # OK
|
= help: Replace `some_dict.get('a', None)` with `some_dict.get('a')`
Safe fix
40 40 |
41 41 | # SIM910
42 42 | def foo(some_dict: dict):
43 |- a = some_dict.get('a', None)
43 |+ a = some_dict.get('a')
44 44 |
45 45 | # OK
46 46 | def foo(some_other: object):

View File

@@ -36,6 +36,10 @@ mod tests {
#[test_case(Rule::BadStringFormatType, Path::new("bad_string_format_type.py"))]
#[test_case(Rule::BidirectionalUnicode, Path::new("bidirectional_unicode.py"))]
#[test_case(Rule::BinaryOpException, Path::new("binary_op_exception.py"))]
#[test_case(
Rule::BooleanChainedComparison,
Path::new("boolean_chained_comparison.py")
)]
#[test_case(Rule::CollapsibleElseIf, Path::new("collapsible_else_if.py"))]
#[test_case(Rule::CompareToEmptyString, Path::new("compare_to_empty_string.py"))]
#[test_case(Rule::ComparisonOfConstant, Path::new("comparison_of_constant.py"))]

View File

@@ -0,0 +1,120 @@
use itertools::Itertools;
use ruff_diagnostics::{AlwaysFixableViolation, Diagnostic, Edit, Fix};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::{name::Name, BoolOp, CmpOp, Expr, ExprBoolOp, ExprCompare};
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
/// ## What it does
/// Check for chained boolean operations that can be simplified.
///
/// ## Why is this bad?
/// Refactoring the code will improve readability for these cases.
///
/// ## Example
///
/// ```python
/// a = int(input())
/// b = int(input())
/// c = int(input())
/// if a < b and b < c:
/// pass
/// ```
///
/// Use instead:
///
/// ```python
/// a = int(input())
/// b = int(input())
/// c = int(input())
/// if a < b < c:
/// pass
/// ```
#[violation]
pub struct BooleanChainedComparison {
// The name of the variable shared by the two comparisons
// (e.g. `b` in `a < b and b < c`). Stored for potential use in messages.
variable: Name,
}
impl AlwaysFixableViolation for BooleanChainedComparison {
#[derive_message_formats]
fn message(&self) -> String {
// Diagnostic text shown at the offending range; `derive_message_formats`
// requires the literal `format!` invocation here.
format!("Contains chained boolean comparison that can be simplified")
}
fn fix_title(&self) -> String {
// Title displayed for the autofix suggestion.
"Use a single compare expression".to_string()
}
}
/// PLR1716
///
/// Flags `a < b and b < c`-style conjunctions that can be collapsed into a single
/// chained comparison (`a < b < c`), emitting one diagnostic (with a safe fix)
/// per adjacent simplifiable pair.
pub(crate) fn boolean_chained_comparison(checker: &mut Checker, expr_bool_op: &ExprBoolOp) {
// early exit for non `and` boolean operations
if expr_bool_op.op != BoolOp::And {
return;
}
// early exit when not all expressions are compare expressions
if !expr_bool_op.values.iter().all(Expr::is_compare_expr) {
return;
}
// retrieve all compare statements from expression
// (unwrap is safe: the `all(Expr::is_compare_expr)` check above guarantees it)
let compare_expressions = expr_bool_op
.values
.iter()
.map(|stmt| stmt.as_compare_expr().unwrap());
// Examine each adjacent pair of comparisons in source order.
let diagnostics = compare_expressions
.tuple_windows()
// Keep only pairs whose operators point the same way (both `<`/`<=` or both `>`/`>=`).
.filter(|(left_compare, right_compare)| {
are_compare_expr_simplifiable(left_compare, right_compare)
})
.filter_map(|(left_compare, right_compare)| {
// The right-hand operand of the left comparison must be a plain name...
let Expr::Name(left_compare_right) = left_compare.comparators.first()? else {
return None;
};
// ...as must the left-hand operand of the right comparison...
let Expr::Name(right_compare_left) = &*right_compare.left else {
return None;
};
// ...and they must refer to the same identifier to form a chain.
if left_compare_right.id() != right_compare_left.id() {
return None;
}
// Collapse `b and b` (the span from the end of the first comparison's RHS
// through the start of the second comparison's LHS) into a single `b`.
let edit = Edit::range_replacement(
left_compare_right.id().to_string(),
TextRange::new(left_compare_right.start(), right_compare_left.end()),
);
Some(
Diagnostic::new(
BooleanChainedComparison {
variable: left_compare_right.id().clone(),
},
// Report over the full `a < b and b < c` span.
TextRange::new(left_compare.start(), right_compare.end()),
)
.with_fix(Fix::safe_edit(edit)),
)
});
checker.diagnostics.extend(diagnostics);
}
/// Returns `true` if `left` and `right` can be merged into one chained comparison.
///
/// Both comparisons must use exactly one operator, and the two operators must point
/// in the same direction: both less-than-like (`<`, `<=`) or both greater-than-like
/// (`>`, `>=`).
fn are_compare_expr_simplifiable(left: &ExprCompare, right: &ExprCompare) -> bool {
// A multi-operator comparison (e.g. `a < b < c`) is already chained; bail out.
let [left_op] = &*left.ops else {
return false;
};
let [right_op] = &*right.ops else {
return false;
};
let both_less =
matches!(left_op, CmpOp::Lt | CmpOp::LtE) && matches!(right_op, CmpOp::Lt | CmpOp::LtE);
let both_greater =
matches!(left_op, CmpOp::Gt | CmpOp::GtE) && matches!(right_op, CmpOp::Gt | CmpOp::GtE);
both_less || both_greater
}

View File

@@ -9,6 +9,7 @@ pub(crate) use bad_string_format_character::BadStringFormatCharacter;
pub(crate) use bad_string_format_type::*;
pub(crate) use bidirectional_unicode::*;
pub(crate) use binary_op_exception::*;
pub(crate) use boolean_chained_comparison::*;
pub(crate) use collapsible_else_if::*;
pub(crate) use compare_to_empty_string::*;
pub(crate) use comparison_of_constant::*;
@@ -112,6 +113,7 @@ pub(crate) mod bad_string_format_character;
mod bad_string_format_type;
mod bidirectional_unicode;
mod binary_op_exception;
mod boolean_chained_comparison;
mod collapsible_else_if;
mod compare_to_empty_string;
mod comparison_of_constant;

View File

@@ -0,0 +1,262 @@
---
source: crates/ruff_linter/src/rules/pylint/mod.rs
---
boolean_chained_comparison.py:8:4: PLR1716 [*] Contains chained boolean comparison that can be simplified
|
6 | b = int(input())
7 | c = int(input())
8 | if a < b and b < c: # [boolean-chained-comparison]
| ^^^^^^^^^^^^^^^ PLR1716
9 | pass
|
= help: Use a single compare expression
Safe fix
5 5 | a = int(input())
6 6 | b = int(input())
7 7 | c = int(input())
8 |-if a < b and b < c: # [boolean-chained-comparison]
8 |+if a < b < c: # [boolean-chained-comparison]
9 9 | pass
10 10 |
11 11 | a = int(input())
boolean_chained_comparison.py:14:4: PLR1716 [*] Contains chained boolean comparison that can be simplified
|
12 | b = int(input())
13 | c = int(input())
14 | if a < b and b <= c: # [boolean-chained-comparison]
| ^^^^^^^^^^^^^^^^ PLR1716
15 | pass
|
= help: Use a single compare expression
Safe fix
11 11 | a = int(input())
12 12 | b = int(input())
13 13 | c = int(input())
14 |-if a < b and b <= c: # [boolean-chained-comparison]
14 |+if a < b <= c: # [boolean-chained-comparison]
15 15 | pass
16 16 |
17 17 | a = int(input())
boolean_chained_comparison.py:20:4: PLR1716 [*] Contains chained boolean comparison that can be simplified
|
18 | b = int(input())
19 | c = int(input())
20 | if a <= b and b < c: # [boolean-chained-comparison]
| ^^^^^^^^^^^^^^^^ PLR1716
21 | pass
|
= help: Use a single compare expression
Safe fix
17 17 | a = int(input())
18 18 | b = int(input())
19 19 | c = int(input())
20 |-if a <= b and b < c: # [boolean-chained-comparison]
20 |+if a <= b < c: # [boolean-chained-comparison]
21 21 | pass
22 22 |
23 23 | a = int(input())
boolean_chained_comparison.py:26:4: PLR1716 [*] Contains chained boolean comparison that can be simplified
|
24 | b = int(input())
25 | c = int(input())
26 | if a <= b and b <= c: # [boolean-chained-comparison]
| ^^^^^^^^^^^^^^^^^ PLR1716
27 | pass
|
= help: Use a single compare expression
Safe fix
23 23 | a = int(input())
24 24 | b = int(input())
25 25 | c = int(input())
26 |-if a <= b and b <= c: # [boolean-chained-comparison]
26 |+if a <= b <= c: # [boolean-chained-comparison]
27 27 | pass
28 28 |
29 29 | # ---------------------
boolean_chained_comparison.py:36:4: PLR1716 [*] Contains chained boolean comparison that can be simplified
|
34 | b = int(input())
35 | c = int(input())
36 | if a > b and b > c: # [boolean-chained-comparison]
| ^^^^^^^^^^^^^^^ PLR1716
37 | pass
|
= help: Use a single compare expression
Safe fix
33 33 | a = int(input())
34 34 | b = int(input())
35 35 | c = int(input())
36 |-if a > b and b > c: # [boolean-chained-comparison]
36 |+if a > b > c: # [boolean-chained-comparison]
37 37 | pass
38 38 |
39 39 | a = int(input())
boolean_chained_comparison.py:42:4: PLR1716 [*] Contains chained boolean comparison that can be simplified
|
40 | b = int(input())
41 | c = int(input())
42 | if a >= b and b > c: # [boolean-chained-comparison]
| ^^^^^^^^^^^^^^^^ PLR1716
43 | pass
|
= help: Use a single compare expression
Safe fix
39 39 | a = int(input())
40 40 | b = int(input())
41 41 | c = int(input())
42 |-if a >= b and b > c: # [boolean-chained-comparison]
42 |+if a >= b > c: # [boolean-chained-comparison]
43 43 | pass
44 44 |
45 45 | a = int(input())
boolean_chained_comparison.py:48:4: PLR1716 [*] Contains chained boolean comparison that can be simplified
|
46 | b = int(input())
47 | c = int(input())
48 | if a > b and b >= c: # [boolean-chained-comparison]
| ^^^^^^^^^^^^^^^^ PLR1716
49 | pass
|
= help: Use a single compare expression
Safe fix
45 45 | a = int(input())
46 46 | b = int(input())
47 47 | c = int(input())
48 |-if a > b and b >= c: # [boolean-chained-comparison]
48 |+if a > b >= c: # [boolean-chained-comparison]
49 49 | pass
50 50 |
51 51 | a = int(input())
boolean_chained_comparison.py:54:4: PLR1716 [*] Contains chained boolean comparison that can be simplified
|
52 | b = int(input())
53 | c = int(input())
54 | if a >= b and b >= c: # [boolean-chained-comparison]
| ^^^^^^^^^^^^^^^^^ PLR1716
55 | pass
|
= help: Use a single compare expression
Safe fix
51 51 | a = int(input())
52 52 | b = int(input())
53 53 | c = int(input())
54 |-if a >= b and b >= c: # [boolean-chained-comparison]
54 |+if a >= b >= c: # [boolean-chained-comparison]
55 55 | pass
56 56 |
57 57 | # -----------------------
boolean_chained_comparison.py:65:4: PLR1716 [*] Contains chained boolean comparison that can be simplified
|
63 | c = int(input())
64 | d = int(input())
65 | if a < b and b < c and c < d: # [boolean-chained-comparison]
| ^^^^^^^^^^^^^^^ PLR1716
66 | pass
|
= help: Use a single compare expression
Safe fix
62 62 | b = int(input())
63 63 | c = int(input())
64 64 | d = int(input())
65 |-if a < b and b < c and c < d: # [boolean-chained-comparison]
65 |+if a < b < c and c < d: # [boolean-chained-comparison]
66 66 | pass
67 67 |
68 68 | a = int(input())
boolean_chained_comparison.py:65:14: PLR1716 [*] Contains chained boolean comparison that can be simplified
|
63 | c = int(input())
64 | d = int(input())
65 | if a < b and b < c and c < d: # [boolean-chained-comparison]
| ^^^^^^^^^^^^^^^ PLR1716
66 | pass
|
= help: Use a single compare expression
Safe fix
62 62 | b = int(input())
63 63 | c = int(input())
64 64 | d = int(input())
65 |-if a < b and b < c and c < d: # [boolean-chained-comparison]
65 |+if a < b and b < c < d: # [boolean-chained-comparison]
66 66 | pass
67 67 |
68 68 | a = int(input())
boolean_chained_comparison.py:73:4: PLR1716 [*] Contains chained boolean comparison that can be simplified
|
71 | d = int(input())
72 | e = int(input())
73 | if a < b and b < c and c < d and d < e: # [boolean-chained-comparison]
| ^^^^^^^^^^^^^^^ PLR1716
74 | pass
|
= help: Use a single compare expression
Safe fix
70 70 | c = int(input())
71 71 | d = int(input())
72 72 | e = int(input())
73 |-if a < b and b < c and c < d and d < e: # [boolean-chained-comparison]
73 |+if a < b < c and c < d and d < e: # [boolean-chained-comparison]
74 74 | pass
75 75 |
76 76 | # ------------
boolean_chained_comparison.py:73:14: PLR1716 [*] Contains chained boolean comparison that can be simplified
|
71 | d = int(input())
72 | e = int(input())
73 | if a < b and b < c and c < d and d < e: # [boolean-chained-comparison]
| ^^^^^^^^^^^^^^^ PLR1716
74 | pass
|
= help: Use a single compare expression
Safe fix
70 70 | c = int(input())
71 71 | d = int(input())
72 72 | e = int(input())
73 |-if a < b and b < c and c < d and d < e: # [boolean-chained-comparison]
73 |+if a < b and b < c < d and d < e: # [boolean-chained-comparison]
74 74 | pass
75 75 |
76 76 | # ------------
boolean_chained_comparison.py:73:24: PLR1716 [*] Contains chained boolean comparison that can be simplified
|
71 | d = int(input())
72 | e = int(input())
73 | if a < b and b < c and c < d and d < e: # [boolean-chained-comparison]
| ^^^^^^^^^^^^^^^ PLR1716
74 | pass
|
= help: Use a single compare expression
Safe fix
70 70 | c = int(input())
71 71 | d = int(input())
72 72 | e = int(input())
73 |-if a < b and b < c and c < d and d < e: # [boolean-chained-comparison]
73 |+if a < b and b < c and c < d < e: # [boolean-chained-comparison]
74 74 | pass
75 75 |
76 76 | # ------------

View File

@@ -101,7 +101,9 @@ pub(crate) fn yield_in_for_loop(checker: &mut Checker, stmt_for: &ast::StmtFor)
.semantic()
.current_scope()
.get_all(name.id.as_str())
.any(|binding_id| {
// Skip unbound bindings like `del x`
.find(|&id| !checker.semantic().binding(id).is_unbound())
.is_some_and(|binding_id| {
let binding = checker.semantic().binding(binding_id);
binding.references.iter().any(|reference_id| {
checker.semantic().reference(*reference_id).range() != name.range()

View File

@@ -298,5 +298,102 @@ UP028_0.py:79:5: UP028 [*] Replace `yield` over `for` loop with `yield from`
82 |- ):
83 |- yield h
82 |+ )
84 83 |
85 84 |
86 85 | # UP028: The later loop variable is not a reference to the earlier loop variable
UP028_0.py:97:5: UP028 [*] Replace `yield` over `for` loop with `yield from`
|
95 | # UP028: The exception binding is not a reference to the loop variable
96 | def f():
97 | for x in (1, 2, 3):
| _____^
98 | | yield x
| |_______________^ UP028
99 | # Shadowing with an `except`
100 | try:
|
= help: Replace with `yield from`
Unsafe fix
94 94 |
95 95 | # UP028: The exception binding is not a reference to the loop variable
96 96 | def f():
97 |- for x in (1, 2, 3):
98 |- yield x
97 |+ yield from (1, 2, 3)
99 98 | # Shadowing with an `except`
100 99 | try:
101 100 | pass
UP028_0.py:108:5: UP028 [*] Replace `yield` over `for` loop with `yield from`
|
106 | # UP028: The context binding is not a reference to the loop variable
107 | def f():
108 | for x in (1, 2, 3):
| _____^
109 | | yield x
| |_______________^ UP028
110 | # Shadowing with `with`
111 | with contextlib.nullcontext() as x:
|
= help: Replace with `yield from`
Unsafe fix
105 105 |
106 106 | # UP028: The context binding is not a reference to the loop variable
107 107 | def f():
108 |- for x in (1, 2, 3):
109 |- yield x
108 |+ yield from (1, 2, 3)
110 109 | # Shadowing with `with`
111 110 | with contextlib.nullcontext() as x:
112 111 | pass
UP028_0.py:118:5: UP028 [*] Replace `yield` over `for` loop with `yield from`
|
116 | # UP028: The type annotation binding is not a reference to the loop variable
117 | def f():
118 | for x in (1, 2, 3):
| _____^
119 | | yield x
| |_______________^ UP028
120 | # Shadowing with a type annotation
121 | x: int
|
= help: Replace with `yield from`
Unsafe fix
115 115 |
116 116 | # UP028: The type annotation binding is not a reference to the loop variable
117 117 | def f():
118 |- for x in (1, 2, 3):
119 |- yield x
118 |+ yield from (1, 2, 3)
120 119 | # Shadowing with a type annotation
121 120 | x: int
122 121 |
UP028_0.py:134:5: UP028 [*] Replace `yield` over `for` loop with `yield from`
|
132 | # UP028: The exception bindings are not a reference to the loop variable
133 | def f():
134 | for x in (1, 2, 3):
| _____^
135 | | yield x
| |_______________^ UP028
136 | # Shadowing with multiple `except` blocks
137 | try:
|
= help: Replace with `yield from`
Unsafe fix
131 131 |
132 132 | # UP028: The exception bindings are not a reference to the loop variable
133 133 | def f():
134 |- for x in (1, 2, 3):
135 |- yield x
134 |+ yield from (1, 2, 3)
136 135 | # Shadowing with multiple `except` blocks
137 136 | try:
138 137 | pass

View File

@@ -15,6 +15,8 @@
//! an implicit concatenation of string literals, as these expressions are considered to
//! have the same shape in that they evaluate to the same value.
use std::borrow::Cow;
use crate as ast;
#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)]
@@ -511,7 +513,7 @@ impl<'a> From<&'a ast::ExceptHandler> for ComparableExceptHandler<'a> {
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum ComparableFStringElement<'a> {
Literal(&'a str),
Literal(Cow<'a, str>),
FStringExpressionElement(FStringExpressionElement<'a>),
}
@@ -527,23 +529,34 @@ impl<'a> From<&'a ast::FStringElement> for ComparableFStringElement<'a> {
fn from(fstring_element: &'a ast::FStringElement) -> Self {
match fstring_element {
ast::FStringElement::Literal(ast::FStringLiteralElement { value, .. }) => {
Self::Literal(value)
}
ast::FStringElement::Expression(formatted_value) => {
Self::FStringExpressionElement(FStringExpressionElement {
expression: (&formatted_value.expression).into(),
debug_text: formatted_value.debug_text.as_ref(),
conversion: formatted_value.conversion,
format_spec: formatted_value
.format_spec
.as_ref()
.map(|spec| spec.elements.iter().map(Into::into).collect()),
})
Self::Literal(value.as_ref().into())
}
ast::FStringElement::Expression(formatted_value) => formatted_value.into(),
}
}
}
impl<'a> From<&'a ast::FStringExpressionElement> for ComparableFStringElement<'a> {
    /// Converts an f-string replacement field into its comparable form.
    /// The source `range` is deliberately ignored so that location does not
    /// affect structural equality.
    fn from(element: &'a ast::FStringExpressionElement) -> Self {
        let format_spec = element
            .format_spec
            .as_ref()
            .map(|spec| spec.elements.iter().map(Into::into).collect());
        Self::FStringExpressionElement(FStringExpressionElement {
            expression: (&element.expression).into(),
            debug_text: element.debug_text.as_ref(),
            conversion: element.conversion,
            format_spec,
        })
    }
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparableElifElseClause<'a> {
test: Option<ComparableExpr<'a>>,
@@ -597,28 +610,82 @@ impl<'a> From<ast::LiteralExpressionRef<'a>> for ComparableLiteral<'a> {
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparableFString<'a> {
elements: Vec<ComparableFStringElement<'a>>,
elements: Box<[ComparableFStringElement<'a>]>,
}
impl<'a> From<&'a ast::FString> for ComparableFString<'a> {
fn from(fstring: &'a ast::FString) -> Self {
Self {
elements: fstring.elements.iter().map(Into::into).collect(),
impl<'a> From<&'a ast::FStringValue> for ComparableFString<'a> {
// The approach below is somewhat complicated, so it may
// require some justification.
//
// Suppose given an f-string of the form
// `f"{foo!r} one" " and two " f" and three {bar!s}"`
// This decomposes as:
// - An `FStringPart::FString`, `f"{foo!r} one"` with elements
// - `FStringElement::Expression` encoding `{foo!r}`
// - `FStringElement::Literal` encoding " one"
// - An `FStringPart::Literal` capturing `" and two "`
// - An `FStringPart::FString`, `f" and three {bar!s}"` with elements
// - `FStringElement::Literal` encoding " and three "
// - `FStringElement::Expression` encoding `{bar!s}`
//
// We would like to extract from this a vector of (comparable) f-string
// _elements_ which alternate between expression elements and literal
// elements. In order to do so, we need to concatenate adjacent string
// literals. String literals may be separated for two reasons: either
// they appear in adjacent string literal parts, or else a string literal
// part is adjacent to a string literal _element_ inside of an f-string part.
fn from(value: &'a ast::FStringValue) -> Self {
#[derive(Default)]
struct Collector<'a> {
elements: Vec<ComparableFStringElement<'a>>,
}
}
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum ComparableFStringPart<'a> {
Literal(ComparableStringLiteral<'a>),
FString(ComparableFString<'a>),
}
impl<'a> Collector<'a> {
// The logic for concatenating adjacent string literals
// occurs here, implicitly: when we encounter a sequence
// of string literals, the first gets pushed to the
// `elements` vector, while subsequent strings
// are concatenated onto this top string.
fn push_literal(&mut self, literal: &'a str) {
if let Some(ComparableFStringElement::Literal(existing_literal)) =
self.elements.last_mut()
{
existing_literal.to_mut().push_str(literal);
} else {
self.elements
.push(ComparableFStringElement::Literal(literal.into()));
}
}
impl<'a> From<&'a ast::FStringPart> for ComparableFStringPart<'a> {
fn from(f_string_part: &'a ast::FStringPart) -> Self {
match f_string_part {
ast::FStringPart::Literal(string_literal) => Self::Literal(string_literal.into()),
ast::FStringPart::FString(f_string) => Self::FString(f_string.into()),
fn push_expression(&mut self, expression: &'a ast::FStringExpressionElement) {
self.elements.push(expression.into());
}
}
let mut collector = Collector::default();
for part in value {
match part {
ast::FStringPart::Literal(string_literal) => {
collector.push_literal(&string_literal.value);
}
ast::FStringPart::FString(fstring) => {
for element in &fstring.elements {
match element {
ast::FStringElement::Literal(literal) => {
collector.push_literal(&literal.value);
}
ast::FStringElement::Expression(expression) => {
collector.push_expression(expression);
}
}
}
}
}
}
Self {
elements: collector.elements.into_boxed_slice(),
}
}
}
@@ -638,13 +705,13 @@ impl<'a> From<&'a ast::StringLiteral> for ComparableStringLiteral<'a> {
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ComparableBytesLiteral<'a> {
value: &'a [u8],
value: Cow<'a, [u8]>,
}
impl<'a> From<&'a ast::BytesLiteral> for ComparableBytesLiteral<'a> {
fn from(bytes_literal: &'a ast::BytesLiteral) -> Self {
Self {
value: &bytes_literal.value,
value: Cow::Borrowed(&bytes_literal.value),
}
}
}
@@ -775,17 +842,17 @@ pub struct ExprFStringExpressionElement<'a> {
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprFString<'a> {
parts: Vec<ComparableFStringPart<'a>>,
value: ComparableFString<'a>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprStringLiteral<'a> {
parts: Vec<ComparableStringLiteral<'a>>,
value: ComparableStringLiteral<'a>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprBytesLiteral<'a> {
parts: Vec<ComparableBytesLiteral<'a>>,
value: ComparableBytesLiteral<'a>,
}
#[derive(Debug, PartialEq, Eq, Hash)]
@@ -1019,17 +1086,21 @@ impl<'a> From<&'a ast::Expr> for ComparableExpr<'a> {
}),
ast::Expr::FString(ast::ExprFString { value, range: _ }) => {
Self::FString(ExprFString {
parts: value.iter().map(Into::into).collect(),
value: value.into(),
})
}
ast::Expr::StringLiteral(ast::ExprStringLiteral { value, range: _ }) => {
Self::StringLiteral(ExprStringLiteral {
parts: value.iter().map(Into::into).collect(),
value: ComparableStringLiteral {
value: value.to_str(),
},
})
}
ast::Expr::BytesLiteral(ast::ExprBytesLiteral { value, range: _ }) => {
Self::BytesLiteral(ExprBytesLiteral {
parts: value.iter().map(Into::into).collect(),
value: ComparableBytesLiteral {
value: Cow::from(value),
},
})
}
ast::Expr::NumberLiteral(ast::ExprNumberLiteral { value, range: _ }) => {

View File

@@ -1,5 +1,6 @@
#![allow(clippy::derive_partial_eq_without_eq)]
use std::borrow::Cow;
use std::fmt;
use std::fmt::Debug;
use std::iter::FusedIterator;
@@ -2186,6 +2187,22 @@ impl PartialEq<[u8]> for BytesLiteralValue {
}
}
impl<'a> From<&'a BytesLiteralValue> for Cow<'a, [u8]> {
    /// Returns the byte content of the literal: borrowed for a single literal,
    /// owned (flattened into one buffer) for an implicit concatenation.
    fn from(value: &'a BytesLiteralValue) -> Self {
        match &value.inner {
            BytesLiteralValueInner::Single(BytesLiteral {
                value: bytes_value, ..
            }) => Cow::from(bytes_value.as_ref()),
            BytesLiteralValueInner::Concatenated(bytes_literal_vec) => Cow::Owned(
                bytes_literal_vec
                    .iter()
                    // Iterate the raw bytes directly instead of materializing a
                    // temporary `Vec` per literal (`to_vec()` allocated one for
                    // every part of the concatenation).
                    .flat_map(|bytes_literal| bytes_literal.value.iter().copied())
                    .collect::<Vec<u8>>(),
            ),
        }
    }
}
/// An internal representation of [`BytesLiteralValue`].
#[derive(Clone, Debug, PartialEq)]
enum BytesLiteralValueInner {

View File

@@ -0,0 +1,47 @@
use ruff_python_ast::comparable::ComparableExpr;
use ruff_python_parser::{parse_expression, ParseError};
#[test]
fn concatenated_strings_compare_equal() -> Result<(), ParseError> {
    // An implicit concatenation of string literals must compare equal to the
    // single literal holding the same text.
    let concatenated = parse_expression(r#"'a' 'b' r'\n raw'"#)?;
    let single = parse_expression(r#"'ab\\n raw'"#)?;
    assert_eq!(
        ComparableExpr::from(concatenated.expr()),
        ComparableExpr::from(single.expr())
    );
    Ok(())
}
#[test]
fn concatenated_bytes_compare_equal() -> Result<(), ParseError> {
    // Implicitly concatenated bytes literals must compare equal to the single
    // literal holding the same bytes.
    let concatenated = parse_expression(r#"b'a' b'b'"#)?;
    let single = parse_expression(r#"b'ab'"#)?;
    assert_eq!(
        ComparableExpr::from(concatenated.expr()),
        ComparableExpr::from(single.expr())
    );
    Ok(())
}
#[test]
fn concatenated_fstrings_compare_equal() -> Result<(), ParseError> {
    // An implicit concatenation mixing f-strings and plain strings must compare
    // equal to the single f-string with the merged literal segments.
    let concatenated = parse_expression(r#"f"{foo!r} this" r"\n raw" f" and {bar!s} that""#)?;
    let single = parse_expression(r#"f"{foo!r} this\\n raw and {bar!s} that""#)?;
    assert_eq!(
        ComparableExpr::from(concatenated.expr()),
        ComparableExpr::from(single.expr())
    );
    Ok(())
}

View File

@@ -0,0 +1,292 @@
# Patterns that use BestFit should be parenthesized if they exceed the configured line width
# but fit within parentheses.
match x:
case (
"averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsPar"
):
pass
match x:
case (
b"averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsPa"
):
pass
match x:
case (
f"averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsPa"
):
pass
match x:
case (
5444444444444444444444444444444444444444444444444444444444444444444444444444444j
):
pass
match x:
case (
5444444444444444444444444444444444444444444444444444444444444444444444444444444
):
pass
match x:
case (
5.44444444444444444444444444444444444444444444444444444444444444444444444444444
):
pass
match x:
case (
averyLongIdentThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsParenth
):
pass
# But they aren't parenthesized when they exceed the line length even parenthesized
match x:
case "averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsParenthesized":
pass
match x:
case b"averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsParenthesized":
pass
match x:
case f"averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsParenthesized":
pass
match x:
case 54444444444444444444444444444444444444444444444444444444444444444444444444444444444j:
pass
match x:
case 5444444444444444444444444444444444444444444444444444444444444444444444444444444444:
pass
match x:
case 5.444444444444444444444444444444444444444444444444444444444444444444444444444444444:
pass
match x:
case averyLongIdentifierThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsParenthesized:
pass
# It uses the Multiline layout when there's an alias.
match x:
case (
averyLongIdentifierThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsParenthe as b
):
pass
match x:
case (
"an implicit concatenated" "string literal" "in a match case" "that goes over multiple lines"
):
pass
## Patterns ending with a sequence, mapping, class, or parenthesized pattern should break the parenthesized-like pattern first
match x:
case A | [
aaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccccccc,
ddddddddddddddddddddddddddd,
]:
pass
match x:
case A | (
aaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccccccc,
ddddddddddddddddddddddddddd,
):
pass
match x:
case A | {
"a": aaaaaa,
"b": bbbbbbbbbbbbbbbb,
"c": cccccccccccccccccc,
"d": ddddddddddddddddddddddddddd,
}:
pass
match x:
case A | Class(
aaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccccccc,
ddddddddddddddddddddddddddd,
):
pass
match x:
case A | (
aaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbbbbbbbbbbbb.cccccccccccccccccccccccccccc.ddddddddddddddddddddddd
):
pass
## Patterns starting with a sequence, mapping, class, or parenthesized pattern should break the parenthesized-like pattern first
match x:
case [
aaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccccccc,
ddddddddddddddddddddddddddd,
] | A:
pass
match x:
case (
aaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccccccc,
ddddddddddddddddddddddddddd,
) | A:
pass
match x:
case {
"a": aaaaaa,
"b": bbbbbbbbbbbbbbbb,
"c": cccccccccccccccccc,
"d": ddddddddddddddddddddddddddd,
} | A:
pass
match x:
case Class(
aaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccccccc,
ddddddddddddddddddddddddddd,
):
pass
## Not for non-parenthesized sequence patterns
match x:
case (
(1) | aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
ccccccccccccccccccccccccccccccccc,
):
pass
## Parenthesize patterns that start with a token
match x:
case (
A(
aaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbbbbbbbbbbbb.cccccccccccccccccccccccccccc.ddddddddddddddddddddddd
)
| B
):
pass
## Always use parentheses for implicitly concatenated strings
match x:
case (
"implicit"
"concatenated"
"string"
| [aaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccccccc, ddddddddddddddddddddddddddd]
):
pass
match x:
case (
b"implicit"
b"concatenated"
b"string"
| [aaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccccccc, ddddddddddddddddddddddddddd]
):
pass
match x:
case (
f"implicit"
"concatenated"
"string"
| [aaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccccccc, ddddddddddddddddddddddddddd]
):
pass
## Complex number expressions and unary expressions
match x:
case 4 - 3j | [
aaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccccccccccccccc,
]:
pass
match x:
case 4 + 3j | [
aaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccccccccccccccc,
]:
pass
match x:
case -1 | [
aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
ccccccccccccccccccccccccccccccccc,
]:
pass
### Parenthesized patterns
match x:
case (1) | [
aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
ccccccccccccccccccccccccccccccccc,
]:
pass
match x:
case ( # comment
1
) | [
aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
ccccccccccccccccccccccccccccccccc,
]:
pass

View File

@@ -588,3 +588,28 @@ match n % 3, n % 5:
match x:
case Child(aaaaaaaaa, bbbbbbbbbbbbbbb, cccccc), Doc(aaaaa, bbbbbbbbbb, ddddddddddddd):
pass
match guard_comments:
case "abcd" if ( # trailing open parentheses comment
aaaaaaaaahhhhhhhh == 1 and bbbbbbaaaaaaaaaaa == 2
):
pass
case "bcdef" if (
aaaaaaaaahhhhhhhh == 1 and bbbbbbaaaaaaaaaaa == 2 # trailing end of line comment
): # comment
pass
case "efgh" if (
# leading own line comment
aaaaaahhhhhh == 1
):
pass
case "hijk" if (
aaaaaaaaa == 1
# trailing own line comment
):
pass

View File

@@ -59,6 +59,10 @@ pub(crate) enum Parenthesize {
/// Same as [`Self::IfBreaks`] except that it uses [`parenthesize_if_expands`] for expressions
/// with the layout [`NeedsParentheses::BestFit`] which is used by non-splittable
/// expressions like literals, name, and strings.
///
/// Use this layout over `IfBreaks` when there's a sequence of `maybe_parenthesize_expression`
/// in a single logical-line and you want to break from right-to-left. Use `IfBreaks` for the
/// first expression and `IfBreaksParenthesized` for the rest.
IfBreaksParenthesized,
/// Same as [`Self::IfBreaksParenthesized`] but uses [`parenthesize_if_expands`] for nested

View File

@@ -3,8 +3,13 @@ use ruff_python_ast::AstNode;
use ruff_python_ast::MatchCase;
use crate::builders::parenthesize_if_expands;
use crate::expression::parentheses::{NeedsParentheses, OptionalParentheses, Parentheses};
use crate::expression::maybe_parenthesize_expression;
use crate::expression::parentheses::{
NeedsParentheses, OptionalParentheses, Parentheses, Parenthesize,
};
use crate::pattern::maybe_parenthesize_pattern;
use crate::prelude::*;
use crate::preview::is_match_case_parentheses_enabled;
use crate::statement::clause::{clause_body, clause_header, ClauseHeader};
use crate::statement::suite::SuiteKind;
@@ -34,6 +39,45 @@ impl FormatNodeRule<MatchCase> for FormatMatchCase {
let comments = f.context().comments().clone();
let dangling_item_comments = comments.dangling(item);
let format_pattern = format_with(|f| {
if is_match_case_parentheses_enabled(f.context()) {
maybe_parenthesize_pattern(pattern, item).fmt(f)
} else {
let has_comments =
comments.has_leading(pattern) || comments.has_trailing_own_line(pattern);
if has_comments {
pattern.format().with_options(Parentheses::Always).fmt(f)
} else {
match pattern.needs_parentheses(item.as_any_node_ref(), f.context()) {
OptionalParentheses::Multiline => parenthesize_if_expands(
&pattern.format().with_options(Parentheses::Never),
)
.fmt(f),
OptionalParentheses::Always => {
pattern.format().with_options(Parentheses::Always).fmt(f)
}
OptionalParentheses::Never | OptionalParentheses::BestFit => {
pattern.format().with_options(Parentheses::Never).fmt(f)
}
}
}
}
});
let format_guard = guard.as_deref().map(|guard| {
format_with(|f| {
write!(f, [space(), token("if"), space()])?;
if is_match_case_parentheses_enabled(f.context()) {
maybe_parenthesize_expression(guard, item, Parenthesize::IfBreaksParenthesized)
.fmt(f)
} else {
guard.format().fmt(f)
}
})
});
write!(
f,
[
@@ -41,38 +85,7 @@ impl FormatNodeRule<MatchCase> for FormatMatchCase {
ClauseHeader::MatchCase(item),
dangling_item_comments,
&format_with(|f| {
write!(f, [token("case"), space()])?;
let has_comments = comments.has_leading(pattern)
|| comments.has_trailing_own_line(pattern);
if has_comments {
pattern.format().with_options(Parentheses::Always).fmt(f)?;
} else {
match pattern.needs_parentheses(item.as_any_node_ref(), f.context()) {
OptionalParentheses::Multiline => {
parenthesize_if_expands(
&pattern.format().with_options(Parentheses::Never),
)
.fmt(f)?;
}
OptionalParentheses::Always => {
pattern.format().with_options(Parentheses::Always).fmt(f)?;
}
OptionalParentheses::Never => {
pattern.format().with_options(Parentheses::Never).fmt(f)?;
}
OptionalParentheses::BestFit => {
pattern.format().with_options(Parentheses::Never).fmt(f)?;
}
}
}
if let Some(guard) = guard {
write!(f, [space(), token("if"), space(), guard.format()])?;
}
Ok(())
write!(f, [token("case"), space(), format_pattern, format_guard])
}),
),
clause_body(

View File

@@ -1,14 +1,17 @@
use ruff_formatter::{FormatOwnedWithRule, FormatRefWithRule, FormatRule, FormatRuleWithOptions};
use ruff_python_ast::AnyNodeRef;
use ruff_python_ast::Pattern;
use ruff_python_ast::{AnyNodeRef, Expr};
use ruff_python_ast::{MatchCase, Pattern};
use ruff_python_trivia::CommentRanges;
use ruff_python_trivia::{
first_non_trivia_token, BackwardsTokenizer, SimpleToken, SimpleTokenKind,
};
use ruff_text_size::Ranged;
use std::cmp::Ordering;
use crate::builders::parenthesize_if_expands;
use crate::context::{NodeLevel, WithNodeLevel};
use crate::expression::parentheses::{
parenthesized, NeedsParentheses, OptionalParentheses, Parentheses,
optional_parentheses, parenthesized, NeedsParentheses, OptionalParentheses, Parentheses,
};
use crate::prelude::*;
@@ -150,3 +153,254 @@ impl NeedsParentheses for Pattern {
}
}
}
/// Creates a [`MaybeParenthesizePattern`] that formats `pattern`, adding,
/// preserving, or omitting parentheses depending on the pattern's comments,
/// its kind, and whether it fits on the line.
pub(crate) fn maybe_parenthesize_pattern<'a>(
    pattern: &'a Pattern,
    case: &'a MatchCase,
) -> MaybeParenthesizePattern<'a> {
    MaybeParenthesizePattern { pattern, case }
}

/// Format helper pairing a match-case pattern with its enclosing `case`
/// clause; the clause is needed as the parent node when deciding whether the
/// pattern requires parentheses.
#[derive(Debug)]
pub(crate) struct MaybeParenthesizePattern<'a> {
    pattern: &'a Pattern,
    case: &'a MatchCase,
}
impl Format<PyFormatContext<'_>> for MaybeParenthesizePattern<'_> {
    /// Formats the pattern of a `case` clause, choosing between
    /// always-parenthesized, never-parenthesized, and
    /// parenthesize-only-if-it-expands layouts.
    fn fmt(&self, f: &mut Formatter<PyFormatContext<'_>>) -> FormatResult<()> {
        let MaybeParenthesizePattern { pattern, case } = self;
        let comments = f.context().comments();
        let pattern_comments = comments.leading_dangling_trailing(*pattern);

        // If the pattern has comments, we always want to preserve the parentheses. This also
        // ensures that we correctly handle parenthesized comments, and don't need to worry about
        // them in the implementation below.
        if pattern_comments.has_leading() || pattern_comments.has_trailing_own_line() {
            return pattern.format().with_options(Parentheses::Always).fmt(f);
        }

        // The enclosing `case` clause serves as the parent node when querying the
        // pattern's parenthesization needs.
        let needs_parentheses = pattern.needs_parentheses(AnyNodeRef::from(*case), f.context());

        match needs_parentheses {
            OptionalParentheses::Always => {
                pattern.format().with_options(Parentheses::Always).fmt(f)
            }
            OptionalParentheses::Never => pattern.format().with_options(Parentheses::Never).fmt(f),
            OptionalParentheses::Multiline => {
                // Prefer splitting inside a sub-pattern's own parentheses (e.g. the sequence
                // in `a | [b, c]`) over wrapping the whole pattern in new parentheses,
                // when the pattern's shape allows it.
                if can_pattern_omit_optional_parentheses(pattern, f.context()) {
                    optional_parentheses(&pattern.format().with_options(Parentheses::Never)).fmt(f)
                } else {
                    parenthesize_if_expands(&pattern.format().with_options(Parentheses::Never))
                        .fmt(f)
                }
            }
            OptionalParentheses::BestFit => {
                // `BestFit` patterns (non-splittable nodes such as literals and names)
                // with trailing comments keep their parentheses so the comment placement
                // stays stable.
                if pattern_comments.has_trailing() {
                    pattern.format().with_options(Parentheses::Always).fmt(f)
                } else {
                    // The group id is necessary because the nested expressions may reference it.
                    let group_id = f.group_id("optional_parentheses");
                    let f = &mut WithNodeLevel::new(NodeLevel::Expression(Some(group_id)), f);
                    best_fit_parenthesize(&pattern.format().with_options(Parentheses::Never))
                        .with_group_id(Some(group_id))
                        .fmt(f)
                }
            }
        }
    }
}
/// This function is very similar to [`can_omit_optional_parentheses`] with the only difference that it is for patterns
/// and not expressions.
///
/// The base idea of the omit optional parentheses layout is to prefer using parentheses of sub-patterns
/// when splitting the pattern over introducing new patterns. For example, prefer splitting the sequence pattern in
/// `a | [b, c]` over splitting before the `|` operator.
///
/// The layout is only applied when the parenthesized pattern is the first or last item in the pattern.
/// For example, the layout isn't used for `a | [b, c] | d` because that would look weird.
pub(crate) fn can_pattern_omit_optional_parentheses(
    pattern: &Pattern,
    context: &PyFormatContext,
) -> bool {
    let mut visitor = CanOmitOptionalParenthesesVisitor::default();
    visitor.visit_pattern(pattern, context);
    if !visitor.any_parenthesized_expressions {
        // Only use the more complex IR if there's a parenthesized pattern that can be split before
        // splitting other patterns. E.g. split the sequence pattern before the string literal `"a" "b" | [a, b, c, d]`.
        false
    } else if visitor.max_precedence_count > 1 {
        // More than one sub-pattern at the highest operator precedence: splitting inside a
        // single sub-pattern's parentheses wouldn't be enough to flatten the whole pattern,
        // so keep the outer parentheses.
        false
    } else {
        // The layout only pays off when the first or last sub-pattern has its own,
        // non-empty parentheses that the formatter can split inside of.
        fn has_parentheses_and_is_non_empty(pattern: &Pattern, context: &PyFormatContext) -> bool {
            let has_own_non_empty = match pattern {
                // These pattern kinds have no intrinsic brackets of their own.
                Pattern::MatchValue(_)
                | Pattern::MatchSingleton(_)
                | Pattern::MatchStar(_)
                | Pattern::MatchAs(_)
                | Pattern::MatchOr(_) => false,
                Pattern::MatchSequence(sequence) => {
                    !sequence.patterns.is_empty() || context.comments().has_dangling(pattern)
                }
                Pattern::MatchMapping(mapping) => {
                    !mapping.patterns.is_empty() || context.comments().has_dangling(pattern)
                }
                Pattern::MatchClass(class) => !class.arguments.patterns.is_empty(),
            };
            if has_own_non_empty {
                true
            } else {
                // If the pattern has no own parentheses or it is empty (e.g. ([])), check for surrounding parentheses (that should be preserved).
                is_pattern_parenthesized(pattern, context.comments().ranges(), context.source())
            }
        }
        visitor
            .last
            .is_some_and(|last| has_parentheses_and_is_non_empty(last, context))
            || visitor
                .first
                .pattern()
                .is_some_and(|first| has_parentheses_and_is_non_empty(first, context))
    }
}
/// Collects the facts about a pattern that
/// [`can_pattern_omit_optional_parentheses`] needs to pick a layout.
#[derive(Debug, Default)]
struct CanOmitOptionalParenthesesVisitor<'input> {
    /// Highest operator precedence encountered so far.
    max_precedence: OperatorPrecedence,
    /// How many operators of `max_precedence` were encountered.
    max_precedence_count: usize,
    /// Whether any sub-pattern brings its own brackets (sequence, mapping,
    /// class arguments) or is explicitly parenthesized in the source.
    any_parenthesized_expressions: bool,
    /// The most recently visited sub-pattern of an `or` pattern, if any.
    last: Option<&'input Pattern>,
    /// What the pattern starts with: a sub-pattern, a plain token, or unset.
    first: First<'input>,
}
impl<'a> CanOmitOptionalParenthesesVisitor<'a> {
    /// Records the relevant facts about `pattern`: whether it carries its own
    /// parentheses/brackets and which operators (and how many) it uses.
    fn visit_pattern(&mut self, pattern: &'a Pattern, context: &PyFormatContext) {
        match pattern {
            // Sequence and mapping patterns come with their own brackets.
            Pattern::MatchSequence(_) | Pattern::MatchMapping(_) => {
                self.any_parenthesized_expressions = true;
            }
            Pattern::MatchValue(value) => match &*value.value {
                Expr::StringLiteral(string) => {
                    self.update_max_precedence(OperatorPrecedence::String, string.value.len());
                }
                Expr::BytesLiteral(bytes) => {
                    self.update_max_precedence(OperatorPrecedence::String, bytes.value.len());
                }
                // F-strings are allowed according to python's grammar but fail with a syntax error at runtime.
                // That's why we need to support them for formatting.
                Expr::FString(string) => {
                    self.update_max_precedence(
                        OperatorPrecedence::String,
                        string.value.as_slice().len(),
                    );
                }
                Expr::NumberLiteral(_) | Expr::Attribute(_) | Expr::UnaryOp(_) => {
                    // Require no state update other than what `visit_pattern` already does.
                }
                // `case 4+3j:` or `case 4-3j:`
                // Can not contain arbitrary expressions. Limited to complex numbers.
                Expr::BinOp(_) => {
                    self.update_max_precedence(OperatorPrecedence::Additive, 1);
                }
                _ => {
                    debug_assert!(
                        false,
                        "Unsupported expression in pattern match value: {:?}",
                        value.value
                    );
                }
            },
            Pattern::MatchClass(_) => {
                self.any_parenthesized_expressions = true;

                // The pattern doesn't start with a parentheses pattern, but with the class's identifier.
                self.first.set_if_none(First::Token);
            }
            Pattern::MatchStar(_) | Pattern::MatchSingleton(_) | Pattern::MatchAs(_) => {}
            Pattern::MatchOr(or_pattern) => {
                // `n` alternatives are joined by `n - 1` `|` operators.
                self.update_max_precedence(
                    OperatorPrecedence::Or,
                    or_pattern.patterns.len().saturating_sub(1),
                );

                for pattern in &or_pattern.patterns {
                    self.visit_sub_pattern(pattern, context);
                }
            }
        }
    }

    /// Visits one alternative of an `or` pattern, tracking the first/last
    /// sub-patterns and whether the alternative is parenthesized in the source.
    fn visit_sub_pattern(&mut self, pattern: &'a Pattern, context: &PyFormatContext) {
        self.last = Some(pattern);

        // Rule only applies for non-parenthesized patterns.
        if is_pattern_parenthesized(pattern, context.comments().ranges(), context.source()) {
            self.any_parenthesized_expressions = true;
        } else {
            self.visit_pattern(pattern, context);
        }

        self.first.set_if_none(First::Pattern(pattern));
    }

    /// Tracks the maximum operator precedence seen so far and how many
    /// operators occur at that precedence level.
    fn update_max_precedence(&mut self, precedence: OperatorPrecedence, count: usize) {
        match self.max_precedence.cmp(&precedence) {
            Ordering::Less => {
                self.max_precedence_count = count;
                self.max_precedence = precedence;
            }
            Ordering::Equal => {
                self.max_precedence_count += count;
            }
            Ordering::Greater => {}
        }
    }
}
/// Precedence levels tracked by [`CanOmitOptionalParenthesesVisitor`].
///
/// The derived `Ord` follows declaration order, so variants are listed from
/// lowest to highest precedence.
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Default)]
enum OperatorPrecedence {
    /// No operator encountered yet (initial state).
    #[default]
    None,
    /// `+`/`-` in complex-number literals like `case 4 + 3j:`.
    Additive,
    /// The `|` operator joining alternatives of an `or` pattern.
    Or,
    // Implicit string concatenation
    String,
}
/// Tracks what the visited pattern starts with.
#[derive(Copy, Clone, Debug, Default)]
enum First<'a> {
    /// Nothing recorded yet.
    #[default]
    None,

    /// Pattern starts with a non-parentheses token. E.g. `*x`
    Token,

    /// Pattern starts with this sub-pattern.
    Pattern(&'a Pattern),
}
impl<'a> First<'a> {
    /// Records `first`, but only if nothing has been recorded yet.
    #[inline]
    fn set_if_none(&mut self, first: First<'a>) {
        if let First::None = *self {
            *self = first;
        }
    }

    /// Returns the leading sub-pattern, or `None` when the pattern starts with
    /// a plain token (or nothing was recorded).
    fn pattern(self) -> Option<&'a Pattern> {
        if let First::Pattern(pattern) = self {
            Some(pattern)
        } else {
            None
        }
    }
}

View File

@@ -5,6 +5,7 @@ use ruff_python_ast::PatternMatchAs;
use crate::comments::dangling_comments;
use crate::expression::parentheses::{NeedsParentheses, OptionalParentheses};
use crate::prelude::*;
use crate::preview::is_match_case_parentheses_enabled;
#[derive(Default)]
pub struct FormatPatternMatchAs;
@@ -54,8 +55,16 @@ impl NeedsParentheses for PatternMatchAs {
fn needs_parentheses(
&self,
_parent: AnyNodeRef,
_context: &PyFormatContext,
context: &PyFormatContext,
) -> OptionalParentheses {
OptionalParentheses::Multiline
if is_match_case_parentheses_enabled(context) {
if self.name.is_some() {
OptionalParentheses::Multiline
} else {
OptionalParentheses::BestFit
}
} else {
OptionalParentheses::Multiline
}
}
}

View File

@@ -46,7 +46,7 @@ impl NeedsParentheses for PatternMatchClass {
// ): ...
// ```
if context.comments().has_dangling(self) {
OptionalParentheses::Multiline
OptionalParentheses::Always
} else {
OptionalParentheses::Never
}

View File

@@ -4,9 +4,11 @@ use ruff_python_ast::PatternMatchOr;
use crate::comments::leading_comments;
use crate::expression::parentheses::{
in_parentheses_only_soft_line_break_or_space, NeedsParentheses, OptionalParentheses,
in_parentheses_only_group, in_parentheses_only_soft_line_break_or_space, NeedsParentheses,
OptionalParentheses,
};
use crate::prelude::*;
use crate::preview::is_match_case_parentheses_enabled;
#[derive(Default)]
pub struct FormatPatternMatchOr;
@@ -41,7 +43,11 @@ impl FormatNodeRule<PatternMatchOr> for FormatPatternMatchOr {
Ok(())
});
inner.fmt(f)
if is_match_case_parentheses_enabled(f.context()) {
in_parentheses_only_group(&inner).fmt(f)
} else {
inner.fmt(f)
}
}
}

View File

@@ -3,6 +3,7 @@ use ruff_python_ast::{PatternMatchSingleton, Singleton};
use crate::expression::parentheses::{NeedsParentheses, OptionalParentheses};
use crate::prelude::*;
use crate::preview::is_match_case_parentheses_enabled;
#[derive(Default)]
pub struct FormatPatternMatchSingleton;
@@ -21,8 +22,12 @@ impl NeedsParentheses for PatternMatchSingleton {
fn needs_parentheses(
&self,
_parent: AnyNodeRef,
_context: &PyFormatContext,
context: &PyFormatContext,
) -> OptionalParentheses {
OptionalParentheses::Never
if is_match_case_parentheses_enabled(context) {
OptionalParentheses::BestFit
} else {
OptionalParentheses::Never
}
}
}

View File

@@ -31,6 +31,8 @@ impl NeedsParentheses for PatternMatchStar {
_parent: AnyNodeRef,
_context: &PyFormatContext,
) -> OptionalParentheses {
// Doesn't matter what we return here because starred patterns can never be used
// outside a sequence pattern.
OptionalParentheses::Never
}
}

View File

@@ -3,6 +3,7 @@ use ruff_python_ast::PatternMatchValue;
use crate::expression::parentheses::{NeedsParentheses, OptionalParentheses, Parentheses};
use crate::prelude::*;
use crate::preview::is_match_case_parentheses_enabled;
#[derive(Default)]
pub struct FormatPatternMatchValue;
@@ -17,9 +18,13 @@ impl FormatNodeRule<PatternMatchValue> for FormatPatternMatchValue {
impl NeedsParentheses for PatternMatchValue {
fn needs_parentheses(
&self,
_parent: AnyNodeRef,
_context: &PyFormatContext,
parent: AnyNodeRef,
context: &PyFormatContext,
) -> OptionalParentheses {
OptionalParentheses::Never
if is_match_case_parentheses_enabled(context) {
self.value.needs_parentheses(parent, context)
} else {
OptionalParentheses::Never
}
}
}

View File

@@ -36,3 +36,11 @@ pub(crate) fn is_empty_parameters_no_unnecessary_parentheses_around_return_value
) -> bool {
context.is_preview()
}
/// See [#6933](https://github.com/astral-sh/ruff/issues/6933).
/// This style also covers the black preview styles `remove_redundant_guard_parens` and `parens_for_long_if_clauses_in_case_block`.
/// WARNING: This preview style depends on `is_empty_parameters_no_unnecessary_parentheses_around_return_value_enabled`
/// because it relies on the new semantic of `IfBreaksParenthesized`.
pub(crate) fn is_match_case_parentheses_enabled(context: &PyFormatContext) -> bool {
context.is_preview()
}

View File

@@ -44,36 +44,9 @@ match x:
```diff
--- Black
+++ Ruff
@@ -3,34 +3,36 @@
@@ -21,11 +21,17 @@
pass
match smth:
- case "test" if (
- "any long condition" != "another long condition" and "this is a long condition"
- ):
+ case "test" if "any long condition" != "another long condition" and "this is a long condition":
pass
- case test if (
- "any long condition" != "another long condition"
- and "this is a looooong condition"
- ):
+ case (
+ test
+ ) if "any long condition" != "another long condition" and "this is a looooong condition":
pass
- case test if (
- "any long condition" != "another long condition"
- and "this is a looooong condition"
- ): # some additional comments
+ case (
+ test
+ ) if "any long condition" != "another long condition" and "this is a looooong condition": # some additional comments
pass
- case test if True: # some comment
+ case test if (True): # some comment
pass
- case test if False: # some comment
+ case test if (False): # some comment
case test if False: # some comment
pass
- case test if True: # some comment
+ case test if (
@@ -92,12 +65,6 @@ match x:
pass # some comment
# case black_test_patma_052 (originally in the pattern_matching_complex test case)
match x:
case [1, 0] if x := x[:0]:
y = 1
- case [1, 0] if x := x[:0]:
+ case [1, 0] if (x := x[:0]):
y = 1
```
## Ruff Output
@@ -108,19 +75,23 @@ match match:
pass
match smth:
case "test" if "any long condition" != "another long condition" and "this is a long condition":
case "test" if (
"any long condition" != "another long condition" and "this is a long condition"
):
pass
case (
test
) if "any long condition" != "another long condition" and "this is a looooong condition":
case test if (
"any long condition" != "another long condition"
and "this is a looooong condition"
):
pass
case (
test
) if "any long condition" != "another long condition" and "this is a looooong condition": # some additional comments
case test if (
"any long condition" != "another long condition"
and "this is a looooong condition"
): # some additional comments
pass
case test if (True): # some comment
case test if True: # some comment
pass
case test if (False): # some comment
case test if False: # some comment
pass
case test if (
True # some comment
@@ -139,7 +110,7 @@ match smth:
match x:
case [1, 0] if x := x[:0]:
y = 1
case [1, 0] if (x := x[:0]):
case [1, 0] if x := x[:0]:
y = 1
```

View File

@@ -69,20 +69,7 @@ match 1:
```diff
--- Black
+++ Ruff
@@ -1,10 +1,10 @@
match 1:
- case _ if True:
+ case _ if (True):
pass
match 1:
- case _ if True:
+ case _ if (True):
pass
@@ -25,27 +25,33 @@
@@ -25,12 +25,16 @@
match 1:
@@ -101,18 +88,7 @@ match 1:
pass
match 1:
- case _ if True: # this is a comment
+ case _ if (True): # this is a comment
pass
match 1:
- case _ if True: # comment over the line limit unless parens are removed x
+ case _ if (
+ True
+ ): # comment over the line limit unless parens are removed x
pass
@@ -45,7 +49,7 @@
match 1:
@@ -129,12 +105,12 @@ match 1:
```python
match 1:
case _ if (True):
case _ if True:
pass
match 1:
case _ if (True):
case _ if True:
pass
@@ -169,14 +145,12 @@ match 1:
match 1:
case _ if (True): # this is a comment
case _ if True: # this is a comment
pass
match 1:
case _ if (
True
): # comment over the line limit unless parens are removed x
case _ if True: # comment over the line limit unless parens are removed x
pass

View File

@@ -0,0 +1,925 @@
---
source: crates/ruff_python_formatter/tests/fixtures.rs
input_file: crates/ruff_python_formatter/resources/test/fixtures/ruff/pattern/pattern_maybe_parenthesize.py
---
## Input
```python
# Patterns that use BestFit should be parenthesized if they exceed the configured line width
# but fit within parentheses.
match x:
case (
"averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsPar"
):
pass
match x:
case (
b"averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsPa"
):
pass
match x:
case (
f"averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsPa"
):
pass
match x:
case (
5444444444444444444444444444444444444444444444444444444444444444444444444444444j
):
pass
match x:
case (
5444444444444444444444444444444444444444444444444444444444444444444444444444444
):
pass
match x:
case (
5.44444444444444444444444444444444444444444444444444444444444444444444444444444
):
pass
match x:
case (
averyLongIdentThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsParenth
):
pass
# But they aren't parenthesized when they exceed the line length even parenthesized
match x:
case "averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsParenthesized":
pass
match x:
case b"averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsParenthesized":
pass
match x:
case f"averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsParenthesized":
pass
match x:
case 54444444444444444444444444444444444444444444444444444444444444444444444444444444444j:
pass
match x:
case 5444444444444444444444444444444444444444444444444444444444444444444444444444444444:
pass
match x:
case 5.444444444444444444444444444444444444444444444444444444444444444444444444444444444:
pass
match x:
case averyLongIdentifierThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsParenthesized:
pass
# It uses the Multiline layout when there's an alias.
match x:
case (
averyLongIdentifierThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsParenthe as b
):
pass
match x:
case (
"an implicit concatenated" "string literal" "in a match case" "that goes over multiple lines"
):
pass
## Patterns ending with a sequence, mapping, class, or parenthesized pattern should break the parenthesized-like pattern first
match x:
case A | [
aaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccccccc,
ddddddddddddddddddddddddddd,
]:
pass
match x:
case A | (
aaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccccccc,
ddddddddddddddddddddddddddd,
):
pass
match x:
case A | {
"a": aaaaaa,
"b": bbbbbbbbbbbbbbbb,
"c": cccccccccccccccccc,
"d": ddddddddddddddddddddddddddd,
}:
pass
match x:
case A | Class(
aaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccccccc,
ddddddddddddddddddddddddddd,
):
pass
match x:
case A | (
aaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbbbbbbbbbbbb.cccccccccccccccccccccccccccc.ddddddddddddddddddddddd
):
pass
## Patterns starting with a sequence, mapping, class, or parenthesized pattern should break the parenthesized-like pattern first
match x:
case [
aaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccccccc,
ddddddddddddddddddddddddddd,
] | A:
pass
match x:
case (
aaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccccccc,
ddddddddddddddddddddddddddd,
) | A:
pass
match x:
case {
"a": aaaaaa,
"b": bbbbbbbbbbbbbbbb,
"c": cccccccccccccccccc,
"d": ddddddddddddddddddddddddddd,
} | A:
pass
match x:
case Class(
aaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccccccc,
ddddddddddddddddddddddddddd,
):
pass
## Not for non-parenthesized sequence patterns
match x:
case (
(1) | aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
ccccccccccccccccccccccccccccccccc,
):
pass
## Parenthesize patterns that start with a token
match x:
case (
A(
aaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbbbbbbbbbbbb.cccccccccccccccccccccccccccc.ddddddddddddddddddddddd
)
| B
):
pass
## Always use parentheses for implicitly concatenated strings
match x:
case (
"implicit"
"concatenated"
"string"
| [aaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccccccc, ddddddddddddddddddddddddddd]
):
pass
match x:
case (
b"implicit"
b"concatenated"
b"string"
| [aaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccccccc, ddddddddddddddddddddddddddd]
):
pass
match x:
case (
f"implicit"
"concatenated"
"string"
| [aaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccccccc, ddddddddddddddddddddddddddd]
):
pass
## Complex number expressions and unary expressions
match x:
case 4 - 3j | [
aaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccccccccccccccc,
]:
pass
match x:
case 4 + 3j | [
aaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccccccccccccccc,
]:
pass
match x:
case -1 | [
aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
ccccccccccccccccccccccccccccccccc,
]:
pass
### Parenthesized patterns
match x:
case (1) | [
aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
ccccccccccccccccccccccccccccccccc,
]:
pass
match x:
case ( # comment
1
) | [
aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
ccccccccccccccccccccccccccccccccc,
]:
pass
```
## Output
```python
# Patterns that use BestFit should be parenthesized if they exceed the configured line width
# but fit within parentheses.
match x:
case "averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsPar":
pass
match x:
case b"averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsPa":
pass
match x:
case f"averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsPa":
pass
match x:
case 5444444444444444444444444444444444444444444444444444444444444444444444444444444j:
pass
match x:
case 5444444444444444444444444444444444444444444444444444444444444444444444444444444:
pass
match x:
case 5.44444444444444444444444444444444444444444444444444444444444444444444444444444:
pass
match x:
case (
averyLongIdentThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsParenth
):
pass
# But they aren't parenthesized when they exceed the line length even parenthesized
match x:
case "averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsParenthesized":
pass
match x:
case b"averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsParenthesized":
pass
match x:
case f"averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsParenthesized":
pass
match x:
case 54444444444444444444444444444444444444444444444444444444444444444444444444444444444j:
pass
match x:
case 5444444444444444444444444444444444444444444444444444444444444444444444444444444444:
pass
match x:
case 5.444444444444444444444444444444444444444444444444444444444444444444444444444444444:
pass
match x:
case (
averyLongIdentifierThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsParenthesized
):
pass
# It uses the Multiline layout when there's an alias.
match x:
case (
averyLongIdentifierThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsParenthe as b
):
pass
match x:
case "an implicit concatenated" "string literal" "in a match case" "that goes over multiple lines":
pass
## Patterns ending with a sequence, mapping, class, or parenthesized pattern should break the parenthesized-like pattern first
match x:
case (
A
| [
aaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccccccc,
ddddddddddddddddddddddddddd,
]
):
pass
match x:
case (
A
| (
aaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccccccc,
ddddddddddddddddddddddddddd,
)
):
pass
match x:
case (
A
| {
"a": aaaaaa,
"b": bbbbbbbbbbbbbbbb,
"c": cccccccccccccccccc,
"d": ddddddddddddddddddddddddddd,
}
):
pass
match x:
case (
A
| Class(
aaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccccccc,
ddddddddddddddddddddddddddd,
)
):
pass
match x:
case (
A
| (
aaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbbbbbbbbbbbb.cccccccccccccccccccccccccccc.ddddddddddddddddddddddd
)
):
pass
## Patterns starting with a sequence, mapping, class, or parenthesized pattern should break the parenthesized-like pattern first
match x:
case (
[
aaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccccccc,
ddddddddddddddddddddddddddd,
]
| A
):
pass
match x:
case (
(
aaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccccccc,
ddddddddddddddddddddddddddd,
)
| A
):
pass
match x:
case (
{
"a": aaaaaa,
"b": bbbbbbbbbbbbbbbb,
"c": cccccccccccccccccc,
"d": ddddddddddddddddddddddddddd,
}
| A
):
pass
match x:
case Class(
aaaaaa,
bbbbbbbbbbbbbbbb,
cccccccccccccccccc,
ddddddddddddddddddddddddddd,
):
pass
## Not for non-parenthesized sequence patterns
match x:
case (
(1)
| aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
ccccccccccccccccccccccccccccccccc,
):
pass
## Parenthesize patterns that start with a token
match x:
case (
A(
aaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbbbbbbbbbbbb.cccccccccccccccccccccccccccc.ddddddddddddddddddddddd
)
| B
):
pass
## Always use parentheses for implicitly concatenated strings
match x:
case (
"implicit" "concatenated" "string"
| [aaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccccccc, ddddddddddddddddddddddddddd]
):
pass
match x:
case (
b"implicit" b"concatenated" b"string"
| [aaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccccccc, ddddddddddddddddddddddddddd]
):
pass
match x:
case (
f"implicit" "concatenated" "string"
| [aaaaaa, bbbbbbbbbbbbbbbb, cccccccccccccccccc, ddddddddddddddddddddddddddd]
):
pass
## Complex number expressions and unary expressions
match x:
case (
4 - 3j
| [
aaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccccccccccccccc,
]
):
pass
match x:
case (
4 + 3j
| [
aaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
cccccccccccccccccccccccccccccccccccccccc,
]
):
pass
match x:
case (
-1
| [
aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
ccccccccccccccccccccccccccccccccc,
]
):
pass
### Parenthesized patterns
match x:
case (
(1)
| [
aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
ccccccccccccccccccccccccccccccccc,
]
):
pass
match x:
case (
( # comment
1
)
| [
aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
ccccccccccccccccccccccccccccccccc,
]
):
pass
```
## Preview changes
```diff
--- Stable
+++ Preview
@@ -1,31 +1,43 @@
# Patterns that use BestFit should be parenthesized if they exceed the configured line width
# but fit within parentheses.
match x:
- case "averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsPar":
+ case (
+ "averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsPar"
+ ):
pass
match x:
- case b"averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsPa":
+ case (
+ b"averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsPa"
+ ):
pass
match x:
- case f"averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsPa":
+ case (
+ f"averyLongStringThatGetsParenthesizedOnceItExceedsTheConfiguredLineWidthFitsPa"
+ ):
pass
match x:
- case 5444444444444444444444444444444444444444444444444444444444444444444444444444444j:
+ case (
+ 5444444444444444444444444444444444444444444444444444444444444444444444444444444j
+ ):
pass
match x:
- case 5444444444444444444444444444444444444444444444444444444444444444444444444444444:
+ case (
+ 5444444444444444444444444444444444444444444444444444444444444444444444444444444
+ ):
pass
match x:
- case 5.44444444444444444444444444444444444444444444444444444444444444444444444444444:
+ case (
+ 5.44444444444444444444444444444444444444444444444444444444444444444444444444444
+ ):
pass
@@ -82,108 +94,89 @@
match x:
- case "an implicit concatenated" "string literal" "in a match case" "that goes over multiple lines":
+ case (
+ "an implicit concatenated"
+ "string literal"
+ "in a match case"
+ "that goes over multiple lines"
+ ):
pass
## Patterns ending with a sequence, mapping, class, or parenthesized pattern should break the parenthesized-like pattern first
match x:
- case (
- A
- | [
- aaaaaa,
- bbbbbbbbbbbbbbbb,
- cccccccccccccccccc,
- ddddddddddddddddddddddddddd,
- ]
- ):
+ case A | [
+ aaaaaa,
+ bbbbbbbbbbbbbbbb,
+ cccccccccccccccccc,
+ ddddddddddddddddddddddddddd,
+ ]:
pass
match x:
- case (
- A
- | (
- aaaaaa,
- bbbbbbbbbbbbbbbb,
- cccccccccccccccccc,
- ddddddddddddddddddddddddddd,
- )
+ case A | (
+ aaaaaa,
+ bbbbbbbbbbbbbbbb,
+ cccccccccccccccccc,
+ ddddddddddddddddddddddddddd,
):
pass
match x:
- case (
- A
- | {
- "a": aaaaaa,
- "b": bbbbbbbbbbbbbbbb,
- "c": cccccccccccccccccc,
- "d": ddddddddddddddddddddddddddd,
- }
- ):
+ case A | {
+ "a": aaaaaa,
+ "b": bbbbbbbbbbbbbbbb,
+ "c": cccccccccccccccccc,
+ "d": ddddddddddddddddddddddddddd,
+ }:
pass
match x:
- case (
- A
- | Class(
- aaaaaa,
- bbbbbbbbbbbbbbbb,
- cccccccccccccccccc,
- ddddddddddddddddddddddddddd,
- )
+ case A | Class(
+ aaaaaa,
+ bbbbbbbbbbbbbbbb,
+ cccccccccccccccccc,
+ ddddddddddddddddddddddddddd,
):
pass
match x:
- case (
- A
- | (
- aaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbbbbbbbbbbbb.cccccccccccccccccccccccccccc.ddddddddddddddddddddddd
- )
+ case A | (
+ aaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbbbbbbbbbbbb.cccccccccccccccccccccccccccc.ddddddddddddddddddddddd
):
pass
## Patterns starting with a sequence, mapping, class, or parenthesized pattern should break the parenthesized-like pattern first
match x:
- case (
- [
- aaaaaa,
- bbbbbbbbbbbbbbbb,
- cccccccccccccccccc,
- ddddddddddddddddddddddddddd,
- ]
- | A
- ):
+ case [
+ aaaaaa,
+ bbbbbbbbbbbbbbbb,
+ cccccccccccccccccc,
+ ddddddddddddddddddddddddddd,
+ ] | A:
pass
match x:
case (
- (
- aaaaaa,
- bbbbbbbbbbbbbbbb,
- cccccccccccccccccc,
- ddddddddddddddddddddddddddd,
- )
- | A
- ):
+ aaaaaa,
+ bbbbbbbbbbbbbbbb,
+ cccccccccccccccccc,
+ ddddddddddddddddddddddddddd,
+ ) | A:
pass
match x:
- case (
- {
- "a": aaaaaa,
- "b": bbbbbbbbbbbbbbbb,
- "c": cccccccccccccccccc,
- "d": ddddddddddddddddddddddddddd,
- }
- | A
- ):
+ case {
+ "a": aaaaaa,
+ "b": bbbbbbbbbbbbbbbb,
+ "c": cccccccccccccccccc,
+ "d": ddddddddddddddddddddddddddd,
+ } | A:
pass
@@ -200,8 +193,7 @@
## Not for non-parenthesized sequence patterns
match x:
case (
- (1)
- | aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
+ (1) | aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
ccccccccccccccccccccccccccccccccc,
):
@@ -246,63 +238,48 @@
## Complex number expressions and unary expressions
match x:
- case (
- 4 - 3j
- | [
- aaaaaaaaaaaaaaaaaaaaaaaa,
- bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
- cccccccccccccccccccccccccccccccccccccccc,
- ]
- ):
+ case 4 - 3j | [
+ aaaaaaaaaaaaaaaaaaaaaaaa,
+ bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
+ cccccccccccccccccccccccccccccccccccccccc,
+ ]:
pass
match x:
- case (
- 4 + 3j
- | [
- aaaaaaaaaaaaaaaaaaaaaaaa,
- bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
- cccccccccccccccccccccccccccccccccccccccc,
- ]
- ):
+ case 4 + 3j | [
+ aaaaaaaaaaaaaaaaaaaaaaaa,
+ bbbbbbbbbbbbbbbbbbbbbbbbbbbb,
+ cccccccccccccccccccccccccccccccccccccccc,
+ ]:
pass
match x:
- case (
- -1
- | [
- aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
- bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
- ccccccccccccccccccccccccccccccccc,
- ]
- ):
+ case -1 | [
+ aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
+ bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
+ ccccccccccccccccccccccccccccccccc,
+ ]:
pass
### Parenthesized patterns
match x:
- case (
- (1)
- | [
- aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
- bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
- ccccccccccccccccccccccccccccccccc,
- ]
- ):
+ case (1) | [
+ aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
+ bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
+ ccccccccccccccccccccccccccccccccc,
+ ]:
pass
match x:
- case (
- ( # comment
- 1
- )
- | [
- aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
- bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
- ccccccccccccccccccccccccccccccccc,
- ]
- ):
+ case ( # comment
+ 1
+ ) | [
+ aaaaaaaaaaaaaaaaaaaaaaaaaaaa,
+ bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,
+ ccccccccccccccccccccccccccccccccc,
+ ]:
pass
```

View File

@@ -594,6 +594,31 @@ match n % 3, n % 5:
match x:
case Child(aaaaaaaaa, bbbbbbbbbbbbbbb, cccccc), Doc(aaaaa, bbbbbbbbbb, ddddddddddddd):
pass
match guard_comments:
case "abcd" if ( # trailing open parentheses comment
aaaaaaaaahhhhhhhh == 1 and bbbbbbaaaaaaaaaaa == 2
):
pass
case "bcdef" if (
aaaaaaaaahhhhhhhh == 1 and bbbbbbaaaaaaaaaaa == 2 # trailing end of line comment
): # comment
pass
case "efgh" if (
# leading own line comment
aaaaaahhhhhh == 1
):
pass
case "hijk" if (
aaaaaaaaa == 1
# trailing own line comment
):
pass
```
## Output
@@ -1232,7 +1257,124 @@ match x:
aaaaa, bbbbbbbbbb, ddddddddddddd
):
pass
match guard_comments:
case "abcd" if ( # trailing open parentheses comment
aaaaaaaaahhhhhhhh == 1 and bbbbbbaaaaaaaaaaa == 2
):
pass
case "bcdef" if (
aaaaaaaaahhhhhhhh == 1
and bbbbbbaaaaaaaaaaa == 2 # trailing end of line comment
): # comment
pass
case "efgh" if (
# leading own line comment
aaaaaahhhhhh == 1
):
pass
case "hijk" if (
aaaaaaaaa == 1
# trailing own line comment
):
pass
```
## Preview changes
```diff
--- Stable
+++ Preview
@@ -69,7 +69,7 @@
case "case comment with newlines" if foo == 2: # second
pass
- case "one", "newline" if (foo := 1): # third
+ case "one", "newline" if foo := 1: # third
pass
case "two newlines":
@@ -82,7 +82,9 @@
match long_lines:
- case "this is a long line for if condition" if aaaaaaaaahhhhhhhh == 1 and bbbbbbaaaaaaaaaaa == 2: # comment
+ case "this is a long line for if condition" if (
+ aaaaaaaaahhhhhhhh == 1 and bbbbbbaaaaaaaaaaa == 2
+ ): # comment
pass
case "this is a long line for if condition with parentheses" if (
@@ -93,7 +95,7 @@
case "named expressions aren't special" if foo := 1:
pass
- case "named expressions aren't that special" if (foo := 1):
+ case "named expressions aren't that special" if foo := 1:
pass
case "but with already broken long lines" if (
@@ -101,9 +103,9 @@
): # another comment
pass
- case {
- "long_long_long_key": str(long_long_long_key)
- } if value := "long long long long long long long long long long long value":
+ case {"long_long_long_key": str(long_long_long_key)} if (
+ value := "long long long long long long long long long long long value"
+ ):
pass
@@ -198,7 +200,9 @@
# trailing own 2
):
pass
- case True: # trailing
+ case (
+ True # trailing
+ ):
pass
case False:
pass
@@ -249,7 +253,9 @@
1
):
y = 1
- case 1: # comment
+ case (
+ 1 # comment
+ ):
y = 1
case (
1
@@ -507,11 +513,8 @@
pass
case (
- (
- a # trailing
- )
- | (b)
- ):
+ a # trailing
+ ) | (b):
pass
case a | b | c:
@@ -525,8 +528,7 @@
pass
case ( # end of line
- a
- | b
+ a | b
# own line
):
pass
```

View File

@@ -731,16 +731,33 @@ impl TypeChecker for IoBaseChecker {
/// Test whether the given binding can be considered a list.
///
/// For this, we check what value might be associated with it through its initialization and
/// what annotation it has (we consider `list` and `typing.List`).
/// what annotation it has (we consider `list` and `typing.List`)
pub fn is_list(binding: &Binding, semantic: &SemanticModel) -> bool {
check_type::<ListChecker>(binding, semantic)
}
/// Test whether the given binding can be considered a dictionary.
///
/// For this, we check what value might be associated with it through it's initialization and
/// what annotation it has (we consider `dict` and `typing.Dict`).
/// For this, we check what value might be associated with it through its initialization,
/// what annotation it has (we consider `dict` and `typing.Dict`), and if it is a variadic keyword
/// argument parameter.
pub fn is_dict(binding: &Binding, semantic: &SemanticModel) -> bool {
// ```python
// def foo(**kwargs):
// ...
// ```
if matches!(binding.kind, BindingKind::Argument) {
if let Some(Stmt::FunctionDef(ast::StmtFunctionDef { parameters, .. })) =
binding.statement(semantic)
{
if let Some(kwarg_parameter) = parameters.kwarg.as_deref() {
if kwarg_parameter.name.range() == binding.range() {
return true;
}
}
}
}
check_type::<DictChecker>(binding, semantic)
}
@@ -754,10 +771,26 @@ pub fn is_set(binding: &Binding, semantic: &SemanticModel) -> bool {
/// Test whether the given binding can be considered a tuple.
///
/// For this, we check what value might be associated with it through
/// it's initialization and what annotation it has (we consider `tuple` and
/// `typing.Tuple`).
/// For this, we check what value might be associated with it through it's initialization, what
/// annotation it has (we consider `tuple` and `typing.Tuple`), and if it is a variadic positional
/// argument.
pub fn is_tuple(binding: &Binding, semantic: &SemanticModel) -> bool {
// ```python
// def foo(*args):
// ...
// ```
if matches!(binding.kind, BindingKind::Argument) {
if let Some(Stmt::FunctionDef(ast::StmtFunctionDef { parameters, .. })) =
binding.statement(semantic)
{
if let Some(arg_parameter) = parameters.vararg.as_deref() {
if arg_parameter.name.range() == binding.range() {
return true;
}
}
}
}
check_type::<TupleChecker>(binding, semantic)
}

View File

@@ -0,0 +1,32 @@
[package]
name = "ruff_vendored"
version = "0.0.0"
publish = false
authors = { workspace = true }
edition = { workspace = true }
rust-version = { workspace = true }
homepage = { workspace = true }
documentation = { workspace = true }
repository = { workspace = true }
license = { workspace = true }
[dependencies]
ruff_db = { workspace = true }
once_cell = { workspace = true }
zip = { workspace = true }
[build-dependencies]
path-slash = { workspace = true }
walkdir = { workspace = true }
zip = { workspace = true, features = ["zstd", "deflate"] }
[dev-dependencies]
walkdir = { workspace = true }
[features]
zstd = ["zip/zstd"]
deflate = ["zip/deflate"]
[lints]
workspace = true

View File

@@ -30,17 +30,12 @@ fn zip_dir(directory_path: &str, writer: File) -> ZipResult<File> {
// We can't use `#[cfg(...)]` here because the target-arch in a build script is the
// architecture of the system running the build script and not the architecture of the build-target.
// That's why we use the `TARGET` environment variable here.
#[cfg(target_arch = "powerpc64")]
let method = CompressionMethod::Deflated;
#[cfg(not(target_arch = "powerpc64"))]
let method = {
let target = std::env::var("TARGET").unwrap();
if target.contains("wasm32") || target.contains("powerpc64") {
CompressionMethod::Deflated
} else {
CompressionMethod::Zstd
}
let method = if cfg!(feature = "zstd") {
CompressionMethod::Zstd
} else if cfg!(feature = "deflate") {
CompressionMethod::Deflated
} else {
CompressionMethod::Stored
};
let options = FileOptions::default()

View File

@@ -6,7 +6,7 @@ use ruff_db::vendored::VendoredFileSystem;
// Luckily this crate will fail to build if this file isn't available at build time.
static TYPESHED_ZIP_BYTES: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/zipped_typeshed.zip"));
pub fn vendored_typeshed_stubs() -> &'static VendoredFileSystem {
pub fn file_system() -> &'static VendoredFileSystem {
static VENDORED_TYPESHED_STUBS: Lazy<VendoredFileSystem> =
Lazy::new(|| VendoredFileSystem::new_static(TYPESHED_ZIP_BYTES).unwrap());
&VENDORED_TYPESHED_STUBS
@@ -42,7 +42,7 @@ mod tests {
#[test]
fn typeshed_vfs_consistent_with_vendored_stubs() {
let vendored_typeshed_dir = Path::new("vendor/typeshed").canonicalize().unwrap();
let vendored_typeshed_stubs = vendored_typeshed_stubs();
let vendored_typeshed_stubs = file_system();
let mut empty_iterator = true;
for entry in walkdir::WalkDir::new(&vendored_typeshed_dir).min_depth(1) {

Some files were not shown because too many files have changed in this diff Show More