Compare commits


1 commit

Author: Zanie
SHA1: 69fc2caa31
Message: Add CI job to auto-update pre-commit dependencies weekly
Date: 2023-11-01 12:06:14 -05:00
78 changed files with 335 additions and 4084 deletions

View File

@@ -209,7 +209,7 @@ jobs:
run: |
pip install ./python/ruff-ecosystem
- name: Run `ruff check` stable ecosystem check
- name: Run `ruff check` ecosystem check
if: ${{ needs.determine_changes.outputs.linter == 'true' }}
run: |
# Make executable, since artifact download doesn't preserve this
@@ -218,30 +218,13 @@ jobs:
# Set pipefail to avoid hiding errors with tee
set -eo pipefail
ruff-ecosystem check ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff --cache ./checkouts --output-format markdown | tee ecosystem-result-check-stable
ruff-ecosystem check ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff --cache ./checkouts --output-format markdown | tee ecosystem-result-check
cat ecosystem-result-check-stable > $GITHUB_STEP_SUMMARY
echo "### Linter (stable)" > ecosystem-result
cat ecosystem-result-check-stable >> ecosystem-result
cat ecosystem-result-check > $GITHUB_STEP_SUMMARY
cat ecosystem-result-check > ecosystem-result
echo "" >> ecosystem-result
- name: Run `ruff check` preview ecosystem check
if: ${{ needs.determine_changes.outputs.linter == 'true' }}
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff
# Set pipefail to avoid hiding errors with tee
set -eo pipefail
ruff-ecosystem check ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-check-preview
cat ecosystem-result-check-preview > $GITHUB_STEP_SUMMARY
echo "### Linter (preview)" >> ecosystem-result
cat ecosystem-result-check-preview >> ecosystem-result
echo "" >> ecosystem-result
- name: Run `ruff format` stable ecosystem check
- name: Run `ruff format` ecosystem check
if: ${{ needs.determine_changes.outputs.formatter == 'true' }}
run: |
# Make executable, since artifact download doesn't preserve this
@@ -250,28 +233,10 @@ jobs:
# Set pipefail to avoid hiding errors with tee
set -eo pipefail
ruff-ecosystem format ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff --cache ./checkouts --output-format markdown | tee ecosystem-result-format-stable
ruff-ecosystem format ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff --cache ./checkouts --output-format markdown | tee ecosystem-result-format
cat ecosystem-result-format-stable > $GITHUB_STEP_SUMMARY
echo "### Formatter (stable)" >> ecosystem-result
cat ecosystem-result-format-stable >> ecosystem-result
echo "" >> ecosystem-result
- name: Run `ruff format` preview ecosystem check
if: ${{ needs.determine_changes.outputs.formatter == 'true' }}
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff
# Set pipefail to avoid hiding errors with tee
set -eo pipefail
ruff-ecosystem format ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-format-preview
cat ecosystem-result-format-preview > $GITHUB_STEP_SUMMARY
echo "### Formatter (preview)" >> ecosystem-result
cat ecosystem-result-format-preview >> ecosystem-result
echo "" >> ecosystem-result
cat ecosystem-result-format > $GITHUB_STEP_SUMMARY
cat ecosystem-result-format >> ecosystem-result
- name: Export pull request number
run: |

View File

@@ -1,4 +1,4 @@
name: Ecosystem check comment
name: PR Check Comment
on:
workflow_run:
@@ -18,13 +18,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: dawidd6/action-download-artifact@v2
name: Download pull request number
name: Download PR Number
with:
name: pr-number
run_id: ${{ github.event.workflow_run.id || github.event.inputs.workflow_run_id }}
if_no_artifact_found: ignore
- name: Parse pull request number
- name: Extract PR Number
id: pr-number
run: |
if [[ -f pr-number ]]
@@ -33,7 +33,7 @@ jobs:
fi
- uses: dawidd6/action-download-artifact@v2
name: "Download ecosystem results"
name: "Download Ecosystem Result"
id: download-ecosystem-result
if: steps.pr-number.outputs.pr-number
with:
@@ -44,15 +44,13 @@ jobs:
workflow_conclusion: completed
if_no_artifact_found: ignore
- name: Generate comment content
- name: Generate Comment
id: generate-comment
if: steps.download-ecosystem-result.outputs.found_artifact == 'true'
run: |
# Note this identifier is used to find the comment to update on
# subsequent runs
echo '<!-- generated-comment ecosystem -->' >> comment.txt
echo '## PR Check Results' >> comment.txt
echo '## `ruff-ecosystem` results' >> comment.txt
echo "### Ecosystem" >> comment.txt
cat pr/ecosystem/ecosystem-result >> comment.txt
echo "" >> comment.txt
@@ -60,14 +58,14 @@ jobs:
cat comment.txt >> $GITHUB_OUTPUT
echo 'EOF' >> $GITHUB_OUTPUT
- name: Find existing comment
- name: Find Comment
uses: peter-evans/find-comment@v2
if: steps.generate-comment.outcome == 'success'
id: find-comment
with:
issue-number: ${{ steps.pr-number.outputs.pr-number }}
comment-author: "github-actions[bot]"
body-includes: "<!-- generated-comment ecosystem -->"
body-includes: PR Check Results
- name: Create or update comment
if: steps.find-comment.outcome == 'success'

.github/workflows/pre-commit.yaml (vendored, new file, 36 lines)
View File

@@ -0,0 +1,36 @@
# Until Dependabot support is released https://github.com/dependabot/dependabot-core/issues/1524
name: Pre-commit update
on:
# every week on monday
schedule:
- cron: "0 0 * * 1"
workflow_dispatch:
permissions:
pull-requests: write
jobs:
upgrade:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Run autoupdate
run: |
pre-commit autoupdate
- name: Commit and push
run: |
git add ".pre-commit-config.yaml"
git commit -m "Upgrade pre-commit dependencies"
git push origin upgrade/pre-commit
- name: Open pull request
run: |
gh pr create --fill
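The update step itself is plain `pre-commit autoupdate`; a hedged local sketch (not part of the workflow above) for previewing the same bump before the scheduled job runs:

```shell
# Hedged sketch: preview the weekly update locally. Assumes pre-commit is not
# already installed; the workflow performs the equivalent steps on a CI runner.
pip install pre-commit
pre-commit autoupdate            # rewrites the `rev:` pins in .pre-commit-config.yaml
git diff .pre-commit-config.yaml # inspect the bumped hook versions before committing
```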

View File

@@ -48,6 +48,7 @@ jobs:
args: --out dist
- name: "Test sdist"
run: |
rustup default $(cat rust-toolchain)
pip install dist/${{ env.PACKAGE_NAME }}-*.tar.gz --force-reinstall
ruff --help
python -m ruff --help

View File

@@ -47,13 +47,10 @@ repos:
language: system
types: [rust]
pass_filenames: false # This makes it a lot faster
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.1.3
hooks:
- id: ruff-format
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
name: ruff
entry: cargo run --bin ruff -- check --no-cache --force-exclude --fix --exit-non-zero-on-fix
language: system
types_or: [python, pyi]
require_serial: true
exclude: |
@@ -62,6 +59,12 @@ repos:
crates/ruff_python_formatter/resources/.*
)$
# Black
- repo: https://github.com/psf/black
rev: 23.1.0
hooks:
- id: black
# Prettier
- repo: https://github.com/pre-commit/mirrors-prettier
rev: v3.0.0

View File

@@ -337,15 +337,16 @@ even patch releases may contain [non-backwards-compatible changes](https://semve
## Ecosystem CI
GitHub Actions will run your changes against a number of real-world projects from GitHub and
report on any linter or formatter differences. You can also run those checks locally via:
report on any diagnostic differences. You can also run those checks locally via:
```shell
pip install -e ./python/ruff-ecosystem
ruff-ecosystem check ruff "./target/debug/ruff"
ruff-ecosystem format ruff "./target/debug/ruff"
python scripts/check_ecosystem.py path/to/your/ruff path/to/older/ruff
```
See the [ruff-ecosystem package](https://github.com/astral-sh/ruff/tree/main/python/ruff-ecosystem) for more details.
You can also run the Ecosystem CI check in a Docker container across a larger set of projects by
downloading the [`known-github-tomls.json`](https://github.com/akx/ruff-usage-aggregate/blob/master/data/known-github-tomls.jsonl)
as `github_search.jsonl` and following the instructions in [scripts/Dockerfile.ecosystem](https://github.com/astral-sh/ruff/blob/main/scripts/Dockerfile.ecosystem).
Note that this check will take a while to run.
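The authoritative steps live in `scripts/Dockerfile.ecosystem`; as a rough, hedged sketch (the image tag and run arguments below are illustrative, not taken from those instructions):

```shell
# Download the project list under the file name the Docker-based check expects.
curl -L -o github_search.jsonl \
  https://raw.githubusercontent.com/akx/ruff-usage-aggregate/master/data/known-github-tomls.jsonl
# Build and run the ecosystem image; see scripts/Dockerfile.ecosystem for the
# exact build context and entrypoint.
docker build -f scripts/Dockerfile.ecosystem -t ruff-ecosystem .
docker run --rm ruff-ecosystem
```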
## Benchmarking and Profiling

LICENSE (25 lines changed)
View File

@@ -1269,31 +1269,6 @@ are:
SOFTWARE.
"""
- flake8-trio, licensed as follows:
"""
MIT License
Copyright (c) 2022 Zac Hatfield-Dodds
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
- Pyright, licensed as follows:
"""
MIT License

View File

@@ -314,7 +314,6 @@ quality tools, including:
- [flake8-super](https://pypi.org/project/flake8-super/)
- [flake8-tidy-imports](https://pypi.org/project/flake8-tidy-imports/)
- [flake8-todos](https://pypi.org/project/flake8-todos/)
- [flake8-trio](https://pypi.org/project/flake8-trio/)
- [flake8-type-checking](https://pypi.org/project/flake8-type-checking/)
- [flake8-use-pathlib](https://pypi.org/project/flake8-use-pathlib/)
- [flynt](https://pypi.org/project/flynt/) ([#2102](https://github.com/astral-sh/ruff/issues/2102))

View File

@@ -409,9 +409,6 @@ pub struct FormatCommand {
force_exclude: bool,
#[clap(long, overrides_with("force_exclude"), hide = true)]
no_force_exclude: bool,
/// Set the line-length.
#[arg(long, help_heading = "Format configuration")]
pub line_length: Option<LineLength>,
/// Ignore all configuration files.
#[arg(long, conflicts_with = "config", help_heading = "Miscellaneous")]
pub isolated: bool,
@@ -555,7 +552,6 @@ impl FormatCommand {
stdin_filename: self.stdin_filename,
},
CliOverrides {
line_length: self.line_length,
respect_gitignore: resolve_bool_arg(
self.respect_gitignore,
self.no_respect_gitignore,

View File

@@ -1,18 +0,0 @@
import trio
async def foo():
    with trio.fail_after():
        ...
async def foo():
    with trio.fail_at():
        await ...
async def foo():
    with trio.move_on_after():
        ...
async def foo():
    with trio.move_at():
        await ...

View File

@@ -1,106 +0,0 @@
def func():
    import numpy as np
    np.add_docstring
    np.add_newdoc
    np.add_newdoc_ufunc
    np.asfarray([1,2,3])
    np.byte_bounds(np.array([1,2,3]))
    np.cast
    np.cfloat(12+34j)
    np.clongfloat(12+34j)
    np.compat
    np.complex_(12+34j)
    np.DataSource
    np.deprecate
    np.deprecate_with_doc
    np.disp(10)
    np.fastCopyAndTranspose
    np.find_common_type
    np.get_array_wrap
    np.float_
    np.geterrobj
    np.Inf
    np.Infinity
    np.infty
    np.issctype
    np.issubclass_(np.int32, np.integer)
    np.issubsctype
    np.mat
    np.maximum_sctype
    np.NaN
    np.nbytes[np.int64]
    np.NINF
    np.NZERO
    np.longcomplex(12+34j)
    np.longfloat(12+34j)
    np.lookfor
    np.obj2sctype(int)
    np.PINF
    np.PZERO
    np.recfromcsv
    np.recfromtxt
    np.round_(12.34)
    np.safe_eval
    np.sctype2char
    np.sctypes
    np.seterrobj
    np.set_numeric_ops
    np.set_string_function
    np.singlecomplex(12+1j)
    np.string_("asdf")
    np.source
    np.tracemalloc_domain
    np.unicode_("asf")
    np.who()

View File

@@ -57,9 +57,3 @@ r'\%03o' % (ord(c),)
'(%r, %r, %r, %r)' % (hostname, address, username, '$PASSWORD')
'%r' % ({'server_school_roles': server_school_roles, 'is_school_multiserver_domain': is_school_multiserver_domain}, )
"%d" % (1 if x > 0 else 2)
# Special cases for %c allowing single character strings
# https://github.com/astral-sh/ruff/issues/8406
"%c" % ("x",)
"%c" % "x"
"%c" % "œ"

View File

@@ -23,11 +23,3 @@ MyType = typing.NamedTuple("MyType", a=int, b=tuple[str, ...])
MyType = typing.NamedTuple("MyType", [("a", int)], [("b", str)])
MyType = typing.NamedTuple("MyType", [("a", int)], b=str)
MyType = typing.NamedTuple(typename="MyType", a=int, b=str)
# Regression test for: https://github.com/astral-sh/ruff/issues/8402#issuecomment-1788787357
S3File = NamedTuple(
"S3File",
[
("dataHPK",* str),
],
)

View File

@@ -43,6 +43,3 @@ if 1 is {1}:
if "a" == "a":
pass
if 1 in {*[1]}:
pass

View File

@@ -45,11 +45,3 @@ x = f"string { # And here's a comment with an unusual parenthesis:
# And here's a comment with a greek alpha:
foo # And here's a comment with an unusual punctuation mark:
}"
# At runtime the attribute will be stored as Greek small letter mu instead of
# micro sign because of PEP 3131's NFKC normalization
class Labware:
    µL = 1.5
assert getattr(Labware(), "µL") == 1.5

View File

@@ -158,9 +158,6 @@ pub(crate) fn expression(expr: &Expr, checker: &mut Checker) {
if checker.enabled(Rule::NumpyDeprecatedFunction) {
numpy::rules::deprecated_function(checker, expr);
}
if checker.enabled(Rule::Numpy2Deprecation) {
numpy::rules::numpy_2_0_deprecation(checker, expr);
}
if checker.enabled(Rule::CollectionsNamedTuple) {
flake8_pyi::rules::collections_named_tuple(checker, expr);
}
@@ -317,9 +314,6 @@ pub(crate) fn expression(expr: &Expr, checker: &mut Checker) {
if checker.enabled(Rule::NumpyDeprecatedFunction) {
numpy::rules::deprecated_function(checker, expr);
}
if checker.enabled(Rule::Numpy2Deprecation) {
numpy::rules::numpy_2_0_deprecation(checker, expr);
}
if checker.enabled(Rule::DeprecatedMockImport) {
pyupgrade::rules::deprecated_mock_attribute(checker, expr);
}

View File

@@ -12,8 +12,8 @@ use crate::rules::{
airflow, flake8_bandit, flake8_boolean_trap, flake8_bugbear, flake8_builtins, flake8_debugger,
flake8_django, flake8_errmsg, flake8_import_conventions, flake8_pie, flake8_pyi,
flake8_pytest_style, flake8_raise, flake8_return, flake8_simplify, flake8_slots,
flake8_tidy_imports, flake8_trio, flake8_type_checking, mccabe, pandas_vet, pep8_naming,
perflint, pycodestyle, pyflakes, pygrep_hooks, pylint, pyupgrade, refurb, ruff, tryceratops,
flake8_tidy_imports, flake8_type_checking, mccabe, pandas_vet, pep8_naming, perflint,
pycodestyle, pyflakes, pygrep_hooks, pylint, pyupgrade, refurb, ruff, tryceratops,
};
use crate::settings::types::PythonVersion;
@@ -1195,9 +1195,6 @@ pub(crate) fn statement(stmt: &Stmt, checker: &mut Checker) {
if checker.enabled(Rule::UselessWithLock) {
pylint::rules::useless_with_lock(checker, with_stmt);
}
if checker.enabled(Rule::TrioTimeoutWithoutAwait) {
flake8_trio::rules::timeout_without_await(checker, with_stmt, items);
}
}
Stmt::While(ast::StmtWhile { body, orelse, .. }) => {
if checker.enabled(Rule::FunctionUsesLoopVariable) {

View File

@@ -290,9 +290,6 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Flake8Async, "101") => (RuleGroup::Stable, rules::flake8_async::rules::OpenSleepOrSubprocessInAsyncFunction),
(Flake8Async, "102") => (RuleGroup::Stable, rules::flake8_async::rules::BlockingOsCallInAsyncFunction),
// flake8-trio
(Flake8Trio, "100") => (RuleGroup::Preview, rules::flake8_trio::rules::TrioTimeoutWithoutAwait),
// flake8-builtins
(Flake8Builtins, "001") => (RuleGroup::Stable, rules::flake8_builtins::rules::BuiltinVariableShadowing),
(Flake8Builtins, "002") => (RuleGroup::Stable, rules::flake8_builtins::rules::BuiltinArgumentShadowing),
@@ -859,7 +856,6 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Numpy, "001") => (RuleGroup::Stable, rules::numpy::rules::NumpyDeprecatedTypeAlias),
(Numpy, "002") => (RuleGroup::Stable, rules::numpy::rules::NumpyLegacyRandom),
(Numpy, "003") => (RuleGroup::Stable, rules::numpy::rules::NumpyDeprecatedFunction),
(Numpy, "201") => (RuleGroup::Preview, rules::numpy::rules::Numpy2Deprecation),
// ruff
(Ruff, "001") => (RuleGroup::Stable, rules::ruff::rules::AmbiguousUnicodeCharacterString),

View File

@@ -64,9 +64,6 @@ pub enum Linter {
/// [flake8-async](https://pypi.org/project/flake8-async/)
#[prefix = "ASYNC"]
Flake8Async,
/// [flake8-trio](https://pypi.org/project/flake8-trio/)
#[prefix = "TRIO"]
Flake8Trio,
/// [flake8-bandit](https://pypi.org/project/flake8-bandit/)
#[prefix = "S"]
Flake8Bandit,

View File

@@ -1,26 +0,0 @@
//! Rules from [flake8-trio](https://pypi.org/project/flake8-trio/).
pub(crate) mod rules;
#[cfg(test)]
mod tests {
use std::path::Path;
use anyhow::Result;
use test_case::test_case;
use crate::assert_messages;
use crate::registry::Rule;
use crate::settings::LinterSettings;
use crate::test::test_path;
#[test_case(Rule::TrioTimeoutWithoutAwait, Path::new("TRIO100.py"))]
fn rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_trio").join(path).as_path(),
&LinterSettings::for_rule(rule_code),
)?;
assert_messages!(snapshot, diagnostics);
Ok(())
}
}

View File

@@ -1,3 +0,0 @@
pub(crate) use timeout_without_await::*;
mod timeout_without_await;

View File

@@ -1,125 +0,0 @@
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::call_path::CallPath;
use ruff_python_ast::visitor::{walk_expr, walk_stmt, Visitor};
use ruff_python_ast::{Expr, ExprAwait, Stmt, StmtWith, WithItem};
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for trio functions that should contain await but don't.
///
/// ## Why is this bad?
/// Some trio context managers, such as `trio.fail_after` and
/// `trio.move_on_after`, have no effect unless they contain an `await`
/// statement. The use of such functions without an `await` statement is
/// likely a mistake.
///
/// ## Example
/// ```python
/// async def func():
///     with trio.move_on_after(2):
///         do_something()
/// ```
///
/// Use instead:
/// ```python
/// async def func():
///     with trio.move_on_after(2):
///         do_something()
///         await awaitable()
/// ```
#[violation]
pub struct TrioTimeoutWithoutAwait {
method_name: MethodName,
}
impl Violation for TrioTimeoutWithoutAwait {
#[derive_message_formats]
fn message(&self) -> String {
let Self { method_name } = self;
format!("A `with {method_name}(...):` context does not contain any `await` statements. This makes it pointless, as the timeout can only be triggered by a checkpoint.")
}
}
/// TRIO100
pub(crate) fn timeout_without_await(
checker: &mut Checker,
with_stmt: &StmtWith,
with_items: &[WithItem],
) {
let Some(method_name) = with_items.iter().find_map(|item| {
let call = item.context_expr.as_call_expr()?;
let call_path = checker.semantic().resolve_call_path(call.func.as_ref())?;
MethodName::try_from(&call_path)
}) else {
return;
};
let mut visitor = AwaitVisitor::default();
visitor.visit_body(&with_stmt.body);
if visitor.seen_await {
return;
}
checker.diagnostics.push(Diagnostic::new(
TrioTimeoutWithoutAwait { method_name },
with_stmt.range,
));
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum MethodName {
MoveOnAfter,
MoveOnAt,
FailAfter,
FailAt,
CancelScope,
}
impl MethodName {
fn try_from(call_path: &CallPath<'_>) -> Option<Self> {
match call_path.as_slice() {
["trio", "move_on_after"] => Some(Self::MoveOnAfter),
["trio", "move_on_at"] => Some(Self::MoveOnAt),
["trio", "fail_after"] => Some(Self::FailAfter),
["trio", "fail_at"] => Some(Self::FailAt),
["trio", "CancelScope"] => Some(Self::CancelScope),
_ => None,
}
}
}
impl std::fmt::Display for MethodName {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
MethodName::MoveOnAfter => write!(f, "trio.move_on_after"),
MethodName::MoveOnAt => write!(f, "trio.move_on_at"),
MethodName::FailAfter => write!(f, "trio.fail_after"),
MethodName::FailAt => write!(f, "trio.fail_at"),
MethodName::CancelScope => write!(f, "trio.CancelScope"),
}
}
}
#[derive(Debug, Default)]
struct AwaitVisitor {
seen_await: bool,
}
impl Visitor<'_> for AwaitVisitor {
fn visit_stmt(&mut self, stmt: &Stmt) {
match stmt {
Stmt::FunctionDef(_) | Stmt::ClassDef(_) => (),
_ => walk_stmt(self, stmt),
}
}
fn visit_expr(&mut self, expr: &Expr) {
if let Expr::Await(ExprAwait { .. }) = expr {
self.seen_await = true;
} else {
walk_expr(self, expr);
}
}
}
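Because `AwaitVisitor` deliberately skips nested `FunctionDef` and `ClassDef` bodies, an `await` inside an inner function does not count as a checkpoint for the outer `with`. A hypothetical snippet (not taken from the fixtures) that the removed rule would still flag:

```python
import trio


async def outer():
    with trio.fail_after(1):  # TRIO100: no await at this level of the body
        async def inner():
            await trio.sleep(1)  # only awaited inside the nested function
```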

View File

@@ -1,26 +0,0 @@
---
source: crates/ruff_linter/src/rules/flake8_trio/mod.rs
---
TRIO100.py:5:5: TRIO100 A `with trio.fail_after(...):` context does not contain any `await` statements. This makes it pointless, as the timeout can only be triggered by a checkpoint.
|
4 | async def foo():
5 | with trio.fail_after():
| _____^
6 | | ...
| |___________^ TRIO100
7 |
8 | async def foo():
|
TRIO100.py:13:5: TRIO100 A `with trio.move_on_after(...):` context does not contain any `await` statements. This makes it pointless, as the timeout can only be triggered by a checkpoint.
|
12 | async def foo():
13 | with trio.move_on_after():
| _____^
14 | | ...
| |___________^ TRIO100
15 |
16 | async def foo():
|

View File

@@ -37,7 +37,6 @@ pub mod flake8_simplify;
pub mod flake8_slots;
pub mod flake8_tidy_imports;
pub mod flake8_todos;
pub mod flake8_trio;
pub mod flake8_type_checking;
pub mod flake8_unused_arguments;
pub mod flake8_use_pathlib;

View File

@@ -16,7 +16,6 @@ mod tests {
#[test_case(Rule::NumpyDeprecatedTypeAlias, Path::new("NPY001.py"))]
#[test_case(Rule::NumpyLegacyRandom, Path::new("NPY002.py"))]
#[test_case(Rule::NumpyDeprecatedFunction, Path::new("NPY003.py"))]
#[test_case(Rule::Numpy2Deprecation, Path::new("NPY201.py"))]
fn rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.as_ref(), path.to_string_lossy());
let diagnostics = test_path(

View File

@@ -1,9 +1,7 @@
pub(crate) use deprecated_function::*;
pub(crate) use deprecated_type_alias::*;
pub(crate) use legacy_random::*;
pub(crate) use numpy_2_0_deprecation::*;
mod deprecated_function;
mod deprecated_type_alias;
mod legacy_random;
mod numpy_2_0_deprecation;

View File

@@ -1,476 +0,0 @@
use ruff_diagnostics::{Diagnostic, Edit, Fix, FixAvailability, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::Expr;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::importer::ImportRequest;
/// ## What it does
/// Checks for uses of NumPy functions and constants that were removed from
/// the main namespace in NumPy 2.0.
///
/// ## Why is this bad?
/// NumPy 2.0 includes an overhaul of NumPy's Python API, intended to remove
/// redundant aliases and routines, and establish unambiguous mechanisms for
/// accessing constants, dtypes, and functions.
///
/// As part of this overhaul, a variety of deprecated NumPy functions and
/// constants were removed from the main namespace.
///
/// The majority of these functions and constants can be automatically replaced
/// by other members of the NumPy API, even prior to NumPy 2.0, or by
/// equivalents from the Python standard library. This rule flags all uses of
/// removed members, along with automatic fixes for any backwards-compatible
/// replacements.
///
/// ## Examples
/// ```python
/// import numpy as np
///
/// arr1 = [np.Infinity, np.NaN, np.nan, np.PINF, np.inf]
/// arr2 = [np.float_(1.5), np.float64(5.1)]
/// np.round_(arr2)
/// ```
///
/// Use instead:
/// ```python
/// import numpy as np
///
/// arr1 = [np.inf, np.nan, np.nan, np.inf, np.inf]
/// arr2 = [np.float64(1.5), np.float64(5.1)]
/// np.round(arr2)
/// ```
#[violation]
pub struct Numpy2Deprecation {
existing: String,
migration_guide: Option<String>,
}
impl Violation for Numpy2Deprecation {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
let Numpy2Deprecation {
existing,
migration_guide,
} = self;
match migration_guide {
Some(migration_guide) => {
format!("`np.{existing}` will be removed in NumPy 2.0. {migration_guide}",)
}
None => format!("`np.{existing}` will be removed without replacement in NumPy 2.0."),
}
}
fn fix_title(&self) -> Option<String> {
let Numpy2Deprecation {
existing: _,
migration_guide,
} = self;
migration_guide.clone()
}
}
#[derive(Debug)]
struct Replacement<'a> {
existing: &'a str,
details: Details<'a>,
}
#[derive(Debug)]
enum Details<'a> {
/// The deprecated member can be replaced by another member in the NumPy API.
AutoImport { path: &'a str, name: &'a str },
/// The deprecated member can be replaced by a member of the Python standard library.
AutoPurePython { python_expr: &'a str },
/// The deprecated member can be replaced by a manual migration.
Manual { guideline: Option<&'a str> },
}
impl Details<'_> {
fn guideline(&self) -> Option<String> {
match self {
Details::AutoImport { path, name } => Some(format!("Use `{path}.{name}` instead.")),
Details::AutoPurePython { python_expr } => {
Some(format!("Use `{python_expr}` instead."))
}
Details::Manual { guideline } => guideline.map(ToString::to_string),
}
}
}
/// NPY201
pub(crate) fn numpy_2_0_deprecation(checker: &mut Checker, expr: &Expr) {
let maybe_replacement = checker
.semantic()
.resolve_call_path(expr)
.and_then(|call_path| match call_path.as_slice() {
// NumPy's main namespace np.* members removed in 2.0
["numpy", "add_docstring"] => Some(Replacement {
existing: "add_docstring",
details: Details::AutoImport {
path: "numpy.lib",
name: "add_docstring",
},
}),
["numpy", "add_newdoc"] => Some(Replacement {
existing: "add_newdoc",
details: Details::AutoImport {
path: "numpy.lib",
name: "add_newdoc",
},
}),
["numpy", "add_newdoc_ufunc"] => Some(Replacement {
existing: "add_newdoc_ufunc",
details: Details::Manual {
guideline: Some("`add_newdoc_ufunc` is an internal function."),
},
}),
["numpy", "asfarray"] => Some(Replacement {
existing: "asfarray",
details: Details::Manual {
guideline: Some("Use `np.asarray` with a `float` dtype instead."),
},
}),
["numpy", "byte_bounds"] => Some(Replacement {
existing: "byte_bounds",
details: Details::AutoImport {
path: "numpy.lib.array_utils",
name: "byte_bounds",
},
}),
["numpy", "cast"] => Some(Replacement {
existing: "cast",
details: Details::Manual {
guideline: Some("Use `np.asarray(arr, dtype=dtype)` instead."),
},
}),
["numpy", "cfloat"] => Some(Replacement {
existing: "cfloat",
details: Details::AutoImport {
path: "numpy",
name: "complex128",
},
}),
["numpy", "clongfloat"] => Some(Replacement {
existing: "clongfloat",
details: Details::AutoImport {
path: "numpy",
name: "clongdouble",
},
}),
["numpy", "compat"] => Some(Replacement {
existing: "compat",
details: Details::Manual {
guideline: Some("Python 2 is no longer supported."),
},
}),
["numpy", "complex_"] => Some(Replacement {
existing: "complex_",
details: Details::AutoImport {
path: "numpy",
name: "complex128",
},
}),
["numpy", "DataSource"] => Some(Replacement {
existing: "DataSource",
details: Details::AutoImport {
path: "numpy.lib.npyio",
name: "DataSource",
},
}),
["numpy", "deprecate"] => Some(Replacement {
existing: "deprecate",
details: Details::Manual {
guideline: Some("Emit `DeprecationWarning` with `warnings.warn` directly, or use `typing.deprecated`."),
},
}),
["numpy", "deprecate_with_doc"] => Some(Replacement {
existing: "deprecate_with_doc",
details: Details::Manual {
guideline: Some("Emit `DeprecationWarning` with `warnings.warn` directly, or use `typing.deprecated`."),
},
}),
["numpy", "disp"] => Some(Replacement {
existing: "disp",
details: Details::Manual {
guideline: Some("Use a dedicated print function instead."),
},
}),
["numpy", "fastCopyAndTranspose"] => Some(Replacement {
existing: "fastCopyAndTranspose",
details: Details::Manual {
guideline: Some("Use `arr.T.copy()` instead."),
},
}),
["numpy", "find_common_type"] => Some(Replacement {
existing: "find_common_type",
details: Details::Manual {
guideline: Some("Use `numpy.promote_types` or `numpy.result_type` instead. To achieve semantics for the `scalar_types` argument, use `numpy.result_type` and pass the Python values `0`, `0.0`, or `0j`."),
},
}),
["numpy", "get_array_wrap"] => Some(Replacement {
existing: "get_array_wrap",
details: Details::Manual {
guideline: None,
},
}),
["numpy", "float_"] => Some(Replacement {
existing: "float_",
details: Details::AutoImport {
path: "numpy",
name: "float64",
},
}),
["numpy", "geterrobj"] => Some(Replacement {
existing: "geterrobj",
details: Details::Manual {
guideline: Some("Use the `np.errstate` context manager instead."),
},
}),
["numpy", "INF"] => Some(Replacement {
existing: "INF",
details: Details::AutoImport {
path: "numpy",
name: "inf",
},
}),
["numpy", "Inf"] => Some(Replacement {
existing: "Inf",
details: Details::AutoImport {
path: "numpy",
name: "inf",
},
}),
["numpy", "Infinity"] => Some(Replacement {
existing: "Infinity",
details: Details::AutoImport {
path: "numpy",
name: "inf",
},
}),
["numpy", "infty"] => Some(Replacement {
existing: "infty",
details: Details::AutoImport {
path: "numpy",
name: "inf",
},
}),
["numpy", "issctype"] => Some(Replacement {
existing: "issctype",
details: Details::Manual {
guideline: None,
},
}),
["numpy", "issubclass_"] => Some(Replacement {
existing: "issubclass_",
details: Details::AutoPurePython {
python_expr: "issubclass",
},
}),
["numpy", "issubsctype"] => Some(Replacement {
existing: "issubsctype",
details: Details::AutoImport {
path: "numpy",
name: "issubdtype",
},
}),
["numpy", "mat"] => Some(Replacement {
existing: "mat",
details: Details::AutoImport {
path: "numpy",
name: "asmatrix",
},
}),
["numpy", "maximum_sctype"] => Some(Replacement {
existing: "maximum_sctype",
details: Details::Manual {
guideline: None,
},
}),
["numpy", "NaN"] => Some(Replacement {
existing: "NaN",
details: Details::AutoImport {
path: "numpy",
name: "nan",
},
}),
["numpy", "nbytes"] => Some(Replacement {
existing: "nbytes",
details: Details::Manual {
guideline: Some("Use `np.dtype(<dtype>).itemsize` instead."),
},
}),
["numpy", "NINF"] => Some(Replacement {
existing: "NINF",
details: Details::AutoPurePython {
python_expr: "-np.inf",
},
}),
["numpy", "NZERO"] => Some(Replacement {
existing: "NZERO",
details: Details::AutoPurePython {
python_expr: "-0.0",
},
}),
["numpy", "longcomplex"] => Some(Replacement {
existing: "longcomplex",
details: Details::AutoImport {
path: "numpy",
name: "clongdouble",
},
}),
["numpy", "longfloat"] => Some(Replacement {
existing: "longfloat",
details: Details::AutoImport {
path: "numpy",
name: "longdouble",
},
}),
["numpy", "lookfor"] => Some(Replacement {
existing: "lookfor",
details: Details::Manual {
guideline: Some("Search NumPys documentation directly."),
},
}),
["numpy", "obj2sctype"] => Some(Replacement {
existing: "obj2sctype",
details: Details::Manual {
guideline: None,
},
}),
["numpy", "PINF"] => Some(Replacement {
existing: "PINF",
details: Details::AutoImport {
path: "numpy",
name: "inf",
},
}),
["numpy", "PZERO"] => Some(Replacement {
existing: "PZERO",
details: Details::AutoPurePython { python_expr: "0.0" },
}),
["numpy", "recfromcsv"] => Some(Replacement {
existing: "recfromcsv",
details: Details::Manual {
guideline: Some("Use `np.genfromtxt` with comma delimiter instead."),
},
}),
["numpy", "recfromtxt"] => Some(Replacement {
existing: "recfromtxt",
details: Details::Manual {
guideline: Some("Use `np.genfromtxt` instead."),
},
}),
["numpy", "round_"] => Some(Replacement {
existing: "round_",
details: Details::AutoImport {
path: "numpy",
name: "round",
},
}),
["numpy", "safe_eval"] => Some(Replacement {
existing: "safe_eval",
details: Details::AutoImport {
path: "ast",
name: "literal_eval",
},
}),
["numpy", "sctype2char"] => Some(Replacement {
existing: "sctype2char",
details: Details::Manual {
guideline: None,
},
}),
["numpy", "sctypes"] => Some(Replacement {
existing: "sctypes",
details: Details::Manual {
guideline: None,
},
}),
["numpy", "seterrobj"] => Some(Replacement {
existing: "seterrobj",
details: Details::Manual {
guideline: Some("Use the `np.errstate` context manager instead."),
},
}),
["numpy", "set_string_function"] => Some(Replacement {
existing: "set_string_function",
details: Details::Manual {
guideline: Some("Use `np.set_printoptions` for custom printing of NumPy objects."),
},
}),
["numpy", "singlecomplex"] => Some(Replacement {
existing: "singlecomplex",
details: Details::AutoImport {
path: "numpy",
name: "complex64",
},
}),
["numpy", "string_"] => Some(Replacement {
existing: "string_",
details: Details::AutoImport {
path: "numpy",
name: "bytes_",
},
}),
["numpy", "source"] => Some(Replacement {
existing: "source",
details: Details::AutoImport {
path: "inspect",
name: "getsource",
},
}),
["numpy", "tracemalloc_domain"] => Some(Replacement {
existing: "tracemalloc_domain",
details: Details::AutoImport {
path: "numpy.lib",
name: "tracemalloc_domain",
},
}),
["numpy", "unicode_"] => Some(Replacement {
existing: "unicode_",
details: Details::AutoImport {
path: "numpy",
name: "str_",
},
}),
["numpy", "who"] => Some(Replacement {
existing: "who",
details: Details::Manual {
guideline: Some("Use an IDE variable explorer or `locals()` instead."),
},
}),
_ => None,
});
if let Some(replacement) = maybe_replacement {
let mut diagnostic = Diagnostic::new(
Numpy2Deprecation {
existing: replacement.existing.to_string(),
migration_guide: replacement.details.guideline(),
},
expr.range(),
);
match replacement.details {
Details::AutoImport { path, name } => {
diagnostic.try_set_fix(|| {
let (import_edit, binding) = checker.importer().get_or_import_symbol(
&ImportRequest::import_from(path, name),
expr.start(),
checker.semantic(),
)?;
let replacement_edit = Edit::range_replacement(binding, expr.range());
Ok(Fix::safe_edits(import_edit, [replacement_edit]))
});
}
Details::AutoPurePython { python_expr } => diagnostic.set_fix(Fix::safe_edit(
Edit::range_replacement(python_expr.to_string(), expr.range()),
)),
Details::Manual { guideline: _ } => {}
};
checker.diagnostics.push(diagnostic);
}
}
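The two automatic fix flavors differ only in whether an import edit is generated; a hedged before/after sketch in Python, mirroring what the snapshot in the next file shows at length:

```python
import numpy as np

arr = np.array([1, 2, 3])

# Details::AutoImport: the replacement lives in another module, so the fix adds
# an import edit (e.g. `from numpy.lib.array_utils import byte_bounds`) and then
# rewrites the expression to `byte_bounds(arr)`.
np.byte_bounds(arr)

# Details::AutoPurePython: the replacement is a plain expression, so no import
# is needed; `np.NINF` simply becomes `-np.inf`.
np.NINF
```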

View File

@@ -1,865 +0,0 @@
---
source: crates/ruff_linter/src/rules/numpy/mod.rs
---
NPY201.py:4:5: NPY201 [*] `np.add_docstring` will be removed in NumPy 2.0. Use `numpy.lib.add_docstring` instead.
|
2 | import numpy as np
3 |
4 | np.add_docstring
| ^^^^^^^^^^^^^^^^ NPY201
5 |
6 | np.add_newdoc
|
= help: Use `numpy.lib.add_docstring` instead.
Fix
1 |+from numpy.lib import add_docstring
1 2 | def func():
2 3 | import numpy as np
3 4 |
4 |- np.add_docstring
5 |+ add_docstring
5 6 |
6 7 | np.add_newdoc
7 8 |
NPY201.py:6:5: NPY201 [*] `np.add_newdoc` will be removed in NumPy 2.0. Use `numpy.lib.add_newdoc` instead.
|
4 | np.add_docstring
5 |
6 | np.add_newdoc
| ^^^^^^^^^^^^^ NPY201
7 |
8 | np.add_newdoc_ufunc
|
= help: Use `numpy.lib.add_newdoc` instead.
Fix
1 |+from numpy.lib import add_newdoc
1 2 | def func():
2 3 | import numpy as np
3 4 |
4 5 | np.add_docstring
5 6 |
6 |- np.add_newdoc
7 |+ add_newdoc
7 8 |
8 9 | np.add_newdoc_ufunc
9 10 |
NPY201.py:8:5: NPY201 `np.add_newdoc_ufunc` will be removed in NumPy 2.0. `add_newdoc_ufunc` is an internal function.
|
6 | np.add_newdoc
7 |
8 | np.add_newdoc_ufunc
| ^^^^^^^^^^^^^^^^^^^ NPY201
9 |
10 | np.asfarray([1,2,3])
|
= help: `add_newdoc_ufunc` is an internal function.
NPY201.py:10:5: NPY201 `np.asfarray` will be removed in NumPy 2.0. Use `np.asarray` with a `float` dtype instead.
|
8 | np.add_newdoc_ufunc
9 |
10 | np.asfarray([1,2,3])
| ^^^^^^^^^^^ NPY201
11 |
12 | np.byte_bounds(np.array([1,2,3]))
|
= help: Use `np.asarray` with a `float` dtype instead.
NPY201.py:12:5: NPY201 [*] `np.byte_bounds` will be removed in NumPy 2.0. Use `numpy.lib.array_utils.byte_bounds` instead.
|
10 | np.asfarray([1,2,3])
11 |
12 | np.byte_bounds(np.array([1,2,3]))
| ^^^^^^^^^^^^^^ NPY201
13 |
14 | np.cast
|
= help: Use `numpy.lib.array_utils.byte_bounds` instead.
Fix
1 |+from numpy.lib.array_utils import byte_bounds
1 2 | def func():
2 3 | import numpy as np
3 4 |
--------------------------------------------------------------------------------
9 10 |
10 11 | np.asfarray([1,2,3])
11 12 |
12 |- np.byte_bounds(np.array([1,2,3]))
13 |+ byte_bounds(np.array([1,2,3]))
13 14 |
14 15 | np.cast
15 16 |
NPY201.py:14:5: NPY201 `np.cast` will be removed in NumPy 2.0. Use `np.asarray(arr, dtype=dtype)` instead.
|
12 | np.byte_bounds(np.array([1,2,3]))
13 |
14 | np.cast
| ^^^^^^^ NPY201
15 |
16 | np.cfloat(12+34j)
|
= help: Use `np.asarray(arr, dtype=dtype)` instead.
NPY201.py:16:5: NPY201 [*] `np.cfloat` will be removed in NumPy 2.0. Use `numpy.complex128` instead.
|
14 | np.cast
15 |
16 | np.cfloat(12+34j)
| ^^^^^^^^^ NPY201
17 |
18 | np.clongfloat(12+34j)
|
= help: Use `numpy.complex128` instead.
Fix
13 13 |
14 14 | np.cast
15 15 |
16 |- np.cfloat(12+34j)
16 |+ np.complex128(12+34j)
17 17 |
18 18 | np.clongfloat(12+34j)
19 19 |
NPY201.py:18:5: NPY201 [*] `np.clongfloat` will be removed in NumPy 2.0. Use `numpy.clongdouble` instead.
|
16 | np.cfloat(12+34j)
17 |
18 | np.clongfloat(12+34j)
| ^^^^^^^^^^^^^ NPY201
19 |
20 | np.compat
|
= help: Use `numpy.clongdouble` instead.
Fix
15 15 |
16 16 | np.cfloat(12+34j)
17 17 |
18 |- np.clongfloat(12+34j)
18 |+ np.clongdouble(12+34j)
19 19 |
20 20 | np.compat
21 21 |
NPY201.py:20:5: NPY201 `np.compat` will be removed in NumPy 2.0. Python 2 is no longer supported.
|
18 | np.clongfloat(12+34j)
19 |
20 | np.compat
| ^^^^^^^^^ NPY201
21 |
22 | np.complex_(12+34j)
|
= help: Python 2 is no longer supported.
NPY201.py:22:5: NPY201 [*] `np.complex_` will be removed in NumPy 2.0. Use `numpy.complex128` instead.
|
20 | np.compat
21 |
22 | np.complex_(12+34j)
| ^^^^^^^^^^^ NPY201
23 |
24 | np.DataSource
|
= help: Use `numpy.complex128` instead.
Fix
19 19 |
20 20 | np.compat
21 21 |
22 |- np.complex_(12+34j)
22 |+ np.complex128(12+34j)
23 23 |
24 24 | np.DataSource
25 25 |
NPY201.py:24:5: NPY201 [*] `np.DataSource` will be removed in NumPy 2.0. Use `numpy.lib.npyio.DataSource` instead.
|
22 | np.complex_(12+34j)
23 |
24 | np.DataSource
| ^^^^^^^^^^^^^ NPY201
25 |
26 | np.deprecate
|
= help: Use `numpy.lib.npyio.DataSource` instead.
Fix
1 |+from numpy.lib.npyio import DataSource
1 2 | def func():
2 3 | import numpy as np
3 4 |
--------------------------------------------------------------------------------
21 22 |
22 23 | np.complex_(12+34j)
23 24 |
24 |- np.DataSource
25 |+ DataSource
25 26 |
26 27 | np.deprecate
27 28 |
NPY201.py:26:5: NPY201 `np.deprecate` will be removed in NumPy 2.0. Emit `DeprecationWarning` with `warnings.warn` directly, or use `typing.deprecated`.
|
24 | np.DataSource
25 |
26 | np.deprecate
| ^^^^^^^^^^^^ NPY201
27 |
28 | np.deprecate_with_doc
|
= help: Emit `DeprecationWarning` with `warnings.warn` directly, or use `typing.deprecated`.
NPY201.py:28:5: NPY201 `np.deprecate_with_doc` will be removed in NumPy 2.0. Emit `DeprecationWarning` with `warnings.warn` directly, or use `typing.deprecated`.
|
26 | np.deprecate
27 |
28 | np.deprecate_with_doc
| ^^^^^^^^^^^^^^^^^^^^^ NPY201
29 |
30 | np.disp(10)
|
= help: Emit `DeprecationWarning` with `warnings.warn` directly, or use `typing.deprecated`.
NPY201.py:30:5: NPY201 `np.disp` will be removed in NumPy 2.0. Use a dedicated print function instead.
|
28 | np.deprecate_with_doc
29 |
30 | np.disp(10)
| ^^^^^^^ NPY201
31 |
32 | np.fastCopyAndTranspose
|
= help: Use a dedicated print function instead.
NPY201.py:32:5: NPY201 `np.fastCopyAndTranspose` will be removed in NumPy 2.0. Use `arr.T.copy()` instead.
|
30 | np.disp(10)
31 |
32 | np.fastCopyAndTranspose
| ^^^^^^^^^^^^^^^^^^^^^^^ NPY201
33 |
34 | np.find_common_type
|
= help: Use `arr.T.copy()` instead.
NPY201.py:34:5: NPY201 `np.find_common_type` will be removed in NumPy 2.0. Use `numpy.promote_types` or `numpy.result_type` instead. To achieve semantics for the `scalar_types` argument, use `numpy.result_type` and pass the Python values `0`, `0.0`, or `0j`.
|
32 | np.fastCopyAndTranspose
33 |
34 | np.find_common_type
| ^^^^^^^^^^^^^^^^^^^ NPY201
35 |
36 | np.get_array_wrap
|
= help: Use `numpy.promote_types` or `numpy.result_type` instead. To achieve semantics for the `scalar_types` argument, use `numpy.result_type` and pass the Python values `0`, `0.0`, or `0j`.
NPY201.py:36:5: NPY201 `np.get_array_wrap` will be removed without replacement in NumPy 2.0.
|
34 | np.find_common_type
35 |
36 | np.get_array_wrap
| ^^^^^^^^^^^^^^^^^ NPY201
37 |
38 | np.float_
|
NPY201.py:38:5: NPY201 [*] `np.float_` will be removed in NumPy 2.0. Use `numpy.float64` instead.
|
36 | np.get_array_wrap
37 |
38 | np.float_
| ^^^^^^^^^ NPY201
39 |
40 | np.geterrobj
|
= help: Use `numpy.float64` instead.
Fix
35 35 |
36 36 | np.get_array_wrap
37 37 |
38 |- np.float_
38 |+ np.float64
39 39 |
40 40 | np.geterrobj
41 41 |
NPY201.py:40:5: NPY201 `np.geterrobj` will be removed in NumPy 2.0. Use the `np.errstate` context manager instead.
|
38 | np.float_
39 |
40 | np.geterrobj
| ^^^^^^^^^^^^ NPY201
41 |
42 | np.Inf
|
= help: Use the `np.errstate` context manager instead.
NPY201.py:42:5: NPY201 [*] `np.Inf` will be removed in NumPy 2.0. Use `numpy.inf` instead.
|
40 | np.geterrobj
41 |
42 | np.Inf
| ^^^^^^ NPY201
43 |
44 | np.Infinity
|
= help: Use `numpy.inf` instead.
Fix
39 39 |
40 40 | np.geterrobj
41 41 |
42 |- np.Inf
42 |+ np.inf
43 43 |
44 44 | np.Infinity
45 45 |
NPY201.py:44:5: NPY201 [*] `np.Infinity` will be removed in NumPy 2.0. Use `numpy.inf` instead.
|
42 | np.Inf
43 |
44 | np.Infinity
| ^^^^^^^^^^^ NPY201
45 |
46 | np.infty
|
= help: Use `numpy.inf` instead.
Fix
41 41 |
42 42 | np.Inf
43 43 |
44 |- np.Infinity
44 |+ np.inf
45 45 |
46 46 | np.infty
47 47 |
NPY201.py:46:5: NPY201 [*] `np.infty` will be removed in NumPy 2.0. Use `numpy.inf` instead.
|
44 | np.Infinity
45 |
46 | np.infty
| ^^^^^^^^ NPY201
47 |
48 | np.issctype
|
= help: Use `numpy.inf` instead.
Fix
43 43 |
44 44 | np.Infinity
45 45 |
46 |- np.infty
46 |+ np.inf
47 47 |
48 48 | np.issctype
49 49 |
NPY201.py:48:5: NPY201 `np.issctype` will be removed without replacement in NumPy 2.0.
|
46 | np.infty
47 |
48 | np.issctype
| ^^^^^^^^^^^ NPY201
49 |
50 | np.issubclass_(np.int32, np.integer)
|
NPY201.py:50:5: NPY201 [*] `np.issubclass_` will be removed in NumPy 2.0. Use `issubclass` instead.
|
48 | np.issctype
49 |
50 | np.issubclass_(np.int32, np.integer)
| ^^^^^^^^^^^^^^ NPY201
51 |
52 | np.issubsctype
|
= help: Use `issubclass` instead.
Fix
47 47 |
48 48 | np.issctype
49 49 |
50 |- np.issubclass_(np.int32, np.integer)
50 |+ issubclass(np.int32, np.integer)
51 51 |
52 52 | np.issubsctype
53 53 |
NPY201.py:52:5: NPY201 [*] `np.issubsctype` will be removed in NumPy 2.0. Use `numpy.issubdtype` instead.
|
50 | np.issubclass_(np.int32, np.integer)
51 |
52 | np.issubsctype
| ^^^^^^^^^^^^^^ NPY201
53 |
54 | np.mat
|
= help: Use `numpy.issubdtype` instead.
Fix
49 49 |
50 50 | np.issubclass_(np.int32, np.integer)
51 51 |
52 |- np.issubsctype
52 |+ np.issubdtype
53 53 |
54 54 | np.mat
55 55 |
NPY201.py:54:5: NPY201 [*] `np.mat` will be removed in NumPy 2.0. Use `numpy.asmatrix` instead.
|
52 | np.issubsctype
53 |
54 | np.mat
| ^^^^^^ NPY201
55 |
56 | np.maximum_sctype
|
= help: Use `numpy.asmatrix` instead.
Fix
51 51 |
52 52 | np.issubsctype
53 53 |
54 |- np.mat
54 |+ np.asmatrix
55 55 |
56 56 | np.maximum_sctype
57 57 |
NPY201.py:56:5: NPY201 `np.maximum_sctype` will be removed without replacement in NumPy 2.0.
|
54 | np.mat
55 |
56 | np.maximum_sctype
| ^^^^^^^^^^^^^^^^^ NPY201
57 |
58 | np.NaN
|
NPY201.py:58:5: NPY201 [*] `np.NaN` will be removed in NumPy 2.0. Use `numpy.nan` instead.
|
56 | np.maximum_sctype
57 |
58 | np.NaN
| ^^^^^^ NPY201
59 |
60 | np.nbytes[np.int64]
|
= help: Use `numpy.nan` instead.
Fix
55 55 |
56 56 | np.maximum_sctype
57 57 |
58 |- np.NaN
58 |+ np.nan
59 59 |
60 60 | np.nbytes[np.int64]
61 61 |
NPY201.py:60:5: NPY201 `np.nbytes` will be removed in NumPy 2.0. Use `np.dtype(<dtype>).itemsize` instead.
|
58 | np.NaN
59 |
60 | np.nbytes[np.int64]
| ^^^^^^^^^ NPY201
61 |
62 | np.NINF
|
= help: Use `np.dtype(<dtype>).itemsize` instead.
NPY201.py:62:5: NPY201 [*] `np.NINF` will be removed in NumPy 2.0. Use `-np.inf` instead.
|
60 | np.nbytes[np.int64]
61 |
62 | np.NINF
| ^^^^^^^ NPY201
63 |
64 | np.NZERO
|
= help: Use `-np.inf` instead.
Fix
59 59 |
60 60 | np.nbytes[np.int64]
61 61 |
62 |- np.NINF
62 |+ -np.inf
63 63 |
64 64 | np.NZERO
65 65 |
NPY201.py:64:5: NPY201 [*] `np.NZERO` will be removed in NumPy 2.0. Use `-0.0` instead.
|
62 | np.NINF
63 |
64 | np.NZERO
| ^^^^^^^^ NPY201
65 |
66 | np.longcomplex(12+34j)
|
= help: Use `-0.0` instead.
Fix
61 61 |
62 62 | np.NINF
63 63 |
64 |- np.NZERO
64 |+ -0.0
65 65 |
66 66 | np.longcomplex(12+34j)
67 67 |
NPY201.py:66:5: NPY201 [*] `np.longcomplex` will be removed in NumPy 2.0. Use `numpy.clongdouble` instead.
|
64 | np.NZERO
65 |
66 | np.longcomplex(12+34j)
| ^^^^^^^^^^^^^^ NPY201
67 |
68 | np.longfloat(12+34j)
|
= help: Use `numpy.clongdouble` instead.
Fix
63 63 |
64 64 | np.NZERO
65 65 |
66 |- np.longcomplex(12+34j)
66 |+ np.clongdouble(12+34j)
67 67 |
68 68 | np.longfloat(12+34j)
69 69 |
NPY201.py:68:5: NPY201 [*] `np.longfloat` will be removed in NumPy 2.0. Use `numpy.longdouble` instead.
|
66 | np.longcomplex(12+34j)
67 |
68 | np.longfloat(12+34j)
| ^^^^^^^^^^^^ NPY201
69 |
70 | np.lookfor
|
= help: Use `numpy.longdouble` instead.
Fix
65 65 |
66 66 | np.longcomplex(12+34j)
67 67 |
68 |- np.longfloat(12+34j)
68 |+ np.longdouble(12+34j)
69 69 |
70 70 | np.lookfor
71 71 |
NPY201.py:70:5: NPY201 `np.lookfor` will be removed in NumPy 2.0. Search NumPy's documentation directly.
|
68 | np.longfloat(12+34j)
69 |
70 | np.lookfor
| ^^^^^^^^^^ NPY201
71 |
72 | np.obj2sctype(int)
|
= help: Search NumPy's documentation directly.
NPY201.py:72:5: NPY201 `np.obj2sctype` will be removed without replacement in NumPy 2.0.
|
70 | np.lookfor
71 |
72 | np.obj2sctype(int)
| ^^^^^^^^^^^^^ NPY201
73 |
74 | np.PINF
|
NPY201.py:74:5: NPY201 [*] `np.PINF` will be removed in NumPy 2.0. Use `numpy.inf` instead.
|
72 | np.obj2sctype(int)
73 |
74 | np.PINF
| ^^^^^^^ NPY201
75 |
76 | np.PZERO
|
= help: Use `numpy.inf` instead.
Fix
71 71 |
72 72 | np.obj2sctype(int)
73 73 |
74 |- np.PINF
74 |+ np.inf
75 75 |
76 76 | np.PZERO
77 77 |
NPY201.py:76:5: NPY201 [*] `np.PZERO` will be removed in NumPy 2.0. Use `0.0` instead.
|
74 | np.PINF
75 |
76 | np.PZERO
| ^^^^^^^^ NPY201
77 |
78 | np.recfromcsv
|
= help: Use `0.0` instead.
Fix
73 73 |
74 74 | np.PINF
75 75 |
76 |- np.PZERO
76 |+ 0.0
77 77 |
78 78 | np.recfromcsv
79 79 |
NPY201.py:78:5: NPY201 `np.recfromcsv` will be removed in NumPy 2.0. Use `np.genfromtxt` with comma delimiter instead.
|
76 | np.PZERO
77 |
78 | np.recfromcsv
| ^^^^^^^^^^^^^ NPY201
79 |
80 | np.recfromtxt
|
= help: Use `np.genfromtxt` with comma delimiter instead.
NPY201.py:80:5: NPY201 `np.recfromtxt` will be removed in NumPy 2.0. Use `np.genfromtxt` instead.
|
78 | np.recfromcsv
79 |
80 | np.recfromtxt
| ^^^^^^^^^^^^^ NPY201
81 |
82 | np.round_(12.34)
|
= help: Use `np.genfromtxt` instead.
NPY201.py:82:5: NPY201 [*] `np.round_` will be removed in NumPy 2.0. Use `numpy.round` instead.
|
80 | np.recfromtxt
81 |
82 | np.round_(12.34)
| ^^^^^^^^^ NPY201
83 |
84 | np.safe_eval
|
= help: Use `numpy.round` instead.
Fix
79 79 |
80 80 | np.recfromtxt
81 81 |
82 |- np.round_(12.34)
82 |+ np.round(12.34)
83 83 |
84 84 | np.safe_eval
85 85 |
NPY201.py:84:5: NPY201 [*] `np.safe_eval` will be removed in NumPy 2.0. Use `ast.literal_eval` instead.
|
82 | np.round_(12.34)
83 |
84 | np.safe_eval
| ^^^^^^^^^^^^ NPY201
85 |
86 | np.sctype2char
|
= help: Use `ast.literal_eval` instead.
Fix
1 |+from ast import literal_eval
1 2 | def func():
2 3 | import numpy as np
3 4 |
--------------------------------------------------------------------------------
81 82 |
82 83 | np.round_(12.34)
83 84 |
84 |- np.safe_eval
85 |+ literal_eval
85 86 |
86 87 | np.sctype2char
87 88 |
NPY201.py:86:5: NPY201 `np.sctype2char` will be removed without replacement in NumPy 2.0.
|
84 | np.safe_eval
85 |
86 | np.sctype2char
| ^^^^^^^^^^^^^^ NPY201
87 |
88 | np.sctypes
|
NPY201.py:88:5: NPY201 `np.sctypes` will be removed without replacement in NumPy 2.0.
|
86 | np.sctype2char
87 |
88 | np.sctypes
| ^^^^^^^^^^ NPY201
89 |
90 | np.seterrobj
|
NPY201.py:90:5: NPY201 `np.seterrobj` will be removed in NumPy 2.0. Use the `np.errstate` context manager instead.
|
88 | np.sctypes
89 |
90 | np.seterrobj
| ^^^^^^^^^^^^ NPY201
91 |
92 | np.set_numeric_ops
|
= help: Use the `np.errstate` context manager instead.
NPY201.py:94:5: NPY201 `np.set_string_function` will be removed in NumPy 2.0. Use `np.set_printoptions` for custom printing of NumPy objects.
|
92 | np.set_numeric_ops
93 |
94 | np.set_string_function
| ^^^^^^^^^^^^^^^^^^^^^^ NPY201
95 |
96 | np.singlecomplex(12+1j)
|
= help: Use `np.set_printoptions` for custom printing of NumPy objects.
NPY201.py:96:5: NPY201 [*] `np.singlecomplex` will be removed in NumPy 2.0. Use `numpy.complex64` instead.
|
94 | np.set_string_function
95 |
96 | np.singlecomplex(12+1j)
| ^^^^^^^^^^^^^^^^ NPY201
97 |
98 | np.string_("asdf")
|
= help: Use `numpy.complex64` instead.
Fix
93 93 |
94 94 | np.set_string_function
95 95 |
96 |- np.singlecomplex(12+1j)
96 |+ np.complex64(12+1j)
97 97 |
98 98 | np.string_("asdf")
99 99 |
NPY201.py:98:5: NPY201 [*] `np.string_` will be removed in NumPy 2.0. Use `numpy.bytes_` instead.
|
96 | np.singlecomplex(12+1j)
97 |
98 | np.string_("asdf")
| ^^^^^^^^^^ NPY201
99 |
100 | np.source
|
= help: Use `numpy.bytes_` instead.
Fix
95 95 |
96 96 | np.singlecomplex(12+1j)
97 97 |
98 |- np.string_("asdf")
98 |+ np.bytes_("asdf")
99 99 |
100 100 | np.source
101 101 |
NPY201.py:100:5: NPY201 [*] `np.source` will be removed in NumPy 2.0. Use `inspect.getsource` instead.
|
98 | np.string_("asdf")
99 |
100 | np.source
| ^^^^^^^^^ NPY201
101 |
102 | np.tracemalloc_domain
|
= help: Use `inspect.getsource` instead.
Fix
1 |+from inspect import getsource
1 2 | def func():
2 3 | import numpy as np
3 4 |
--------------------------------------------------------------------------------
97 98 |
98 99 | np.string_("asdf")
99 100 |
100 |- np.source
101 |+ getsource
101 102 |
102 103 | np.tracemalloc_domain
103 104 |
NPY201.py:102:5: NPY201 [*] `np.tracemalloc_domain` will be removed in NumPy 2.0. Use `numpy.lib.tracemalloc_domain` instead.
|
100 | np.source
101 |
102 | np.tracemalloc_domain
| ^^^^^^^^^^^^^^^^^^^^^ NPY201
103 |
104 | np.unicode_("asf")
|
= help: Use `numpy.lib.tracemalloc_domain` instead.
Fix
1 |+from numpy.lib import tracemalloc_domain
1 2 | def func():
2 3 | import numpy as np
3 4 |
--------------------------------------------------------------------------------
99 100 |
100 101 | np.source
101 102 |
102 |- np.tracemalloc_domain
103 |+ tracemalloc_domain
103 104 |
104 105 | np.unicode_("asf")
105 106 |
NPY201.py:104:5: NPY201 [*] `np.unicode_` will be removed in NumPy 2.0. Use `numpy.str_` instead.
|
102 | np.tracemalloc_domain
103 |
104 | np.unicode_("asf")
| ^^^^^^^^^^^ NPY201
105 |
106 | np.who()
|
= help: Use `numpy.str_` instead.
Fix
101 101 |
102 102 | np.tracemalloc_domain
103 103 |
104 |- np.unicode_("asf")
104 |+ np.str_("asf")
105 105 |
106 106 | np.who()
NPY201.py:106:5: NPY201 `np.who` will be removed in NumPy 2.0. Use an IDE variable explorer or `locals()` instead.
|
104 | np.unicode_("asf")
105 |
106 | np.who()
| ^^^^^^ NPY201
|
= help: Use an IDE variable explorer or `locals()` instead.

View File

@@ -119,23 +119,12 @@ fn collect_specs(formats: &[CFormatStrOrBytes<String>]) -> Vec<&CFormatSpec> {
/// Return `true` if the format string is equivalent to the constant type
fn equivalent(format: &CFormatSpec, value: &Expr) -> bool {
let format_type = FormatType::from(format.format_char);
let format = FormatType::from(format.format_char);
match ResolvedPythonType::from(value) {
ResolvedPythonType::Atom(atom) => {
// Special case where `%c` allows single character strings to be formatted
if format.format_char == 'c' {
if let Expr::StringLiteral(string) = value {
let mut chars = string.chars();
if chars.next().is_some() && chars.next().is_none() {
return true;
}
}
}
format_type.is_compatible_with(atom)
ResolvedPythonType::Atom(atom) => format.is_compatible_with(atom),
ResolvedPythonType::Union(atoms) => {
atoms.iter().all(|atom| format.is_compatible_with(*atom))
}
ResolvedPythonType::Union(atoms) => atoms
.iter()
.all(|atom| format_type.is_compatible_with(*atom)),
ResolvedPythonType::Unknown => true,
ResolvedPythonType::TypeError => true,
}

View File

@@ -181,9 +181,6 @@ fn create_fields_from_fields_arg(fields: &Expr) -> Option<Vec<Stmt>> {
let [field, annotation] = elts.as_slice() else {
return None;
};
if annotation.is_starred_expr() {
return None;
}
let ast::ExprStringLiteral { value: field, .. } = field.as_string_literal_expr()?;
if !is_identifier(field) {
return None;

View File

@@ -1,7 +1,8 @@
use ruff_diagnostics::{Diagnostic, Edit, Fix, FixAvailability, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::generate_comparison;
use ruff_python_ast::{self as ast, CmpOp, Expr, ExprStringLiteral};
use ruff_python_ast::ExprStringLiteral;
use ruff_python_ast::{CmpOp, Expr};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
@@ -94,17 +95,13 @@ pub(crate) fn single_item_membership_test(
checker.diagnostics.push(diagnostic);
}
/// Return the single item wrapped in `Some` if the expression contains a single
/// item, otherwise return `None`.
/// Return the single item wrapped in Some if the expression contains a single
/// item, otherwise return None.
fn single_item(expr: &Expr) -> Option<&Expr> {
match expr {
Expr::List(ast::ExprList { elts, .. })
| Expr::Tuple(ast::ExprTuple { elts, .. })
| Expr::Set(ast::ExprSet { elts, .. }) => match elts.as_slice() {
[Expr::Starred(_)] => None,
[item] => Some(item),
_ => None,
},
Expr::List(list) if list.elts.len() == 1 => Some(&list.elts[0]),
Expr::Tuple(tuple) if tuple.elts.len() == 1 => Some(&tuple.elts[0]),
Expr::Set(set) if set.elts.len() == 1 => Some(&set.elts[0]),
string_expr @ Expr::StringLiteral(ExprStringLiteral { value: string, .. })
if string.chars().count() == 1 =>
{

View File

@@ -163,7 +163,7 @@ pub(crate) fn ambiguous_unicode_character(
let candidate = Candidate::new(
TextSize::try_from(relative_offset).unwrap() + range.start(),
current_char,
char::from_u32(representant).unwrap(),
representant as char,
);
if let Some(diagnostic) = candidate.into_diagnostic(context, settings) {
diagnostics.push(diagnostic);
@@ -178,7 +178,7 @@ pub(crate) fn ambiguous_unicode_character(
word_candidates.push(Candidate::new(
TextSize::try_from(relative_offset).unwrap() + range.start(),
current_char,
char::from_u32(representant).unwrap(),
representant as char,
));
} else {
// The current word contains at least one unambiguous unicode character.

View File

@@ -2,7 +2,7 @@
/// Via: <https://github.com/hediet/vscode-unicode-data/blob/main/out/ambiguous.json>
/// See: <https://github.com/microsoft/vscode/blob/095ddabc52b82498ee7f718a34f9dd11d59099a8/src/vs/base/common/strings.ts#L1094>
pub(crate) fn confusable(c: u32) -> Option<u32> {
pub(crate) fn confusable(c: u32) -> Option<u8> {
let result = match c {
160u32 => 32,
180u32 => 96,
@@ -1586,9 +1586,6 @@ pub(crate) fn confusable(c: u32) -> Option<u32> {
130_039_u32 => 55,
130_040_u32 => 56,
130_041_u32 => 57,
0x212B => 0x00C5,
0x2126 => 0x03A9,
0x00B5 => 0x03BC,
_ => return None,
};
Some(result)

View File

@@ -155,10 +155,4 @@ confusables.py:46:62: RUF003 Comment contains ambiguous `᜵` (PHILIPPINE SINGLE PUNCTUATION). Did you mean `/` (SOLIDUS)?
47 | }"
|
confusables.py:55:28: RUF001 String contains ambiguous `µ` (MICRO SIGN). Did you mean `μ` (GREEK SMALL LETTER MU)?
|
55 | assert getattr(Labware(), "µL") == 1.5
| ^ RUF001
|

View File

@@ -4,7 +4,8 @@ use ruff_macros::CacheKey;
use crate::registry::{Rule, RuleSet, RuleSetIterator};
/// A table to keep track of which rules are enabled and whether they should be fixed.
/// A table to keep track of which rules are enabled
/// and Whether they should be fixed.
#[derive(Debug, CacheKey, Default)]
pub struct RuleTable {
/// Maps rule codes to a boolean indicating if the rule should be fixed.

View File

@@ -1,8 +0,0 @@
{
"execution_count": null,
"cell_type": "code",
"id": "1",
"metadata": {},
"outputs": [],
"source": ["pip install requests"]
}

View File

@@ -1,8 +0,0 @@
{
"execution_count": null,
"cell_type": "code",
"id": "1",
"metadata": {},
"outputs": [],
"source": ["x = 1\n", "pip install requests"]
}

View File

@@ -1,8 +0,0 @@
{
"execution_count": null,
"cell_type": "code",
"id": "1",
"metadata": {},
"outputs": [],
"source": ["pip install requests\n", "x = 1"]
}

View File

@@ -1,8 +0,0 @@
{
"execution_count": null,
"cell_type": "code",
"id": "1",
"metadata": {},
"outputs": [],
"source": ["pip install requests\n", "pip install requests"]
}

View File

@@ -80,126 +80,14 @@ impl Cell {
// Ignore cells containing cell magic as they act on the entire cell
// as compared to line magic which acts on a single line.
!match source {
SourceValue::String(string) => Self::is_magic_cell(string.lines()),
SourceValue::StringArray(string_array) => {
Self::is_magic_cell(string_array.iter().map(String::as_str))
}
SourceValue::String(string) => string
.lines()
.any(|line| line.trim_start().starts_with("%%")),
SourceValue::StringArray(string_array) => string_array
.iter()
.any(|line| line.trim_start().starts_with("%%")),
}
}
/// Returns `true` if a cell should be ignored due to the use of cell magics.
fn is_magic_cell<'a>(lines: impl Iterator<Item = &'a str>) -> bool {
let mut lines = lines.peekable();
// Detect automatic line magics (automagic), which aren't supported by the parser. If a line
// magic uses automagic, Jupyter doesn't allow following it with non-magic lines anyway, so
// we aren't missing out on any valid Python code.
//
// For example, this is valid:
// ```jupyter
// cat /path/to/file
// cat /path/to/file
// ```
//
// But this is invalid:
// ```jupyter
// cat /path/to/file
// x = 1
// ```
//
// See: https://ipython.readthedocs.io/en/stable/interactive/magics.html
if lines
.peek()
.and_then(|line| line.split_whitespace().next())
.is_some_and(|token| {
matches!(
token,
"alias"
| "alias_magic"
| "autoawait"
| "autocall"
| "automagic"
| "bookmark"
| "cd"
| "code_wrap"
| "colors"
| "conda"
| "config"
| "debug"
| "dhist"
| "dirs"
| "doctest_mode"
| "edit"
| "env"
| "gui"
| "history"
| "killbgscripts"
| "load"
| "load_ext"
| "loadpy"
| "logoff"
| "logon"
| "logstart"
| "logstate"
| "logstop"
| "lsmagic"
| "macro"
| "magic"
| "mamba"
| "matplotlib"
| "micromamba"
| "notebook"
| "page"
| "pastebin"
| "pdb"
| "pdef"
| "pdoc"
| "pfile"
| "pinfo"
| "pinfo2"
| "pip"
| "popd"
| "pprint"
| "precision"
| "prun"
| "psearch"
| "psource"
| "pushd"
| "pwd"
| "pycat"
| "pylab"
| "quickref"
| "recall"
| "rehashx"
| "reload_ext"
| "rerun"
| "reset"
| "reset_selective"
| "run"
| "save"
| "sc"
| "set_env"
| "sx"
| "system"
| "tb"
| "time"
| "timeit"
| "unalias"
| "unload_ext"
| "who"
| "who_ls"
| "whos"
| "xdel"
| "xmode"
)
})
{
return true;
}
// Detect cell magics (which operate on multiple lines).
lines.any(|line| line.trim_start().starts_with("%%"))
}
}
/// An error that can occur while deserializing a Jupyter Notebook.
@@ -593,10 +481,6 @@ mod tests {
#[test_case(Path::new("code_and_magic.json"), true; "code_and_magic")]
#[test_case(Path::new("only_code.json"), true; "only_code")]
#[test_case(Path::new("cell_magic.json"), false; "cell_magic")]
#[test_case(Path::new("automagic.json"), false; "automagic")]
#[test_case(Path::new("automagics.json"), false; "automagics")]
#[test_case(Path::new("automagic_before_code.json"), false; "automagic_before_code")]
#[test_case(Path::new("automagic_after_code.json"), true; "automagic_after_code")]
fn test_is_valid_code_cell(path: &Path, expected: bool) -> Result<()> {
/// Read a Jupyter cell from the `resources/test/fixtures/jupyter/cell` directory.
fn read_jupyter_cell(path: impl AsRef<Path>) -> Result<Cell> {

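Tying the fixtures and test expectations above together, here is an illustrative Python sketch of the cell-validity rule. It is a simplified stand-in, not Ruff's implementation, and only a handful of automagic commands are listed.

```python
# A small subset of IPython's automagic commands (the Rust hunk lists them all).
AUTOMAGICS = {"cd", "conda", "load_ext", "matplotlib", "pip", "timeit"}


def is_valid_code_cell(lines: list[str]) -> bool:
    # Cell magics (%%...) act on the whole cell, so the cell is skipped.
    if any(line.lstrip().startswith("%%") for line in lines):
        return False
    # Automagic only matters on the first line: IPython does not allow regular
    # code to follow it, so nothing valid is lost by skipping the cell.
    first_tokens = lines[0].split() if lines else []
    return not (first_tokens and first_tokens[0] in AUTOMAGICS)


assert is_valid_code_cell(["x = 1", "pip install requests"])       # automagic_after_code
assert not is_valid_code_cell(["pip install requests", "x = 1"])   # automagic_before_code
assert not is_valid_code_cell(["%%timeit", "x = 1"])               # cell_magic
```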
View File

@@ -649,8 +649,8 @@ pub enum ComparableLiteral<'a> {
None,
Ellipsis,
Bool(&'a bool),
Str(&'a str),
Bytes(&'a [u8]),
Str { value: &'a str, unicode: &'a bool },
Bytes { value: &'a [u8] },
Number(ComparableNumber<'a>),
}
@@ -662,11 +662,13 @@ impl<'a> From<ast::LiteralExpressionRef<'a>> for ComparableLiteral<'a> {
ast::LiteralExpressionRef::BooleanLiteral(ast::ExprBooleanLiteral {
value, ..
}) => Self::Bool(value),
ast::LiteralExpressionRef::StringLiteral(ast::ExprStringLiteral { value, .. }) => {
Self::Str(value)
}
ast::LiteralExpressionRef::StringLiteral(ast::ExprStringLiteral {
value,
unicode,
..
}) => Self::Str { value, unicode },
ast::LiteralExpressionRef::BytesLiteral(ast::ExprBytesLiteral { value, .. }) => {
Self::Bytes(value)
Self::Bytes { value }
}
ast::LiteralExpressionRef::NumberLiteral(ast::ExprNumberLiteral { value, .. }) => {
Self::Number(value.into())
@@ -678,6 +680,7 @@ impl<'a> From<ast::LiteralExpressionRef<'a>> for ComparableLiteral<'a> {
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct ExprStringLiteral<'a> {
value: &'a str,
unicode: &'a bool,
}
#[derive(Debug, PartialEq, Eq, Hash)]
@@ -945,9 +948,9 @@ impl<'a> From<&'a ast::Expr> for ComparableExpr<'a> {
// Compare strings based on resolved value, not representation (i.e., ignore whether
// the string was implicitly concatenated).
implicit_concatenated: _,
unicode: _,
unicode,
range: _,
}) => Self::StringLiteral(ExprStringLiteral { value }),
}) => Self::StringLiteral(ExprStringLiteral { value, unicode }),
ast::Expr::BytesLiteral(ast::ExprBytesLiteral {
value,
// Compare bytes based on resolved value, not representation (i.e., ignore whether

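The hunks above thread the string's `unicode` flag (the legacy `u` prefix) into the comparable representation. A quick Python-level illustration of why the prefix is worth distinguishing even though the runtime values are equal; the exact rule motivating the change is not shown in the hunk.

```python
import ast

plain = ast.parse('"abc"', mode="eval").body      # ast.Constant(value='abc', kind=None)
prefixed = ast.parse('u"abc"', mode="eval").body  # ast.Constant(value='abc', kind='u')

assert plain.value == prefixed.value  # identical runtime values
assert plain.kind != prefixed.kind    # but the u-prefix is recorded in the AST
```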
View File

@@ -2313,14 +2313,6 @@ impl Parameters {
&& self.vararg.is_none()
&& self.kwarg.is_none()
}
pub fn len(&self) -> usize {
self.posonlyargs.len()
+ self.args.len()
+ usize::from(self.vararg.is_some())
+ self.kwonlyargs.len()
+ usize::from(self.kwarg.is_some())
}
}
/// An alternative type of AST `arg`. This is used for each function argument that might have a default value.
@@ -2524,10 +2516,33 @@ impl Parameters {
}
}
#[allow(clippy::borrowed_box)] // local utility
fn clone_boxed_expr(expr: &Box<Expr>) -> Box<Expr> {
let expr: &Expr = expr.as_ref();
Box::new(expr.clone())
}
impl ParameterWithDefault {
pub fn as_parameter(&self) -> &Parameter {
&self.parameter
}
pub fn to_parameter(&self) -> (Parameter, Option<Box<Expr>>) {
let ParameterWithDefault {
range: _,
parameter,
default,
} = self;
(parameter.clone(), default.as_ref().map(clone_boxed_expr))
}
pub fn into_parameter(self) -> (Parameter, Option<Box<Expr>>) {
let ParameterWithDefault {
range: _,
parameter,
default,
} = self;
(parameter, default)
}
}
impl Parameters {

View File

@@ -117,11 +117,10 @@ quote-style = "single"
```
The Ruff formatter also respects Ruff's [`line-length`](https://docs.astral.sh/ruff/settings/#line-length)
setting, which also can be provided via a `pyproject.toml` or `ruff.toml` file, or on the CLI, as
in:
setting, which also can be provided via a `pyproject.toml` or `ruff.toml` file.
```console
ruff format --line-length 100 /path/to/file.py
```
```toml
line-length = 80
```
### Excluding code from formatting

View File

@@ -1,8 +0,0 @@
[
{
"preview": "disabled"
},
{
"preview": "enabled"
}
]

View File

@@ -125,13 +125,6 @@ lambda a, /, c: a
*x: x
)
(
lambda
# comment
*x,
**y: x
)
(
lambda
# comment 1
@@ -142,17 +135,6 @@ lambda a, /, c: a
x
)
(
lambda
# comment 1
*
# comment 2
x,
**y:
# comment 3
x
)
(
lambda # comment 1
* # comment 2
@@ -160,14 +142,6 @@ lambda a, /, c: a
x
)
(
lambda # comment 1
* # comment 2
x,
y: # comment 3
x
)
lambda *x\
:x
@@ -222,17 +196,6 @@ lambda: ( # comment
x
)
(
lambda # 1
# 2
x, # 3
# 4
y
: # 5
# 6
x
)
(
lambda
x,
@@ -240,93 +203,3 @@ lambda: ( # comment
y:
z
)
# Leading
lambda x: (
lambda y: lambda z: x
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ z # Trailing
) # Trailing
# Leading
lambda x: lambda y: lambda z: [
x,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
z
] # Trailing
# Trailing
lambda self, araa, kkkwargs=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(*args, **kwargs), e=1, f=2, g=2: d
# Regression tests for https://github.com/astral-sh/ruff/issues/8179
def a():
return b(
c,
d,
e,
f=lambda self, *args, **kwargs: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(
*args, **kwargs
),
)
def a():
return b(
c,
d,
e,
f=lambda self, araa, kkkwargs,aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
args,kwargs,
e=1, f=2, g=2: d,
g = 10
)

View File

@@ -1,206 +0,0 @@
comment_string = "Long lines with inline comments should have their comments appended to the reformatted string's enclosing right parentheses." # This comment gets thrown to the top.
# 88 characters unparenthesized
____aaa = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvv # c
# 88 characters
____aaa = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvv # c
# 89 characters parenthesized (collapse)
____aaa = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
## Parenthesized
# 88 characters unparenthesized
____aaa = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvv # c
)
# 88 characters
____aaa = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvv # c
)
# 89 characters parenthesized (collapse)
____aaa = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
)
## Expression and statement comments
# 88 characters unparenthesized
____aaa = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbb # c
) # d
# 88 characters
____aaa = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvv # c
) # d
# 89 characters parenthesized (collapse)
____aaa = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvv # c
) # d
## Strings
# 88 characters unparenthesized
____aaa = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvv" # c
# 88 characters
____aaa = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvv" # c
# 89 characters parenthesized (collapse)
____aaa = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvv" # c
# Always parenthesize if implicit concatenated
____aaa = (
"aaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvvvvv"
) # c
## Numbers
# 88 characters unparenthesized
____aaa = 1111111111111111111111111111111111111111111111111111111111111111111111111 # c
# 88 characters
____aaa = 1111111111111111111111111111111111111111111111111111111111111111111111111111111 # c
# 89 characters parenthesized (collapse)
____aaa = 11111111111111111111111111111111111111111111111111111111111111111111111111111111 # c
## Breaking left
# Should break `[a]` first
____[a] = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvv # c
____[
a
] = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvv # cc
)
(
# some weird comments
____[aaaaaaaaa]
# some weird comments 2
) = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvv # c
# Preserve trailing assignment comments when the expression has own line comments
____aaa = (
# leading
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvv
# trailing
) # cc
def setUpTestData(cls):
cls.happening = (
Happening.objects.create()
) # make sure the defaults are working (#20158)
def setUpTestData(cls):
cls.happening = (
Happening.objects.create # make sure the defaults are working (#20158)
)
if True:
if True:
if True:
# Black layout
model.config.use_cache = (
False # FSTM still requires this hack -> FSTM should probably be refactored s
)
## Annotated Assign
# 88 characters unparenthesized
____a: a = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvv # c
# 88 characters
____a: a = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvv # c
# 89 characters parenthesized (collapse)
____a: a = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
# 88 characters unparenthesized
____a : a = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvv # c
)
# 88 characters
____a: a = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvv # c
)
# 89 characters parenthesized (collapse)
____a: a = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
)
_a: a[b] = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
)
## Augmented Assign
# 88 characters unparenthesized
____aa += aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvv # c
# 88 characters
____aa += aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvv # c
# 89 characters parenthesized (collapse)
____aa += aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
# 88 characters unparenthesized
____aa += (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvv # c
)
# 88 characters
____aa += (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvv # c
)
# 89 characters parenthesized (collapse)
____aa += (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
)
## Return
def test():
# 88 characters unparenthesized
return aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvv # c
def test2():
# 88 characters
return aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvv # c
def test3():
# 89 characters parenthesized (collapse)
return aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvv # c
## Return Parenthesized
def test4():
# 88 characters unparenthesized
return (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvv # c
)
def test5():
# 88 characters
return (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvv # c
)
def test6():
# 89 characters parenthesized (collapse)
return (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvv # c
)

View File

@@ -43,14 +43,6 @@ impl<'a> PyFormatContext<'a> {
pub(crate) fn comments(&self) -> &Comments<'a> {
&self.comments
}
pub(crate) const fn is_preview(&self) -> bool {
self.options.preview().is_enabled()
}
pub(crate) const fn is_stable(&self) -> bool {
!self.is_preview()
}
}
impl FormatContext for PyFormatContext<'_> {

View File

@@ -20,7 +20,7 @@ impl FormatNodeRule<ExprAwait> for FormatExprAwait {
[
token("await"),
space(),
maybe_parenthesize_expression(value, item, Parenthesize::IfRequired)
maybe_parenthesize_expression(value, item, Parenthesize::IfBreaks)
]
)
}
@@ -39,7 +39,6 @@ impl NeedsParentheses for ExprAwait {
context.comments().ranges(),
context.source(),
) {
// Prefer splitting the value if it is parenthesized.
OptionalParentheses::Never
} else {
self.value.needs_parentheses(self.into(), context)

View File

@@ -1,11 +1,10 @@
use ruff_formatter::{format_args, write};
use ruff_formatter::write;
use ruff_python_ast::AnyNodeRef;
use ruff_python_ast::ExprLambda;
use ruff_text_size::Ranged;
use crate::comments::{dangling_comments, leading_comments, SourceComment};
use crate::expression::parentheses::{NeedsParentheses, OptionalParentheses, Parenthesize};
use crate::expression::{has_own_parentheses, maybe_parenthesize_expression};
use crate::comments::{dangling_comments, SourceComment};
use crate::expression::parentheses::{NeedsParentheses, OptionalParentheses};
use crate::other::parameters::ParametersParentheses;
use crate::prelude::*;
@@ -26,49 +25,31 @@ impl FormatNodeRule<ExprLambda> for FormatExprLambda {
write!(f, [token("lambda")])?;
if let Some(parameters) = parameters {
// In this context, a dangling comment can either be a comment between the `lambda` and the
// In this context, a dangling comment can either be a comment between the `lambda` the
// parameters, or a comment between the parameters and the body.
let (dangling_before_parameters, dangling_after_parameters) = dangling
.split_at(dangling.partition_point(|comment| comment.end() < parameters.start()));
if dangling_before_parameters.is_empty() {
write!(f, [space()])?;
} else {
write!(f, [dangling_comments(dangling_before_parameters)])?;
}
group(&format_with(|f: &mut PyFormatter| {
if f.context().node_level().is_parenthesized()
&& (parameters.len() > 1 || !dangling_before_parameters.is_empty())
{
let end_of_line_start = dangling_before_parameters
.partition_point(|comment| comment.line_position().is_end_of_line());
let (same_line_comments, own_line_comments) =
dangling_before_parameters.split_at(end_of_line_start);
write!(
f,
[parameters
.format()
.with_options(ParametersParentheses::Never)]
)?;
dangling_comments(same_line_comments).fmt(f)?;
write!(f, [token(":")])?;
soft_block_indent(&format_args![
leading_comments(own_line_comments),
parameters
.format()
.with_options(ParametersParentheses::Never),
])
.fmt(f)
} else {
parameters
.format()
.with_options(ParametersParentheses::Never)
.fmt(f)
}?;
token(":").fmt(f)?;
if dangling_after_parameters.is_empty() {
space().fmt(f)
} else {
dangling_comments(dangling_after_parameters).fmt(f)
}
}))
.fmt(f)?;
if dangling_after_parameters.is_empty() {
write!(f, [space()])?;
} else {
write!(f, [dangling_comments(dangling_after_parameters)])?;
}
} else {
write!(f, [token(":")])?;
@@ -80,12 +61,7 @@ impl FormatNodeRule<ExprLambda> for FormatExprLambda {
}
}
// Avoid parenthesizing lists, dictionaries, etc.
if f.context().is_stable() || has_own_parentheses(body, f.context()).is_some() {
body.format().fmt(f)
} else {
maybe_parenthesize_expression(body, item, Parenthesize::IfBreaksOrIfRequired).fmt(f)
}
write!(f, [body.format()])
}
fn fmt_dangling_comments(

View File

@@ -69,7 +69,6 @@ impl NeedsParentheses for ExprNamedExpr {
|| parent.is_stmt_delete()
|| parent.is_stmt_for()
|| parent.is_stmt_function_def()
|| parent.is_expr_lambda()
{
OptionalParentheses::Always
} else {

View File

@@ -59,10 +59,7 @@ impl NeedsParentheses for AnyExpressionYield<'_> {
OptionalParentheses::Never
} else {
// Ex) `x = yield f(1, 2, 3)`
match value.needs_parentheses(self.into(), context) {
OptionalParentheses::BestFit => OptionalParentheses::Never,
parentheses => parentheses,
}
value.needs_parentheses(self.into(), context)
}
} else {
// Ex) `x = yield`

View File

@@ -12,9 +12,7 @@ use ruff_python_trivia::CommentRanges;
use ruff_text_size::Ranged;
use crate::builders::parenthesize_if_expands;
use crate::comments::{
leading_comments, trailing_comments, LeadingDanglingTrailingComments, SourceComment,
};
use crate::comments::{leading_comments, trailing_comments, LeadingDanglingTrailingComments};
use crate::context::{NodeLevel, WithNodeLevel};
use crate::expression::expr_generator_exp::is_generator_parenthesized;
use crate::expression::expr_tuple::is_tuple_parenthesized;
@@ -376,8 +374,10 @@ impl Format<PyFormatContext<'_>> for MaybeParenthesizeExpression<'_> {
return expression.format().with_options(Parentheses::Always).fmt(f);
}
let comments = f.context().comments().clone();
let node_comments = comments.leading_dangling_trailing(*expression);
let node_comments = f
.context()
.comments()
.leading_dangling_trailing(*expression);
// If the expression has comments, we always want to preserve the parentheses. This also
// ensures that we correctly handle parenthesized comments, and don't need to worry about
@@ -426,106 +426,15 @@ impl Format<PyFormatContext<'_>> for MaybeParenthesizeExpression<'_> {
expression.format().with_options(Parentheses::Never).fmt(f)
}
Parenthesize::IfBreaks => {
// Is the expression the last token in the parent statement.
// Excludes `await` and `yield` for which Black doesn't seem to apply the layout?
let last_expression = parent.is_stmt_assign()
|| parent.is_stmt_ann_assign()
|| parent.is_stmt_aug_assign()
|| parent.is_stmt_return();
// Format the statements and value's trailing end of line comments:
// * after the expression if the expression needs no parentheses (necessary or the `expand_parent` makes the group never fit).
// * inside the parentheses if the expression exceeds the line-width.
//
// ```python
// a = long # with_comment
// b = (
// short # with_comment
// )
//
// # formatted
// a = (
// long # with comment
// )
// b = short # with comment
// ```
// This matches Black's formatting with the exception that ruff applies this style also for
// attribute chains and non-fluent call expressions. See https://github.com/psf/black/issues/4001#issuecomment-1786681792
//
// This logic isn't implemented in [`place_comment`] by associating trailing statement comments to the expression because
// doing so breaks the suite empty lines formatting that relies on trailing comments to be stored on the statement.
let (inline_comments, expression_trailing_comments) = if last_expression
&& !(
// Ignore non-fluent attribute chains for black compatibility.
// See https://github.com/psf/black/issues/4001#issuecomment-1786681792
expression.is_attribute_expr()
|| expression.is_call_expr()
|| expression.is_yield_from_expr()
|| expression.is_yield_expr()
|| expression.is_await_expr()
) {
let parent_trailing_comments = comments.trailing(*parent);
let after_end_of_line = parent_trailing_comments
.partition_point(|comment| comment.line_position().is_end_of_line());
let (stmt_inline_comments, _) =
parent_trailing_comments.split_at(after_end_of_line);
let after_end_of_line = node_comments
.trailing
.partition_point(|comment| comment.line_position().is_end_of_line());
let (expression_inline_comments, expression_trailing_comments) =
node_comments.trailing.split_at(after_end_of_line);
(
OptionalParenthesesInlinedComments {
expression: expression_inline_comments,
statement: stmt_inline_comments,
},
expression_trailing_comments,
)
if node_comments.has_trailing() {
expression.format().with_options(Parentheses::Always).fmt(f)
} else {
(
OptionalParenthesesInlinedComments::default(),
node_comments.trailing,
)
};
if expression_trailing_comments.is_empty() {
// The group id is necessary because the nested expressions may reference it.
let group_id = f.group_id("optional_parentheses");
let f = &mut WithNodeLevel::new(NodeLevel::Expression(Some(group_id)), f);
best_fit_parenthesize(&format_with(|f| {
inline_comments.mark_formatted();
expression
.format()
.with_options(Parentheses::Never)
.fmt(f)?;
if !inline_comments.is_empty() {
// If the expressions exceeds the line width, format the comments in the parentheses
if_group_breaks(&inline_comments)
.with_group_id(Some(group_id))
.fmt(f)?;
}
Ok(())
}))
.with_group_id(Some(group_id))
.fmt(f)?;
if !inline_comments.is_empty() {
// If the line fits into the line width, format the comments after the parenthesized expression
if_group_fits_on_line(&inline_comments)
.with_group_id(Some(group_id))
.fmt(f)?;
}
Ok(())
} else {
expression.format().with_options(Parentheses::Always).fmt(f)
best_fit_parenthesize(&expression.format().with_options(Parentheses::Never))
.with_group_id(Some(group_id))
.fmt(f)
}
}
},
@@ -1160,41 +1069,3 @@ impl From<ast::Operator> for OperatorPrecedence {
}
}
}
#[derive(Debug, Default)]
struct OptionalParenthesesInlinedComments<'a> {
expression: &'a [SourceComment],
statement: &'a [SourceComment],
}
impl<'a> OptionalParenthesesInlinedComments<'a> {
fn is_empty(&self) -> bool {
self.expression.is_empty() && self.statement.is_empty()
}
fn iter_comments(&self) -> impl Iterator<Item = &'a SourceComment> {
self.expression.iter().chain(self.statement)
}
fn mark_formatted(&self) {
for comment in self.iter_comments() {
comment.mark_formatted();
}
}
}
impl Format<PyFormatContext<'_>> for OptionalParenthesesInlinedComments<'_> {
fn fmt(&self, f: &mut Formatter<PyFormatContext<'_>>) -> FormatResult<()> {
for comment in self.iter_comments() {
comment.mark_unformatted();
}
write!(
f,
[
trailing_comments(self.expression),
trailing_comments(self.statement)
]
)
}
}

View File

@@ -108,7 +108,7 @@ impl PyFormatOptions {
self.line_ending
}
pub const fn preview(&self) -> PreviewMode {
pub fn preview(&self) -> PreviewMode {
self.preview
}

View File

@@ -62,7 +62,7 @@ impl FormatNodeRule<ParameterWithDefault> for FormatParameterWithDefault {
token("="),
(!needs_line_break).then_some(space),
needs_line_break.then_some(hard_line_break()),
default.format()
group(&default.format())
]
)?;
}

View File

@@ -102,15 +102,7 @@ impl FormatNodeRule<Parameters> for FormatParameters {
dangling.split_at(parenthesis_comments_end);
let format_inner = format_with(|f: &mut PyFormatter| {
let separator = format_with(|f: &mut PyFormatter| {
token(",").fmt(f)?;
if f.context().node_level().is_parenthesized() {
soft_line_break_or_space().fmt(f)
} else {
space().fmt(f)
}
});
let separator = format_with(|f| write!(f, [token(","), soft_line_break_or_space()]));
let mut joiner = f.join_with(separator);
let mut last_node: Option<AnyNodeRef> = None;
@@ -240,19 +232,23 @@ impl FormatNodeRule<Parameters> for FormatParameters {
Ok(())
});
let num_parameters = item.len();
let mut f = WithNodeLevel::new(NodeLevel::ParenthesizedExpression, f);
let num_parameters = posonlyargs.len()
+ args.len()
+ usize::from(vararg.is_some())
+ kwonlyargs.len()
+ usize::from(kwarg.is_some());
if self.parentheses == ParametersParentheses::Never {
write!(f, [format_inner, dangling_comments(dangling)])
write!(f, [group(&format_inner), dangling_comments(dangling)])
} else if num_parameters == 0 {
let mut f = WithNodeLevel::new(NodeLevel::ParenthesizedExpression, f);
// No parameters, format any dangling comments between `()`
write!(f, [empty_parenthesized("(", dangling, ")")])
} else {
// Intentionally avoid `parenthesized`, which groups the entire formatted contents.
// We want parameters to be grouped alongside return types, one level up, so we
// format them "inline" here.
let mut f = WithNodeLevel::new(NodeLevel::ParenthesizedExpression, f);
write!(
f,
[

View File

@@ -93,7 +93,7 @@ async def main():
```diff
--- Black
+++ Ruff
@@ -21,7 +21,9 @@
@@ -21,11 +21,15 @@
# Check comments
async def main():
@@ -103,6 +103,13 @@ async def main():
+ )
async def main():
- await asyncio.sleep(1) # Hello
+ await (
+ asyncio.sleep(1) # Hello
+ )
async def main():
```
@@ -138,7 +145,9 @@ async def main():
async def main():
await asyncio.sleep(1) # Hello
await (
asyncio.sleep(1) # Hello
)
async def main():

View File

@@ -131,13 +131,6 @@ lambda a, /, c: a
*x: x
)
(
lambda
# comment
*x,
**y: x
)
(
lambda
# comment 1
@@ -148,17 +141,6 @@ lambda a, /, c: a
x
)
(
lambda
# comment 1
*
# comment 2
x,
**y:
# comment 3
x
)
(
lambda # comment 1
* # comment 2
@@ -166,14 +148,6 @@ lambda a, /, c: a
x
)
(
lambda # comment 1
* # comment 2
x,
y: # comment 3
x
)
lambda *x\
:x
@@ -228,17 +202,6 @@ lambda: ( # comment
x
)
(
lambda # 1
# 2
x, # 3
# 4
y
: # 5
# 6
x
)
(
lambda
x,
@@ -246,109 +209,9 @@ lambda: ( # comment
y:
z
)
# Leading
lambda x: (
lambda y: lambda z: x
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ z # Trailing
) # Trailing
# Leading
lambda x: lambda y: lambda z: [
x,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
z
] # Trailing
# Trailing
lambda self, araa, kkkwargs=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(*args, **kwargs), e=1, f=2, g=2: d
# Regression tests for https://github.com/astral-sh/ruff/issues/8179
def a():
return b(
c,
d,
e,
f=lambda self, *args, **kwargs: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(
*args, **kwargs
),
)
def a():
return b(
c,
d,
e,
f=lambda self, araa, kkkwargs,aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
args,kwargs,
e=1, f=2, g=2: d,
g = 10
)
```
## Outputs
### Output 1
```
indent-style = space
line-width = 88
indent-width = 4
quote-style = Double
magic-trailing-comma = Respect
preview = Disabled
```
## Output
```py
# Leading
lambda x: x # Trailing
@@ -412,10 +275,8 @@ a = (
)
a = (
lambda
x, # Dangling
y
: 1
lambda x, # Dangling
y: 1
)
# Regression test: lambda empty arguments ranges were too long, leading to unstable
@@ -476,54 +337,23 @@ lambda a, /, c: a
(
lambda
# comment
*x
: x
# comment
*x: x
)
(
lambda
# comment
*x,
**y
: x
)
(
lambda
# comment 1
# comment 2
*x
:
# comment 3
x
)
(
lambda
# comment 1
# comment 2
*x,
**y
:
# comment 1
# comment 2
*x:
# comment 3
x
)
(
lambda # comment 1
# comment 2
*x
: # comment 3
x
)
(
lambda # comment 1
# comment 2
*x,
y
: # comment 3
# comment 2
*x: # comment 3
x
)
@@ -531,9 +361,8 @@ lambda *x: x
(
lambda
# comment
*x
: x
# comment
*x: x
)
lambda: ( # comment
@@ -571,356 +400,8 @@ lambda: ( # comment
(
lambda # 1
# 2
x
: # 3
# 4
# 5
# 6
x
)
(
lambda # 1
# 2
x, # 3
# 4
y
: # 5
# 6
x
)
(
lambda
x,
# comment
y
: z
)
# Leading
lambda x: (
lambda y: lambda z: x
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ z # Trailing
) # Trailing
# Leading
lambda x: lambda y: lambda z: [
x,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
z,
] # Trailing
# Trailing
lambda self, araa, kkkwargs=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(
*args, **kwargs
), e=1, f=2, g=2: d
# Regression tests for https://github.com/astral-sh/ruff/issues/8179
def a():
return b(
c,
d,
e,
f=lambda
self,
*args,
**kwargs
: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(*args, **kwargs),
)
def a():
return b(
c,
d,
e,
f=lambda
self,
araa,
kkkwargs,
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
args,
kwargs,
e=1,
f=2,
g=2
: d,
g=10,
)
```
### Output 2
```
indent-style = space
line-width = 88
indent-width = 4
quote-style = Double
magic-trailing-comma = Respect
preview = Enabled
```
```py
# Leading
lambda x: x # Trailing
# Trailing
# Leading
lambda x, y: x # Trailing
# Trailing
# Leading
lambda x, y: x, y # Trailing
# Trailing
# Leading
lambda x, /, y: x # Trailing
# Trailing
# Leading
lambda x: lambda y: lambda z: x # Trailing
# Trailing
# Leading
lambda x: lambda y: lambda z: (x, y, z) # Trailing
# Trailing
# Leading
lambda x: lambda y: lambda z: (x, y, z) # Trailing
# Trailing
# Leading
lambda x: (
lambda y: (
lambda z: (x, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, y, z)
)
) # Trailing
# Trailing
a = (
lambda: # Dangling
1
)
a = (
lambda
x, # Dangling
y
: 1
)
# Regression test: lambda empty arguments ranges were too long, leading to unstable
# formatting
(
lambda: ( #
),
)
# lambda arguments don't have parentheses, so we never add a magic trailing comma ...
def f(
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb = lambda x: (
y
),
):
pass
# ...but we do preserve a trailing comma after the arguments
a = lambda b,: 0
lambda a,: 0
lambda *args,: 0
lambda **kwds,: 0
lambda a, *args,: 0
lambda a, **kwds,: 0
lambda *args, b,: 0
lambda *, b,: 0
lambda *args, **kwds,: 0
lambda a, *args, b,: 0
lambda a, *, b,: 0
lambda a, *args, **kwds,: 0
lambda *args, b, **kwds,: 0
lambda *, b, **kwds,: 0
lambda a, *args, b, **kwds,: 0
lambda a, *, b, **kwds,: 0
lambda a, /: a
lambda a, /, c: a
# Dangling comments without parameters.
(
lambda: # 3
None
)
(
lambda:
# 3
None
)
(
lambda: # 1
# 2
# 3
# 4
None # 5
)
(
lambda
# comment
*x
: x
)
(
lambda
# comment
*x,
**y
: x
)
(
lambda
# comment 1
# comment 2
*x
:
# comment 3
x
)
(
lambda
# comment 1
# comment 2
*x,
**y
:
# comment 3
x
)
(
lambda # comment 1
# comment 2
*x
: # comment 3
x
)
(
lambda # comment 1
# comment 2
*x,
y
: # comment 3
x
)
lambda *x: x
(
lambda
# comment
*x
: x
)
lambda: ( # comment
x
)
(
lambda: # comment
x
)
(
lambda:
# comment
x
)
(
lambda: # comment
x
)
(
lambda:
# comment
x
)
(
lambda: # comment
( # comment
x
)
)
(
lambda # 1
# 2
x
: # 3
x: # 3
# 4
# 5
# 6
@@ -928,134 +409,10 @@ lambda: ( # comment
)
(
lambda # 1
# 2
x, # 3
# 4
y
: # 5
# 6
x
lambda x,
# comment
y: z
)
(
lambda
x,
# comment
y
: z
)
# Leading
lambda x: (
lambda y: (
lambda z: (
x
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ y
+ z
)
) # Trailing
) # Trailing
# Leading
lambda x: (
lambda y: (
lambda z: [
x,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
y,
z,
]
)
) # Trailing
# Trailing
lambda self, araa, kkkwargs=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(
*args, **kwargs
), e=1, f=2, g=2: d
# Regression tests for https://github.com/astral-sh/ruff/issues/8179
def a():
return b(
c,
d,
e,
f=lambda
self,
*args,
**kwargs
: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(*args, **kwargs),
)
def a():
return b(
c,
d,
e,
f=lambda
self,
araa,
kkkwargs,
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
args,
kwargs,
e=1,
f=2,
g=2
: d,
g=10,
)
```

View File

@@ -1,424 +0,0 @@
---
source: crates/ruff_python_formatter/tests/fixtures.rs
input_file: crates/ruff_python_formatter/resources/test/fixtures/ruff/expression/optional_parentheses_comments.py
---
## Input
```py
comment_string = "Long lines with inline comments should have their comments appended to the reformatted string's enclosing right parentheses." # This comment gets thrown to the top.
# 88 characters unparenthesized
____aaa = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvv # c
# 88 characters
____aaa = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvv # c
# 89 characters parenthesized (collapse)
____aaa = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
## Parenthesized
# 88 characters unparenthesized
____aaa = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvv # c
)
# 88 characters
____aaa = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvv # c
)
# 89 characters parenthesized (collapse)
____aaa = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
)
## Expression and statement comments
# 88 characters unparenthesized
____aaa = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbb # c
) # d
# 88 characters
____aaa = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvv # c
) # d
# 89 characters parenthesized (collapse)
____aaa = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvv # c
) # d
## Strings
# 88 characters unparenthesized
____aaa = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvv" # c
# 88 characters
____aaa = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvv" # c
# 89 characters parenthesized (collapse)
____aaa = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvv" # c
# Always parenthesize if implicit concatenated
____aaa = (
"aaaaaaaaaaaaaaaaaaaaa" "aaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvvvvv"
) # c
## Numbers
# 88 characters unparenthesized
____aaa = 1111111111111111111111111111111111111111111111111111111111111111111111111 # c
# 88 characters
____aaa = 1111111111111111111111111111111111111111111111111111111111111111111111111111111 # c
# 89 characters parenthesized (collapse)
____aaa = 11111111111111111111111111111111111111111111111111111111111111111111111111111111 # c
## Breaking left
# Should break `[a]` first
____[a] = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvv # c
____[
a
] = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvv # cc
)
(
# some weird comments
____[aaaaaaaaa]
# some weird comments 2
) = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvv # c
# Preserve trailing assignment comments when the expression has own line comments
____aaa = (
# leading
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvv
# trailing
) # cc
def setUpTestData(cls):
cls.happening = (
Happening.objects.create()
) # make sure the defaults are working (#20158)
def setUpTestData(cls):
cls.happening = (
Happening.objects.create # make sure the defaults are working (#20158)
)
if True:
if True:
if True:
# Black layout
model.config.use_cache = (
False # FSTM still requires this hack -> FSTM should probably be refactored s
)
## Annotated Assign
# 88 characters unparenthesized
____a: a = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvv # c
# 88 characters
____a: a = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvv # c
# 89 characters parenthesized (collapse)
____a: a = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
# 88 characters unparenthesized
____a : a = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvv # c
)
# 88 characters
____a: a = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvv # c
)
# 89 characters parenthesized (collapse)
____a: a = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
)
_a: a[b] = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
)
## Augmented Assign
# 88 characters unparenthesized
____aa += aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvv # c
# 88 characters
____aa += aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvv # c
# 89 characters parenthesized (collapse)
____aa += aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
# 88 characters unparenthesized
____aa += (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvv # c
)
# 88 characters
____aa += (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvv # c
)
# 89 characters parenthesized (collapse)
____aa += (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
)
## Return
def test():
# 88 characters unparenthesized
return aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvv # c
def test2():
# 88 characters
return aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvv # c
def test3():
# 89 characters parenthesized (collapse)
return aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvv # c
## Return Parenthesized
def test4():
# 88 characters unparenthesized
return (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvv # c
)
def test5():
# 88 characters
return (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvv # c
)
def test6():
# 89 characters parenthesized (collapse)
return (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvv # c
)
```
## Output
```py
comment_string = "Long lines with inline comments should have their comments appended to the reformatted string's enclosing right parentheses." # This comment gets thrown to the top.
# 88 characters unparenthesized
____aaa = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvv # c
# 88 characters
____aaa = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvv # c
)
# 89 characters parenthesized (collapse)
____aaa = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
## Parenthesized
# 88 characters unparenthesized
____aaa = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvv # c
# 88 characters
____aaa = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvv # c
)
# 89 characters parenthesized (collapse)
____aaa = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
## Expression and statement comments
# 88 characters unparenthesized
____aaa = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbb # c # d
# 88 characters
____aaa = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvv # c # d
)
# 89 characters parenthesized (collapse)
____aaa = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvv # c # d
## Strings
# 88 characters unparenthesized
____aaa = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvv" # c
# 88 characters
____aaa = (
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvv" # c
)
# 89 characters parenthesized (collapse)
____aaa = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvv" # c
# Always parenthesize if implicit concatenated
____aaa = (
"aaaaaaaaaaaaaaaaaaaaa"
"aaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvvvvv"
) # c
## Numbers
# 88 characters unparenthesized
____aaa = 1111111111111111111111111111111111111111111111111111111111111111111111111 # c
# 88 characters
____aaa = (
1111111111111111111111111111111111111111111111111111111111111111111111111111111 # c
)
# 89 characters parenthesized (collapse)
____aaa = 11111111111111111111111111111111111111111111111111111111111111111111111111111111 # c
## Breaking left
# Should break `[a]` first
____[
a
] = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvv # c
____[
a
] = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvv # cc
(
# some weird comments
____[aaaaaaaaa]
# some weird comments 2
) = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvv # c
# Preserve trailing assignment comments when the expression has own line comments
____aaa = (
# leading
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvv
# trailing
) # cc
def setUpTestData(cls):
cls.happening = (
Happening.objects.create()
) # make sure the defaults are working (#20158)
def setUpTestData(cls):
cls.happening = (
Happening.objects.create # make sure the defaults are working (#20158)
)
if True:
if True:
if True:
# Black layout
model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored s
## Annotated Assign
# 88 characters unparenthesized
____a: a = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvv # c
# 88 characters
____a: a = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvv # c
)
# 89 characters parenthesized (collapse)
____a: a = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
# 88 characters unparenthesized
____a: a = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvv # c
# 88 characters
____a: a = (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvv # c
)
# 89 characters parenthesized (collapse)
____a: a = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
_a: a[
b
] = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
## Augmented Assign
# 88 characters unparenthesized
____aa += aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvv # c
# 88 characters
____aa += (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvv # c
)
# 89 characters parenthesized (collapse)
____aa += aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
# 88 characters unparenthesized
____aa += aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvv # c
# 88 characters
____aa += (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvv # c
)
# 89 characters parenthesized (collapse)
____aa += aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvvvvvv # c
## Return
def test():
# 88 characters unparenthesized
return aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvv # c
def test2():
# 88 characters
return (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvv # c
)
def test3():
# 89 characters parenthesized (collapse)
return aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvv # c
## Return Parenthesized
def test4():
# 88 characters unparenthesized
return aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvv # c
def test5():
# 88 characters
return (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvv # c
)
def test6():
# 89 characters parenthesized (collapse)
return aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbvvvvvvvv # c
```

View File

@@ -164,7 +164,9 @@ for converter in connection.ops.get_db_converters(
pass
aaa = bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb # awkward comment
aaa = (
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb # awkward comment
)
def test():
@@ -200,9 +202,13 @@ if True:
if True:
if True:
# Black layout
model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored s
model.config.use_cache = (
False # FSTM still requires this hack -> FSTM should probably be refactored s
)
# Ruff layout
model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored s
model.config.use_cache = (
False
) # FSTM still requires this hack -> FSTM should probably be refactored s
# Regression test for https://github.com/astral-sh/ruff/issues/7463

View File

@@ -146,7 +146,9 @@ list_with_parenthesized_elements5 = [
(2), # trailing outer
]
nested_parentheses1 = 1 # i # j # k
nested_parentheses1 = (
1 # i # j
) # k
nested_parentheses2 = [
(
1 # i

View File

@@ -402,9 +402,6 @@ File selection:
--exclude <FILE_PATTERN> List of paths, used to omit files and/or directories from analysis
--force-exclude Enforce exclusions, even for paths passed to Ruff directly on the command-line. Use `--no-force-exclude` to disable
Format configuration:
--line-length <LINE_LENGTH> Set the line-length
Log levels:
-v, --verbose Enable verbose logging
-q, --quiet Print diagnostics, but nothing else

View File

@@ -81,7 +81,6 @@ natively, including:
- [flake8-super](https://pypi.org/project/flake8-super/)
- [flake8-tidy-imports](https://pypi.org/project/flake8-tidy-imports/)
- [flake8-todos](https://pypi.org/project/flake8-todos/)
- [flake8-trio](https://pypi.org/project/flake8-trio/) ([#8451](https://github.com/astral-sh/ruff/issues/8451))
- [flake8-type-checking](https://pypi.org/project/flake8-type-checking/)
- [flake8-use-pathlib](https://pypi.org/project/flake8-use-pathlib/)
- [flynt](https://pypi.org/project/flynt/) ([#2102](https://github.com/astral-sh/ruff/issues/2102))
@@ -186,7 +185,6 @@ Today, Ruff can be used to replace Flake8 when used with any of the following pl
- [flake8-super](https://pypi.org/project/flake8-super/)
- [flake8-tidy-imports](https://pypi.org/project/flake8-tidy-imports/)
- [flake8-todos](https://pypi.org/project/flake8-todos/)
- [flake8-trio](https://pypi.org/project/flake8-trio/) ([#8451](https://github.com/astral-sh/ruff/issues/8451))
- [flake8-type-checking](https://pypi.org/project/flake8-type-checking/)
- [flake8-use-pathlib](https://pypi.org/project/flake8-use-pathlib/)
- [flynt](https://pypi.org/project/flynt/) ([#2102](https://github.com/astral-sh/ruff/issues/2102))

View File

@@ -277,8 +277,6 @@ Ruff will also respect variants of these action comments with a `# ruff:` prefix
convey that the action comment is intended for Ruff, but are functionally equivalent to the
isort variants.
Unlike isort, Ruff does not respect action comments within docstrings.
See the [isort documentation](https://pycqa.github.io/isort/docs/configuration/action_comments.html)
for more.
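As a quick illustration of the action comments being discussed, the following sketch shows a few common forms; the `# ruff:`-prefixed variants follow from the sentence above, and the isort and Ruff documentation remain the authoritative references.

```python
# Keep this import exactly where it is:
import sys  # isort: skip

# Stop sorting for a block, then resume:
# isort: off
import os
import collections
# isort: on

# The same action comments, explicitly addressed to Ruff:
# ruff: isort: off
import json
import abc
# ruff: isort: on
```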

View File

@@ -1,4 +1,4 @@
PyYAML==6.0.1
PyYAML==6.0
black==23.10.0
mkdocs==1.5.0
git+ssh://git@github.com/astral-sh/mkdocs-material-insiders.git@38c0b8187325c3bab386b666daf3518ac036f2f4

View File

@@ -1,4 +1,4 @@
PyYAML==6.0.1
PyYAML==6.0
black==23.10.0
mkdocs==1.5.0
mkdocs-material==9.1.18

View File

@@ -107,7 +107,6 @@ export default function SourceEditor({
<Editor
beforeMount={handleMount}
options={{
fixedOverflowWidgets: true,
readOnly: false,
minimap: { enabled: false },
fontSize: 14,

View File

@@ -28,7 +28,8 @@ html,
@font-face {
font-family: "Alliance Text";
src: url("../fonts/Alliance-TextRegular.woff2") format("woff2"),
src:
url("../fonts/Alliance-TextRegular.woff2") format("woff2"),
url("../fonts/Alliance-TextRegular.woff") format("woff");
font-weight: normal;
font-style: normal;
@@ -37,7 +38,8 @@ html,
@font-face {
font-family: "Alliance Text";
src: url("../fonts/Alliance-TextMedium.woff2") format("woff2"),
src:
url("../fonts/Alliance-TextMedium.woff2") format("woff2"),
url("../fonts/Alliance-TextMedium.woff") format("woff");
font-weight: 500;
font-style: normal;
@@ -46,7 +48,8 @@ html,
@font-face {
font-family: "Alliance Platt";
src: url("../fonts/Alliance-PlattMedium.woff2") format("woff2"),
src:
url("../fonts/Alliance-PlattMedium.woff2") format("woff2"),
url("../fonts/Alliance-PlattMedium.woff") format("woff");
font-weight: 500;
font-style: normal;
@@ -55,7 +58,8 @@ html,
@font-face {
font-family: "Alliance Platt";
src: url("../fonts/Alliance-PlattRegular.woff2") format("woff2"),
src:
url("../fonts/Alliance-PlattRegular.woff2") format("woff2"),
url("../fonts/Alliance-PlattRegular.woff") format("woff");
font-weight: normal;
font-style: normal;

View File

@@ -52,9 +52,6 @@ exclude = [
"crates/ruff_linter/resources/test/fixtures/**/*",
"crates/ruff_linter/src/rules/*/snapshots/**/*"
]
include = [
"rust-toolchain.toml"
]
[tool.ruff]
extend-exclude = [

View File

@@ -31,21 +31,6 @@ Run `ruff format` ecosystem checks comparing your debug build to your system Ruf
ruff-ecosystem format ruff "./target/debug/ruff"
```
Run `ruff format` ecosystem checks comparing with changes to code that is already formatted:
```shell
ruff-ecosystem format ruff "./target/debug/ruff" --format-comparison ruff-then-ruff
```
Run `ruff format` ecosystem checks comparing with the Black formatter:
```shell
ruff-ecosystem format black ruff -v --cache python/checkouts --format-comparison black-and-ruff
```
The default output format is markdown, which includes nice summaries of the changes. You can use `--output-format json` to display the raw data — this is
particularly useful when making changes to the ecosystem checks.
## Development
When developing, it can be useful to set the `--pdb` flag to drop into a debugger on failure:

View File

@@ -24,11 +24,12 @@ from ruff_ecosystem.types import (
Comparison,
Diff,
Result,
ToolError,
RuffError,
Serializable,
)
if TYPE_CHECKING:
from ruff_ecosystem.projects import CheckOptions, ClonedRepository, Project
from ruff_ecosystem.projects import ClonedRepository, Project
# Matches lines that are summaries rather than diagnostics
@@ -500,8 +501,8 @@ async def ruff_check(
*, executable: Path, path: Path, name: str, options: CheckOptions
) -> Sequence[str]:
"""Run the given ruff binary against the specified path."""
ruff_args = options.to_ruff_args()
logger.debug(f"Checking {name} with {executable} " + " ".join(ruff_args))
logger.debug(f"Checking {name} with {executable}")
ruff_args = options.to_cli_args()
start = time.time()
proc = await create_subprocess_exec(
@@ -518,7 +519,7 @@ async def ruff_check(
logger.debug(f"Finished checking {name} with {executable} in {end - start:.2f}s")
if proc.returncode != 0:
raise ToolError(err.decode("utf8"))
raise RuffError(err.decode("utf8"))
# Strip summary lines so the diff is only diagnostic lines
lines = [
@@ -528,3 +529,35 @@ async def ruff_check(
]
return lines
@dataclass(frozen=True)
class CheckOptions(Serializable):
"""
Ruff check options
"""
select: str = ""
ignore: str = ""
exclude: str = ""
# Generating fixes is slow and verbose
show_fixes: bool = False
# Limit the number of reported lines per rule
max_lines_per_rule: int | None = 50
def markdown(self) -> str:
return f"select {self.select} ignore {self.ignore} exclude {self.exclude}"
def to_cli_args(self) -> list[str]:
args = ["check", "--no-cache", "--exit-zero"]
if self.select:
args.extend(["--select", self.select])
if self.ignore:
args.extend(["--ignore", self.ignore])
if self.exclude:
args.extend(["--exclude", self.exclude])
if self.show_fixes:
args.extend(["--show-fixes", "--ecosystem-ci"])
return args
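For a sense of how these options translate into a command line, a small usage sketch relying on the `CheckOptions` dataclass defined in the hunk above; the commented output is what `to_cli_args` as written would produce.

```python
opts = CheckOptions(select="E501,F401", show_fixes=True)
print(opts.to_cli_args())
# ['check', '--no-cache', '--exit-zero',
#  '--select', 'E501,F401',
#  '--show-fixes', '--ecosystem-ci']
```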

View File

@@ -12,7 +12,6 @@ from signal import SIGINT, SIGTERM
from ruff_ecosystem import logger
from ruff_ecosystem.defaults import DEFAULT_TARGETS
from ruff_ecosystem.format import FormatComparison
from ruff_ecosystem.main import OutputFormat, main
from ruff_ecosystem.projects import RuffCommand
@@ -46,58 +45,45 @@ def entrypoint():
tempfile.TemporaryDirectory() if not args.cache else nullcontext(args.cache)
)
baseline_executable = args.baseline_executable
if not args.baseline_executable.exists():
baseline_executable = get_executable_path(str(args.baseline_executable))
if not baseline_executable:
ruff_baseline = args.ruff_baseline
if not args.ruff_baseline.exists():
ruff_baseline = get_executable_path(str(args.ruff_baseline))
if not ruff_baseline:
print(
f"Could not find ruff baseline executable: {args.baseline_executable}",
f"Could not find ruff baseline executable: {args.ruff_baseline}",
sys.stderr,
)
exit(1)
logger.info(
"Resolved baseline executable %s to %s",
args.baseline_executable,
baseline_executable,
"Resolved baseline executable %s to %s", args.ruff_baseline, ruff_baseline
)
comparison_executable = args.comparison_executable
if not args.comparison_executable.exists():
comparison_executable = get_executable_path(str(args.comparison_executable))
if not comparison_executable:
ruff_comparison = args.ruff_comparison
if not args.ruff_comparison.exists():
ruff_comparison = get_executable_path(str(args.ruff_comparison))
if not ruff_comparison:
print(
f"Could not find ruff comparison executable: {args.comparison_executable}",
f"Could not find ruff comparison executable: {args.ruff_comparison}",
sys.stderr,
)
exit(1)
logger.info(
"Resolved comparison executable %s to %s",
args.comparison_executable,
comparison_executable,
args.ruff_comparison,
ruff_comparison,
)
targets = DEFAULT_TARGETS
if args.force_preview:
targets = [target.with_preview_enabled() for target in targets]
format_comparison = (
FormatComparison(args.format_comparison)
if args.ruff_command == RuffCommand.format.value
else None
)
with cache_context as cache:
loop = asyncio.get_event_loop()
main_task = asyncio.ensure_future(
main(
command=RuffCommand(args.ruff_command),
baseline_executable=baseline_executable,
comparison_executable=comparison_executable,
targets=targets,
ruff_baseline_executable=ruff_baseline,
ruff_comparison_executable=ruff_comparison,
targets=DEFAULT_TARGETS,
format=OutputFormat(args.output_format),
project_dir=Path(cache),
raise_on_failure=args.pdb,
format_comparison=format_comparison,
)
)
# https://stackoverflow.com/a/58840987/3549270
@@ -130,8 +116,8 @@ def parse_args() -> argparse.Namespace:
)
parser.add_argument(
"--output-format",
choices=[option.value for option in OutputFormat],
default="markdown",
choices=[option.name for option in OutputFormat],
default="json",
help="Location for caching cloned repositories",
)
parser.add_argument(
@@ -145,28 +131,17 @@ def parse_args() -> argparse.Namespace:
action="store_true",
help="Enable debugging on failure",
)
parser.add_argument(
"--force-preview",
action="store_true",
help="Force preview mode to be enabled for all projects",
)
parser.add_argument(
"--format-comparison",
choices=[option.value for option in FormatComparison],
default=FormatComparison.ruff_and_ruff,
help="Type of comparison to make when checking formatting.",
)
parser.add_argument(
"ruff_command",
choices=[option.value for option in RuffCommand],
choices=[option.name for option in RuffCommand],
help="The Ruff command to test",
)
parser.add_argument(
"baseline_executable",
"ruff_baseline",
type=Path,
)
parser.add_argument(
"comparison_executable",
"ruff_comparison",
type=Path,
)

View File

@@ -6,7 +6,7 @@ from __future__ import annotations
import time
from asyncio import create_subprocess_exec
from enum import Enum
from dataclasses import dataclass
from pathlib import Path
from subprocess import PIPE
from typing import TYPE_CHECKING, Sequence
@@ -15,10 +15,10 @@ from unidiff import PatchSet
from ruff_ecosystem import logger
from ruff_ecosystem.markdown import markdown_project_section
from ruff_ecosystem.types import Comparison, Diff, Result, ToolError
from ruff_ecosystem.types import Comparison, Diff, Result, RuffError
if TYPE_CHECKING:
from ruff_ecosystem.projects import ClonedRepository, FormatOptions
from ruff_ecosystem.projects import ClonedRepository
def markdown_format_result(result: Result) -> str:
@@ -124,89 +124,28 @@ async def compare_format(
ruff_comparison_executable: Path,
options: FormatOptions,
cloned_repo: ClonedRepository,
format_comparison: FormatComparison,
):
args = (ruff_baseline_executable, ruff_comparison_executable, options, cloned_repo)
match format_comparison:
case FormatComparison.ruff_then_ruff:
coro = format_then_format(Formatter.ruff, *args)
case FormatComparison.ruff_and_ruff:
coro = format_and_format(Formatter.ruff, *args)
case FormatComparison.black_then_ruff:
coro = format_then_format(Formatter.black, *args)
case FormatComparison.black_and_ruff:
coro = format_and_format(Formatter.black, *args)
case _:
raise ValueError(f"Unknown format comparison type {format_comparison!r}.")
diff = await coro
return Comparison(diff=Diff(diff), repo=cloned_repo)
async def format_then_format(
baseline_formatter: Formatter,
ruff_baseline_executable: Path,
ruff_comparison_executable: Path,
options: FormatOptions,
cloned_repo: ClonedRepository,
) -> Sequence[str]:
# Run format to get the baseline
await format(
formatter=baseline_formatter,
# Run format without diff to get the baseline
await ruff_format(
executable=ruff_baseline_executable.resolve(),
path=cloned_repo.path,
name=cloned_repo.fullname,
options=options,
)
# Then get the diff from stdout
diff = await format(
formatter=Formatter.ruff,
diff = await ruff_format(
executable=ruff_comparison_executable.resolve(),
path=cloned_repo.path,
name=cloned_repo.fullname,
options=options,
diff=True,
)
return diff
return Comparison(diff=Diff(diff), repo=cloned_repo)
async def format_and_format(
baseline_formatter: Formatter,
ruff_baseline_executable: Path,
ruff_comparison_executable: Path,
options: FormatOptions,
cloned_repo: ClonedRepository,
) -> Sequence[str]:
# Run format without diff to get the baseline
await format(
formatter=baseline_formatter,
executable=ruff_baseline_executable.resolve(),
path=cloned_repo.path,
name=cloned_repo.fullname,
options=options,
)
# Commit the changes
commit = await cloned_repo.commit(
message=f"Formatted with baseline {ruff_baseline_executable}"
)
# Then reset
await cloned_repo.reset()
# Then run format again
await format(
formatter=Formatter.ruff,
executable=ruff_comparison_executable.resolve(),
path=cloned_repo.path,
name=cloned_repo.fullname,
options=options,
)
# Then get the diff from the commit
diff = await cloned_repo.diff(commit)
return diff
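For contrast, the removed format_and_format path compares two independent runs over the unformatted checkout by round-tripping through git. A rough standalone sketch of that workflow; the "origin/main" ref, the helper name, and the executable paths are assumptions:

import subprocess
from pathlib import Path

def format_and_format_sketch(baseline: Path, comparison: Path, repo: Path) -> str:
    def run(*cmd, capture: bool = False) -> subprocess.CompletedProcess:
        return subprocess.run(
            list(cmd), cwd=repo, text=True,
            capture_output=capture, check=False,
        )

    run(baseline, "format", ".")                                      # format the unformatted checkout with the baseline
    run("git", "commit", "--allow-empty", "-am", "baseline format")   # requires git user.name/email to be configured
    sha = run("git", "rev-parse", "HEAD", capture=True).stdout.strip()
    run("git", "reset", "--hard", "origin/main")                      # discard the baseline formatting
    run(comparison, "format", ".")                                    # format the same original code with the comparison
    return run("git", "diff", sha, capture=True).stdout               # difference between the two formatted states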
async def format(
async def ruff_format(
*,
formatter: Formatter,
executable: Path,
path: Path,
name: str,
@@ -214,20 +153,16 @@ async def format(
diff: bool = False,
) -> Sequence[str]:
"""Run the given ruff binary against the specified path."""
args = (
options.to_ruff_args()
if formatter == Formatter.ruff
else options.to_black_args()
)
logger.debug(f"Formatting {name} with {executable} " + " ".join(args))
logger.debug(f"Formatting {name} with {executable}")
ruff_args = options.to_cli_args()
if diff:
args.append("--diff")
ruff_args.append("--diff")
start = time.time()
proc = await create_subprocess_exec(
executable.absolute(),
*args,
*ruff_args,
".",
stdout=PIPE,
stderr=PIPE,
@@ -239,34 +174,22 @@ async def format(
logger.debug(f"Finished formatting {name} with {executable} in {end - start:.2f}s")
if proc.returncode not in [0, 1]:
raise ToolError(err.decode("utf8"))
raise RuffError(err.decode("utf8"))
lines = result.decode("utf8").splitlines()
return lines
class FormatComparison(Enum):
ruff_then_ruff = "ruff-then-ruff"
@dataclass(frozen=True)
class FormatOptions:
"""
Run Ruff baseline then Ruff comparison; checks for changes in behavior when formatting previously "formatted" code
Ruff format options.
"""
ruff_and_ruff = "ruff-and-ruff"
"""
Run Ruff baseline then reset and run Ruff comparison; checks changes in behavior when formatting "unformatted" code
"""
exclude: str = ""
black_then_ruff = "black-then-ruff"
"""
Run Black baseline then Ruff comparison; checks for changes in behavior when formatting previously "formatted" code
"""
black_and_ruff = "black-and-ruff"
"""
Run Black baseline then reset and run Ruff comparison; checks changes in behavior when formatting "unformatted" code
"""
class Formatter(Enum):
black = "black"
ruff = "ruff"
def to_cli_args(self) -> list[str]:
args = ["format"]
if self.exclude:
args.extend(["--exclude", self.exclude])
return args
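The resulting flags are easy to spot-check. A tiny self-contained mirror of the simplified to_cli_args above, for illustration only rather than importing the real class:

def to_cli_args(exclude: str = "") -> list[str]:
    # Mirrors FormatOptions.to_cli_args: always "format", plus an optional exclude.
    args = ["format"]
    if exclude:
        args.extend(["--exclude", exclude])
    return args

assert to_cli_args() == ["format"]
assert to_cli_args("crates/*") == ["format", "--exclude", "crates/*"]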

View File

@@ -7,11 +7,7 @@ from typing import Awaitable, TypeVar
from ruff_ecosystem import logger
from ruff_ecosystem.check import compare_check, markdown_check_result
from ruff_ecosystem.format import (
FormatComparison,
compare_format,
markdown_format_result,
)
from ruff_ecosystem.format import compare_format, markdown_format_result
from ruff_ecosystem.projects import (
Project,
RuffCommand,
@@ -29,21 +25,18 @@ class OutputFormat(Enum):
async def main(
command: RuffCommand,
baseline_executable: Path,
comparison_executable: Path,
ruff_baseline_executable: Path,
ruff_comparison_executable: Path,
targets: list[Project],
project_dir: Path,
format: OutputFormat,
format_comparison: FormatComparison | None,
max_parallelism: int = 50,
raise_on_failure: bool = False,
) -> None:
logger.debug("Using command %s", command.value)
logger.debug("Using baseline executable at %s", baseline_executable)
logger.debug("Using comparison executable at %s", comparison_executable)
logger.debug("Using baseline executable at %s", ruff_baseline_executable)
logger.debug("Using comparison executable at %s", ruff_comparison_executable)
logger.debug("Using checkout_dir directory %s", project_dir)
if format_comparison:
logger.debug("Using format comparison type %s", format_comparison.value)
logger.debug("Checking %s targets", len(targets))
# Limit parallelism to avoid high memory consumption
@@ -58,11 +51,10 @@ async def main(
limited_parallelism(
clone_and_compare(
command,
baseline_executable,
comparison_executable,
ruff_baseline_executable,
ruff_comparison_executable,
target,
project_dir,
format_comparison,
)
)
for target in targets
@@ -100,11 +92,10 @@ async def main(
async def clone_and_compare(
command: RuffCommand,
baseline_executable: Path,
comparison_executable: Path,
ruff_baseline_executable: Path,
ruff_comparison_executable: Path,
target: Project,
project_dir: Path,
format_comparison: FormatComparison | None,
) -> Comparison:
"""Check a specific repository against two versions of ruff."""
assert ":" not in target.repo.owner
@@ -112,12 +103,14 @@ async def clone_and_compare(
match command:
case RuffCommand.check:
compare, options, kwargs = (compare_check, target.check_options, {})
compare, options = (
compare_check,
target.check_options,
)
case RuffCommand.format:
compare, options, kwargs = (
compare, options = (
compare_format,
target.format_options,
{"format_comparison": format_comparison},
)
case _:
raise ValueError(f"Unknown target Ruff command {command}")
@@ -127,11 +120,10 @@ async def clone_and_compare(
try:
return await compare(
baseline_executable,
comparison_executable,
ruff_baseline_executable,
ruff_comparison_executable,
options,
cloned_repo,
**kwargs,
)
except ExceptionGroup as e:
raise e.exceptions[0] from e

View File

@@ -11,13 +11,9 @@ def markdown_project_section(
) -> list[str]:
return markdown_details(
summary=f'<a href="{project.repo.url}">{project.repo.fullname}</a> ({title})',
# Show the command used for the check
preface="<pre>ruff " + " ".join(options.to_cli_args()) + "</pre>",
content=content,
preface=(
# Show the command used for the check if the options are non-default
"<pre>ruff " + " ".join(options.to_ruff_args()) + "</pre>"
if options != type(options)()
else None
),
)
@@ -28,13 +24,12 @@ def markdown_plus_minus(added: int, removed: int) -> str:
return f"+{added} -{removed}"
def markdown_details(summary: str, content: str | list[str], preface: str):
def markdown_details(summary: str, preface: str, content: str | list[str]):
lines = []
lines.append(f"<details><summary>{summary}</summary>")
if preface:
lines.append("<p>")
lines.append(preface)
lines.append("</p>")
lines.append("<p>")
lines.append(preface)
lines.append("</p>")
lines.append("<p>")
lines.append("")

View File

@@ -4,16 +4,16 @@ Abstractions and utilities for working with projects to run ecosystem checks on.
from __future__ import annotations
import abc
import dataclasses
from asyncio import create_subprocess_exec
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from subprocess import DEVNULL, PIPE
from subprocess import PIPE
from typing import Self
from ruff_ecosystem import logger
from ruff_ecosystem.check import CheckOptions
from ruff_ecosystem.format import FormatOptions
from ruff_ecosystem.types import Serializable
@@ -27,90 +27,12 @@ class Project(Serializable):
check_options: CheckOptions = field(default_factory=lambda: CheckOptions())
format_options: FormatOptions = field(default_factory=lambda: FormatOptions())
def with_preview_enabled(self: Self) -> Self:
return type(self)(
repo=self.repo,
check_options=self.check_options.with_options(preview=True),
format_options=self.format_options.with_options(preview=True),
)
class RuffCommand(Enum):
check = "check"
format = "format"
@dataclass(frozen=True)
class CommandOptions(Serializable, abc.ABC):
def with_options(self: Self, **kwargs) -> Self:
"""
Return a copy of self with the given options set.
"""
return type(self)(**{**dataclasses.asdict(self), **kwargs})
@abc.abstractmethod
def to_ruff_args(self) -> list[str]:
pass
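The with_options helper rebuilds a frozen dataclass with selected fields overridden. A minimal self-contained illustration using a stand-in dataclass (not the real CheckOptions or FormatOptions):

import dataclasses
from dataclasses import dataclass

@dataclass(frozen=True)
class Opts:
    select: str = ""
    preview: bool = False

    def with_options(self, **kwargs) -> "Opts":
        # Copy every current field, then apply the overrides.
        return type(self)(**{**dataclasses.asdict(self), **kwargs})

assert Opts(select="E501").with_options(preview=True) == Opts(select="E501", preview=True)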
@dataclass(frozen=True)
class CheckOptions(CommandOptions):
"""
Ruff check options
"""
select: str = ""
ignore: str = ""
exclude: str = ""
preview: bool = False
# Generating fixes is slow and verbose
show_fixes: bool = False
# Limit the number of reported lines per rule
max_lines_per_rule: int | None = 50
def to_ruff_args(self) -> list[str]:
args = ["check", "--no-cache", "--exit-zero"]
if self.select:
args.extend(["--select", self.select])
if self.ignore:
args.extend(["--ignore", self.ignore])
if self.exclude:
args.extend(["--exclude", self.exclude])
if self.show_fixes:
args.extend(["--show-fixes", "--ecosystem-ci"])
if self.preview:
args.append("--preview")
return args
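As a hedged example of the arguments this produces (the rule code and the trailing project path are placeholders; --show-fixes and --ecosystem-ci only appear when show_fixes is set):

# CheckOptions(select="RUF", preview=True).to_ruff_args() would yield:
expected = ["check", "--no-cache", "--exit-zero", "--select", "RUF", "--preview"]
print("ruff " + " ".join(expected) + " .")  # ruff check --no-cache --exit-zero --select RUF --preview .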
@dataclass(frozen=True)
class FormatOptions(CommandOptions):
"""
Format ecosystem check options.
"""
preview: bool = False
exclude: str = ""
def to_ruff_args(self) -> list[str]:
args = ["format"]
if self.exclude:
args.extend(["--exclude", self.exclude])
if self.preview:
args.append("--preview")
return args
def to_black_args(self) -> list[str]:
args = []
if self.exclude:
args.extend(["--exclude", self.exclude])
if self.preview:
args.append("--preview")
return args
class ProjectSetupError(Exception):
"""An error setting up a project."""
@@ -138,11 +60,10 @@ class Repository(Serializable):
Shallow clone this repository
"""
if checkout_dir.exists():
logger.debug(f"Reusing cached {self.fullname}")
logger.debug(f"Reusing {self.owner}:{self.name}")
if self.ref:
logger.debug(f"Checking out {self.fullname} @ {self.ref}")
logger.debug(f"Checking out ref {self.ref}")
process = await create_subprocess_exec(
*["git", "checkout", "-f", self.ref],
cwd=checkout_dir,
@@ -156,13 +77,7 @@ class Repository(Serializable):
f"Failed to checkout {self.ref}: {stderr.decode()}"
)
cloned_repo = await ClonedRepository.from_path(checkout_dir, self)
await cloned_repo.reset()
logger.debug(f"Pulling latest changes for {self.fullname} @ {self.ref}")
await cloned_repo.pull()
return cloned_repo
return await ClonedRepository.from_path(checkout_dir, self)
logger.debug(f"Cloning {self.owner}:{self.name} to {checkout_dir}")
command = [
@@ -194,28 +109,6 @@ class Repository(Serializable):
logger.debug(
f"Finished cloning {self.fullname} with status {status_code}",
)
# Configure git user — needed for `self.commit` to work
await (
await create_subprocess_exec(
*["git", "config", "user.email", "ecosystem@astral.sh"],
cwd=checkout_dir,
env={"GIT_TERMINAL_PROMPT": "0"},
stdout=DEVNULL,
stderr=DEVNULL,
)
).wait()
await (
await create_subprocess_exec(
*["git", "config", "user.name", "Ecosystem Bot"],
cwd=checkout_dir,
env={"GIT_TERMINAL_PROMPT": "0"},
stdout=DEVNULL,
stderr=DEVNULL,
)
).wait()
return await ClonedRepository.from_path(checkout_dir, self)
@@ -273,73 +166,3 @@ class ClonedRepository(Repository, Serializable):
raise ProjectSetupError(f"Failed to retrieve commit sha at {checkout_dir}")
return stdout.decode().strip()
async def reset(self: Self) -> None:
"""
Reset the cloned repository to the ref it started at.
"""
process = await create_subprocess_exec(
*(["git", "reset", "--hard", "origin/" + self.ref] if self.ref else ["git", "reset", "--hard"]),
cwd=self.path,
env={"GIT_TERMINAL_PROMPT": "0"},
stdout=PIPE,
stderr=PIPE,
)
_, stderr = await process.communicate()
if await process.wait() != 0:
raise RuntimeError(f"Failed to reset: {stderr.decode()}")
async def pull(self: Self) -> None:
"""
Pull the latest changes.
Typically `reset` should be run first.
"""
process = await create_subprocess_exec(
*["git", "pull"],
cwd=self.path,
env={"GIT_TERMINAL_PROMPT": "0"},
stdout=PIPE,
stderr=PIPE,
)
_, stderr = await process.communicate()
if await process.wait() != 0:
raise RuntimeError(f"Failed to pull: {stderr.decode()}")
async def commit(self: Self, message: str) -> str:
"""
Commit all current changes.
Empty commits are allowed.
"""
process = await create_subprocess_exec(
*["git", "commit", "--allow-empty", "-a", "-m", message],
cwd=self.path,
env={"GIT_TERMINAL_PROMPT": "0"},
stdout=PIPE,
stderr=PIPE,
)
_, stderr = await process.communicate()
if await process.wait() != 0:
raise RuntimeError(f"Failed to commit: {stderr.decode()}")
return await self._get_head_commit(self.path)
async def diff(self: Self, *args: str) -> list[str]:
"""
Get the current diff from git.
Arguments are passed to `git diff ...`
"""
process = await create_subprocess_exec(
*["git", "diff", *args],
cwd=self.path,
env={"GIT_TERMINAL_PROMPT": "0"},
stdout=PIPE,
stderr=PIPE,
)
stdout, stderr = await process.communicate()
if await process.wait() != 0:
raise RuntimeError(f"Failed to diff: {stderr.decode()}")
return stdout.decode().splitlines()

View File

@@ -89,5 +89,5 @@ class Comparison(Serializable):
repo: ClonedRepository
class ToolError(Exception):
"""An error reported by the checked executable."""
class RuffError(Exception):
"""An error reported by Ruff."""

ruff.schema.json (generated)
View File

@@ -2903,9 +2903,6 @@
"NPY001",
"NPY002",
"NPY003",
"NPY2",
"NPY20",
"NPY201",
"NURSERY",
"PD",
"PD0",
@@ -3468,10 +3465,6 @@
"TID251",
"TID252",
"TID253",
"TRIO",
"TRIO1",
"TRIO10",
"TRIO100",
"TRY",
"TRY0",
"TRY00",

View File

@@ -36,44 +36,16 @@ def get_mapping_data() -> dict:
return json.loads(json.loads(content))
def format_number(number: int) -> str:
"""Underscore-separate the digits of a number."""
# For unknown historical reasons, numbers greater than 100,000 were
# underscore-delimited in the generated file, so we now preserve that property to
# avoid unnecessary churn.
if number > 100000:
number = str(number)
number = "_".join(number[i : i + 3] for i in range(0, len(number), 3))
return f"{number}_u32"
return f"{number}u32"
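A couple of worked values make the branch above concrete; these are editor-supplied examples, not output captured from the script, and the inputs are chosen so the three-digit grouping is unambiguous:

# Illustrative expectations for format_number as defined above:
#   format_number(99)      -> "99u32"        (100,000 or less: plain "u32" suffix)
#   format_number(212_064) -> "212_064_u32"  (above 100,000: digits grouped in threes, "_u32" suffix)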
def format_confusables_rs(raw_data: dict[str, list[int]]) -> str:
"""Format the downloaded data into a Rust source file."""
# The input data contains duplicate entries.
# The input data contains duplicate entries
flattened_items: set[tuple[int, int]] = set()
for _category, items in raw_data.items():
assert len(items) % 2 == 0, "Expected pairs of items"
for i in range(0, len(items), 2):
flattened_items.add((items[i], items[i + 1]))
tuples = [
f" {format_number(left)} => {right},\n"
for left, right in sorted(flattened_items)
]
# Add some additional confusable pairs that are not included in the VS Code data,
# as they're unicode-to-unicode confusables, not unicode-to-ASCII confusables.
confusable_units = [
# ANGSTROM SIGN → LATIN CAPITAL LETTER A WITH RING ABOVE
("0x212B", "0x00C5"),
# OHM SIGN → GREEK CAPITAL LETTER OMEGA
("0x2126", "0x03A9"),
# MICRO SIGN → GREEK SMALL LETTER MU
("0x00B5", "0x03BC"),
]
tuples += [f" {left} => {right},\n" for left, right in confusable_units]
tuples = [f" {left}u32 => {right},\n" for left, right in sorted(flattened_items)]
print(f"{len(tuples)} confusable tuples.")
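Each flattened pair becomes one Rust match arm in the generated file. A hedged illustration of the new single-format output, assuming for the sake of example that the pair (8232, 32) appears in the data:

left, right = 8232, 32
print(f"{left}u32 => {right},")  # -> 8232u32 => 32,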