Compare commits

..

1 Commits

Author SHA1 Message Date
Zanie
0e36b9e709 Allow globs in external codes setting 2023-10-24 12:22:04 -05:00
122 changed files with 619 additions and 4032 deletions

View File

@@ -43,7 +43,6 @@ jobs:
- "!crates/ruff_dev/**"
- "!crates/ruff_shrinking/**"
- scripts/*
- .github/workflows/ci.yaml
formatter:
- Cargo.toml
@@ -58,7 +57,6 @@ jobs:
- crates/ruff_python_parser/**
- crates/ruff_dev/**
- scripts/*
- .github/workflows/ci.yaml
cargo-fmt:
name: "cargo fmt"
@@ -84,9 +82,12 @@ jobs:
- name: "Clippy (wasm)"
run: cargo clippy -p ruff_wasm --target wasm32-unknown-unknown --all-features -- -D warnings
cargo-test-linux:
runs-on: ubuntu-latest
name: "cargo test (linux)"
cargo-test:
strategy:
matrix:
os: [ubuntu-latest, windows-latest]
runs-on: ${{ matrix.os }}
name: "cargo test | ${{ matrix.os }}"
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
@@ -96,34 +97,40 @@ jobs:
with:
tool: cargo-insta
- uses: Swatinem/rust-cache@v2
- name: "Run tests"
- name: "Run tests (Ubuntu)"
if: ${{ matrix.os == 'ubuntu-latest' }}
run: cargo insta test --all --all-features --unreferenced reject
- name: "Run tests (Windows)"
if: ${{ matrix.os == 'windows-latest' }}
shell: bash
# We can't reject unreferenced snapshots on windows because flake8_executable can't run on windows
run: cargo insta test --all --all-features
# Check for broken links in the documentation.
- run: cargo doc --all --no-deps
env:
# Setting RUSTDOCFLAGS because `cargo doc --check` isn't yet implemented (https://github.com/rust-lang/cargo/issues/10025).
RUSTDOCFLAGS: "-D warnings"
- uses: actions/upload-artifact@v3
if: ${{ matrix.os == 'ubuntu-latest' }}
with:
name: ruff
path: target/debug/ruff
cargo-test-windows:
runs-on: windows-latest
name: "cargo test (windows)"
cargo-fuzz:
runs-on: ubuntu-latest
name: "cargo fuzz"
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup show
- name: "Install cargo insta"
- uses: Swatinem/rust-cache@v2
with:
workspaces: "fuzz -> target"
- name: "Install cargo-fuzz"
uses: taiki-e/install-action@v2
with:
tool: cargo-insta
- uses: Swatinem/rust-cache@v2
- name: "Run tests"
shell: bash
# We can't reject unreferenced snapshots on windows because flake8_executable can't run on windows
run: cargo insta test --all --all-features
tool: cargo-fuzz@0.11
- run: cargo fuzz build -s none
cargo-test-wasm:
runs-on: ubuntu-latest
@@ -144,22 +151,6 @@ jobs:
cd crates/ruff_wasm
wasm-pack test --node
cargo-fuzz:
runs-on: ubuntu-latest
name: "cargo fuzz"
steps:
- uses: actions/checkout@v4
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v2
with:
workspaces: "fuzz -> target"
- name: "Install cargo-fuzz"
uses: taiki-e/install-action@v2
with:
tool: cargo-fuzz@0.11
- run: cargo fuzz build -s none
scripts:
name: "test scripts"
runs-on: ubuntu-latest
@@ -181,10 +172,10 @@ jobs:
name: "ecosystem"
runs-on: ubuntu-latest
needs:
- cargo-test-linux
- cargo-test
- determine_changes
# Only runs on pull requests, since that is the only we way we can find the base version for comparison.
if: github.event_name == 'pull_request'
if: github.event_name == 'pull_request' && needs.determine_changes.outputs.linter == 'true'
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
@@ -192,48 +183,27 @@ jobs:
python-version: ${{ env.PYTHON_VERSION }}
- uses: actions/download-artifact@v3
name: Download comparison Ruff binary
name: Download Ruff binary
id: ruff-target
with:
name: ruff
path: target/debug
- uses: dawidd6/action-download-artifact@v2
name: Download baseline Ruff binary
name: Download base results
with:
name: ruff
branch: ${{ github.event.pull_request.base.ref }}
check_artifacts: true
- name: Install ruff-ecosystem
run: |
pip install ./python/ruff-ecosystem
- name: Run `ruff check` ecosystem check
if: ${{ needs.determine_changes.outputs.linter == 'true' }}
- name: Run ecosystem check
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff
chmod +x ruff ${{ steps.ruff-target.outputs.download-path }}/ruff
ruff-ecosystem check ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff --cache ./checkouts --output-format markdown | tee ecosystem-result-check
scripts/check_ecosystem.py ruff ${{ steps.ruff-target.outputs.download-path }}/ruff | tee ecosystem-result
cat ecosystem-result > $GITHUB_STEP_SUMMARY
cat ecosystem-result-check > $GITHUB_STEP_SUMMARY
cat ecosystem-result-check > ecosystem-result
echo "" >> ecosystem-result
- name: Run `ruff format` ecosystem check
if: ${{ needs.determine_changes.outputs.formatter == 'true' }}
run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff
ruff-ecosystem format ./ruff ${{ steps.ruff-target.outputs.download-path }}/ruff --cache ./checkouts --output-format markdown | tee ecosystem-result-format
cat ecosystem-result-format > $GITHUB_STEP_SUMMARY
cat ecosystem-result-format >> ecosystem-result
- name: Export pull request number
run: |
echo ${{ github.event.number }} > pr-number
- uses: actions/upload-artifact@v3
@@ -370,7 +340,7 @@ jobs:
check-ruff-lsp:
name: "test ruff-lsp"
runs-on: ubuntu-latest
needs: cargo-test-linux
needs: cargo-test
steps:
- uses: extractions/setup-just@v1
env:

View File

@@ -1,53 +1,9 @@
# Changelog
## 0.1.3
This release includes a variety of improvements to the Ruff formatter, removing several known and
unintentional deviations from Black.
### Formatter
- Avoid space around pow for `None`, `True` and `False` ([#8189](https://github.com/astral-sh/ruff/pull/8189))
- Avoid sorting all paths in the format command ([#8181](https://github.com/astral-sh/ruff/pull/8181))
- Insert necessary blank line between class and leading comments ([#8224](https://github.com/astral-sh/ruff/pull/8224))
- Avoid introducing new parentheses in annotated assignments ([#8233](https://github.com/astral-sh/ruff/pull/8233))
- Refine the warnings about incompatible linter options ([#8196](https://github.com/astral-sh/ruff/pull/8196))
- Add test and basic implementation for formatter preview mode ([#8044](https://github.com/astral-sh/ruff/pull/8044))
- Refine warning about incompatible `isort` settings ([#8192](https://github.com/astral-sh/ruff/pull/8192))
- Only omit optional parentheses for starting or ending with parentheses ([#8238](https://github.com/astral-sh/ruff/pull/8238))
- Use source type to determine parser mode for formatting ([#8205](https://github.com/astral-sh/ruff/pull/8205))
- Don't warn about magic trailing comma when `isort.force-single-line` is true ([#8244](https://github.com/astral-sh/ruff/pull/8244))
- Use `SourceKind::diff` for formatter ([#8240](https://github.com/astral-sh/ruff/pull/8240))
- Fix `fmt:off` with trailing child comment ([#8234](https://github.com/astral-sh/ruff/pull/8234))
- Formatter parentheses support for `IpyEscapeCommand` ([#8207](https://github.com/astral-sh/ruff/pull/8207))
### Linter
- \[`pylint`\] Add buffer methods to `bad-dunder-method-name` (`PLW3201`) exclusions ([#8190](https://github.com/astral-sh/ruff/pull/8190))
- Match rule prefixes from `external` codes setting in `unused-noqa` ([#8177](https://github.com/astral-sh/ruff/pull/8177))
- Use `line-length` setting for isort in lieu of `pycodestyle.max-line-length` ([#8235](https://github.com/astral-sh/ruff/pull/8235))
- Update fix for `unnecessary-paren-on-raise-exception` to unsafe for unknown types ([#8231](https://github.com/astral-sh/ruff/pull/8231))
- Correct quick fix message for `W605` ([#8255](https://github.com/astral-sh/ruff/pull/8255))
### Documentation
- Fix typo in max-doc-length documentation ([#8201](https://github.com/astral-sh/ruff/pull/8201))
- Improve documentation around linter-formatter conflicts ([#8257](https://github.com/astral-sh/ruff/pull/8257))
- Fix link to error suppression documentation in `unused-noqa` ([#8172](https://github.com/astral-sh/ruff/pull/8172))
- Add `external` option to `unused-noqa` documentation ([#8171](https://github.com/astral-sh/ruff/pull/8171))
- Add title attribute to icons ([#8060](https://github.com/astral-sh/ruff/pull/8060))
- Clarify unsafe case in RSE102 ([#8256](https://github.com/astral-sh/ruff/pull/8256))
- Fix skipping formatting examples ([#8210](https://github.com/astral-sh/ruff/pull/8210))
- docs: fix name of `magic-trailing-comma` option in README ([#8200](https://github.com/astral-sh/ruff/pull/8200))
- Add note about scope of rule changing in versioning policy ([#8169](https://github.com/astral-sh/ruff/pull/8169))
- Document: Fix default lint rules ([#8218](https://github.com/astral-sh/ruff/pull/8218))
- Fix a wrong setting in configuration.md ([#8186](https://github.com/astral-sh/ruff/pull/8186))
- Fix misspelled TOML headers in the tutorial ([#8209](https://github.com/astral-sh/ruff/pull/8209))
## 0.1.2
This release includes the Beta version of the Ruff formatter — an extremely fast, Black-compatible Python formatter.
Try it today with `ruff format`! [Check out the blog post](https://astral.sh/blog/the-ruff-formatter) and [read the docs](https://docs.astral.sh/ruff/formatter/).
Try it today with `ruff format`.
### Preview features

9
Cargo.lock generated
View File

@@ -810,7 +810,7 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
[[package]]
name = "flake8-to-ruff"
version = "0.1.3"
version = "0.1.2"
dependencies = [
"anyhow",
"clap",
@@ -2051,7 +2051,7 @@ dependencies = [
[[package]]
name = "ruff_cli"
version = "0.1.3"
version = "0.1.2"
dependencies = [
"annotate-snippets 0.9.1",
"anyhow",
@@ -2188,7 +2188,7 @@ dependencies = [
[[package]]
name = "ruff_linter"
version = "0.1.3"
version = "0.1.2"
dependencies = [
"aho-corasick",
"annotate-snippets 0.9.1",
@@ -2381,7 +2381,6 @@ dependencies = [
"itertools 0.11.0",
"lalrpop",
"lalrpop-util",
"memchr",
"ruff_python_ast",
"ruff_text_size",
"rustc-hash",
@@ -2439,7 +2438,7 @@ dependencies = [
[[package]]
name = "ruff_shrinking"
version = "0.1.3"
version = "0.1.2"
dependencies = [
"anyhow",
"clap",

View File

@@ -151,7 +151,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com/) hook via [`ruff
# Run the Ruff linter.
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.1.3
rev: v0.1.2
hooks:
# Run the Ruff linter.
- id: ruff
@@ -238,7 +238,7 @@ quote-style = "double"
indent-style = "space"
# Like Black, respect magic trailing commas.
skip-magic-trailing-comma = false
magic-trailing-comma = "respect"
# Like Black, automatically detect the appropriate line ending.
line-ending = "auto"
@@ -409,7 +409,6 @@ Ruff is used by a number of major open-source projects and companies, including:
- [Mypy](https://github.com/python/mypy)
- Netflix ([Dispatch](https://github.com/Netflix/dispatch))
- [Neon](https://github.com/neondatabase/neon)
- [NoneBot](https://github.com/nonebot/nonebot2)
- [ONNX](https://github.com/onnx/onnx)
- [OpenBB](https://github.com/OpenBB-finance/OpenBBTerminal)
- [PDM](https://github.com/pdm-project/pdm)

View File

@@ -1,6 +1,6 @@
[package]
name = "flake8-to-ruff"
version = "0.1.3"
version = "0.1.2"
description = """
Convert Flake8 configuration files to Ruff configuration files.
"""

View File

@@ -1,6 +1,6 @@
[package]
name = "ruff_cli"
version = "0.1.3"
version = "0.1.2"
publish = false
authors = { workspace = true }
edition = { workspace = true }

View File

@@ -11,41 +11,6 @@
"maths = (numpy.arange(100)**2).sum()\n",
"stats= numpy.asarray([1,2,3,4]).median()"
]
},
{
"cell_type": "markdown",
"id": "83a0b1b8",
"metadata": {},
"source": [
"A markdown cell"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ae12f012",
"metadata": {},
"outputs": [],
"source": [
"# A cell with IPython escape command\n",
"def some_function(foo, bar):\n",
" pass\n",
"%matplotlib inline"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "10f3bbf9",
"metadata": {},
"outputs": [],
"source": [
"foo = %pwd\n",
"def some_function(foo,bar,):\n",
" # Another cell with IPython escape command\n",
" foo = %pwd\n",
" print(foo)"
]
}
],
"metadata": {

View File

@@ -11,6 +11,7 @@ use itertools::Itertools;
use log::{error, warn};
use rayon::iter::Either::{Left, Right};
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
use similar::TextDiff;
use thiserror::Error;
use tracing::debug;
@@ -18,11 +19,12 @@ use ruff_diagnostics::SourceMap;
use ruff_linter::fs;
use ruff_linter::logging::LogLevel;
use ruff_linter::registry::Rule;
use ruff_linter::rules::flake8_quotes::settings::Quote;
use ruff_linter::rules::isort;
use ruff_linter::settings::rule_table::RuleTable;
use ruff_linter::source_kind::{SourceError, SourceKind};
use ruff_linter::warn_user_once;
use ruff_python_ast::{PySourceType, SourceType};
use ruff_python_formatter::{format_module_source, FormatModuleError, QuoteStyle};
use ruff_python_formatter::{format_module_source, FormatModuleError};
use ruff_text_size::{TextLen, TextRange, TextSize};
use ruff_workspace::resolver::{
match_exclusion, python_files_in_path, PyprojectConfig, ResolvedFile, Resolver,
@@ -105,7 +107,7 @@ pub(crate) fn format(
};
let start = Instant::now();
let (results, mut errors): (Vec<_>, Vec<_>) = paths
let (mut results, mut errors): (Vec<_>, Vec<_>) = paths
.par_iter()
.filter_map(|entry| {
match entry {
@@ -166,6 +168,27 @@ pub(crate) fn format(
});
let duration = start.elapsed();
// Make output deterministic, at least as long as we have a path
results.sort_unstable_by(|x, y| x.path.cmp(&y.path));
errors.sort_by(|x, y| {
fn get_key(error: &FormatCommandError) -> Option<&PathBuf> {
match &error {
FormatCommandError::Ignore(ignore) => {
if let ignore::Error::WithPath { path, .. } = ignore {
Some(path)
} else {
None
}
}
FormatCommandError::Panic(path, _)
| FormatCommandError::Read(path, _)
| FormatCommandError::Format(path, _)
| FormatCommandError::Write(path, _) => path.as_ref(),
}
}
get_key(x).cmp(&get_key(y))
});
debug!(
"Formatted {} files in {:.2?}",
results.len() + errors.len(),
@@ -175,21 +198,15 @@ pub(crate) fn format(
caches.persist()?;
// Report on any errors.
errors.sort_unstable_by(|a, b| a.path().cmp(&b.path()));
for error in &errors {
error!("{error}");
}
results.sort_unstable_by(|a, b| a.path.cmp(&b.path));
let results = FormatResults::new(results.as_slice(), mode);
match mode {
FormatMode::Write => {}
FormatMode::Check => {
results.write_changed(&mut stdout().lock())?;
}
FormatMode::Diff => {
results.write_diff(&mut stdout().lock())?;
}
if mode.is_diff() {
results.write_diff(&mut stdout().lock())?;
}
// Report on the formatting changes.
@@ -453,51 +470,27 @@ impl<'a> FormatResults<'a> {
})
}
/// Write a diff of the formatting changes to the given writer.
fn write_diff(&self, f: &mut impl Write) -> io::Result<()> {
for (path, unformatted, formatted) in self
.results
.iter()
.filter_map(|result| {
if let FormatResult::Diff {
unformatted,
formatted,
} = &result.result
{
Some((result.path.as_path(), unformatted, formatted))
} else {
None
}
})
.sorted_unstable_by_key(|(path, _, _)| *path)
{
unformatted.diff(formatted, Some(path), f)?;
for result in self.results {
if let FormatResult::Diff {
unformatted,
formatted,
} = &result.result
{
let text_diff =
TextDiff::from_lines(unformatted.source_code(), formatted.source_code());
let mut unified_diff = text_diff.unified_diff();
unified_diff.header(
&fs::relativize_path(&result.path),
&fs::relativize_path(&result.path),
);
unified_diff.to_writer(&mut *f)?;
}
}
Ok(())
}
/// Write a list of the files that would be changed to the given writer.
fn write_changed(&self, f: &mut impl Write) -> io::Result<()> {
for path in self
.results
.iter()
.filter_map(|result| {
if result.result.is_formatted() {
Some(result.path.as_path())
} else {
None
}
})
.sorted_unstable()
{
writeln!(f, "Would reformat: {}", fs::relativize_path(path).bold())?;
}
Ok(())
}
/// Write a summary of the formatting results to the given writer.
fn write_summary(&self, f: &mut impl Write) -> io::Result<()> {
// Compute the number of changed and unchanged files.
let mut changed = 0u32;
@@ -505,6 +498,14 @@ impl<'a> FormatResults<'a> {
for result in self.results {
match &result.result {
FormatResult::Formatted => {
// If we're running in check mode, report on any files that would be formatted.
if self.mode.is_check() {
writeln!(
f,
"Would reformat: {}",
fs::relativize_path(&result.path).bold()
)?;
}
changed += 1;
}
FormatResult::Unchanged => unchanged += 1,
@@ -561,26 +562,6 @@ pub(crate) enum FormatCommandError {
Read(Option<PathBuf>, SourceError),
Format(Option<PathBuf>, FormatModuleError),
Write(Option<PathBuf>, SourceError),
Diff(Option<PathBuf>, io::Error),
}
impl FormatCommandError {
fn path(&self) -> Option<&Path> {
match self {
Self::Ignore(err) => {
if let ignore::Error::WithPath { path, .. } = err {
Some(path.as_path())
} else {
None
}
}
Self::Panic(path, _)
| Self::Read(path, _)
| Self::Format(path, _)
| Self::Write(path, _)
| Self::Diff(path, _) => path.as_deref(),
}
}
}
impl Display for FormatCommandError {
@@ -646,24 +627,6 @@ impl Display for FormatCommandError {
write!(f, "{}{} {err}", "Failed to format".bold(), ":".bold())
}
}
Self::Diff(path, err) => {
if let Some(path) = path {
write!(
f,
"{}{}{} {err}",
"Failed to generate diff for ".bold(),
fs::relativize_path(path).bold(),
":".bold()
)
} else {
write!(
f,
"{}{} {err}",
"Failed to generate diff".bold(),
":".bold()
)
}
}
Self::Panic(path, err) => {
let message = r#"This indicates a bug in Ruff. If you could open an issue at:
@@ -700,110 +663,59 @@ pub(super) fn warn_incompatible_formatter_settings(
{
let mut incompatible_rules = Vec::new();
for rule in [
// The formatter might collapse implicit string concatenation on a single line.
for incompatible_rule in RuleTable::from_iter([
Rule::LineTooLong,
Rule::TabIndentation,
Rule::IndentationWithInvalidMultiple,
Rule::IndentationWithInvalidMultipleComment,
Rule::OverIndented,
Rule::IndentWithSpaces,
Rule::SingleLineImplicitStringConcatenation,
// Flags missing trailing commas when all arguments are on its own line:
// ```python
// def args(
// aaaaaaaa, bbbbbbbbb, cccccccccc, ddddddddd, eeeeeeee, ffffff, gggggggggggg, hhhh
// ):
// pass
// ```
Rule::MissingTrailingComma,
] {
if setting.linter.rules.enabled(rule) {
incompatible_rules.push(rule);
}
}
// Rules asserting for space indentation
if setting.formatter.indent_style.is_tab() {
for rule in [Rule::TabIndentation, Rule::IndentWithSpaces] {
if setting.linter.rules.enabled(rule) {
incompatible_rules.push(rule);
}
}
}
// Rules asserting for indent-width=4
if setting.formatter.indent_width.value() != 4 {
for rule in [
Rule::IndentationWithInvalidMultiple,
Rule::IndentationWithInvalidMultipleComment,
] {
if setting.linter.rules.enabled(rule) {
incompatible_rules.push(rule);
}
Rule::ProhibitedTrailingComma,
Rule::BadQuotesInlineString,
Rule::BadQuotesMultilineString,
Rule::BadQuotesDocstring,
Rule::AvoidableEscapedQuote,
])
.iter_enabled()
{
if setting.linter.rules.enabled(incompatible_rule) {
incompatible_rules.push(format!("'{}'", incompatible_rule.noqa_code()));
}
}
if !incompatible_rules.is_empty() {
let mut rule_names: Vec<_> = incompatible_rules
.into_iter()
.map(|rule| format!("`{}`", rule.noqa_code()))
.collect();
rule_names.sort();
warn!("The following rules may cause conflicts when used with the formatter: {}. To avoid unexpected behavior, we recommend disabling these rules, either by removing them from the `select` or `extend-select` configuration, or adding then to the `ignore` configuration.", rule_names.join(", "));
incompatible_rules.sort();
warn!("The following rules may cause conflicts when used with the formatter: {}. To avoid unexpected behavior, we recommend disabling these rules, either by removing them from the `select` or `extend-select` configuration, or adding then to the `ignore` configuration.", incompatible_rules.join(", "));
}
// Rules with different quote styles.
if setting
.linter
.rules
.any_enabled(&[Rule::BadQuotesInlineString, Rule::AvoidableEscapedQuote])
{
match (
setting.linter.flake8_quotes.inline_quotes,
setting.formatter.quote_style,
) {
(Quote::Double, QuoteStyle::Single) => {
warn!("The `flake8-quotes.inline-quotes=\"double\"` option is incompatible with the formatter's `format.quote-style=\"single\"`. We recommend disabling `Q000` and `Q003` when using the formatter, which enforces a consistent quote style. Alternatively, set both options to either `\"single\"` or `\"double\"`.");
}
(Quote::Single, QuoteStyle::Double) => {
warn!("The `flake8-quotes.inline-quotes=\"single\"` option is incompatible with the formatter's `format.quote-style=\"double\"`. We recommend disabling `Q000` and `Q003` when using the formatter, which enforces a consistent quote style. Alternatively, set both options to either `\"single\"` or `\"double\"`.");
}
_ => {}
}
let mut incompatible_options = Vec::new();
let isort_defaults = isort::settings::Settings::default();
if setting.linter.isort.force_single_line != isort_defaults.force_single_line {
incompatible_options.push("'isort.force-single-line'");
}
if setting.linter.rules.enabled(Rule::BadQuotesMultilineString)
&& setting.linter.flake8_quotes.multiline_quotes == Quote::Single
{
warn!("The `flake8-quotes.multiline-quotes=\"single\"` option is incompatible with the formatter. We recommend disabling `Q001` when using the formatter, which enforces double quotes for multiline strings. Alternatively, set the `flake8-quotes.multiline-quotes` option to `\"double\"`.`");
if setting.linter.isort.force_wrap_aliases != isort_defaults.force_wrap_aliases {
incompatible_options.push("'isort.force-wrap-aliases'");
}
if setting.linter.rules.enabled(Rule::BadQuotesDocstring)
&& setting.linter.flake8_quotes.docstring_quotes == Quote::Single
{
warn!("The `flake8-quotes.multiline-quotes=\"single\"` option is incompatible with the formatter. We recommend disabling `Q002` when using the formatter, which enforces double quotes for docstrings. Alternatively, set the `flake8-quotes.docstring-quotes` option to `\"double\"`.`");
if setting.linter.isort.lines_after_imports != isort_defaults.lines_after_imports {
incompatible_options.push("'isort.lines-after-imports'");
}
if setting.linter.rules.enabled(Rule::UnsortedImports) {
// The formatter removes empty lines if the value is larger than 2 but always inserts a empty line after imports.
// Two empty lines are okay because `isort` only uses this setting for top-level imports (not in nested blocks).
if !matches!(setting.linter.isort.lines_after_imports, 1 | 2 | -1) {
warn!("The isort option `isort.lines-after-imports` with a value other than `-1`, `1` or `2` is incompatible with the formatter. To avoid unexpected behavior, we recommend setting the option to one of: `2`, `1`, or `-1` (default).");
}
if setting.linter.isort.lines_between_types != isort_defaults.lines_between_types {
incompatible_options.push("'isort.lines_between_types'");
}
// Values larger than two get reduced to one line by the formatter if the import is in a nested block.
if setting.linter.isort.lines_between_types > 1 {
warn!("The isort option `isort.lines-between-types` with a value greater than 1 is incompatible with the formatter. To avoid unexpected behavior, we recommend setting the option to one of: `1` or `0` (default).");
}
if setting.linter.isort.split_on_trailing_comma != isort_defaults.split_on_trailing_comma {
incompatible_options.push("'isort.split_on_trailing_comma'");
}
// isort inserts a trailing comma which the formatter preserves, but only if `skip-magic-trailing-comma` isn't false.
// This isn't relevant when using `force-single-line`, since isort will never include a trailing comma in that case.
if setting.formatter.magic_trailing_comma.is_ignore()
&& !setting.linter.isort.force_single_line
{
if setting.linter.isort.force_wrap_aliases {
warn!("The isort option `isort.force-wrap-aliases` is incompatible with the formatter `format.skip-magic-trailing-comma=true` option. To avoid unexpected behavior, we recommend either setting `isort.force-wrap-aliases=false` or `format.skip-magic-trailing-comma=false`.");
}
if setting.linter.isort.split_on_trailing_comma {
warn!("The isort option `isort.split-on-trailing-comma` is incompatible with the formatter `format.skip-magic-trailing-comma=true` option. To avoid unexpected behavior, we recommend either setting `isort.split-on-trailing-comma=false` or `format.skip-magic-trailing-comma=false`.");
}
}
if !incompatible_options.is_empty() {
warn!("The following isort options may cause conflicts when used with the formatter: {}. To avoid unexpected behavior, we recommend disabling these options by removing them from the configuration.", incompatible_options.join(", "));
}
}
}

View File

@@ -3,6 +3,8 @@ use std::path::Path;
use anyhow::Result;
use log::error;
use ruff_linter::fs;
use similar::TextDiff;
use ruff_linter::source_kind::SourceKind;
use ruff_python_ast::{PySourceType, SourceType};
@@ -107,9 +109,14 @@ fn format_source_code(
}
FormatMode::Check => {}
FormatMode::Diff => {
source_kind
.diff(formatted, path, &mut stdout().lock())
.map_err(|err| FormatCommandError::Diff(path.map(Path::to_path_buf), err))?;
let mut writer = stdout().lock();
let text_diff =
TextDiff::from_lines(source_kind.source_code(), formatted.source_code());
let mut unified_diff = text_diff.unified_diff();
if let Some(path) = path {
unified_diff.header(&fs::relativize_path(path), &fs::relativize_path(path));
}
unified_diff.to_writer(&mut writer).unwrap();
}
},
FormattedSource::Unchanged => {

View File

@@ -230,9 +230,7 @@ fn format_option_inheritance() -> Result<()> {
&ruff_toml,
r#"
extend = "base.toml"
[lint]
extend-select = ["COM812"]
extend-select = ["Q000"]
[format]
quote-style = "single"
@@ -275,7 +273,7 @@ if condition:
print('Should change quotes')
----- stderr -----
warning: The following rules may cause conflicts when used with the formatter: `COM812`. To avoid unexpected behavior, we recommend disabling these rules, either by removing them from the `select` or `extend-select` configuration, or adding then to the `ignore` configuration.
warning: The following rules may cause conflicts when used with the formatter: 'Q000'. To avoid unexpected behavior, we recommend disabling these rules, either by removing them from the `select` or `extend-select` configuration, or adding then to the `ignore` configuration.
"###);
Ok(())
}
@@ -361,27 +359,15 @@ fn conflicting_options() -> Result<()> {
fs::write(
&ruff_toml,
r#"
indent-width = 2
[lint]
select = ["ALL"]
ignore = ["D203", "D212"]
[lint.isort]
lines-after-imports = 3
lines-between-types = 2
[isort]
force-single-line = true
force-wrap-aliases = true
combine-as-imports = true
lines-after-imports = 0
lines-between-types = 2
split-on-trailing-comma = true
[lint.flake8-quotes]
inline-quotes = "single"
docstring-quotes = "single"
multiline-quotes = "single"
[format]
skip-magic-trailing-comma = true
indent-style = "tab"
"#,
)?;
@@ -403,14 +389,8 @@ def say_hy(name: str):
1 file reformatted
----- stderr -----
warning: The following rules may cause conflicts when used with the formatter: `COM812`, `D206`, `ISC001`, `W191`. To avoid unexpected behavior, we recommend disabling these rules, either by removing them from the `select` or `extend-select` configuration, or adding then to the `ignore` configuration.
warning: The `flake8-quotes.inline-quotes="single"` option is incompatible with the formatter's `format.quote-style="double"`. We recommend disabling `Q000` and `Q003` when using the formatter, which enforces a consistent quote style. Alternatively, set both options to either `"single"` or `"double"`.
warning: The `flake8-quotes.multiline-quotes="single"` option is incompatible with the formatter. We recommend disabling `Q001` when using the formatter, which enforces double quotes for multiline strings. Alternatively, set the `flake8-quotes.multiline-quotes` option to `"double"`.`
warning: The `flake8-quotes.multiline-quotes="single"` option is incompatible with the formatter. We recommend disabling `Q002` when using the formatter, which enforces double quotes for docstrings. Alternatively, set the `flake8-quotes.docstring-quotes` option to `"double"`.`
warning: The isort option `isort.lines-after-imports` with a value other than `-1`, `1` or `2` is incompatible with the formatter. To avoid unexpected behavior, we recommend setting the option to one of: `2`, `1`, or `-1` (default).
warning: The isort option `isort.lines-between-types` with a value greater than 1 is incompatible with the formatter. To avoid unexpected behavior, we recommend setting the option to one of: `1` or `0` (default).
warning: The isort option `isort.force-wrap-aliases` is incompatible with the formatter `format.skip-magic-trailing-comma=true` option. To avoid unexpected behavior, we recommend either setting `isort.force-wrap-aliases=false` or `format.skip-magic-trailing-comma=false`.
warning: The isort option `isort.split-on-trailing-comma` is incompatible with the formatter `format.skip-magic-trailing-comma=true` option. To avoid unexpected behavior, we recommend either setting `isort.split-on-trailing-comma=false` or `format.skip-magic-trailing-comma=false`.
warning: The following rules may cause conflicts when used with the formatter: 'COM812', 'COM819', 'D206', 'E501', 'ISC001', 'Q000', 'Q001', 'Q002', 'Q003', 'W191'. To avoid unexpected behavior, we recommend disabling these rules, either by removing them from the `select` or `extend-select` configuration, or adding then to the `ignore` configuration.
warning: The following isort options may cause conflicts when used with the formatter: 'isort.force-single-line', 'isort.force-wrap-aliases', 'isort.lines-after-imports', 'isort.lines_between_types'. To avoid unexpected behavior, we recommend disabling these options by removing them from the configuration.
"###);
Ok(())
}
@@ -422,27 +402,15 @@ fn conflicting_options_stdin() -> Result<()> {
fs::write(
&ruff_toml,
r#"
indent-width = 2
[lint]
select = ["ALL"]
ignore = ["D203", "D212"]
[lint.isort]
lines-after-imports = 3
lines-between-types = 2
[isort]
force-single-line = true
force-wrap-aliases = true
combine-as-imports = true
lines-after-imports = 0
lines-between-types = 2
split-on-trailing-comma = true
[lint.flake8-quotes]
inline-quotes = "single"
docstring-quotes = "single"
multiline-quotes = "single"
[format]
skip-magic-trailing-comma = true
indent-style = "tab"
"#,
)?;
@@ -457,110 +425,14 @@ def say_hy(name: str):
exit_code: 0
----- stdout -----
def say_hy(name: str):
print(f"Hy {name}")
print(f"Hy {name}")
----- stderr -----
warning: The following rules may cause conflicts when used with the formatter: `COM812`, `D206`, `ISC001`, `W191`. To avoid unexpected behavior, we recommend disabling these rules, either by removing them from the `select` or `extend-select` configuration, or adding then to the `ignore` configuration.
warning: The `flake8-quotes.inline-quotes="single"` option is incompatible with the formatter's `format.quote-style="double"`. We recommend disabling `Q000` and `Q003` when using the formatter, which enforces a consistent quote style. Alternatively, set both options to either `"single"` or `"double"`.
warning: The `flake8-quotes.multiline-quotes="single"` option is incompatible with the formatter. We recommend disabling `Q001` when using the formatter, which enforces double quotes for multiline strings. Alternatively, set the `flake8-quotes.multiline-quotes` option to `"double"`.`
warning: The `flake8-quotes.multiline-quotes="single"` option is incompatible with the formatter. We recommend disabling `Q002` when using the formatter, which enforces double quotes for docstrings. Alternatively, set the `flake8-quotes.docstring-quotes` option to `"double"`.`
warning: The isort option `isort.lines-after-imports` with a value other than `-1`, `1` or `2` is incompatible with the formatter. To avoid unexpected behavior, we recommend setting the option to one of: `2`, `1`, or `-1` (default).
warning: The isort option `isort.lines-between-types` with a value greater than 1 is incompatible with the formatter. To avoid unexpected behavior, we recommend setting the option to one of: `1` or `0` (default).
warning: The isort option `isort.force-wrap-aliases` is incompatible with the formatter `format.skip-magic-trailing-comma=true` option. To avoid unexpected behavior, we recommend either setting `isort.force-wrap-aliases=false` or `format.skip-magic-trailing-comma=false`.
warning: The isort option `isort.split-on-trailing-comma` is incompatible with the formatter `format.skip-magic-trailing-comma=true` option. To avoid unexpected behavior, we recommend either setting `isort.split-on-trailing-comma=false` or `format.skip-magic-trailing-comma=false`.
warning: The following rules may cause conflicts when used with the formatter: 'COM812', 'COM819', 'D206', 'E501', 'ISC001', 'Q000', 'Q001', 'Q002', 'Q003', 'W191'. To avoid unexpected behavior, we recommend disabling these rules, either by removing them from the `select` or `extend-select` configuration, or adding then to the `ignore` configuration.
warning: The following isort options may cause conflicts when used with the formatter: 'isort.force-single-line', 'isort.force-wrap-aliases', 'isort.lines-after-imports', 'isort.lines_between_types'. To avoid unexpected behavior, we recommend disabling these options by removing them from the configuration.
"###);
Ok(())
}
#[test]
fn valid_linter_options() -> Result<()> {
let tempdir = TempDir::new()?;
let ruff_toml = tempdir.path().join("ruff.toml");
fs::write(
&ruff_toml,
r#"
[lint]
select = ["ALL"]
ignore = ["D203", "D212", "COM812", "ISC001"]
[lint.isort]
lines-after-imports = 2
lines-between-types = 1
force-wrap-aliases = true
combine-as-imports = true
split-on-trailing-comma = true
[lint.flake8-quotes]
inline-quotes = "single"
docstring-quotes = "double"
multiline-quotes = "double"
[format]
skip-magic-trailing-comma = false
quote-style = "single"
"#,
)?;
let test_path = tempdir.path().join("test.py");
fs::write(
&test_path,
r#"
def say_hy(name: str):
print(f"Hy {name}")"#,
)?;
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(["format", "--no-cache", "--config"])
.arg(&ruff_toml)
.arg(test_path), @r###"
success: true
exit_code: 0
----- stdout -----
1 file reformatted
----- stderr -----
"###);
Ok(())
}
#[test]
fn all_rules_default_options() -> Result<()> {
let tempdir = TempDir::new()?;
let ruff_toml = tempdir.path().join("ruff.toml");
fs::write(
&ruff_toml,
r#"
[lint]
select = ["ALL"]
"#,
)?;
let test_path = tempdir.path().join("test.py");
fs::write(
&test_path,
r#"
def say_hy(name: str):
print(f"Hy {name}")"#,
)?;
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(["format", "--no-cache", "--config"])
.arg(&ruff_toml)
.arg(test_path), @r###"
success: true
exit_code: 0
----- stdout -----
1 file reformatted
----- stderr -----
warning: `one-blank-line-before-class` (D203) and `no-blank-line-before-class` (D211) are incompatible. Ignoring `one-blank-line-before-class`.
warning: `multi-line-summary-first-line` (D212) and `multi-line-summary-second-line` (D213) are incompatible. Ignoring `multi-line-summary-second-line`.
warning: The following rules may cause conflicts when used with the formatter: `COM812`, `ISC001`. To avoid unexpected behavior, we recommend disabling these rules, either by removing them from the `select` or `extend-select` configuration, or adding then to the `ignore` configuration.
"###);
Ok(())
}
#[test]
fn test_diff() {
let args = ["format", "--no-cache", "--isolated", "--diff"];
@@ -580,8 +452,8 @@ fn test_diff() {
success: false
exit_code: 1
----- stdout -----
--- resources/test/fixtures/unformatted.ipynb:cell 1
+++ resources/test/fixtures/unformatted.ipynb:cell 1
--- resources/test/fixtures/unformatted.ipynb
+++ resources/test/fixtures/unformatted.ipynb
@@ -1,3 +1,4 @@
import numpy
-maths = (numpy.arange(100)**2).sum()
@@ -589,30 +461,6 @@ fn test_diff() {
+
+maths = (numpy.arange(100) ** 2).sum()
+stats = numpy.asarray([1, 2, 3, 4]).median()
--- resources/test/fixtures/unformatted.ipynb:cell 3
+++ resources/test/fixtures/unformatted.ipynb:cell 3
@@ -1,4 +1,6 @@
# A cell with IPython escape command
def some_function(foo, bar):
pass
+
+
%matplotlib inline
--- resources/test/fixtures/unformatted.ipynb:cell 4
+++ resources/test/fixtures/unformatted.ipynb:cell 4
@@ -1,5 +1,10 @@
foo = %pwd
-def some_function(foo,bar,):
+
+
+def some_function(
+ foo,
+ bar,
+):
# Another cell with IPython escape command
foo = %pwd
print(foo)
--- resources/test/fixtures/unformatted.py
+++ resources/test/fixtures/unformatted.py
@@ -1,3 +1,3 @@
@@ -621,7 +469,6 @@ fn test_diff() {
+y = 2
z = 3
----- stderr -----
2 files would be reformatted, 1 file left unchanged
"###);
@@ -651,7 +498,6 @@ fn test_diff_no_change() {
+y = 2
z = 3
----- stderr -----
1 file would be reformatted
"###
@@ -685,7 +531,6 @@ fn test_diff_stdin_unformatted() {
+y = 2
z = 3
----- stderr -----
"###);
}

View File

@@ -33,7 +33,7 @@ use ruff_formatter::{FormatError, LineWidth, PrintError};
use ruff_linter::logging::LogLevel;
use ruff_linter::settings::types::{FilePattern, FilePatternSet};
use ruff_python_formatter::{
format_module_source, FormatModuleError, MagicTrailingComma, PreviewMode, PyFormatOptions,
format_module_source, FormatModuleError, MagicTrailingComma, PyFormatOptions,
};
use ruff_workspace::resolver::{python_files_in_path, PyprojectConfig, ResolvedFile, Resolver};
@@ -871,7 +871,9 @@ struct BlackOptions {
line_length: NonZeroU16,
#[serde(alias = "skip-magic-trailing-comma")]
skip_magic_trailing_comma: bool,
preview: bool,
#[allow(unused)]
#[serde(alias = "force-exclude")]
force_exclude: Option<String>,
}
impl Default for BlackOptions {
@@ -879,7 +881,7 @@ impl Default for BlackOptions {
Self {
line_length: NonZeroU16::new(88).unwrap(),
skip_magic_trailing_comma: false,
preview: false,
force_exclude: None,
}
}
}
@@ -927,11 +929,6 @@ impl BlackOptions {
} else {
MagicTrailingComma::Respect
})
.with_preview(if self.preview {
PreviewMode::Enabled
} else {
PreviewMode::Disabled
})
}
}

View File

@@ -22,16 +22,14 @@ fn generate_table(table_out: &mut String, rules: impl IntoIterator<Item = Rule>,
for rule in rules {
let fix_token = match rule.fixable() {
FixAvailability::Always | FixAvailability::Sometimes => {
format!("<span title='Automatic fix available'>{FIX_SYMBOL}</span>")
}
FixAvailability::None => {
format!("<span style='opacity: 0.1' aria-hidden='true'>{FIX_SYMBOL}</span>")
format!("<span style='opacity: 1'>{FIX_SYMBOL}</span>")
}
FixAvailability::None => format!("<span style='opacity: 0.1'>{FIX_SYMBOL}</span>"),
};
let preview_token = if rule.is_preview() || rule.is_nursery() {
format!("<span title='Rule is in preview'>{PREVIEW_SYMBOL}</span>")
format!("<span style='opacity: 1'>{PREVIEW_SYMBOL}</span>")
} else {
format!("<span style='opacity: 0.1' aria-hidden='true'>{PREVIEW_SYMBOL}</span>")
format!("<span style='opacity: 0.1'>{PREVIEW_SYMBOL}</span>")
};
let status_token = format!("{fix_token} {preview_token}");

View File

@@ -1,6 +1,6 @@
[package]
name = "ruff_linter"
version = "0.1.3"
version = "0.1.2"
publish = false
authors = { workspace = true }
edition = { workspace = true }

View File

@@ -79,6 +79,3 @@ from ZeroDivisionError
raise IndexError() from ZeroDivisionError
raise IndexError();
# RSE102
raise Foo()

View File

@@ -49,13 +49,6 @@ class Apples:
def __doc__(self):
return "Docstring"
# Added in Python 3.12
def __buffer__(self):
return memoryview(b'')
def __release_buffer__(self, buf):
pass
# Allow dunder methods recommended by attrs.
def __attrs_post_init__(self):
pass

View File

@@ -131,7 +131,7 @@ pub(crate) fn check_noqa(
|| settings
.external
.iter()
.any(|external| code.starts_with(external))
.any(|pattern| pattern.matches(code))
{
valid_codes.push(code);
} else {

View File

@@ -110,27 +110,18 @@ pub(crate) fn implicit(
{
let (a_range, b_range) = match (a_tok, b_tok) {
(Tok::String { .. }, Tok::String { .. }) => (*a_range, *b_range),
(Tok::String { .. }, Tok::FStringStart) => {
match indexer.fstring_ranges().innermost(b_range.start()) {
Some(b_range) => (*a_range, b_range),
None => continue,
}
}
(Tok::FStringEnd, Tok::String { .. }) => {
match indexer.fstring_ranges().innermost(a_range.start()) {
Some(a_range) => (a_range, *b_range),
None => continue,
}
}
(Tok::FStringEnd, Tok::FStringStart) => {
match (
indexer.fstring_ranges().innermost(a_range.start()),
indexer.fstring_ranges().innermost(b_range.start()),
) {
(Some(a_range), Some(b_range)) => (a_range, b_range),
_ => continue,
}
}
(Tok::String { .. }, Tok::FStringStart) => (
*a_range,
indexer.fstring_ranges().innermost(b_range.start()).unwrap(),
),
(Tok::FStringEnd, Tok::String { .. }) => (
indexer.fstring_ranges().innermost(a_range.start()).unwrap(),
*b_range,
),
(Tok::FStringEnd, Tok::FStringStart) => (
indexer.fstring_ranges().innermost(a_range.start()).unwrap(),
indexer.fstring_ranges().innermost(b_range.start()).unwrap(),
),
_ => continue,
};

View File

@@ -27,13 +27,6 @@ use crate::settings::LinterSettings;
/// ```python
/// foo = "bar's"
/// ```
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter automatically removes unnecessary escapes, making the rule
/// redundant.
///
/// [formatter]: https://docs.astral.sh/ruff/formatter
#[violation]
pub struct AvoidableEscapedQuote;

View File

@@ -32,13 +32,6 @@ use super::super::settings::Quote;
///
/// ## Options
/// - `flake8-quotes.inline-quotes`
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces consistent quotes for inline strings, making the rule
/// redundant.
///
/// [formatter]: https://docs.astral.sh/ruff/formatter
#[violation]
pub struct BadQuotesInlineString {
preferred_quote: Quote,
@@ -88,13 +81,6 @@ impl AlwaysFixableViolation for BadQuotesInlineString {
///
/// ## Options
/// - `flake8-quotes.multiline-quotes`
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces double quotes for multiline strings, making the rule
/// redundant.
///
/// [formatter]: https://docs.astral.sh/ruff/formatter
#[violation]
pub struct BadQuotesMultilineString {
preferred_quote: Quote,
@@ -143,13 +129,6 @@ impl AlwaysFixableViolation for BadQuotesMultilineString {
///
/// ## Options
/// - `flake8-quotes.docstring-quotes`
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces double quotes for docstrings, making the rule
/// redundant.
///
/// [formatter]: https://docs.astral.sh/ruff/formatter
#[violation]
pub struct BadQuotesDocstring {
preferred_quote: Quote,

View File

@@ -1,7 +1,6 @@
use ruff_diagnostics::{AlwaysFixableViolation, Diagnostic, Edit, Fix};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::{self as ast, Expr};
use ruff_python_semantic::BindingKind;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
@@ -16,16 +15,6 @@ use crate::checkers::ast::Checker;
///
/// Removing the parentheses makes the code more concise.
///
/// ## Known problems
/// Parentheses can only be omitted if the exception is a class, as opposed to
/// a function call. This rule isn't always capable of distinguishing between
/// the two.
///
/// For example, if you import a function `module.get_exception` from another
/// module, and `module.get_exception` returns an exception object, this rule will
/// incorrectly mark the parentheses in `raise module.get_exception()` as
/// unnecessary.
///
/// ## Example
/// ```python
/// raise TypeError()
@@ -65,32 +54,25 @@ pub(crate) fn unnecessary_paren_on_raise_exception(checker: &mut Checker, expr:
if arguments.is_empty() {
// `raise func()` still requires parentheses; only `raise Class()` does not.
let exception_type = if let Some(id) = checker.semantic().lookup_attribute(func) {
match checker.semantic().binding(id).kind {
BindingKind::FunctionDefinition(_) => return,
BindingKind::ClassDefinition(_) => Some(ExceptionType::Class),
BindingKind::Builtin => Some(ExceptionType::Builtin),
_ => None,
}
} else {
None
};
if checker
.semantic()
.lookup_attribute(func)
.is_some_and(|id| checker.semantic().binding(id).kind.is_function_definition())
{
return;
}
// `ctypes.WinError()` is a function, not a class. It's part of the standard library, so
// we might as well get it right.
if exception_type
.as_ref()
.is_some_and(ExceptionType::is_builtin)
&& checker
.semantic()
.resolve_call_path(func)
.is_some_and(|call_path| matches!(call_path.as_slice(), ["ctypes", "WinError"]))
if checker
.semantic()
.resolve_call_path(func)
.is_some_and(|call_path| matches!(call_path.as_slice(), ["ctypes", "WinError"]))
{
return;
}
let mut diagnostic = Diagnostic::new(UnnecessaryParenOnRaiseException, arguments.range());
// If the arguments are immediately followed by a `from`, insert whitespace to avoid
// a syntax error, as in:
// ```python
@@ -103,25 +85,13 @@ pub(crate) fn unnecessary_paren_on_raise_exception(checker: &mut Checker, expr:
.next()
.is_some_and(char::is_alphanumeric)
{
diagnostic.set_fix(if exception_type.is_some() {
Fix::safe_edit(Edit::range_replacement(" ".to_string(), arguments.range()))
} else {
Fix::unsafe_edit(Edit::range_replacement(" ".to_string(), arguments.range()))
});
diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
" ".to_string(),
arguments.range(),
)));
} else {
diagnostic.set_fix(if exception_type.is_some() {
Fix::safe_edit(Edit::range_deletion(arguments.range()))
} else {
Fix::unsafe_edit(Edit::range_deletion(arguments.range()))
});
diagnostic.set_fix(Fix::safe_edit(Edit::range_deletion(arguments.range())));
}
checker.diagnostics.push(diagnostic);
}
}
#[derive(Debug, is_macro::Is)]
enum ExceptionType {
Class,
Builtin,
}

View File

@@ -238,7 +238,6 @@ RSE102.py:79:17: RSE102 [*] Unnecessary parentheses on raised exception
79 |+raise IndexError from ZeroDivisionError
80 80 |
81 81 | raise IndexError();
82 82 |
RSE102.py:81:17: RSE102 [*] Unnecessary parentheses on raised exception
|
@@ -246,8 +245,6 @@ RSE102.py:81:17: RSE102 [*] Unnecessary parentheses on raised exception
80 |
81 | raise IndexError();
| ^^ RSE102
82 |
83 | # RSE102
|
= help: Remove unnecessary parentheses
@@ -257,23 +254,5 @@ RSE102.py:81:17: RSE102 [*] Unnecessary parentheses on raised exception
80 80 |
81 |-raise IndexError();
81 |+raise IndexError;
82 82 |
83 83 | # RSE102
84 84 | raise Foo()
RSE102.py:84:10: RSE102 [*] Unnecessary parentheses on raised exception
|
83 | # RSE102
84 | raise Foo()
| ^^ RSE102
|
= help: Remove unnecessary parentheses
Suggested fix
81 81 | raise IndexError();
82 82 |
83 83 | # RSE102
84 |-raise Foo()
84 |+raise Foo

View File

@@ -120,7 +120,7 @@ pub(crate) fn organize_imports(
block,
comments,
locator,
settings.line_length,
settings.pycodestyle.max_line_length,
LineWidthBuilder::new(settings.tab_size).add_str(indentation),
stylist,
&settings.src,

View File

@@ -5,7 +5,7 @@ use ruff_macros::{derive_message_formats, violation};
use ruff_python_index::Indexer;
use ruff_python_parser::Tok;
use ruff_source_file::Locator;
use ruff_text_size::{TextLen, TextRange, TextSize};
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use crate::fix::edits::pad_start;
@@ -25,51 +25,23 @@ use crate::fix::edits::pad_start;
/// regex = r"\.png$"
/// ```
///
/// Or, if the string already contains a valid escape sequence:
/// ```python
/// value = "new line\nand invalid escape \_ here"
/// ```
///
/// Use instead:
/// ```python
/// value = "new line\nand invalid escape \\_ here"
/// ```
///
/// ## References
/// - [Python documentation: String and Bytes literals](https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals)
#[violation]
pub struct InvalidEscapeSequence {
ch: char,
fix_title: FixTitle,
}
pub struct InvalidEscapeSequence(char);
impl AlwaysFixableViolation for InvalidEscapeSequence {
#[derive_message_formats]
fn message(&self) -> String {
let InvalidEscapeSequence { ch, .. } = self;
format!("Invalid escape sequence: `\\{ch}`")
let InvalidEscapeSequence(char) = self;
format!("Invalid escape sequence: `\\{char}`")
}
fn fix_title(&self) -> String {
match self.fix_title {
FixTitle::AddBackslash => format!("Add backslash to escape sequence"),
FixTitle::UseRawStringLiteral => format!("Use a raw string literal"),
}
"Add backslash to escape sequence".to_string()
}
}
#[derive(Debug, PartialEq, Eq)]
enum FixTitle {
AddBackslash,
UseRawStringLiteral,
}
#[derive(Debug)]
struct InvalidEscapeChar {
ch: char,
range: TextRange,
}
/// W605
pub(crate) fn invalid_escape_sequence(
diagnostics: &mut Vec<Diagnostic>,
@@ -78,27 +50,24 @@ pub(crate) fn invalid_escape_sequence(
token: &Tok,
token_range: TextRange,
) {
let (token_source_code, string_start_location) = match token {
let token_source_code = match token {
Tok::FStringMiddle { value, is_raw } => {
if *is_raw {
return;
}
let Some(range) = indexer.fstring_ranges().innermost(token_range.start()) else {
return;
};
(value.as_str(), range.start())
value.as_str()
}
Tok::String { kind, .. } => {
if kind.is_raw() {
return;
}
(locator.slice(token_range), token_range.start())
locator.slice(token_range)
}
_ => return,
};
let mut contains_valid_escape_sequence = false;
let mut invalid_escape_chars = Vec::new();
let mut invalid_escape_sequence = Vec::new();
let mut prev = None;
let bytes = token_source_code.as_bytes();
@@ -185,49 +154,38 @@ pub(crate) fn invalid_escape_sequence(
let location = token_range.start() + TextSize::try_from(i).unwrap();
let range = TextRange::at(location, next_char.text_len() + TextSize::from(1));
invalid_escape_chars.push(InvalidEscapeChar {
ch: next_char,
range,
});
invalid_escape_sequence.push(Diagnostic::new(InvalidEscapeSequence(next_char), range));
}
let mut invalid_escape_sequence = Vec::new();
if contains_valid_escape_sequence {
// Escape with backslash.
for invalid_escape_char in &invalid_escape_chars {
let diagnostic = Diagnostic::new(
InvalidEscapeSequence {
ch: invalid_escape_char.ch,
fix_title: FixTitle::AddBackslash,
},
invalid_escape_char.range,
)
.with_fix(Fix::safe_edit(Edit::insertion(
for diagnostic in &mut invalid_escape_sequence {
diagnostic.set_fix(Fix::safe_edit(Edit::insertion(
r"\".to_string(),
invalid_escape_char.range.start() + TextSize::from(1),
diagnostic.start() + TextSize::from(1),
)));
invalid_escape_sequence.push(diagnostic);
}
} else {
let tok_start = if token.is_f_string_middle() {
// SAFETY: If this is a `FStringMiddle` token, then the indexer
// must have the f-string range.
indexer
.fstring_ranges()
.innermost(token_range.start())
.unwrap()
.start()
} else {
token_range.start()
};
// Turn into raw string.
for invalid_escape_char in &invalid_escape_chars {
let diagnostic = Diagnostic::new(
InvalidEscapeSequence {
ch: invalid_escape_char.ch,
fix_title: FixTitle::UseRawStringLiteral,
},
invalid_escape_char.range,
)
.with_fix(
// If necessary, add a space between any leading keyword (`return`, `yield`,
// `assert`, etc.) and the string. For example, `return"foo"` is valid, but
// `returnr"foo"` is not.
Fix::safe_edit(Edit::insertion(
pad_start("r".to_string(), string_start_location, locator),
string_start_location,
)),
);
invalid_escape_sequence.push(diagnostic);
for diagnostic in &mut invalid_escape_sequence {
// If necessary, add a space between any leading keyword (`return`, `yield`,
// `assert`, etc.) and the string. For example, `return"foo"` is valid, but
// `returnr"foo"` is not.
diagnostic.set_fix(Fix::safe_edit(Edit::insertion(
pad_start("r".to_string(), tok_start, locator),
tok_start,
)));
}
}

View File

@@ -23,16 +23,7 @@ use super::LogicalLine;
/// a = 1
/// ```
///
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces consistent indentation, making the rule redundant.
///
/// The rule is also incompatible with the [formatter] when using
/// `indent-width` with a value other than `4`.
///
/// [PEP 8]: https://peps.python.org/pep-0008/#indentation
/// [formatter]:https://docs.astral.sh/ruff/formatter/
#[violation]
pub struct IndentationWithInvalidMultiple {
indent_size: usize,
@@ -64,15 +55,7 @@ impl Violation for IndentationWithInvalidMultiple {
/// # a = 1
/// ```
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces consistent indentation, making the rule redundant.
///
/// The rule is also incompatible with the [formatter] when using
/// `indent-width` with a value other than `4`.
///
/// [PEP 8]: https://peps.python.org/pep-0008/#indentation
/// [formatter]:https://docs.astral.sh/ruff/formatter/
#[violation]
pub struct IndentationWithInvalidMultipleComment {
indent_size: usize,

View File

@@ -26,15 +26,7 @@ use ruff_text_size::{TextLen, TextRange, TextSize};
/// a = 1
/// ```
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces consistent indentation, making the rule redundant.
///
/// The rule is also incompatible with the [formatter] when using
/// `format.indent-style="tab"`.
///
/// [PEP 8]: https://peps.python.org/pep-0008/#tabs-or-spaces
/// [formatter]: https://docs.astral.sh/ruff/formatter
#[violation]
pub struct TabIndentation;

View File

@@ -9,7 +9,7 @@ W605_0.py:2:10: W605 [*] Invalid escape sequence: `\.`
3 |
4 | #: W605:2:1
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
1 1 | #: W605:1:10
@@ -27,7 +27,7 @@ W605_0.py:6:1: W605 [*] Invalid escape sequence: `\.`
| ^^ W605
7 | '''
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
2 2 | regex = '\.png$'
@@ -47,7 +47,7 @@ W605_0.py:11:6: W605 [*] Invalid escape sequence: `\_`
| ^^ W605
12 | )
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
8 8 |
@@ -68,7 +68,7 @@ W605_0.py:18:6: W605 [*] Invalid escape sequence: `\_`
19 | in the middle
20 | """
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
12 12 | )
@@ -107,7 +107,7 @@ W605_0.py:28:12: W605 [*] Invalid escape sequence: `\.`
29 |
30 | #: Okay
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
25 25 |

View File

@@ -9,7 +9,7 @@ W605_1.py:2:10: W605 [*] Invalid escape sequence: `\.`
3 |
4 | #: W605:2:1
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
1 1 | #: W605:1:10
@@ -27,7 +27,7 @@ W605_1.py:6:1: W605 [*] Invalid escape sequence: `\.`
| ^^ W605
7 | '''
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
2 2 | regex = '\.png$'
@@ -47,7 +47,7 @@ W605_1.py:11:6: W605 [*] Invalid escape sequence: `\_`
| ^^ W605
12 | )
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
8 8 |
@@ -68,7 +68,7 @@ W605_1.py:18:6: W605 [*] Invalid escape sequence: `\_`
19 | in the middle
20 | """
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
12 12 | )
@@ -89,7 +89,7 @@ W605_1.py:25:12: W605 [*] Invalid escape sequence: `\.`
26 |
27 | #: Okay
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
22 22 |

View File

@@ -1,5 +1,5 @@
---
source: crates/ruff_linter/src/rules/pycodestyle/mod.rs
source: crates/ruff/src/rules/pycodestyle/mod.rs
---
W605_2.py:4:11: W605 [*] Invalid escape sequence: `\.`
|
@@ -9,7 +9,7 @@ W605_2.py:4:11: W605 [*] Invalid escape sequence: `\.`
5 |
6 | #: W605:2:1
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
1 1 | # Same as `W605_0.py` but using f-strings instead.
@@ -29,7 +29,7 @@ W605_2.py:8:1: W605 [*] Invalid escape sequence: `\.`
| ^^ W605
9 | '''
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
4 4 | regex = f'\.png$'
@@ -49,7 +49,7 @@ W605_2.py:13:7: W605 [*] Invalid escape sequence: `\_`
| ^^ W605
14 | )
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
10 10 |
@@ -70,7 +70,7 @@ W605_2.py:20:6: W605 [*] Invalid escape sequence: `\_`
21 | in the middle
22 | """
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
14 14 | )
@@ -129,7 +129,7 @@ W605_2.py:44:11: W605 [*] Invalid escape sequence: `\{`
45 | value = f'\{1}'
46 | value = f'{1:\}'
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
41 41 | ''' # noqa
@@ -150,7 +150,7 @@ W605_2.py:45:11: W605 [*] Invalid escape sequence: `\{`
46 | value = f'{1:\}'
47 | value = f"{f"\{1}"}"
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
42 42 |
@@ -171,7 +171,7 @@ W605_2.py:46:14: W605 [*] Invalid escape sequence: `\}`
47 | value = f"{f"\{1}"}"
48 | value = rf"{f"\{1}"}"
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
43 43 | regex = f'\\\_'
@@ -191,7 +191,7 @@ W605_2.py:47:14: W605 [*] Invalid escape sequence: `\{`
| ^^ W605
48 | value = rf"{f"\{1}"}"
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
44 44 | value = f'\{{1}}'
@@ -212,7 +212,7 @@ W605_2.py:48:15: W605 [*] Invalid escape sequence: `\{`
49 |
50 | # Okay
|
= help: Use a raw string literal
= help: Add backslash to escape sequence
Fix
45 45 | value = f'\{1}'

View File

@@ -14,7 +14,9 @@ use crate::registry::Rule;
/// Checks for docstrings that are indented with tabs.
///
/// ## Why is this bad?
/// [PEP 8] recommends using spaces over tabs for indentation.
/// [PEP 8](https://peps.python.org/pep-0008/#tabs-or-spaces) recommends using
/// spaces over tabs for indentation.
///
///
/// ## Example
/// ```python
@@ -36,20 +38,10 @@ use crate::registry::Rule;
/// """
/// ```
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces consistent indentation, making the rule redundant.
///
/// The rule is also incompatible with the [formatter] when using
/// `format.indent-style="tab"`.
///
/// ## References
/// - [PEP 257 Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
///
/// [PEP 8]: https://peps.python.org/pep-0008/#tabs-or-spaces
/// [formatter]: https://docs.astral.sh/ruff/formatter
#[violation]
pub struct IndentWithSpaces;
@@ -134,17 +126,12 @@ impl AlwaysFixableViolation for UnderIndentation {
/// """
/// ```
///
/// ## Formatter compatibility
/// We recommend against using this rule alongside the [formatter]. The
/// formatter enforces consistent indentation, making the rule redundant.
///
/// ## References
/// - [PEP 257 Docstring Conventions](https://peps.python.org/pep-0257/)
/// - [NumPy Style Guide](https://numpydoc.readthedocs.io/en/latest/format.html)
/// - [Google Python Style Guide - Docstrings](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings)
///
/// [PEP 257]: https://peps.python.org/pep-0257/
/// [formatter]:https://docs.astral.sh/ruff/formatter/
#[violation]
pub struct OverIndentation;

View File

@@ -88,7 +88,6 @@ fn is_known_dunder_method(method: &str) -> bool {
| "__attrs_pre_init__"
| "__await__"
| "__bool__"
| "__buffer__"
| "__bytes__"
| "__call__"
| "__ceil__"
@@ -167,7 +166,6 @@ fn is_known_dunder_method(method: &str) -> bool {
| "__rdivmod__"
| "__reduce__"
| "__reduce_ex__"
| "__release_buffer__"
| "__repr__"
| "__reversed__"
| "__rfloordiv__"

View File

@@ -15,7 +15,7 @@ use crate::{checkers::ast::Checker, rules::flake8_unused_arguments::helpers};
///
/// ## Why is this bad?
/// Unused `self` parameters are usually a sign of a method that could be
/// replaced by a function, class method, or static method.
/// replaced by a function or a static method.
///
/// ## Example
/// ```python
@@ -26,8 +26,10 @@ use crate::{checkers::ast::Checker, rules::flake8_unused_arguments::helpers};
///
/// Use instead:
/// ```python
/// def greeting():
/// print("Greetings friend!")
/// class Person:
/// @staticmethod
/// def greeting():
/// print("Greetings friend!")
/// ```
#[violation]
pub struct NoSelfUse {
@@ -38,7 +40,7 @@ impl Violation for NoSelfUse {
#[derive_message_formats]
fn message(&self) -> String {
let NoSelfUse { method_name } = self;
format!("Method `{method_name}` could be a function, class method, or static method")
format!("Method `{method_name}` could be a function or static method")
}
}

View File

@@ -1,7 +1,7 @@
---
source: crates/ruff_linter/src/rules/pylint/mod.rs
---
no_self_use.py:7:28: PLR6301 Method `developer_greeting` could be a function, class method, or static method
no_self_use.py:7:28: PLR6301 Method `developer_greeting` could be a function or static method
|
6 | class Person:
7 | def developer_greeting(self, name): # [no-self-use]
@@ -9,7 +9,7 @@ no_self_use.py:7:28: PLR6301 Method `developer_greeting` could be a function, cl
8 | print(f"Greetings {name}!")
|
no_self_use.py:10:20: PLR6301 Method `greeting_1` could be a function, class method, or static method
no_self_use.py:10:20: PLR6301 Method `greeting_1` could be a function or static method
|
8 | print(f"Greetings {name}!")
9 |
@@ -18,7 +18,7 @@ no_self_use.py:10:20: PLR6301 Method `greeting_1` could be a function, class met
11 | print("Hello!")
|
no_self_use.py:13:20: PLR6301 Method `greeting_2` could be a function, class method, or static method
no_self_use.py:13:20: PLR6301 Method `greeting_2` could be a function or static method
|
11 | print("Hello!")
12 |

View File

@@ -17,7 +17,7 @@ mod tests {
use crate::pyproject_toml::lint_pyproject_toml;
use crate::registry::Rule;
use crate::settings::resolve_per_file_ignores;
use crate::settings::types::{PerFileIgnore, PythonVersion};
use crate::settings::types::{CodePattern, PerFileIgnore, PythonVersion};
use crate::test::{test_path, test_resource_path};
use crate::{assert_messages, settings};
@@ -106,7 +106,7 @@ mod tests {
let diagnostics = test_path(
Path::new("ruff/RUF100_0.py"),
&settings::LinterSettings {
external: vec!["V101".to_string()],
external: vec![CodePattern::new("V101")?],
..settings::LinterSettings::for_rules(vec![
Rule::UnusedNOQA,
Rule::LineTooLong,
@@ -121,11 +121,11 @@ mod tests {
}
#[test]
fn ruf100_0_prefix() -> Result<()> {
fn ruf100_0_glob() -> Result<()> {
let diagnostics = test_path(
Path::new("ruff/RUF100_0.py"),
&settings::LinterSettings {
external: vec!["V".to_string()],
external: vec![CodePattern::new("V*")?],
..settings::LinterSettings::for_rules(vec![
Rule::UnusedNOQA,
Rule::LineTooLong,

View File

@@ -35,11 +35,8 @@ pub struct UnusedCodes {
/// foo.bar()
/// ```
///
/// ## Options
/// - `external`
///
/// ## References
/// - [Ruff error suppression](https://docs.astral.sh/ruff/linter/#error-suppression)
/// - [Automatic `noqa` management](https://docs.astral.sh/ruff/configuration/#automatic-noqa-management)
#[violation]
pub struct UnusedNOQA {
pub codes: Option<UnusedCodes>,

View File

@@ -14,7 +14,6 @@ use rustc_hash::FxHashSet;
use crate::codes::RuleCodePrefix;
use ruff_macros::CacheKey;
use crate::line_width::LineLength;
use crate::registry::{Linter, Rule, RuleSet};
use crate::rules::{
flake8_annotations, flake8_bandit, flake8_bugbear, flake8_builtins, flake8_comprehensions,
@@ -29,7 +28,7 @@ use crate::{codes, RuleSelector};
use super::line_width::IndentWidth;
use self::rule_table::RuleTable;
use self::types::PreviewMode;
use self::types::{CodePattern, PreviewMode};
use crate::rule_selector::PreviewOptions;
pub mod flags;
@@ -54,13 +53,12 @@ pub struct LinterSettings {
pub allowed_confusables: FxHashSet<char>,
pub builtins: Vec<String>,
pub dummy_variable_rgx: Regex,
pub external: Vec<String>,
pub external: Vec<CodePattern>,
pub ignore_init_module_imports: bool,
pub logger_objects: Vec<String>,
pub namespace_packages: Vec<PathBuf>,
pub src: Vec<PathBuf>,
pub tab_size: IndentWidth,
pub line_length: LineLength,
pub task_tags: Vec<String>,
pub typing_modules: Vec<String>,
@@ -157,7 +155,6 @@ impl LinterSettings {
src: vec![path_dedot::CWD.clone()],
// Needs duplicating
tab_size: IndentWidth::default(),
line_length: LineLength::default(),
task_tags: TASK_TAGS.iter().map(ToString::to_string).collect(),
typing_modules: vec![],

View File

@@ -338,9 +338,18 @@ impl Deref for Version {
///
/// [`glob::Pattern`] matches a little differently than we ideally want to.
/// Specifically it uses `**` to match an arbitrary number of subdirectories,
/// luckily this not relevant since identifiers don't contains slashes.
/// however this is not relevant since identifiers don't contain slashes.
///
/// For reference pep8-naming uses
/// [`fnmatch`](https://docs.python.org/3/library/fnmatch.html) for
/// pattern matching.
pub type IdentifierPattern = glob::Pattern;
/// Pattern to match a rule code.
///
/// # Notes
///
/// [`glob::Pattern`] matches a little differently than we ideally want to.
/// Specifically it uses `**` to match an arbitrary number of subdirectories,
/// however this is not relevant since identifiers don't contain slashes.
pub type CodePattern = glob::Pattern;

View File

@@ -2,7 +2,7 @@ use std::io;
use std::io::Write;
use std::path::Path;
use anyhow::Result;
use anyhow::{bail, Result};
use similar::TextDiff;
use thiserror::Error;
@@ -88,12 +88,7 @@ impl SourceKind {
}
/// Write a diff of the transformed source file to `stdout`.
pub fn diff(
&self,
other: &Self,
path: Option<&Path>,
writer: &mut dyn Write,
) -> io::Result<()> {
pub fn diff(&self, other: &Self, path: Option<&Path>, writer: &mut dyn Write) -> Result<()> {
match (self, other) {
(SourceKind::Python(src), SourceKind::Python(dst)) => {
let text_diff = TextDiff::from_lines(src, dst);
@@ -159,7 +154,7 @@ impl SourceKind {
Ok(())
}
_ => panic!("cannot diff Python source code with Jupyter notebook source code"),
_ => bail!("cannot diff Python source code with Jupyter notebook source code"),
}
}
}

View File

@@ -537,6 +537,28 @@ def update_emission_strength():
value = self.emission_strength * 2
```
#### Type annotations may be parenthesized when expanded ([#7315](https://github.com/astral-sh/ruff/issues/7315))
Black will avoid parenthesizing type annotations in an annotated assignment, while Ruff will insert
parentheses in some cases.
For example:
```python
# Black
StartElementHandler: Callable[[str, dict[str, str]], Any] | Callable[[str, list[str]], Any] | Callable[
[str, dict[str, str], list[str]], Any
] | None
# Ruff
StartElementHandler: (
Callable[[str, dict[str, str]], Any]
| Callable[[str, list[str]], Any]
| Callable[[str, dict[str, str], list[str]], Any]
| None
)
```
#### Call chain calls break differently ([#7051](https://github.com/astral-sh/ruff/issues/7051))
Black occasionally breaks call chains differently than Ruff; in particular, Black occasionally

View File

@@ -1,4 +1,3 @@
# flags: --pyi
from typing import Union
@bird

View File

@@ -1,5 +1,4 @@
''''''
'\''
'"'
"'"

View File

@@ -1,16 +0,0 @@
# flags: --preview --skip-string-normalization
class C:
r"""Raw"""
def f():
r"""Raw"""
class SingleQuotes:
r'''Raw'''
class UpperCaseR:
R"""Raw"""

View File

@@ -1,14 +0,0 @@
class C:
r"""Raw"""
def f():
r"""Raw"""
class SingleQuotes:
r'''Raw'''
class UpperCaseR:
R"""Raw"""

View File

@@ -1,10 +0,0 @@
# No spacing
5 ** 5
5.0 ** 5.0
1e5 ** 2e5
True ** True
False ** False
None ** None
# Space
"a" ** "b"

View File

@@ -180,16 +180,3 @@ if "root" not in (
):
msg = "Could not find root. Please try a different forest."
raise ValueError(msg)
# Regression for https://github.com/astral-sh/ruff/issues/8183
def foo():
while (
not (aaaaaaaaaaaaaaaaaaaaa(bbbbbbbb, ccccccc)) and dddddddddd < eeeeeeeeeeeeeee
):
pass
def foo():
while (
not (aaaaaaaaaaaaaaaaaaaaa[bbbbbbbb, ccccccc]) and dddddddddd < eeeeeeeeeeeeeee
):
pass

View File

@@ -1,8 +0,0 @@
# Regression test for https://github.com/astral-sh/ruff/issues/8211
# fmt: off
from dataclasses import dataclass
if True:
if False:
x: int # Optional[int]

View File

@@ -1,8 +0,0 @@
# Regression test for https://github.com/astral-sh/ruff/issues/8211
# fmt: off
from dataclasses import dataclass
@dataclass
class A:
x: int # Optional[int]

View File

@@ -1,21 +1,6 @@
# Regression test: Don't forget the parentheses in the value when breaking
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: int = a + 1 * a
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb: Bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb = (
Bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb()
)
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb: (
Bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
)= Bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb()
JSONSerializable: TypeAlias = (
"str | int | float | bool | None | list | tuple | JSONMapping"
)
JSONSerializable: str | int | float | bool | None | list | tuple | JSONMapping = {1, 2, 3, 4}
JSONSerializable: str | int | float | bool | None | list | tuple | JSONMapping = aaaaaaaaaaaaaaaa
# Regression test: Don't forget the parentheses in the annotation when breaking
class DefaultRunner:

View File

@@ -1,39 +1,7 @@
"""
Black's `Preview.module_docstring_newlines`
"""
first_stmt_after_module_level_docstring = 1
# Below is black stable style
# In preview style, black always breaks the right side first
class CachedRepository:
# Black's `Preview.dummy_implementations`
def get_release_info(self): ...
def raw_docstring():
r"""Black's `Preview.accept_raw_docstrings`
a
b
"""
pass
def reference_docstring_newlines():
"""A regular docstring for comparison
a
b
"""
pass
class RemoveNewlineBeforeClassDocstring:
"""Black's `Preview.no_blank_line_before_class_docstring`"""
def f():
"""Black's `Preview.prefer_splitting_right_hand_side_of_assignments`"""
if True:
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa[
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
] = cccccccc.ccccccccccccc.cccccccc

View File

@@ -1,5 +1,3 @@
# comment
class Test(
Aaaaaaaaaaaaaaaaa,
Bbbbbbbbbbbbbbbb,

View File

@@ -14,35 +14,3 @@ return (
len(node.parents) for node in self.node_map.values()
)
)
# Regression tests for https://github.com/astral-sh/ruff/issues/8042
def f():
return (
self.get_filename() + ".csv" +
"text/csv" +
output.getvalue().encode("utf-8----------------"),
)
def f():
return (
self.get_filename() + ".csv" + "text/csv",
output.getvalue().encode("utf-8----------------")
)
def f():
return (
self.get_filename() + ".csv",
"text/csv",
output.getvalue().encode("utf-8----------------")
)
def f():
return self.get_filename() + ".csv" + "text/csv" + output.getvalue().encode("utf-8----------------"),
def f():
return self.get_filename() + ".csv" + "text/csv", output.getvalue().encode("utf-8----------------")
def f():
return self.get_filename() + ".csv", "text/csv", output.getvalue().encode("utf-8----------------")

View File

@@ -6,13 +6,12 @@ use anyhow::{format_err, Context, Result};
use clap::{command, Parser, ValueEnum};
use ruff_formatter::SourceCode;
use ruff_python_ast::PySourceType;
use ruff_python_index::tokens_and_ranges;
use ruff_python_parser::{parse_ok_tokens, AsMode};
use ruff_python_parser::{parse_ok_tokens, Mode};
use ruff_text_size::Ranged;
use crate::comments::collect_comments;
use crate::{format_module_ast, PreviewMode, PyFormatOptions};
use crate::{format_module_ast, PyFormatOptions};
#[derive(ValueEnum, Clone, Debug)]
pub enum Emit {
@@ -24,7 +23,6 @@ pub enum Emit {
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
#[allow(clippy::struct_excessive_bools)] // It's only the dev cli anyways
pub struct Cli {
/// Python files to format. If there are none, stdin will be used. `-` as stdin is not supported
pub files: Vec<PathBuf>,
@@ -35,27 +33,20 @@ pub struct Cli {
#[clap(long)]
pub check: bool,
#[clap(long)]
pub preview: bool,
#[clap(long)]
pub print_ir: bool,
#[clap(long)]
pub print_comments: bool,
}
pub fn format_and_debug_print(source: &str, cli: &Cli, source_path: &Path) -> Result<String> {
let source_type = PySourceType::from(source_path);
let (tokens, comment_ranges) = tokens_and_ranges(source, source_type)
pub fn format_and_debug_print(source: &str, cli: &Cli, source_type: &Path) -> Result<String> {
let (tokens, comment_ranges) = tokens_and_ranges(source)
.map_err(|err| format_err!("Source contains syntax errors {err:?}"))?;
// Parse the AST.
let module = parse_ok_tokens(tokens, source, source_type.as_mode(), "<filename>")
let module = parse_ok_tokens(tokens, source, Mode::Module, "<filename>")
.context("Syntax error in input")?;
let options = PyFormatOptions::from_extension(source_path).with_preview(if cli.preview {
PreviewMode::Enabled
} else {
PreviewMode::Disabled
});
let options = PyFormatOptions::from_extension(source_type);
let source_code = SourceCode::new(source);
let formatted = format_module_ast(&module, &comment_ranges, source, options)

View File

@@ -507,12 +507,13 @@ fn strip_comment_prefix(comment_text: &str) -> FormatResult<&str> {
///
/// For example, given:
/// ```python
/// class Class:
/// def func():
/// ...
/// # comment
/// ```
///
/// This builder will insert two empty lines before the comment.
/// ```
pub(crate) fn empty_lines_before_trailing_comments<'a>(
f: &PyFormatter,
comments: &'a [SourceComment],
@@ -554,69 +555,3 @@ impl Format<PyFormatContext<'_>> for FormatEmptyLinesBeforeTrailingComments<'_>
Ok(())
}
}
/// Format the empty lines between a node and its leading comments.
///
/// For example, given:
/// ```python
/// # comment
///
/// class Class:
/// ...
/// ```
///
/// While `leading_comments` will preserve the existing empty line, this builder will insert an
/// additional empty line before the comment.
pub(crate) fn empty_lines_after_leading_comments<'a>(
f: &PyFormatter,
comments: &'a [SourceComment],
) -> FormatEmptyLinesAfterLeadingComments<'a> {
// Black has different rules for stub vs. non-stub and top level vs. indented
let empty_lines = match (f.options().source_type(), f.context().node_level()) {
(PySourceType::Stub, NodeLevel::TopLevel) => 1,
(PySourceType::Stub, _) => 0,
(_, NodeLevel::TopLevel) => 2,
(_, _) => 1,
};
FormatEmptyLinesAfterLeadingComments {
comments,
empty_lines,
}
}
#[derive(Copy, Clone, Debug)]
pub(crate) struct FormatEmptyLinesAfterLeadingComments<'a> {
/// The leading comments of the node.
comments: &'a [SourceComment],
/// The expected number of empty lines after the leading comments.
empty_lines: u32,
}
impl Format<PyFormatContext<'_>> for FormatEmptyLinesAfterLeadingComments<'_> {
fn fmt(&self, f: &mut Formatter<PyFormatContext>) -> FormatResult<()> {
if let Some(comment) = self
.comments
.iter()
.rev()
.find(|comment| comment.line_position().is_own_line())
{
let actual = lines_after(comment.end(), f.context().source()).saturating_sub(1);
// If there are no empty lines, keep the comment tight to the node.
if actual == 0 {
return Ok(());
}
// If there are more than enough empty lines already, `leading_comments` will
// trim them as necessary.
if actual >= self.empty_lines {
return Ok(());
}
for _ in actual..self.empty_lines {
write!(f, [empty_line()])?;
}
}
Ok(())
}
}

View File

@@ -548,10 +548,10 @@ mod tests {
use insta::assert_debug_snapshot;
use ruff_formatter::SourceCode;
use ruff_python_ast::{Mod, PySourceType};
use ruff_python_ast::Mod;
use ruff_python_index::tokens_and_ranges;
use ruff_python_parser::{parse_ok_tokens, AsMode};
use ruff_python_parser::{parse_ok_tokens, Mode};
use ruff_python_trivia::CommentRanges;
use crate::comments::Comments;
@@ -565,10 +565,9 @@ mod tests {
impl<'a> CommentsTestCase<'a> {
fn from_code(source: &'a str) -> Self {
let source_code = SourceCode::new(source);
let source_type = PySourceType::Python;
let (tokens, comment_ranges) =
tokens_and_ranges(source, source_type).expect("Expect source to be valid Python");
let parsed = parse_ok_tokens(tokens, source, source_type.as_mode(), "test.py")
tokens_and_ranges(source).expect("Expect source to be valid Python");
let parsed = parse_ok_tokens(tokens, source, Mode::Module, "test.py")
.expect("Expect source to be valid Python");
CommentsTestCase {

View File

@@ -506,12 +506,7 @@ const fn is_simple_power_operand(expr: &Expr) -> bool {
op: UnaryOp::Not, ..
}) => false,
Expr::Constant(ExprConstant {
value:
Constant::Complex { .. }
| Constant::Float(_)
| Constant::Int(_)
| Constant::None
| Constant::Bool(_),
value: Constant::Complex { .. } | Constant::Float(_) | Constant::Int(_),
..
}) => true,
Expr::Name(_) => true,

View File

@@ -1,7 +1,6 @@
use ruff_python_ast::ExprIpyEscapeCommand;
use ruff_text_size::Ranged;
use crate::expression::parentheses::{NeedsParentheses, OptionalParentheses};
use crate::prelude::*;
#[derive(Default)]
@@ -12,13 +11,3 @@ impl FormatNodeRule<ExprIpyEscapeCommand> for FormatExprIpyEscapeCommand {
source_text_slice(item.range()).fmt(f)
}
}
impl NeedsParentheses for ExprIpyEscapeCommand {
fn needs_parentheses(
&self,
_parent: ruff_python_ast::AnyNodeRef,
_context: &PyFormatContext,
) -> OptionalParentheses {
OptionalParentheses::Never
}
}

View File

@@ -172,13 +172,10 @@ impl FormatNodeRule<ExprTuple> for FormatExprTuple {
.finish()
}
TupleParentheses::Preserve => group(&ExprSequence::new(item)).fmt(f),
TupleParentheses::NeverPreserve => {
TupleParentheses::NeverPreserve | TupleParentheses::OptionalParentheses => {
optional_parentheses(&ExprSequence::new(item)).fmt(f)
}
TupleParentheses::OptionalParentheses if item.elts.len() == 2 => {
optional_parentheses(&ExprSequence::new(item)).fmt(f)
}
TupleParentheses::Default | TupleParentheses::OptionalParentheses => {
TupleParentheses::Default => {
parenthesize_if_expands(&ExprSequence::new(item)).fmt(f)
}
},

View File

@@ -476,7 +476,7 @@ impl NeedsParentheses for Expr {
Expr::List(expr) => expr.needs_parentheses(parent, context),
Expr::Tuple(expr) => expr.needs_parentheses(parent, context),
Expr::Slice(expr) => expr.needs_parentheses(parent, context),
Expr::IpyEscapeCommand(expr) => expr.needs_parentheses(parent, context),
Expr::IpyEscapeCommand(_) => todo!(),
}
}
}
@@ -526,20 +526,16 @@ fn can_omit_optional_parentheses(expr: &Expr, context: &PyFormatContext) -> bool
&& has_parentheses(expr, context).is_some_and(OwnParentheses::is_non_empty)
}
// Only use the layout if the first expression starts with parentheses
// or the last expression ends with parentheses of some sort, and
// Only use the layout if the first or last expression has parentheses of some sort, and
// those parentheses are non-empty.
if visitor
let first_parenthesized = visitor
.first
.is_some_and(|first| is_parenthesized(first, context));
let last_parenthesized = visitor
.last
.is_some_and(|last| is_parenthesized(last, context))
{
true
} else {
visitor
.first
.expression()
.is_some_and(|first| is_parenthesized(first, context))
}
.is_some_and(|last| is_parenthesized(last, context));
first_parenthesized || last_parenthesized
}
}
@@ -549,7 +545,7 @@ struct CanOmitOptionalParenthesesVisitor<'input> {
max_precedence_count: u32,
any_parenthesized_expressions: bool,
last: Option<&'input Expr>,
first: First<'input>,
first: Option<&'input Expr>,
context: &'input PyFormatContext<'input>,
}
@@ -561,7 +557,7 @@ impl<'input> CanOmitOptionalParenthesesVisitor<'input> {
max_precedence_count: 0,
any_parenthesized_expressions: false,
last: None,
first: First::None,
first: None,
}
}
@@ -674,7 +670,6 @@ impl<'input> CanOmitOptionalParenthesesVisitor<'input> {
if op.is_invert() {
self.update_max_precedence(OperatorPrecedence::BitwiseInversion);
}
self.first.set_if_none(First::Token);
}
// `[a, b].test.test[300].dot`
@@ -711,25 +706,20 @@ impl<'input> CanOmitOptionalParenthesesVisitor<'input> {
self.update_max_precedence(OperatorPrecedence::String);
}
// Expressions with sub expressions but a preceding token
// Mark this expression as first expression and not the sub expression.
Expr::Lambda(_)
| Expr::Await(_)
| Expr::Yield(_)
| Expr::YieldFrom(_)
| Expr::Starred(_) => {
self.first.set_if_none(First::Token);
}
Expr::Tuple(_)
| Expr::NamedExpr(_)
| Expr::GeneratorExp(_)
| Expr::Lambda(_)
| Expr::Await(_)
| Expr::Yield(_)
| Expr::YieldFrom(_)
| Expr::FormattedValue(_)
| Expr::FString(_)
| Expr::Constant(_)
| Expr::Starred(_)
| Expr::Name(_)
| Expr::Slice(_)
| Expr::IpyEscapeCommand(_) => {}
| Expr::Slice(_) => {}
Expr::IpyEscapeCommand(_) => todo!(),
};
walk_expr(self, expr);
@@ -751,32 +741,8 @@ impl<'input> PreorderVisitor<'input> for CanOmitOptionalParenthesesVisitor<'inpu
self.visit_subexpression(expr);
}
self.first.set_if_none(First::Expression(expr));
}
}
#[derive(Copy, Clone, Debug)]
enum First<'a> {
None,
/// Expression starts with a non-parentheses token. E.g. `not a`
Token,
Expression(&'a Expr),
}
impl<'a> First<'a> {
#[inline]
fn set_if_none(&mut self, first: First<'a>) {
if matches!(self, First::None) {
*self = first;
}
}
fn expression(self) -> Option<&'a Expr> {
match self {
First::None | First::Token => None,
First::Expression(expr) => Some(expr),
if self.first.is_none() {
self.first = Some(expr);
}
}
}

View File

@@ -7,7 +7,7 @@ use ruff_python_ast::AstNode;
use ruff_python_ast::Mod;
use ruff_python_index::tokens_and_ranges;
use ruff_python_parser::lexer::LexicalError;
use ruff_python_parser::{parse_ok_tokens, AsMode, ParseError};
use ruff_python_parser::{parse_ok_tokens, Mode, ParseError};
use ruff_python_trivia::CommentRanges;
use ruff_source_file::Locator;
@@ -130,9 +130,8 @@ pub fn format_module_source(
source: &str,
options: PyFormatOptions,
) -> Result<Printed, FormatModuleError> {
let source_type = options.source_type();
let (tokens, comment_ranges) = tokens_and_ranges(source, source_type)?;
let module = parse_ok_tokens(tokens, source, source_type.as_mode(), "<filename>")?;
let (tokens, comment_ranges) = tokens_and_ranges(source)?;
let module = parse_ok_tokens(tokens, source, Mode::Module, "<filename>")?;
let formatted = format_module_ast(&module, &comment_ranges, source, options)?;
Ok(formatted.print()?)
}
@@ -173,10 +172,9 @@ mod tests {
use anyhow::Result;
use insta::assert_snapshot;
use ruff_python_ast::PySourceType;
use ruff_python_index::tokens_and_ranges;
use ruff_python_parser::{parse_ok_tokens, AsMode};
use ruff_python_parser::{parse_ok_tokens, Mode};
use crate::{format_module_ast, format_module_source, PyFormatOptions};
@@ -215,12 +213,11 @@ def main() -> None:
]
"#;
let source_type = PySourceType::Python;
let (tokens, comment_ranges) = tokens_and_ranges(source, source_type).unwrap();
let (tokens, comment_ranges) = tokens_and_ranges(source).unwrap();
// Parse the AST.
let source_path = "code_inline.py";
let module = parse_ok_tokens(tokens, source, source_type.as_mode(), source_path).unwrap();
let module = parse_ok_tokens(tokens, source, Mode::Module, source_path).unwrap();
let options = PyFormatOptions::from_extension(Path::new(source_path));
let formatted = format_module_ast(&module, &comment_ranges, source, options).unwrap();

View File

@@ -250,10 +250,6 @@ impl MagicTrailingComma {
pub const fn is_respect(self) -> bool {
matches!(self, Self::Respect)
}
pub const fn is_ignore(self) -> bool {
matches!(self, Self::Ignore)
}
}
impl FromStr for MagicTrailingComma {

View File

@@ -391,9 +391,7 @@ pub(crate) fn clause_body<'a>(
impl Format<PyFormatContext<'_>> for FormatClauseBody<'_> {
fn fmt(&self, f: &mut Formatter<PyFormatContext<'_>>) -> FormatResult<()> {
// In stable, stubs are only collapsed in stub files, in preview this is consistently
// applied everywhere
if (f.options().source_type().is_stub() || f.options().preview().is_enabled())
if f.options().source_type().is_stub()
&& contains_only_an_ellipsis(self.body, f.context().comments())
&& self.trailing_comments.is_empty()
{

View File

@@ -21,7 +21,12 @@ impl FormatNodeRule<StmtAnnAssign> for FormatStmtAnnAssign {
write!(
f,
[target.format(), token(":"), space(), annotation.format(),]
[
target.format(),
token(":"),
space(),
maybe_parenthesize_expression(annotation, item, Parenthesize::IfBreaks)
]
)?;
if let Some(value) = value {

View File

@@ -3,9 +3,7 @@ use ruff_python_ast::{Decorator, StmtClassDef};
use ruff_python_trivia::lines_after_ignoring_end_of_line_trivia;
use ruff_text_size::Ranged;
use crate::comments::format::{
empty_lines_after_leading_comments, empty_lines_before_trailing_comments,
};
use crate::comments::format::empty_lines_before_trailing_comments;
use crate::comments::{leading_comments, trailing_comments, SourceComment};
use crate::prelude::*;
use crate::statement::clause::{clause_body, clause_header, ClauseHeader};
@@ -34,29 +32,6 @@ impl FormatNodeRule<StmtClassDef> for FormatStmtClassDef {
let (leading_definition_comments, trailing_definition_comments) =
dangling_comments.split_at(trailing_definition_comments_start);
// If the class contains leading comments, insert newlines before them.
// For example, given:
// ```python
// # comment
//
// class Class:
// ...
// ```
//
// At the top-level in a non-stub file, reformat as:
// ```python
// # comment
//
//
// class Class:
// ...
// ```
// Note that this is only really relevant for the specific case in which there's a single
// newline between the comment and the node, but we _require_ two newlines. If there are
// _no_ newlines between the comment and the node, we don't insert _any_ newlines; if there
// are more than two, then `leading_comments` will preserve the correct number of newlines.
empty_lines_after_leading_comments(f, comments.leading(item)).fmt(f)?;
write!(
f,
[

View File

@@ -1,9 +1,7 @@
use ruff_formatter::write;
use ruff_python_ast::StmtFunctionDef;
use crate::comments::format::{
empty_lines_after_leading_comments, empty_lines_before_trailing_comments,
};
use crate::comments::format::empty_lines_before_trailing_comments;
use crate::comments::SourceComment;
use crate::expression::maybe_parenthesize_expression;
use crate::expression::parentheses::{Parentheses, Parenthesize};
@@ -32,29 +30,6 @@ impl FormatNodeRule<StmtFunctionDef> for FormatStmtFunctionDef {
let (leading_definition_comments, trailing_definition_comments) =
dangling_comments.split_at(trailing_definition_comments_start);
// If the class contains leading comments, insert newlines before them.
// For example, given:
// ```python
// # comment
//
// def func():
// ...
// ```
//
// At the top-level in a non-stub file, reformat as:
// ```python
// # comment
//
//
// def func():
// ...
// ```
// Note that this is only really relevant for the specific case in which there's a single
// newline between the comment and the node, but we _require_ two newlines. If there are
// _no_ newlines between the comment and the node, we don't insert _any_ newlines; if there
// are more than two, then `leading_comments` will preserve the correct number of newlines.
empty_lines_after_leading_comments(f, comments.leading(item)).fmt(f)?;
write!(
f,
[

View File

@@ -536,7 +536,7 @@ impl<'ast> IntoFormat<PyFormatContext<'ast>> for Suite {
}
/// A statement representing a docstring.
#[derive(Copy, Clone, Debug)]
#[derive(Copy, Clone)]
pub(crate) struct DocstringStmt<'a>(&'a Stmt);
impl<'a> DocstringStmt<'a> {
@@ -589,7 +589,7 @@ impl Format<PyFormatContext<'_>> for DocstringStmt<'_> {
}
/// A Child of a suite.
#[derive(Copy, Clone, Debug)]
#[derive(Copy, Clone)]
pub(crate) enum SuiteChildStatement<'a> {
/// A docstring documenting a class or function definition.
Docstring(DocstringStmt<'a>),

View File

@@ -327,7 +327,7 @@ fn write_suppressed_statements<'a>(
for range in CommentRangeIter::in_suppression(comments.trailing(statement), source) {
match range {
// All trailing comments are suppressed
// All leading comments are suppressed
// ```python
// statement
// # suppressed
@@ -394,14 +394,10 @@ fn write_suppressed_statements<'a>(
statement = SuiteChildStatement::Other(next_statement);
leading_node_comments = comments.leading(next_statement);
} else {
let mut nodes =
std::iter::successors(Some(AnyNodeRef::from(statement.statement())), |statement| {
statement.last_child_in_body()
});
let end = nodes
.find_map(|statement| comments.trailing(statement).last().map(Ranged::end))
.unwrap_or(statement.end());
let end = comments
.trailing(statement)
.last()
.map_or(statement.end(), Ranged::end);
FormatVerbatimStatementRange {
verbatim_range: TextRange::new(format_off_comment.end(), end),

View File

@@ -1,5 +1,5 @@
use ruff_formatter::FormatOptions;
use ruff_python_formatter::{format_module_source, PreviewMode, PyFormatOptions};
use ruff_python_formatter::{format_module_source, PyFormatOptions};
use similar::TextDiff;
use std::fmt::{Formatter, Write};
use std::io::BufReader;
@@ -142,40 +142,16 @@ fn format() {
} else {
let printed =
format_module_source(&content, options.clone()).expect("Formatting to succeed");
let formatted = printed.as_code();
let formatted_code = printed.as_code();
ensure_stability_when_formatting_twice(formatted, options.clone(), input_path);
ensure_stability_when_formatting_twice(formatted_code, options, input_path);
// We want to capture the differences in the preview style in our fixtures
let options_preview = options.with_preview(PreviewMode::Enabled);
let printed_preview = format_module_source(&content, options_preview.clone())
.expect("Formatting to succeed");
let formatted_preview = printed_preview.as_code();
ensure_stability_when_formatting_twice(
formatted_preview,
options_preview.clone(),
input_path,
);
if formatted == formatted_preview {
writeln!(snapshot, "## Output\n{}", CodeFrame::new("py", &formatted)).unwrap();
} else {
// Having both snapshots makes it hard to see the difference, so we're keeping only
// diff.
writeln!(
snapshot,
"## Output\n{}\n## Preview changes\n{}",
CodeFrame::new("py", &formatted),
CodeFrame::new(
"diff",
TextDiff::from_lines(formatted, formatted_preview)
.unified_diff()
.header("Stable", "Preview")
)
)
.unwrap();
}
writeln!(
snapshot,
"## Output\n{}",
CodeFrame::new("py", &formatted_code)
)
.unwrap();
}
insta::with_settings!({

View File

@@ -162,7 +162,7 @@ def f():
```diff
--- Black
+++ Ruff
@@ -1,29 +1,206 @@
@@ -1,29 +1,205 @@
+# This file doesn't use the standard decomposition.
+# Decorator syntax test cases are separated by double # comments.
+# Those before the 'output' comment are valid under the old syntax.
@@ -172,7 +172,6 @@ def f():
+
+##
+
+
+@decorator
+def f():
+ ...
@@ -210,48 +209,43 @@ def f():
+ ...
+
+
##
-@decorator()()
+##
+
+
+@decorator(**kwargs)
def f():
...
+def f():
+ ...
+
+
+##
+
##
-@(decorator)
+
+@decorator(*args, **kwargs)
def f():
...
+def f():
+ ...
+
+
+##
+
##
-@sequence["decorator"]
+
+@decorator(
+ *args,
+ **kwargs,
+)
def f():
...
+def f():
+ ...
+
+
+##
+
##
-@decorator[List[str]]
+
+@dotted.decorator
def f():
...
+def f():
+ ...
+
+
+##
+
##
-@var := decorator
+
+@dotted.decorator(arg)
+def f():
@@ -266,43 +260,48 @@ def f():
+ ...
+
+
+##
+
##
-@decorator()()
+
+@dotted.decorator(*args)
+def f():
+ ...
+
+
+##
def f():
...
+
##
-@(decorator)
+
+@dotted.decorator(**kwargs)
+def f():
+ ...
+
+
+##
def f():
...
+
##
-@sequence["decorator"]
+
+@dotted.decorator(*args, **kwargs)
+def f():
+ ...
+
+
+##
def f():
...
+
##
-@decorator[List[str]]
+
+@dotted.decorator(
+ *args,
+ **kwargs,
+)
+def f():
+ ...
+
+
+##
def f():
...
+
##
-@var := decorator
+
+@double.dotted.decorator
+def f():
@@ -388,7 +387,6 @@ def f():
##
@decorator
def f():
...

View File

@@ -5,7 +5,6 @@ input_file: crates/ruff_python_formatter/resources/test/fixtures/black/miscellan
## Input
```py
# flags: --pyi
from typing import Union
@bird
@@ -43,8 +42,7 @@ def eggs() -> Union[str, int]: ...
```diff
--- Black
+++ Ruff
@@ -1,32 +1,59 @@
+# flags: --pyi
@@ -1,32 +1,58 @@
from typing import Union
+
@@ -69,13 +67,13 @@ def eggs() -> Union[str, int]: ...
- def BMethod(self, arg: List[str]) -> None: ...
+ def BMethod(self, arg: List[str]) -> None:
+ ...
+
+
+class C:
+ ...
-class C: ...
+class C:
+ ...
+
+
@hmm
-class D: ...
+class D:
@@ -120,7 +118,6 @@ def eggs() -> Union[str, int]: ...
## Ruff Output
```py
# flags: --pyi
from typing import Union

View File

@@ -6,7 +6,6 @@ input_file: crates/ruff_python_formatter/resources/test/fixtures/black/miscellan
```py
''''''
'\''
'"'
"'"
@@ -70,7 +69,7 @@ f"\"{a}\"{'hello' * b}\"{c}\""
```diff
--- Black
+++ Ruff
@@ -25,7 +25,12 @@
@@ -24,7 +24,12 @@
r'Tricky "quote'
r"Not-so-tricky \"quote"
rf"{yay}"
@@ -90,7 +89,6 @@ f"\"{a}\"{'hello' * b}\"{c}\""
```py
""""""
"'"
'"'
"'"
@@ -153,7 +151,6 @@ f"\"{a}\"{'hello' * b}\"{c}\""
```py
""""""
"'"
'"'
"'"

View File

@@ -0,0 +1,86 @@
---
source: crates/ruff_python_formatter/tests/fixtures.rs
input_file: crates/ruff_python_formatter/resources/test/fixtures/black/py_39/python39.py
---
## Input
```py
#!/usr/bin/env python3.9
@relaxed_decorator[0]
def f():
...
@relaxed_decorator[extremely_long_name_that_definitely_will_not_fit_on_one_line_of_standard_length]
def f():
...
@extremely_long_variable_name_that_doesnt_fit := complex.expression(with_long="arguments_value_that_wont_fit_at_the_end_of_the_line")
def f():
...
```
## Black Differences
```diff
--- Black
+++ Ruff
@@ -1,6 +1,5 @@
#!/usr/bin/env python3.9
-
@relaxed_decorator[0]
def f():
...
```
## Ruff Output
```py
#!/usr/bin/env python3.9
@relaxed_decorator[0]
def f():
...
@relaxed_decorator[
extremely_long_name_that_definitely_will_not_fit_on_one_line_of_standard_length
]
def f():
...
@extremely_long_variable_name_that_doesnt_fit := complex.expression(
with_long="arguments_value_that_wont_fit_at_the_end_of_the_line"
)
def f():
...
```
## Black Output
```py
#!/usr/bin/env python3.9
@relaxed_decorator[0]
def f():
...
@relaxed_decorator[
extremely_long_name_that_definitely_will_not_fit_on_one_line_of_standard_length
]
def f():
...
@extremely_long_variable_name_that_doesnt_fit := complex.expression(
with_long="arguments_value_that_wont_fit_at_the_end_of_the_line"
)
def f():
...
```

View File

@@ -1,92 +0,0 @@
---
source: crates/ruff_python_formatter/tests/fixtures.rs
input_file: crates/ruff_python_formatter/resources/test/fixtures/black/raw_docstring.py
---
## Input
```py
# flags: --preview --skip-string-normalization
class C:
r"""Raw"""
def f():
r"""Raw"""
class SingleQuotes:
r'''Raw'''
class UpperCaseR:
R"""Raw"""
```
## Black Differences
```diff
--- Black
+++ Ruff
@@ -1,4 +1,6 @@
+# flags: --preview --skip-string-normalization
class C:
+
r"""Raw"""
@@ -7,8 +9,9 @@
class SingleQuotes:
- r'''Raw'''
+ r"""Raw"""
+
class UpperCaseR:
R"""Raw"""
```
## Ruff Output
```py
# flags: --preview --skip-string-normalization
class C:
r"""Raw"""
def f():
r"""Raw"""
class SingleQuotes:
r"""Raw"""
class UpperCaseR:
R"""Raw"""
```
## Black Output
```py
class C:
r"""Raw"""
def f():
r"""Raw"""
class SingleQuotes:
r'''Raw'''
class UpperCaseR:
R"""Raw"""
```

View File

@@ -1,34 +0,0 @@
---
source: crates/ruff_python_formatter/tests/fixtures.rs
input_file: crates/ruff_python_formatter/resources/test/fixtures/ruff/expression/binary_pow_spacing.py
---
## Input
```py
# No spacing
5 ** 5
5.0 ** 5.0
1e5 ** 2e5
True ** True
False ** False
None ** None
# Space
"a" ** "b"
```
## Output
```py
# No spacing
5**5
5.0**5.0
1e5**2e5
True**True
False**False
None**None
# Space
"a" ** "b"
```

View File

@@ -186,19 +186,6 @@ if "root" not in (
):
msg = "Could not find root. Please try a different forest."
raise ValueError(msg)
# Regression for https://github.com/astral-sh/ruff/issues/8183
def foo():
while (
not (aaaaaaaaaaaaaaaaaaaaa(bbbbbbbb, ccccccc)) and dddddddddd < eeeeeeeeeeeeeee
):
pass
def foo():
while (
not (aaaaaaaaaaaaaaaaaaaaa[bbbbbbbb, ccccccc]) and dddddddddd < eeeeeeeeeeeeeee
):
pass
```
## Output
@@ -305,13 +292,10 @@ if aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa & (
):
pass
if (
not (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
)
& aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
):
if not (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
) & aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:
pass
@@ -399,21 +383,6 @@ if "root" not in (
):
msg = "Could not find root. Please try a different forest."
raise ValueError(msg)
# Regression for https://github.com/astral-sh/ruff/issues/8183
def foo():
while (
not (aaaaaaaaaaaaaaaaaaaaa(bbbbbbbb, ccccccc)) and dddddddddd < eeeeeeeeeeeeeee
):
pass
def foo():
while (
not (aaaaaaaaaaaaaaaaaaaaa[bbbbbbbb, ccccccc]) and dddddddddd < eeeeeeeeeeeeeee
):
pass
```

View File

@@ -1,30 +0,0 @@
---
source: crates/ruff_python_formatter/tests/fixtures.rs
input_file: crates/ruff_python_formatter/resources/test/fixtures/ruff/fmt_on_off/fmt_off_unclosed_deep_nested_trailing_comment.py
---
## Input
```py
# Regression test for https://github.com/astral-sh/ruff/issues/8211
# fmt: off
from dataclasses import dataclass
if True:
if False:
x: int # Optional[int]
```
## Output
```py
# Regression test for https://github.com/astral-sh/ruff/issues/8211
# fmt: off
from dataclasses import dataclass
if True:
if False:
x: int # Optional[int]
```

View File

@@ -1,30 +0,0 @@
---
source: crates/ruff_python_formatter/tests/fixtures.rs
input_file: crates/ruff_python_formatter/resources/test/fixtures/ruff/fmt_on_off/fmt_off_unclosed_trailing_comment.py
---
## Input
```py
# Regression test for https://github.com/astral-sh/ruff/issues/8211
# fmt: off
from dataclasses import dataclass
@dataclass
class A:
x: int # Optional[int]
```
## Output
```py
# Regression test for https://github.com/astral-sh/ruff/issues/8211
# fmt: off
from dataclasses import dataclass
@dataclass
class A:
x: int # Optional[int]
```

View File

@@ -20,7 +20,6 @@ def test():
# fmt: on
def test():
pass
```

View File

@@ -93,21 +93,4 @@ def test3 ():
```
## Preview changes
```diff
--- Stable
+++ Preview
@@ -21,8 +21,7 @@
# formatted
-def test2():
- ...
+def test2(): ...
a = 10
```

View File

@@ -549,27 +549,4 @@ if True:
```
## Preview changes
```diff
--- Stable
+++ Preview
@@ -245,13 +245,11 @@
class Path:
if sys.version_info >= (3, 11):
- def joinpath(self):
- ...
+ def joinpath(self): ...
# The .open method comes from pathlib.pyi and should be kept in sync.
@overload
- def open(self):
- ...
+ def open(self): ...
def fakehttp():
```

View File

@@ -7,21 +7,6 @@ input_file: crates/ruff_python_formatter/resources/test/fixtures/ruff/statement/
# Regression test: Don't forget the parentheses in the value when breaking
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: int = a + 1 * a
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb: Bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb = (
Bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb()
)
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb: (
Bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
)= Bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb()
JSONSerializable: TypeAlias = (
"str | int | float | bool | None | list | tuple | JSONMapping"
)
JSONSerializable: str | int | float | bool | None | list | tuple | JSONMapping = {1, 2, 3, 4}
JSONSerializable: str | int | float | bool | None | list | tuple | JSONMapping = aaaaaaaaaaaaaaaa
# Regression test: Don't forget the parentheses in the annotation when breaking
class DefaultRunner:
@@ -35,35 +20,12 @@ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: int =
a + 1 * a
)
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb: Bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb = (
Bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb()
)
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb: (
Bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
) = Bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb()
JSONSerializable: TypeAlias = (
"str | int | float | bool | None | list | tuple | JSONMapping"
)
JSONSerializable: str | int | float | bool | None | list | tuple | JSONMapping = {
1,
2,
3,
4,
}
JSONSerializable: str | int | float | bool | None | list | tuple | JSONMapping = (
aaaaaaaaaaaaaaaa
)
# Regression test: Don't forget the parentheses in the annotation when breaking
class DefaultRunner:
task_runner_cls: TaskRunnerProtocol | typing.Callable[
[], typing.Any
] = DefaultTaskRunner
task_runner_cls: (
TaskRunnerProtocol | typing.Callable[[], typing.Any]
) = DefaultTaskRunner
```

View File

@@ -1,45 +1,13 @@
---
source: crates/ruff_python_formatter/tests/fixtures.rs
input_file: crates/ruff_python_formatter/resources/test/fixtures/ruff/preview.py
input_file: crates/ruff_python_formatter/resources/test/fixtures/ruff/statement/assign_breaking.py
---
## Input
```py
"""
Black's `Preview.module_docstring_newlines`
"""
first_stmt_after_module_level_docstring = 1
# Below is black stable style
# In preview style, black always breaks the right side first
class CachedRepository:
# Black's `Preview.dummy_implementations`
def get_release_info(self): ...
def raw_docstring():
r"""Black's `Preview.accept_raw_docstrings`
a
b
"""
pass
def reference_docstring_newlines():
"""A regular docstring for comparison
a
b
"""
pass
class RemoveNewlineBeforeClassDocstring:
"""Black's `Preview.no_blank_line_before_class_docstring`"""
def f():
"""Black's `Preview.prefer_splitting_right_hand_side_of_assignments`"""
if True:
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa[
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
] = cccccccc.ccccccccccccc.cccccccc
@@ -84,41 +52,10 @@ preview = Disabled
```
```py
"""
Black's `Preview.module_docstring_newlines`
"""
first_stmt_after_module_level_docstring = 1
# Below is black stable style
# In preview style, black always breaks the right side first
class CachedRepository:
# Black's `Preview.dummy_implementations`
def get_release_info(self):
...
def raw_docstring():
r"""Black's `Preview.accept_raw_docstrings`
a
b
"""
pass
def reference_docstring_newlines():
"""A regular docstring for comparison
a
b
"""
pass
class RemoveNewlineBeforeClassDocstring:
"""Black's `Preview.no_blank_line_before_class_docstring`"""
def f():
"""Black's `Preview.prefer_splitting_right_hand_side_of_assignments`"""
if True:
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa[
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
] = cccccccc.ccccccccccccc.cccccccc
@@ -163,40 +100,10 @@ preview = Enabled
```
```py
"""
Black's `Preview.module_docstring_newlines`
"""
first_stmt_after_module_level_docstring = 1
# Below is black stable style
# In preview style, black always breaks the right side first
class CachedRepository:
# Black's `Preview.dummy_implementations`
def get_release_info(self): ...
def raw_docstring():
r"""Black's `Preview.accept_raw_docstrings`
a
b
"""
pass
def reference_docstring_newlines():
"""A regular docstring for comparison
a
b
"""
pass
class RemoveNewlineBeforeClassDocstring:
"""Black's `Preview.no_blank_line_before_class_docstring`"""
def f():
"""Black's `Preview.prefer_splitting_right_hand_side_of_assignments`"""
if True:
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa[
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
] = cccccccc.ccccccccccccc.cccccccc

View File

@@ -4,8 +4,6 @@ input_file: crates/ruff_python_formatter/resources/test/fixtures/ruff/statement/
---
## Input
```py
# comment
class Test(
Aaaaaaaaaaaaaaaaa,
Bbbbbbbbbbbbbbbb,
@@ -234,9 +232,6 @@ class QuerySet(AltersData):
## Output
```py
# comment
class Test(
Aaaaaaaaaaaaaaaaa,
Bbbbbbbbbbbbbbbb,
@@ -499,45 +494,4 @@ class QuerySet(AltersData):
```
## Preview changes
```diff
--- Stable
+++ Preview
@@ -28,8 +28,7 @@
pass
-class Test((Aaaa)):
- ...
+class Test((Aaaa)): ...
class Test(
@@ -159,20 +158,17 @@
@dataclass
# Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->AltCLIP
-class AltCLIPOutput(ModelOutput):
- ...
+class AltCLIPOutput(ModelOutput): ...
@dataclass
-class AltCLIPOutput: # Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->AltCLIP
- ...
+class AltCLIPOutput: ... # Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->AltCLIP
@dataclass
class AltCLIPOutput(
# Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->AltCLIP
-):
- ...
+): ...
class TestTypeParams[
```

View File

@@ -996,167 +996,4 @@ def default_arg_comments2( #
```
## Preview changes
```diff
--- Stable
+++ Preview
@@ -2,8 +2,7 @@
def test(
# comment
# another
-):
- ...
+): ...
# Argument empty line spacing
@@ -12,8 +11,7 @@
a,
# another
b,
-):
- ...
+): ...
### Different function argument wrappings
@@ -57,8 +55,7 @@
b,
# comment
*args,
-):
- ...
+): ...
def kwarg_with_leading_comments(
@@ -66,8 +63,7 @@
b,
# comment
**kwargs,
-):
- ...
+): ...
def argument_with_long_default(
@@ -75,8 +71,7 @@
b=ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
+ [dddddddddddddddddddd, eeeeeeeeeeeeeeeeeeee, ffffffffffffffffffffffff],
h=[],
-):
- ...
+): ...
def argument_with_long_type_annotation(
@@ -85,12 +80,10 @@
| yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
| zzzzzzzzzzzzzzzzzzz = [0, 1, 2, 3],
h=[],
-):
- ...
+): ...
-def test():
- ...
+def test(): ...
# Type parameter empty line spacing
@@ -99,8 +92,7 @@
A,
# another
B,
-]():
- ...
+](): ...
# Type parameter comments
@@ -159,8 +151,7 @@
# Comment
-def with_leading_comment():
- ...
+def with_leading_comment(): ...
# Comment that could be mistaken for a trailing comment of the function declaration when
@@ -192,8 +183,7 @@
# Regression test for https://github.com/astral-sh/ruff/issues/5176#issuecomment-1598171989
def foo(
b=3 + 2, # comment
-):
- ...
+): ...
# Comments on the slash or the star, both of which don't have a node
@@ -454,8 +444,7 @@
def f(
# first
# second
-):
- ...
+): ...
def f( # first
@@ -475,8 +464,7 @@
# first
b,
# second
-):
- ...
+): ...
def f( # first
@@ -484,8 +472,7 @@
# second
b,
# third
-):
- ...
+): ...
def f( # first
@@ -494,8 +481,7 @@
# third
b,
# fourth
-):
- ...
+): ...
def f( # first
@@ -522,17 +508,14 @@
a,
# third
/, # second
-):
- ...
+): ...
# Walrus operator in return type.
-def this_is_unusual() -> (please := no):
- ...
+def this_is_unusual() -> (please := no): ...
-def this_is_unusual(x) -> (please := no):
- ...
+def this_is_unusual(x) -> (please := no): ...
# Regression test for: https://github.com/astral-sh/ruff/issues/7465
```

View File

@@ -20,38 +20,6 @@ return (
len(node.parents) for node in self.node_map.values()
)
)
# Regression tests for https://github.com/astral-sh/ruff/issues/8042
def f():
return (
self.get_filename() + ".csv" +
"text/csv" +
output.getvalue().encode("utf-8----------------"),
)
def f():
return (
self.get_filename() + ".csv" + "text/csv",
output.getvalue().encode("utf-8----------------")
)
def f():
return (
self.get_filename() + ".csv",
"text/csv",
output.getvalue().encode("utf-8----------------")
)
def f():
return self.get_filename() + ".csv" + "text/csv" + output.getvalue().encode("utf-8----------------"),
def f():
return self.get_filename() + ".csv" + "text/csv", output.getvalue().encode("utf-8----------------")
def f():
return self.get_filename() + ".csv", "text/csv", output.getvalue().encode("utf-8----------------")
```
## Output
@@ -70,54 +38,6 @@ return (
len(self.nodeseeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee),
sum(len(node.parents) for node in self.node_map.values()),
)
# Regression tests for https://github.com/astral-sh/ruff/issues/8042
def f():
return (
self.get_filename()
+ ".csv"
+ "text/csv"
+ output.getvalue().encode("utf-8----------------"),
)
def f():
return (
self.get_filename() + ".csv" + "text/csv",
output.getvalue().encode("utf-8----------------"),
)
def f():
return (
self.get_filename() + ".csv",
"text/csv",
output.getvalue().encode("utf-8----------------"),
)
def f():
return (
self.get_filename()
+ ".csv"
+ "text/csv"
+ output.getvalue().encode("utf-8----------------"),
)
def f():
return self.get_filename() + ".csv" + "text/csv", output.getvalue().encode(
"utf-8----------------"
)
def f():
return (
self.get_filename() + ".csv",
"text/csv",
output.getvalue().encode("utf-8----------------"),
)
```

View File

@@ -544,298 +544,4 @@ def process_board_action(
```
## Preview changes
```diff
--- Stable
+++ Preview
@@ -7,8 +7,7 @@
start: int | None = None,
num: int | None = None,
) -> ( # type: ignore[override]
-):
- ...
+): ...
def zrevrangebylex(
@@ -20,8 +19,7 @@
num: int | None = None,
) -> ( # type: ignore[override]
# comment
-):
- ...
+): ...
def zrevrangebylex(
@@ -33,8 +31,7 @@
num: int | None = None,
) -> ( # type: ignore[override]
1
-):
- ...
+): ...
def zrevrangebylex(
@@ -47,8 +44,7 @@
) -> ( # type: ignore[override]
1,
2,
-):
- ...
+): ...
def zrevrangebylex(
@@ -60,14 +56,12 @@
num: int | None = None,
) -> ( # type: ignore[override]
(1, 2)
-):
- ...
+): ...
def handleMatch( # type: ignore[override] # https://github.com/python/mypy/issues/10197
self, m: Match[str], data: str
-) -> Union[Tuple[None, None, None], Tuple[Element, int, int]]:
- ...
+) -> Union[Tuple[None, None, None], Tuple[Element, int, int]]: ...
def double(
@@ -95,50 +89,44 @@
# function arguments break here with a single argument; we do not.)
def f(
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
-) -> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:
- ...
+) -> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: ...
def f(
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa, a
-) -> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:
- ...
+) -> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: ...
def f(
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
-) -> a:
- ...
+) -> a: ...
def f(
a
-) -> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:
- ...
+) -> (
+ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+): ...
def f[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa]() -> (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
-):
- ...
+): ...
def f[
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
-]() -> a:
- ...
+]() -> a: ...
def f[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa](
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
-) -> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:
- ...
+) -> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: ...
def f[aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa](
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
-) -> a:
- ...
+) -> a: ...
# Breaking return type annotations. Black adds parentheses if the parameters are
@@ -147,137 +135,126 @@
Set[
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
]
-):
- ...
+): ...
def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> (
Set[
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
]
-):
- ...
+): ...
def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> (
Set[
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
]
-):
- ...
+): ...
def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> (
Set[
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
]
-):
- ...
+): ...
def xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
x
) -> Set[
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
-]:
- ...
+]: ...
def xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
x
) -> Set[
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
-]:
- ...
+]: ...
def xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
*args
) -> Set[
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
-]:
- ...
+]: ...
def xxxxxxxxxxxxxxxxxxxxxxxxxxxx( # foo
) -> Set[
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
-]:
- ...
+]: ...
def xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
# bar
) -> Set[
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
-]:
- ...
+]: ...
def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> (
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-):
- ...
+): ...
def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> (
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-):
- ...
+): ...
def xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
x
-) -> xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:
- ...
+) -> xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: ...
def xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
x
-) -> xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx:
- ...
+) -> (
+ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+): ...
-def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> X + Y + foooooooooooooooooooooooooooooooooooo():
- ...
+def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> (
+ X + Y + foooooooooooooooooooooooooooooooooooo()
+): ...
-def xxxxxxxxxxxxxxxxxxxxxxxxxxxx(x) -> X + Y + foooooooooooooooooooooooooooooooooooo():
- ...
+def xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
+ x
+) -> X + Y + foooooooooooooooooooooooooooooooooooo(): ...
def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> (
X and Y and foooooooooooooooooooooooooooooooooooo()
-):
- ...
+): ...
def xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
x
-) -> X and Y and foooooooooooooooooooooooooooooooooooo():
- ...
+) -> X and Y and foooooooooooooooooooooooooooooooooooo(): ...
-def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> X | Y | foooooooooooooooooooooooooooooooooooo():
- ...
+def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> (
+ X | Y | foooooooooooooooooooooooooooooooooooo()
+): ...
-def xxxxxxxxxxxxxxxxxxxxxxxxxxxx(x) -> X | Y | foooooooooooooooooooooooooooooooooooo():
- ...
+def xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
+ x
+) -> X | Y | foooooooooooooooooooooooooooooooooooo(): ...
def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> (
X | Y | foooooooooooooooooooooooooooooooooooo() # comment
-):
- ...
+): ...
def xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
x
) -> (
X | Y | foooooooooooooooooooooooooooooooooooo() # comment
-):
- ...
+): ...
def double() -> (
```

View File

@@ -115,57 +115,4 @@ def quuz():
```
## Preview changes
```diff
--- Stable
+++ Preview
@@ -12,25 +12,20 @@
pass
-class Del(expr_context):
- ...
+class Del(expr_context): ...
-class Load(expr_context):
- ...
+class Load(expr_context): ...
# Some comment.
-class Other(expr_context):
- ...
+class Other(expr_context): ...
-class Store(expr_context):
- ...
+class Store(expr_context): ...
-class Foo(Bar):
- ...
+class Foo(Bar): ...
class Baz(Qux):
@@ -49,12 +44,10 @@
pass
-def bar():
- ...
+def bar(): ...
-def baz():
- ...
+def baz(): ...
def quux():
```

View File

@@ -1,8 +1,7 @@
use std::fmt::Debug;
use ruff_python_ast::PySourceType;
use ruff_python_parser::lexer::{lex, LexicalError};
use ruff_python_parser::{AsMode, Tok};
use ruff_python_parser::{Mode, Tok};
use ruff_python_trivia::CommentRanges;
use ruff_text_size::TextRange;
@@ -26,12 +25,11 @@ impl CommentRangesBuilder {
/// Helper method to lex and extract comment ranges
pub fn tokens_and_ranges(
source: &str,
source_type: PySourceType,
) -> Result<(Vec<(Tok, TextRange)>, CommentRanges), LexicalError> {
let mut tokens = Vec::new();
let mut comment_ranges = CommentRangesBuilder::default();
for result in lex(source, source_type.as_mode()) {
for result in lex(source, Mode::Module) {
let (token, range) = result?;
comment_ranges.visit_token(&token, range);

View File

@@ -5,11 +5,8 @@ use ruff_text_size::{TextRange, TextSize};
/// Stores the ranges of all f-strings in a file sorted by [`TextRange::start`].
/// There can be multiple overlapping ranges for nested f-strings.
///
/// Note that the ranges for all unterminated f-strings are not stored.
#[derive(Debug)]
pub struct FStringRanges {
// Mapping from the f-string start location to its range.
raw: BTreeMap<TextSize, TextRange>,
}
@@ -92,6 +89,7 @@ impl FStringRangesBuilder {
}
pub(crate) fn finish(self) -> FStringRanges {
debug_assert!(self.start_locations.is_empty());
FStringRanges { raw: self.raw }
}
}

View File

@@ -22,7 +22,6 @@ bitflags = { workspace = true }
is-macro = { workspace = true }
itertools = { workspace = true }
lalrpop-util = { version = "0.20.0", default-features = false }
memchr = { workspace = true }
unicode-ident = { workspace = true }
unicode_names2 = { workspace = true }
rustc-hash = { workspace = true }

View File

@@ -407,9 +407,7 @@ impl<'source> Lexer<'source> {
#[cfg(debug_assertions)]
debug_assert_eq!(self.cursor.previous(), '#');
let bytes = self.cursor.rest().as_bytes();
let offset = memchr::memchr2(b'\n', b'\r', bytes).unwrap_or(bytes.len());
self.cursor.skip_bytes(offset);
self.cursor.eat_while(|c| !matches!(c, '\n' | '\r'));
Tok::Comment(self.token_text().to_string())
}

View File

@@ -127,21 +127,4 @@ impl<'a> Cursor<'a> {
self.bump();
}
}
/// Skips the next `count` bytes.
///
/// ## Panics
/// - If `count` is larger than the remaining bytes in the input stream.
/// - If `count` indexes into a multi-byte character.
pub(super) fn skip_bytes(&mut self, count: usize) {
#[cfg(debug_assertions)]
{
self.prev_char = self.chars.as_str()[..count]
.chars()
.next_back()
.unwrap_or('\0');
}
self.chars = self.chars.as_str()[count..].chars();
}
}

View File

@@ -1,6 +1,6 @@
[package]
name = "ruff_shrinking"
version = "0.1.3"
version = "0.1.2"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

View File

@@ -24,8 +24,8 @@ use ruff_linter::rule_selector::{PreviewOptions, Specificity};
use ruff_linter::rules::pycodestyle;
use ruff_linter::settings::rule_table::RuleTable;
use ruff_linter::settings::types::{
FilePattern, FilePatternSet, PerFileIgnore, PreviewMode, PythonVersion, SerializationFormat,
UnsafeFixes, Version,
CodePattern, FilePattern, FilePatternSet, PerFileIgnore, PreviewMode, PythonVersion,
SerializationFormat, UnsafeFixes, Version,
};
use ruff_linter::settings::{
resolve_per_file_ignores, LinterSettings, DEFAULT_SELECTORS, DUMMY_VARIABLE_RGX, TASK_TAGS,
@@ -226,9 +226,17 @@ impl Configuration {
dummy_variable_rgx: lint
.dummy_variable_rgx
.unwrap_or_else(|| DUMMY_VARIABLE_RGX.clone()),
external: lint.external.unwrap_or_default(),
external: lint
.external
.map(|code| {
code.into_iter()
.map(|name| CodePattern::new(&name))
.collect()
})
.transpose()
.map_err(|e| anyhow!("Invalid `external` value: {e}"))?
.unwrap_or_default(),
ignore_init_module_imports: lint.ignore_init_module_imports.unwrap_or_default(),
line_length,
tab_size: self.indent_width.unwrap_or_default(),
namespace_packages: self.namespace_packages.unwrap_or_default(),
per_file_ignores: resolve_per_file_ignores(

View File

@@ -352,7 +352,7 @@ pub struct Options {
// Global Formatting options
/// The line length to use when enforcing long-lines violations (like `E501`)
/// and at which `isort` and the formatter prefers to wrap lines.
/// and at which the formatter prefers to wrap lines.
///
/// The length is determined by the number of characters per line, except for lines containing East Asian characters or emojis.
/// For these lines, the [unicode width](https://unicode.org/reports/tr11/) of each character is added up to determine the length.
@@ -548,17 +548,20 @@ pub struct LintCommonOptions {
)]
pub extend_unfixable: Option<Vec<RuleSelector>>,
/// A list of rule codes or prefixes that are unsupported by Ruff, but should be
/// A list of rule codes that are unsupported by Ruff, but should be
/// preserved when (e.g.) validating `# noqa` directives. Useful for
/// retaining `# noqa` directives that cover plugins not yet implemented
/// by Ruff.
///
/// Supports glob patterns. For more information on the glob syntax, refer
/// to the [`globset` documentation](https://docs.rs/globset/latest/globset/#syntax).
#[option(
default = "[]",
value_type = "list[str]",
example = r#"
# Avoiding flagging (and removing) any codes starting with `V` from any
# `# noqa` directives, despite Ruff's lack of support for `vulture`.
external = ["V"]
# Avoiding flagging (and removing) `V101` from any `# noqa`
# directives, despite Ruff's lack of support for `vulture`.
external = ["V101"]
"#
)]
pub external: Option<Vec<String>>,
@@ -1412,9 +1415,6 @@ impl Flake8PytestStyleOptions {
pub struct Flake8QuotesOptions {
/// Quote style to prefer for inline strings (either "single" or
/// "double").
///
/// When using the formatter, ensure that `format.quote-style` is set to
/// the same preferred quote style.
#[option(
default = r#""double""#,
value_type = r#""single" | "double""#,
@@ -1426,9 +1426,6 @@ pub struct Flake8QuotesOptions {
/// Quote style to prefer for multiline strings (either "single" or
/// "double").
///
/// When using the formatter, only "double" is compatible, as the formatter
/// enforces double quotes for multiline strings.
#[option(
default = r#""double""#,
value_type = r#""single" | "double""#,
@@ -1439,9 +1436,6 @@ pub struct Flake8QuotesOptions {
pub multiline_quotes: Option<Quote>,
/// Quote style to prefer for docstrings (either "single" or "double").
///
/// When using the formatter, only "double" is compatible, as the formatter
/// enforces double quotes for docstrings strings.
#[option(
default = r#""double""#,
value_type = r#""single" | "double""#,
@@ -1692,9 +1686,6 @@ pub struct IsortOptions {
/// `combine-as-imports = true`. When `combine-as-imports` isn't
/// enabled, every aliased `import from` will be given its own line, in
/// which case, wrapping is not necessary.
///
/// When using the formatter, ensure that `format.skip-magic-trailing-comma` is set to `false` (default)
/// when enabling `force-wrap-aliases` to avoid that the formatter collapses members if they all fit on a single line.
#[option(
default = r#"false"#,
value_type = "bool",
@@ -1738,9 +1729,6 @@ pub struct IsortOptions {
/// the imports will never be folded into one line.
///
/// See isort's [`split-on-trailing-comma`](https://pycqa.github.io/isort/docs/configuration/options.html#split-on-trailing-comma) option.
///
/// When using the formatter, ensure that `format.skip-magic-trailing-comma` is set to `false` (default) when enabling `split-on-trailing-comma`
/// to avoid that the formatter removes the trailing commas.
#[option(
default = r#"true"#,
value_type = "bool",
@@ -1922,9 +1910,6 @@ pub struct IsortOptions {
/// The number of blank lines to place after imports.
/// Use `-1` for automatic determination.
///
/// When using the formatter, only the values `-1`, `1`, and `2` are compatible because
/// it enforces at least one empty and at most two empty lines after imports.
#[option(
default = r#"-1"#,
value_type = "int",
@@ -1936,9 +1921,6 @@ pub struct IsortOptions {
pub lines_after_imports: Option<isize>,
/// The number of lines to place between "direct" and `import from` imports.
///
/// When using the formatter, only the values `0` and `1` are compatible because
/// it preserves up to one empty line after imports in nested blocks.
#[option(
default = r#"0"#,
value_type = "int",
@@ -2322,7 +2304,7 @@ pub struct PycodestyleOptions {
/// documentation (`W505`), including standalone comments. By default,
/// this is set to null which disables reporting violations.
///
/// The length is determined by the number of characters per line, except for lines containing Asian characters or emojis.
/// The length is determined by the number of characters per line, except for lines containinAsian characters or emojis.
/// For these lines, the [unicode width](https://unicode.org/reports/tr11/) of each character is added up to determine the length.
///
/// See the [`doc-line-too-long`](https://docs.astral.sh/ruff/rules/doc-line-too-long/) rule for more information.

View File

@@ -9,7 +9,6 @@ use std::sync::RwLock;
use anyhow::Result;
use anyhow::{anyhow, bail};
use globset::{Candidate, GlobSet};
use ignore::{WalkBuilder, WalkState};
use itertools::Itertools;
use log::debug;
@@ -334,18 +333,12 @@ pub fn python_files_in_path(
let resolver = resolver.read().unwrap();
let settings = resolver.resolve(path, pyproject_config);
if let Some(file_name) = path.file_name() {
let file_path = Candidate::new(path);
let file_basename = Candidate::new(file_name);
if match_candidate_exclusion(
&file_path,
&file_basename,
&settings.file_resolver.exclude,
) {
if match_exclusion(path, file_name, &settings.file_resolver.exclude) {
debug!("Ignored path via `exclude`: {:?}", path);
return WalkState::Skip;
} else if match_candidate_exclusion(
&file_path,
&file_basename,
} else if match_exclusion(
path,
file_name,
&settings.file_resolver.extend_exclude,
) {
debug!("Ignored path via `extend-exclude`: {:?}", path);
@@ -516,20 +509,10 @@ fn is_file_excluded(
for path in path.ancestors() {
let settings = resolver.resolve(path, pyproject_strategy);
if let Some(file_name) = path.file_name() {
let file_path = Candidate::new(path);
let file_basename = Candidate::new(file_name);
if match_candidate_exclusion(
&file_path,
&file_basename,
&settings.file_resolver.exclude,
) {
if match_exclusion(path, file_name, &settings.file_resolver.exclude) {
debug!("Ignored path via `exclude`: {:?}", path);
return true;
} else if match_candidate_exclusion(
&file_path,
&file_basename,
&settings.file_resolver.extend_exclude,
) {
} else if match_exclusion(path, file_name, &settings.file_resolver.extend_exclude) {
debug!("Ignored path via `extend-exclude`: {:?}", path);
return true;
}
@@ -550,27 +533,11 @@ fn is_file_excluded(
pub fn match_exclusion<P: AsRef<Path>, R: AsRef<Path>>(
file_path: P,
file_basename: R,
exclusion: &GlobSet,
exclusion: &globset::GlobSet,
) -> bool {
if exclusion.is_empty() {
return false;
}
exclusion.is_match(file_path) || exclusion.is_match(file_basename)
}
/// Return `true` if the given candidates should be ignored based on the exclusion
/// criteria.
pub fn match_candidate_exclusion(
file_path: &Candidate,
file_basename: &Candidate,
exclusion: &GlobSet,
) -> bool {
if exclusion.is_empty() {
return false;
}
exclusion.is_match_candidate(file_path) || exclusion.is_match_candidate(file_basename)
}
#[cfg(test)]
mod tests {
use std::fs::{create_dir, File};

View File

@@ -67,7 +67,7 @@ quote-style = "double"
indent-style = "space"
# Like Black, respect magic trailing commas.
skip-magic-trailing-comma = false
magic-trailing-comma = "respect"
# Like Black, automatically detect the appropriate line ending.
line-ending = "auto"
@@ -78,7 +78,7 @@ As an example, the following would configure Ruff to:
```toml
[tool.ruff.lint]
# 1. Enable flake8-bugbear (`B`) rules, in addition to the defaults.
select = ["E4", "E7", "E9", "F", "B"]
select = ["E", "F", "B"]
# 2. Avoid enforcing line-length violations (`E501`)
ignore = ["E501"]
@@ -101,7 +101,7 @@ Linter plugin configurations are expressed as subsections, e.g.:
```toml
[tool.ruff.lint]
# Add "Q" to the list of enabled codes.
select = ["E4", "E7", "E9", "F", "Q"]
select = ["E", "F", "Q"]
[tool.ruff.lint.flake8-quotes]
docstring-quotes = "double"
@@ -121,7 +121,7 @@ For example, the `pyproject.toml` described above would be represented via the f
```toml
[lint]
# Enable flake8-bugbear (`B`) rules.
select = ["E4", "E7", "E9", "F", "B"]
select = ["E", "F", "B"]
# Never enforce `E501` (line length violations).
ignore = ["E501"]

View File

@@ -164,10 +164,10 @@ elif False: # fmt: skip
pass
@Test
@Test2 # fmt: skip
@Test2 # fmt: off
def test(): ...
a = [1, 2, 3, 4, 5] # fmt: skip
a = [1, 2, 3, 4, 5] # fmt: off
def test(a, b, c, d, e, f) -> int: # fmt: skip
pass
@@ -180,10 +180,9 @@ comments, which are treated equivalently to `# fmt: off` and `# fmt: on`, respec
Ruff's formatter is designed to be used alongside the linter. However, the linter includes
some rules that, when enabled, can cause conflicts with the formatter, leading to unexpected
behavior. When configured appropriately, the goal of Ruff's formatter-linter compatibility is
such that running the formatter should never introduce new lint errors.
behavior.
As such, when using Ruff as a formatter, we recommend avoiding the following lint rules:
When using Ruff as a formatter, we recommend disabling the following rules:
- [`tab-indentation`](rules/tab-indentation.md) (`W191`)
- [`indentation-with-invalid-multiple`](rules/indentation-with-invalid-multiple.md) (`E111`)
@@ -200,11 +199,7 @@ As such, when using Ruff as a formatter, we recommend avoiding the following lin
- [`single-line-implicit-string-concatenation`](rules/single-line-implicit-string-concatenation.md) (`ISC001`)
- [`multi-line-implicit-string-concatenation`](rules/multi-line-implicit-string-concatenation.md) (`ISC002`)
None of the above are included in Ruff's default configuration. However, if you've enabled
any of these rules or their parent categories (like `Q`), we recommend disabling them via the
linter's [`ignore`](settings.md#ignore) setting.
Similarly, we recommend avoiding the following isort settings, which are incompatible with the
Similarly, we recommend disabling the following isort settings, which are incompatible with the
formatter's treatment of import statements when set to non-default values:
- [`force-single-line`](settings.md#isort-force-single-line)
@@ -213,12 +208,6 @@ formatter's treatment of import statements when set to non-default values:
- [`lines-between-types`](settings.md#isort-lines-between-types)
- [`split-on-trailing-comma`](settings.md#isort-split-on-trailing-comma)
If you've configured any of these settings to take on non-default values, we recommend removing
them from your Ruff configuration.
When an incompatible lint rule or setting is enabled, `ruff format` will emit a warning. If your
`ruff format` is free of warnings, you're good to go!
## Exit codes
`ruff format` exits with the following status codes:

View File

@@ -399,6 +399,28 @@ def update_emission_strength():
value = self.emission_strength * 2
```
### Type annotations may be parenthesized when expanded
Black will avoid parenthesizing type annotations in an annotated assignment, while Ruff will insert
parentheses in some cases.
For example:
```python
# Black
StartElementHandler: Callable[[str, dict[str, str]], Any] | Callable[[str, list[str]], Any] | Callable[
[str, dict[str, str], list[str]], Any
] | None
# Ruff
StartElementHandler: (
Callable[[str, dict[str, str]], Any]
| Callable[[str, list[str]], Any]
| Callable[[str, dict[str, str], list[str]], Any]
| None
)
```
### Call chain calls break differently
Black occasionally breaks call chains differently than Ruff; in particular, Black occasionally

Some files were not shown because too many files have changed in this diff Show More