Compare commits

13 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 4b41ae3f53 |  |
|  | f944e1e1cf |  |
|  | 4fbc1082de |  |
|  | cf2e887e38 |  |
|  | ee994e8c07 |  |
|  | c69c4fd655 |  |
|  | e01e45ca35 |  |
|  | 4be74785fe |  |
|  | 40b7c64f7d |  |
|  | a76c5d1226 |  |
|  | 5aeddeb825 |  |
|  | 5f8294aea4 |  |
|  | e07d3f6313 |  |
.pre-commit-config.yaml

@@ -1,6 +1,6 @@
 repos:
 - repo: https://github.com/charliermarsh/ruff-pre-commit
-  rev: v0.0.159
+  rev: v0.0.161
   hooks:
     - id: ruff
Cargo.lock (generated, 6 changes)

@@ -724,7 +724,7 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
 
 [[package]]
 name = "flake8-to-ruff"
-version = "0.0.159-dev.0"
+version = "0.0.161-dev.0"
 dependencies = [
  "anyhow",
  "clap 4.0.29",

@@ -1821,7 +1821,7 @@ dependencies = [
 
 [[package]]
 name = "ruff"
-version = "0.0.159"
+version = "0.0.161"
 dependencies = [
  "annotate-snippets 0.9.1",
  "anyhow",

@@ -1873,7 +1873,7 @@ dependencies = [
 
 [[package]]
 name = "ruff_dev"
-version = "0.0.159"
+version = "0.0.161"
 dependencies = [
  "anyhow",
  "clap 4.0.29",
Cargo.toml

@@ -6,7 +6,7 @@ members = [
 
 [package]
 name = "ruff"
-version = "0.0.159"
+version = "0.0.161"
 edition = "2021"
 rust-version = "1.65.0"
README.md (36 changes)

@@ -145,7 +145,7 @@ Ruff also works with [pre-commit](https://pre-commit.com):
 
 ```yaml
 repos:
 - repo: https://github.com/charliermarsh/ruff-pre-commit
-  rev: v0.0.159
+  rev: v0.0.161
   hooks:
     - id: ruff
 ```
@@ -350,6 +350,16 @@ error reporting for the entire file.
 For targeted exclusions across entire files (e.g., "Ignore all F841 violations in
 `/path/to/file.py`"), see the [`per-file-ignores`](#per-file-ignores) configuration setting.
 
+### "Action Comments"
+
+Ruff respects `isort`'s ["Action Comments"](https://pycqa.github.io/isort/docs/configuration/action_comments.html)
+(`# isort: skip_file`, `# isort: on`, `# isort: off`, `# isort: skip`, and `# isort: split`), which
+allow import sorting to be enabled or disabled selectively for blocks of code, along with other
+inline configuration.
+
+See the [`isort` documentation](https://pycqa.github.io/isort/docs/configuration/action_comments.html)
+for more.
+
 ### Automating `noqa` Directives
 
 Ruff supports several workflows to aid in `noqa` management.
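As a rough sketch of how the action comments documented above can be recognized (illustrative only, not ruff's internal API; the real logic lives in the directive-extraction pass later in this diff), each comment's text is matched against the known directives, exactly for the block-level directives and by substring for `skip`:

```rust
/// Minimal sketch: classify an `isort` action comment by its text.
/// Exact-match semantics for block directives, substring matching
/// for `skip`, mirroring the behavior described above.
#[derive(Debug, PartialEq)]
enum IsortAction {
    SkipFile,
    On,
    Off,
    Skip,
    Split,
    None,
}

fn classify(comment: &str) -> IsortAction {
    match comment.trim() {
        "# isort: skip_file" => IsortAction::SkipFile,
        "# isort: on" => IsortAction::On,
        "# isort: off" => IsortAction::Off,
        "# isort: split" => IsortAction::Split,
        text if text.contains("isort: skip") || text.contains("isort:skip") => IsortAction::Skip,
        _ => IsortAction::None,
    }
}

fn main() {
    assert_eq!(classify("# isort: off"), IsortAction::Off);
    assert_eq!(classify("import os  # isort: skip"), IsortAction::Skip);
    assert_eq!(classify("# just a comment"), IsortAction::None);
}
```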
@@ -1295,7 +1305,7 @@ paths.
 
 ```toml
 [tool.ruff]
 exclude = [".venv"]
-````
+```
 
 ---
@@ -1313,7 +1323,7 @@ A list of file patterns to omit from linting, in addition to those specified by
 
 [tool.ruff]
 # In addition to the standard set of exclusions, omit all tests, plus a specific file.
 extend-exclude = ["tests", "src/bad.py"]
-````
+```
 
 ---
@@ -1509,6 +1519,26 @@ dummy-variable-rgx = "^_$"
 
 ---
 
+#### [`allowed-confusables`](#allowed-confusables)
+
+A list of allowed "confusable" Unicode characters to ignore when enforcing `RUF001`, `RUF002`,
+and `RUF003`.
+
+**Default value**: `[]`
+
+**Type**: `Vec<char>`
+
+**Example usage**:
+
+```toml
+[tool.ruff]
+# Allow minus-sign (U+2212), greek-small-letter-rho (U+03C1), and asterisk-operator (U+2217),
+# which could be confused for "-", "p", and "*", respectively.
+allowed-confusables = ["−", "ρ", "∗"]
+```
+
+---
+
 #### [`ignore-init-module-imports`](#ignore-init-module-imports)
 
 Avoid automatically removing unused imports in `__init__.py` files. Such imports will still be
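To make the new setting's semantics concrete, here is a minimal, self-contained sketch (assumed names and illustrative table entries; not ruff's generated `CONFUSABLES` data) of how an allow-set short-circuits the confusable check, mirroring the `allowed_confusables.contains(&current_char)` guard added later in this diff:

```rust
use std::collections::HashSet;

/// Sketch of a confusables table: maps an ambiguous character to its
/// ASCII look-alike. These three entries are just for illustration.
fn confusable_for(c: char) -> Option<char> {
    match c {
        '−' => Some('-'), // U+2212 MINUS SIGN
        'ρ' => Some('p'), // U+03C1 GREEK SMALL LETTER RHO
        '∗' => Some('*'), // U+2217 ASTERISK OPERATOR
        _ => None,
    }
}

/// Flag every confusable character in `text` that is not explicitly allowed.
fn flag_confusables(text: &str, allowed: &HashSet<char>) -> Vec<(char, char)> {
    text.chars()
        .filter(|c| !allowed.contains(c))
        .filter_map(|c| confusable_for(c).map(|ascii| (c, ascii)))
        .collect()
}

fn main() {
    let allowed: HashSet<char> = ['−'].into_iter().collect();
    // `ρ` is still flagged; `−` is suppressed by the allow-list.
    assert_eq!(flag_confusables("x = a − ρ", &allowed), vec![('ρ', 'p')]);
}
```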
flake8_to_ruff/Cargo.lock (generated, 4 changes)

@@ -771,7 +771,7 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
 
 [[package]]
 name = "flake8_to_ruff"
-version = "0.0.159"
+version = "0.0.161"
 dependencies = [
  "anyhow",
  "clap",

@@ -1975,7 +1975,7 @@ dependencies = [
 
 [[package]]
 name = "ruff"
-version = "0.0.159"
+version = "0.0.161"
 dependencies = [
  "anyhow",
  "bincode",
flake8_to_ruff/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "flake8-to-ruff"
-version = "0.0.159-dev.0"
+version = "0.0.161-dev.0"
 edition = "2021"
 
 [lib]
@@ -243,6 +243,7 @@ mod tests {
     fn it_converts_empty() -> Result<()> {
         let actual = convert(&HashMap::from([]), None)?;
         let expected = Pyproject::new(Options {
+            allowed_confusables: None,
             dummy_variable_rgx: None,
             exclude: None,
             extend_exclude: None,

@@ -286,6 +287,7 @@ mod tests {
             Some(vec![]),
         )?;
         let expected = Pyproject::new(Options {
+            allowed_confusables: None,
             dummy_variable_rgx: None,
             exclude: None,
             extend_exclude: None,

@@ -329,6 +331,7 @@ mod tests {
             Some(vec![]),
         )?;
         let expected = Pyproject::new(Options {
+            allowed_confusables: None,
             dummy_variable_rgx: None,
             exclude: None,
             extend_exclude: None,

@@ -372,6 +375,7 @@ mod tests {
             Some(vec![]),
         )?;
         let expected = Pyproject::new(Options {
+            allowed_confusables: None,
             dummy_variable_rgx: None,
             exclude: None,
             extend_exclude: None,

@@ -415,6 +419,7 @@ mod tests {
             Some(vec![]),
         )?;
         let expected = Pyproject::new(Options {
+            allowed_confusables: None,
             dummy_variable_rgx: None,
             exclude: None,
             extend_exclude: None,

@@ -466,6 +471,7 @@ mod tests {
             Some(vec![Plugin::Flake8Docstrings]),
         )?;
         let expected = Pyproject::new(Options {
+            allowed_confusables: None,
             dummy_variable_rgx: None,
             exclude: None,
             extend_exclude: None,

@@ -544,6 +550,7 @@ mod tests {
             None,
         )?;
         let expected = Pyproject::new(Options {
+            allowed_confusables: None,
             dummy_variable_rgx: None,
             exclude: None,
             extend_exclude: None,
resources/test/fixtures/isort/insert_empty_lines.py (vendored, new file, 16 lines)

@@ -0,0 +1,16 @@
+import a
+import b
+x = 1
+import os
+import sys
+def f():
+    pass
+if True:
+    x = 1
+import collections
+import typing
+class X: pass
+y = 1
+import os
+import sys
+"""Docstring"""
resources/test/fixtures/isort/preserve_import_star.py (vendored, new file, 6 lines)

@@ -0,0 +1,6 @@
+from some_other_module import some_class
+from some_other_module import *
+# Above
+from some_module import some_class  # Aside
+# Above
+from some_module import *  # Aside
resources/test/fixtures/isort/skip_file.py (vendored, new file, 10 lines)

@@ -0,0 +1,10 @@
+# isort: skip_file
+import e
+import f
+
+# isort: split
+
+import a
+import b
+import c
+import d
resources/test/fixtures/isort/split.py (vendored, new file, 9 lines)

@@ -0,0 +1,9 @@
+import e
+import f
+
+# isort: split
+
+import a
+import b
+import c
+import d
resources/test/fixtures/pyproject.toml (vendored, 10 changes)

@@ -1,4 +1,5 @@
 [tool.ruff]
+allowed-confusables = ["−", "ρ", "∗"]
 line-length = 88
 extend-exclude = [
     "excluded_file.py",

@@ -35,13 +36,8 @@ ignore-names = [
     "longMessage",
     "maxDiff",
 ]
-classmethod-decorators = [
-    "classmethod",
-    "pydantic.validator",
-]
-staticmethod-decorators = [
-    "staticmethod",
-]
+classmethod-decorators = ["classmethod", "pydantic.validator"]
+staticmethod-decorators = ["staticmethod"]
 
 [tool.ruff.flake8-tidy-imports]
 ban-relative-imports = "parents"
resources/test/fixtures/ruff/RUF002.py (vendored, deleted, 7 lines)

@@ -1,7 +0,0 @@
-x = "𝐁ad string"
-
-
-def f():
-    """Here's a docstring with an unusual parenthesis: )"""
-    # And here's a comment with an unusual punctuation mark: ᜵
-    ...
resources/test/fixtures/ruff/RUF003.py (vendored, deleted, 7 lines)

@@ -1,7 +0,0 @@
-x = "𝐁ad string"
-
-
-def f():
-    """Here's a docstring with an unusual parenthesis: )"""
-    # And here's a comment with an unusual punctuation mark: ᜵
-    ...
@@ -1,7 +1,14 @@
 x = "𝐁ad string"
+y = "−"
 
 
 def f():
     """Here's a docstring with an unusual parenthesis: )"""
     # And here's a comment with an unusual punctuation mark: ᜵
     ...
+
+
+def g():
+    """Here's a docstring with a greek rho: ρ"""
+    # And here's a comment with a greek alpha: ∗
+    ...
ruff_dev/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "ruff_dev"
-version = "0.0.159"
+version = "0.0.161"
 edition = "2021"
 
 [dependencies]
src/ast/helpers.rs

@@ -301,6 +301,16 @@ pub fn match_trailing_content(stmt: &Stmt, locator: &SourceCodeLocator) -> bool
     false
 }
 
+/// Return the number of trailing empty lines following a statement.
+pub fn count_trailing_lines(stmt: &Stmt, locator: &SourceCodeLocator) -> usize {
+    let suffix =
+        locator.slice_source_code_at(&Location::new(stmt.end_location.unwrap().row() + 1, 0));
+    suffix
+        .lines()
+        .take_while(|line| line.trim().is_empty())
+        .count()
+}
+
 #[cfg(test)]
 mod tests {
     use anyhow::Result;
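The new helper reduces to a plain string scan over the text that follows a statement. The same counting logic, stripped of ruff's `Stmt` and locator types (a standalone sketch):

```rust
/// Count how many consecutive blank (whitespace-only) lines appear at
/// the start of `suffix`, the source text that follows a statement.
fn count_leading_blank_lines(suffix: &str) -> usize {
    suffix
        .lines()
        .take_while(|line| line.trim().is_empty())
        .count()
}

fn main() {
    // Three blank lines separate the block from the function definition;
    // `str::lines` yields "" for each empty line before `def f():`.
    let suffix = "\n\n\ndef f():\n    pass\n";
    assert_eq!(count_leading_blank_lines(suffix), 3);
}
```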
@@ -118,9 +118,9 @@ pub type LocatedCmpop<U = ()> = Located<Cmpop, U>;
 /// Extract all `Cmpop` operators from a source code snippet, with appropriate
 /// ranges.
 ///
-/// RustPython doesn't include line and column information on `Cmpop` nodes.
-/// (CPython doesn't either.) This method iterates over the token stream and
-/// re-identifies `Cmpop` nodes, annotating them with valid arnges.
+/// `RustPython` doesn't include line and column information on `Cmpop` nodes.
+/// `CPython` doesn't either. This method iterates over the token stream and
+/// re-identifies `Cmpop` nodes, annotating them with valid ranges.
 pub fn locate_cmpops(contents: &str) -> Vec<LocatedCmpop> {
     let mut tok_iter = lexer::make_tokenizer(contents)
         .flatten()
@@ -92,7 +92,7 @@ fn apply_fixes<'a>(
     }
 
     // Add the remaining content.
-    let slice = locator.slice_source_code_at(last_pos);
+    let slice = locator.slice_source_code_at(&last_pos);
     output.append(&slice);
 
     (Cow::from(output.finish()), num_fixed)
@@ -1,10 +1,10 @@
 //! Lint rules based on import analysis.
 
-use nohash_hasher::IntSet;
 use rustpython_parser::ast::Suite;
 
 use crate::ast::visitor::Visitor;
 use crate::checks::Check;
+use crate::directives::IsortDirectives;
 use crate::isort;
 use crate::isort::track::ImportTracker;
 use crate::settings::Settings;

@@ -18,7 +18,7 @@ fn check_import_blocks(
 ) -> Vec<Check> {
     let mut checks = vec![];
     for block in tracker.into_iter() {
-        if !block.is_empty() {
+        if !block.imports.is_empty() {
             if let Some(check) = isort::plugins::check_imports(&block, locator, settings, autofix) {
                 checks.push(check);
             }

@@ -30,11 +30,11 @@ fn check_import_blocks(
 pub fn check_imports(
     python_ast: &Suite,
     locator: &SourceCodeLocator,
-    exclusions: &IntSet<usize>,
+    directives: &IsortDirectives,
     settings: &Settings,
     autofix: bool,
 ) -> Vec<Check> {
-    let mut tracker = ImportTracker::new(exclusions);
+    let mut tracker = ImportTracker::new(directives);
     for stmt in python_ast {
         tracker.visit_stmt(stmt);
     }
src/directives.rs

@@ -30,9 +30,15 @@ impl Flags {
     }
 }
 
+#[derive(Default)]
+pub struct IsortDirectives {
+    pub exclusions: IntSet<usize>,
+    pub splits: Vec<usize>,
+}
+
 pub struct Directives {
     pub noqa_line_for: IntMap<usize, usize>,
-    pub isort_exclusions: IntSet<usize>,
+    pub isort: IsortDirectives,
 }
 
 pub fn extract_directives(

@@ -46,10 +52,10 @@ pub fn extract_directives(
         } else {
             IntMap::default()
         },
-        isort_exclusions: if flags.contains(Flags::ISORT) {
-            extract_isort_exclusions(lxr, locator)
+        isort: if flags.contains(Flags::ISORT) {
+            extract_isort_directives(lxr, locator)
         } else {
-            IntSet::default()
+            IsortDirectives::default()
        },
    }
 }

@@ -73,17 +79,32 @@ pub fn extract_noqa_line_for(lxr: &[LexResult]) -> IntMap<usize, usize> {
 }
 
 /// Extract a set of lines over which to disable isort.
-pub fn extract_isort_exclusions(lxr: &[LexResult], locator: &SourceCodeLocator) -> IntSet<usize> {
+pub fn extract_isort_directives(lxr: &[LexResult], locator: &SourceCodeLocator) -> IsortDirectives {
     let mut exclusions: IntSet<usize> = IntSet::default();
+    let mut splits: Vec<usize> = Vec::default();
+    let mut skip_file: bool = false;
     let mut off: Option<Location> = None;
+    let mut last: Option<Location> = None;
     for &(start, ref tok, end) in lxr.iter().flatten() {
-        // TODO(charlie): Modify RustPython to include the comment text in the token.
+        last = Some(end);
+
+        // No need to keep processing, but we do need to determine the last token.
+        if skip_file {
+            continue;
+        }
+
         if matches!(tok, Tok::Comment) {
+            // TODO(charlie): Modify RustPython to include the comment text in the token.
             let comment_text = locator.slice_source_code_range(&Range {
                 location: start,
                 end_location: end,
             });
-            if off.is_some() {
+            if comment_text == "# isort: split" {
+                splits.push(start.row());
+            } else if comment_text == "# isort: skip_file" {
+                skip_file = true;
+            } else if off.is_some() {
                 if comment_text == "# isort: on" {
                     if let Some(start) = off {
                         for row in start.row() + 1..=end.row() {

@@ -93,43 +114,50 @@ pub fn extract_isort_exclusions(lxr: &[LexResult], locator: &SourceCodeLocator)
                     off = None;
                 }
             } else {
-                if comment_text.contains("isort: skip") || comment_text.contains("isort:skip") {
+                if comment_text.contains("isort: skip") {
                     exclusions.insert(start.row());
                 } else if comment_text == "# isort: off" {
                     off = Some(start);
                 }
             }
-        } else if matches!(tok, Tok::EndOfFile) {
-            if let Some(start) = off {
-                for row in start.row() + 1..=end.row() {
-                    exclusions.insert(row);
-                }
-            }
-            break;
         }
     }
-    exclusions
+    if skip_file {
+        // Enforce `isort: skip_file`.
+        if let Some(end) = last {
+            for row in 1..=end.row() {
+                exclusions.insert(row);
+            }
+        }
+    } else if let Some(start) = off {
+        // Enforce unterminated `isort: off`.
+        if let Some(end) = last {
+            for row in start.row() + 1..=end.row() {
+                exclusions.insert(row);
+            }
+        }
+    }
+    IsortDirectives { exclusions, splits }
 }
 
 #[cfg(test)]
 mod tests {
-    use nohash_hasher::IntMap;
+    use nohash_hasher::{IntMap, IntSet};
     use rustpython_parser::lexer;
     use rustpython_parser::lexer::LexResult;
 
-    use crate::directives::extract_noqa_line_for;
+    use crate::directives::{extract_isort_directives, extract_noqa_line_for};
+    use crate::SourceCodeLocator;
 
     #[test]
-    fn extraction() {
-        let empty: IntMap<usize, usize> = IntMap::default();
-
+    fn noqa_extraction() {
         let lxr: Vec<LexResult> = lexer::make_tokenizer(
             "x = 1
 y = 2
 z = x + 1",
         )
         .collect();
-        assert_eq!(extract_noqa_line_for(&lxr), empty);
+        assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
 
         let lxr: Vec<LexResult> = lexer::make_tokenizer(
             "
@@ -138,7 +166,7 @@ y = 2
 z = x + 1",
         )
         .collect();
-        assert_eq!(extract_noqa_line_for(&lxr), empty);
+        assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
 
         let lxr: Vec<LexResult> = lexer::make_tokenizer(
             "x = 1

@@ -147,7 +175,7 @@ z = x + 1
 ",
         )
         .collect();
-        assert_eq!(extract_noqa_line_for(&lxr), empty);
+        assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
 
         let lxr: Vec<LexResult> = lexer::make_tokenizer(
             "x = 1

@@ -157,7 +185,7 @@ z = x + 1
 ",
         )
         .collect();
-        assert_eq!(extract_noqa_line_for(&lxr), empty);
+        assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
 
         let lxr: Vec<LexResult> = lexer::make_tokenizer(
             "x = '''abc
@@ -200,4 +228,106 @@ z = x + 1",
             IntMap::from_iter([(2, 5), (3, 5), (4, 5)])
         );
     }
+
+    #[test]
+    fn isort_exclusions() {
+        let contents = "x = 1
+y = 2
+z = x + 1";
+        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
+        let locator = SourceCodeLocator::new(contents);
+        assert_eq!(
+            extract_isort_directives(&lxr, &locator).exclusions,
+            IntSet::default()
+        );
+
+        let contents = "# isort: off
+x = 1
+y = 2
+# isort: on
+z = x + 1";
+        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
+        let locator = SourceCodeLocator::new(contents);
+        assert_eq!(
+            extract_isort_directives(&lxr, &locator).exclusions,
+            IntSet::from_iter([2, 3, 4])
+        );
+
+        let contents = "# isort: off
+x = 1
+# isort: off
+y = 2
+# isort: on
+z = x + 1
+# isort: on";
+        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
+        let locator = SourceCodeLocator::new(contents);
+        assert_eq!(
+            extract_isort_directives(&lxr, &locator).exclusions,
+            IntSet::from_iter([2, 3, 4, 5])
+        );
+
+        let contents = "# isort: off
+x = 1
+y = 2
+z = x + 1";
+        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
+        let locator = SourceCodeLocator::new(contents);
+        assert_eq!(
+            extract_isort_directives(&lxr, &locator).exclusions,
+            IntSet::from_iter([2, 3, 4])
+        );
+
+        let contents = "# isort: skip_file
+x = 1
+y = 2
+z = x + 1";
+        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
+        let locator = SourceCodeLocator::new(contents);
+        assert_eq!(
+            extract_isort_directives(&lxr, &locator).exclusions,
+            IntSet::from_iter([1, 2, 3, 4])
+        );
+
+        let contents = "# isort: off
+x = 1
+# isort: on
+y = 2
+# isort: skip_file
+z = x + 1";
+        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
+        let locator = SourceCodeLocator::new(contents);
+        assert_eq!(
+            extract_isort_directives(&lxr, &locator).exclusions,
+            IntSet::from_iter([1, 2, 3, 4, 5, 6])
+        );
+    }
+
+    #[test]
+    fn isort_splits() {
+        let contents = "x = 1
+y = 2
+z = x + 1";
+        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
+        let locator = SourceCodeLocator::new(contents);
+        assert_eq!(
+            extract_isort_directives(&lxr, &locator).splits,
+            Vec::<usize>::new()
+        );
+
+        let contents = "x = 1
+y = 2
+# isort: split
+z = x + 1";
+        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
+        let locator = SourceCodeLocator::new(contents);
+        assert_eq!(extract_isort_directives(&lxr, &locator).splits, vec![3]);
+
+        let contents = "x = 1
+y = 2  # isort: split
+z = x + 1";
+        let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
+        let locator = SourceCodeLocator::new(contents);
+        assert_eq!(extract_isort_directives(&lxr, &locator).splits, vec![2]);
+    }
 }
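The extraction above is a small state machine over comment tokens: `# isort: off` records a start row, `# isort: on` flushes the rows in between into the exclusion set, and an unterminated `off` (or a `skip_file`) is flushed against the last token's row after the loop. A self-contained sketch of the same bookkeeping over plain numbered lines (assumed simplifications: one directive per line, no tokenizer):

```rust
use std::collections::BTreeSet;

/// Compute the set of 1-indexed rows excluded from import sorting,
/// given each line's text. Mirrors the off/on bookkeeping above.
fn excluded_rows(lines: &[&str]) -> BTreeSet<usize> {
    let mut exclusions = BTreeSet::new();
    let mut off: Option<usize> = None;
    let last = lines.len();
    for (i, line) in lines.iter().enumerate() {
        let row = i + 1;
        match line.trim() {
            // The whole file is excluded; nothing else matters.
            "# isort: skip_file" => return (1..=last).collect(),
            "# isort: off" if off.is_none() => off = Some(row),
            "# isort: on" => {
                if let Some(start) = off.take() {
                    exclusions.extend(start + 1..=row);
                }
            }
            _ => {}
        }
    }
    if let Some(start) = off {
        // Unterminated `# isort: off` runs to the end of the file.
        exclusions.extend(start + 1..=last);
    }
    exclusions
}

fn main() {
    let lines = ["# isort: off", "import b", "import a", "# isort: on", "import c"];
    // Rows 2-4 are excluded, matching the `off`/`on` test above.
    assert_eq!(excluded_rows(&lines), BTreeSet::from([2, 3, 4]));
}
```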
src/isort/mod.rs

@@ -10,6 +10,7 @@ use rustpython_ast::{Stmt, StmtKind};
 use crate::isort::categorize::{categorize, ImportType};
 use crate::isort::comments::Comment;
 use crate::isort::sorting::{member_key, module_key};
+use crate::isort::track::{Block, Trailer};
 use crate::isort::types::{
     AliasData, CommentSet, ImportBlock, ImportFromData, Importable, OrderedImportBlock,
 };

@@ -191,7 +192,18 @@ fn normalize_imports(imports: Vec<AnnotatedImport>, combine_as_imports: bool) ->
         } => {
             // Associate the comments with the first alias (best effort).
             if let Some(alias) = names.first() {
-                if alias.asname.is_none() || combine_as_imports {
+                if alias.name == "*" {
+                    let entry = block
+                        .import_from_star
+                        .entry(ImportFromData { module, level })
+                        .or_default();
+                    for comment in atop {
+                        entry.atop.push(comment.value);
+                    }
+                    for comment in inline {
+                        entry.inline.push(comment.value);
+                    }
+                } else if alias.asname.is_none() || combine_as_imports {
                     let entry = &mut block
                         .import_from
                         .entry(ImportFromData { module, level })

@@ -225,7 +237,18 @@ fn normalize_imports(imports: Vec<AnnotatedImport>, combine_as_imports: bool) ->
 
             // Create an entry for every alias.
             for alias in names {
-                if alias.asname.is_none() || combine_as_imports {
+                if alias.name == "*" {
+                    let entry = block
+                        .import_from_star
+                        .entry(ImportFromData { module, level })
+                        .or_default();
+                    for comment in alias.atop {
+                        entry.atop.push(comment.value);
+                    }
+                    for comment in alias.inline {
+                        entry.inline.push(comment.value);
+                    }
+                } else if alias.asname.is_none() || combine_as_imports {
                     let entry = block
                         .import_from
                         .entry(ImportFromData { module, level })

@@ -323,6 +346,22 @@ fn categorize_imports<'a>(
             .import_from_as
             .insert((import_from, alias), comments);
     }
+    // Categorize `StmtKind::ImportFrom` (with star).
+    for (import_from, comments) in block.import_from_star {
+        let classification = categorize(
+            &import_from.module_base(),
+            import_from.level,
+            src,
+            known_first_party,
+            known_third_party,
+            extra_standard_library,
+        );
+        block_by_type
+            .entry(classification)
+            .or_default()
+            .import_from_star
+            .insert(import_from, comments);
+    }
     block_by_type
 }

@@ -367,6 +406,33 @@ fn sort_imports(block: ImportBlock) -> OrderedImportBlock {
                     )
                 }),
         )
+        .chain(
+            // Include all star imports.
+            block
+                .import_from_star
+                .into_iter()
+                .map(|(import_from, comments)| {
+                    (
+                        import_from,
+                        (
+                            CommentSet {
+                                atop: comments.atop,
+                                inline: vec![],
+                            },
+                            FxHashMap::from_iter([(
+                                AliasData {
+                                    name: "*",
+                                    asname: None,
+                                },
+                                CommentSet {
+                                    atop: vec![],
+                                    inline: comments.inline,
+                                },
+                            )]),
+                        ),
+                    )
+                }),
+        )
         .map(|(import_from, (comments, aliases))| {
             // Within each `StmtKind::ImportFrom`, sort the members.
             (

@@ -399,7 +465,7 @@ fn sort_imports(block: ImportBlock) -> OrderedImportBlock {
 
 #[allow(clippy::too_many_arguments)]
 pub fn format_imports(
-    block: &[&Stmt],
+    block: &Block,
     comments: Vec<Comment>,
     line_length: usize,
     src: &[PathBuf],

@@ -409,7 +475,8 @@ pub fn format_imports(
     combine_as_imports: bool,
     force_wrap_aliases: bool,
 ) -> String {
-    let block = annotate_imports(block, comments);
+    let trailer = &block.trailer;
+    let block = annotate_imports(&block.imports, comments);
 
     // Normalize imports (i.e., deduplicate, aggregate `from` imports).
     let block = normalize_imports(block, combine_as_imports);

@@ -458,6 +525,16 @@ pub fn format_imports(
             is_first_statement = false;
         }
     }
+    match trailer {
+        None => {}
+        Some(Trailer::Sibling) => {
+            output.append("\n");
+        }
+        Some(Trailer::FunctionDef | Trailer::ClassDef) => {
+            output.append("\n");
+            output.append("\n");
+        }
+    }
     output.finish().to_string()
 }
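The new `Trailer` match encodes a PEP 8 detail: a sorted import block followed by an ordinary sibling statement gets one trailing blank line, while a block followed by a function or class definition gets two. The rule in isolation (a hypothetical helper, for illustration only):

```rust
#[derive(Clone, Copy)]
enum Trailer {
    Sibling,
    ClassDef,
    FunctionDef,
}

/// Number of blank lines to emit after a sorted import block, given
/// what follows it (PEP 8: two blank lines before a top-level
/// function or class definition, one otherwise).
fn trailing_blank_lines(trailer: Option<Trailer>) -> usize {
    match trailer {
        None => 0,
        Some(Trailer::Sibling) => 1,
        Some(Trailer::FunctionDef | Trailer::ClassDef) => 2,
    }
}

fn main() {
    assert_eq!(trailing_blank_lines(None), 0);
    assert_eq!(trailing_blank_lines(Some(Trailer::Sibling)), 1);
    assert_eq!(trailing_blank_lines(Some(Trailer::FunctionDef)), 2);
    assert_eq!(trailing_blank_lines(Some(Trailer::ClassDef)), 2);
}
```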
@@ -481,11 +558,13 @@ mod tests {
     #[test_case(Path::new("fit_line_length_comment.py"))]
     #[test_case(Path::new("force_wrap_aliases.py"))]
     #[test_case(Path::new("import_from_after_import.py"))]
+    #[test_case(Path::new("insert_empty_lines.py"))]
     #[test_case(Path::new("leading_prefix.py"))]
     #[test_case(Path::new("no_reorder_within_section.py"))]
     #[test_case(Path::new("order_by_type.py"))]
     #[test_case(Path::new("order_relative_imports_by_level.py"))]
     #[test_case(Path::new("preserve_comment_order.py"))]
+    #[test_case(Path::new("preserve_import_star.py"))]
     #[test_case(Path::new("preserve_indentation.py"))]
     #[test_case(Path::new("reorder_within_section.py"))]
     #[test_case(Path::new("separate_first_party_imports.py"))]

@@ -493,7 +572,9 @@ mod tests {
     #[test_case(Path::new("separate_local_folder_imports.py"))]
     #[test_case(Path::new("separate_third_party_imports.py"))]
     #[test_case(Path::new("skip.py"))]
+    #[test_case(Path::new("skip_file.py"))]
     #[test_case(Path::new("sort_similar_imports.py"))]
+    #[test_case(Path::new("split.py"))]
     #[test_case(Path::new("trailing_suffix.py"))]
     #[test_case(Path::new("type_comments.py"))]
     fn default(path: &Path) -> Result<()> {
src/isort/plugins.rs

@@ -1,11 +1,12 @@
 use rustpython_ast::{Location, Stmt};
 use textwrap::{dedent, indent};
 
-use crate::ast::helpers::{match_leading_content, match_trailing_content};
+use crate::ast::helpers::{count_trailing_lines, match_leading_content, match_trailing_content};
 use crate::ast::types::Range;
 use crate::ast::whitespace::leading_space;
 use crate::autofix::Fix;
 use crate::checks::CheckKind;
+use crate::isort::track::Block;
 use crate::isort::{comments, format_imports};
 use crate::{Check, Settings, SourceCodeLocator};

@@ -30,13 +31,13 @@ fn extract_indentation(body: &[&Stmt], locator: &SourceCodeLocator) -> String {
 
 /// I001
 pub fn check_imports(
-    body: &[&Stmt],
+    block: &Block,
     locator: &SourceCodeLocator,
     settings: &Settings,
     autofix: bool,
 ) -> Option<Check> {
-    let range = extract_range(body);
-    let indentation = extract_indentation(body, locator);
+    let range = extract_range(&block.imports);
+    let indentation = extract_indentation(&block.imports, locator);
 
     // Extract comments. Take care to grab any inline comments from the last line.
     let comments = comments::collect_comments(

@@ -48,12 +49,13 @@ pub fn check_imports(
     );
 
     // Special-cases: there's leading or trailing content in the import block.
-    let has_leading_content = match_leading_content(body.first().unwrap(), locator);
-    let has_trailing_content = match_trailing_content(body.last().unwrap(), locator);
+    let has_leading_content = match_leading_content(block.imports.first().unwrap(), locator);
+    let has_trailing_content = match_trailing_content(block.imports.last().unwrap(), locator);
+    let num_trailing_lines = count_trailing_lines(block.imports.last().unwrap(), locator);
 
     // Generate the sorted import block.
     let expected = format_imports(
-        body,
+        block,
         comments,
         settings.line_length - indentation.len(),
         &settings.src,

@@ -81,7 +83,7 @@ pub fn check_imports(
                 Location::new(range.location.row(), 0)
             },
             // TODO(charlie): Preserve trailing suffixes. Right now, we strip them.
-            Location::new(range.end_location.row() + 1, 0),
+            Location::new(range.end_location.row() + 1 + num_trailing_lines, 0),
         ));
     }
     Some(check)

@@ -89,7 +91,7 @@ pub fn check_imports(
     // Expand the span to the entire range, including leading and trailing space.
     let range = Range {
         location: Location::new(range.location.row(), 0),
-        end_location: Location::new(range.end_location.row() + 1, 0),
+        end_location: Location::new(range.end_location.row() + 1 + num_trailing_lines, 0),
     };
     let actual = dedent(&locator.slice_source_code_range(&range));
     if actual == expected {
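The `+ 1 + num_trailing_lines` arithmetic makes the fix region swallow the blank lines that follow the block, so the regenerated block (which now emits its own trailer newlines) does not duplicate them. A toy sketch of the row arithmetic, assuming 1-indexed rows and an exclusive end row:

```rust
/// A fix that replaces an import block ending on `last_import_row`
/// must also consume the `num_trailing_lines` blank lines after it;
/// the end location is the row *after* the final consumed line.
fn fix_end_row(last_import_row: usize, num_trailing_lines: usize) -> usize {
    last_import_row + 1 + num_trailing_lines
}

fn main() {
    // Imports on rows 1-2, two blank lines, then `def f():` on row 5:
    // the fix region ends exactly where the function definition starts.
    assert_eq!(fix_end_row(2, 2), 5);
}
```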
@@ -0,0 +1,50 @@
+---
+source: src/isort/mod.rs
+expression: checks
+---
+- kind: UnsortedImports
+  location:
+    row: 1
+    column: 0
+  end_location:
+    row: 3
+    column: 0
+  fix:
+    content: "import a\nimport b\n\n"
+    location:
+      row: 1
+      column: 0
+    end_location:
+      row: 3
+      column: 0
+- kind: UnsortedImports
+  location:
+    row: 4
+    column: 0
+  end_location:
+    row: 6
+    column: 0
+  fix:
+    content: "import os\nimport sys\n\n\n"
+    location:
+      row: 4
+      column: 0
+    end_location:
+      row: 6
+      column: 0
+- kind: UnsortedImports
+  location:
+    row: 14
+    column: 0
+  end_location:
+    row: 16
+    column: 0
+  fix:
+    content: "import os\nimport sys\n\n"
+    location:
+      row: 14
+      column: 0
+    end_location:
+      row: 16
+      column: 0
@@ -10,12 +10,12 @@ expression: checks
     row: 2
     column: 9
   fix:
-    content: "\nimport os\nimport sys\n"
+    content: "\nimport os\nimport sys\n\n"
     location:
       row: 1
       column: 7
     end_location:
-      row: 3
+      row: 4
       column: 0
 - kind: UnsortedImports
   location:
@@ -0,0 +1,20 @@
+---
+source: src/isort/mod.rs
+expression: checks
+---
+- kind: UnsortedImports
+  location:
+    row: 1
+    column: 0
+  end_location:
+    row: 7
+    column: 0
+  fix:
+    content: "# Above\nfrom some_module import * # Aside\n\n# Above\nfrom some_module import some_class # Aside\nfrom some_other_module import *\nfrom some_other_module import some_class\n"
+    location:
+      row: 1
+      column: 0
+    end_location:
+      row: 7
+      column: 0
@@ -2,6 +2,21 @@
 source: src/isort/mod.rs
 expression: checks
 ---
+- kind: UnsortedImports
+  location:
+    row: 7
+    column: 0
+  end_location:
+    row: 8
+    column: 0
+  fix:
+    content: "import sys\n\n"
+    location:
+      row: 7
+      column: 0
+    end_location:
+      row: 8
+      column: 0
 - kind: UnsortedImports
   location:
     row: 9
@@ -0,0 +1,6 @@
+---
+source: src/isort/mod.rs
+expression: checks
+---
+[]

src/isort/snapshots/ruff__isort__tests__split.py.snap (new file, 6 lines)

@@ -0,0 +1,6 @@
+---
+source: src/isort/mod.rs
+expression: checks
+---
+[]
@@ -10,12 +10,12 @@ expression: checks
     row: 2
     column: 9
   fix:
-    content: "import os\nimport sys\n"
+    content: "import os\nimport sys\n\n"
     location:
       row: 1
       column: 0
     end_location:
-      row: 3
+      row: 4
       column: 0
 - kind: UnsortedImports
   location:

@@ -25,7 +25,7 @@ expression: checks
     row: 6
     column: 13
   fix:
-    content: "    import os\n    import sys\n"
+    content: "    import os\n    import sys\n\n"
     location:
       row: 5
       column: 0
src/isort/track.rs

@@ -1,4 +1,3 @@
-use nohash_hasher::IntSet;
 use rustpython_ast::{
     Alias, Arg, Arguments, Boolop, Cmpop, Comprehension, Constant, Excepthandler,
     ExcepthandlerKind, Expr, ExprContext, Keyword, MatchCase, Operator, Pattern, Stmt, StmtKind,

@@ -6,34 +5,49 @@ use rustpython_ast::{
 };
 
 use crate::ast::visitor::Visitor;
+use crate::directives::IsortDirectives;
+
+pub enum Trailer {
+    Sibling,
+    ClassDef,
+    FunctionDef,
+}
+
+#[derive(Default)]
+pub struct Block<'a> {
+    pub imports: Vec<&'a Stmt>,
+    pub trailer: Option<Trailer>,
+}
 
-#[derive(Debug)]
 pub struct ImportTracker<'a> {
-    exclusions: &'a IntSet<usize>,
-    blocks: Vec<Vec<&'a Stmt>>,
+    blocks: Vec<Block<'a>>,
+    directives: &'a IsortDirectives,
+    split_index: usize,
 }
 
 impl<'a> ImportTracker<'a> {
-    pub fn new(exclusions: &'a IntSet<usize>) -> Self {
+    pub fn new(directives: &'a IsortDirectives) -> Self {
         Self {
-            exclusions,
-            blocks: vec![vec![]],
+            directives,
+            blocks: vec![Block::default()],
+            split_index: 0,
         }
     }
 
     fn track_import(&mut self, stmt: &'a Stmt) {
         let index = self.blocks.len() - 1;
-        self.blocks[index].push(stmt);
+        self.blocks[index].imports.push(stmt);
     }
 
-    fn finalize(&mut self) {
+    fn finalize(&mut self, trailer: Option<Trailer>) {
         let index = self.blocks.len() - 1;
-        if !self.blocks[index].is_empty() {
-            self.blocks.push(vec![]);
+        if !self.blocks[index].imports.is_empty() {
+            self.blocks[index].trailer = trailer;
+            self.blocks.push(Block::default());
         }
     }
 
-    pub fn into_iter(self) -> impl IntoIterator<Item = Vec<&'a Stmt>> {
+    pub fn into_iter(self) -> impl IntoIterator<Item = Block<'a>> {
         self.blocks.into_iter()
     }
 }

@@ -43,15 +57,37 @@ where
     'b: 'a,
 {
     fn visit_stmt(&mut self, stmt: &'b Stmt) {
+        // Track manual splits.
+        while self.split_index < self.directives.splits.len() {
+            if stmt.location.row() >= self.directives.splits[self.split_index] {
+                self.finalize(Some(match &stmt.node {
+                    StmtKind::FunctionDef { .. } | StmtKind::AsyncFunctionDef { .. } => {
+                        Trailer::FunctionDef
+                    }
+                    StmtKind::ClassDef { .. } => Trailer::ClassDef,
+                    _ => Trailer::Sibling,
+                }));
+                self.split_index += 1;
+            } else {
+                break;
+            }
+        }
+
         // Track imports.
         if matches!(
             stmt.node,
             StmtKind::Import { .. } | StmtKind::ImportFrom { .. }
-        ) && !self.exclusions.contains(&stmt.location.row())
+        ) && !self.directives.exclusions.contains(&stmt.location.row())
         {
            self.track_import(stmt);
         } else {
-            self.finalize();
+            self.finalize(Some(match &stmt.node {
+                StmtKind::FunctionDef { .. } | StmtKind::AsyncFunctionDef { .. } => {
+                    Trailer::FunctionDef
+                }
+                StmtKind::ClassDef { .. } => Trailer::ClassDef,
+                _ => Trailer::Sibling,
+            }));
         }
 
         // Track scope.

@@ -60,75 +96,75 @@ where
             for stmt in body {
                 self.visit_stmt(stmt);
             }
-            self.finalize();
+            self.finalize(None);
         }
         StmtKind::AsyncFunctionDef { body, .. } => {
             for stmt in body {
                 self.visit_stmt(stmt);
             }
-            self.finalize();
+            self.finalize(None);
         }
         StmtKind::ClassDef { body, .. } => {
             for stmt in body {
                 self.visit_stmt(stmt);
             }
-            self.finalize();
+            self.finalize(None);
         }
         StmtKind::For { body, orelse, .. } => {
             for stmt in body {
                 self.visit_stmt(stmt);
             }
-            self.finalize();
+            self.finalize(None);
 
             for stmt in orelse {
                 self.visit_stmt(stmt);
             }
-            self.finalize();
+            self.finalize(None);
         }
         StmtKind::AsyncFor { body, orelse, .. } => {
             for stmt in body {
                 self.visit_stmt(stmt);
             }
-            self.finalize();
+            self.finalize(None);
 
             for stmt in orelse {
                 self.visit_stmt(stmt);
             }
-            self.finalize();
+            self.finalize(None);
         }
         StmtKind::While { body, orelse, .. } => {
             for stmt in body {
                 self.visit_stmt(stmt);
             }
-            self.finalize();
+            self.finalize(None);
 
             for stmt in orelse {
                 self.visit_stmt(stmt);
             }
-            self.finalize();
+            self.finalize(None);
         }
         StmtKind::If { body, orelse, .. } => {
             for stmt in body {
                 self.visit_stmt(stmt);
             }
-            self.finalize();
+            self.finalize(None);
 
             for stmt in orelse {
                 self.visit_stmt(stmt);
             }
-            self.finalize();
+            self.finalize(None);
         }
         StmtKind::With { body, .. } => {
             for stmt in body {
                 self.visit_stmt(stmt);
             }
-            self.finalize();
+            self.finalize(None);
         }
         StmtKind::AsyncWith { body, .. } => {
             for stmt in body {
                 self.visit_stmt(stmt);
             }
-            self.finalize();
+            self.finalize(None);
         }
         StmtKind::Match { cases, .. } => {
             for match_case in cases {

@@ -148,17 +184,17 @@ where
             for stmt in body {
                 self.visit_stmt(stmt);
             }
-            self.finalize();
+            self.finalize(None);
 
             for stmt in orelse {
                 self.visit_stmt(stmt);
             }
-            self.finalize();
+            self.finalize(None);
 
             for stmt in finalbody {
                 self.visit_stmt(stmt);
             }
-            self.finalize();
+            self.finalize(None);
         }
         _ => {}
     }

@@ -187,7 +223,7 @@ where
         for stmt in body {
             self.visit_stmt(stmt);
         }
-        self.finalize();
+        self.finalize(None);
     }
 
     fn visit_arguments(&mut self, _: &'b Arguments) {}

@@ -204,7 +240,7 @@ where
         for stmt in &match_case.body {
             self.visit_stmt(stmt);
         }
-        self.finalize();
+        self.finalize(None);
     }
 
     fn visit_pattern(&mut self, _: &'b Pattern) {}
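Because the tracker visits statements in source order, it can consume `splits` with a monotonically advancing `split_index` cursor instead of a set lookup. A standalone sketch of the resulting partitioning over plain row numbers (assumed simplification: imports are just rows, blocks are row lists):

```rust
use std::collections::HashSet;

/// Split a sequence of import rows into blocks: a new block starts at
/// every `split` row, and excluded rows are dropped entirely.
fn partition(rows: &[usize], exclusions: &HashSet<usize>, splits: &[usize]) -> Vec<Vec<usize>> {
    let mut blocks: Vec<Vec<usize>> = vec![vec![]];
    let mut split_index = 0;
    for &row in rows {
        // Close the current block at each pending split boundary.
        while split_index < splits.len() && row >= splits[split_index] {
            blocks.push(vec![]);
            split_index += 1;
        }
        if !exclusions.contains(&row) {
            blocks.last_mut().unwrap().push(row);
        }
    }
    blocks.retain(|block| !block.is_empty());
    blocks
}

fn main() {
    // Imports on rows 1-2 and 4-5, a `# isort: split` on row 3,
    // and row 4 excluded by `# isort: skip`.
    let exclusions: HashSet<usize> = [4].into_iter().collect();
    assert_eq!(
        partition(&[1, 2, 4, 5], &exclusions, &[3]),
        vec![vec![1, 2], vec![5]]
    );
}
```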
@@ -59,6 +59,9 @@ pub struct ImportBlock<'a> {
     // Set of (module, level, name, asname), used to track re-exported 'from' imports.
     // Ex) `from module import member as member`
     pub import_from_as: FxHashMap<(ImportFromData<'a>, AliasData<'a>), CommentSet<'a>>,
+    // Map from (module, level) to `CommentSet`, used to track star imports.
+    // Ex) `from module import *`
+    pub import_from_star: FxHashMap<ImportFromData<'a>, CommentSet<'a>>,
 }
 
 type AliasDataWithComments<'a> = (AliasData<'a>, CommentSet<'a>);
@@ -89,7 +89,7 @@ pub(crate) fn check_path(
         checks.extend(check_imports(
             &python_ast,
             locator,
-            &directives.isort_exclusions,
+            &directives.isort,
             settings,
             autofix,
         ));

@@ -193,7 +193,7 @@ pub fn add_noqa_to_path(path: &Path, settings: &Settings) -> Result<usize> {
         &locator,
         &Directives {
             noqa_line_for: IntMap::default(),
-            isort_exclusions: directives.isort_exclusions,
+            isort: directives.isort,
         },
         settings,
         false,
@@ -46,14 +46,13 @@ impl<'a> Printer<'a> {
 
     fn pre_text(&self, diagnostics: &Diagnostics) {
         if self.log_level >= &LogLevel::Default {
-            if diagnostics.fixed > 0 {
-                println!(
-                    "Found {} error(s) ({} fixed).",
-                    diagnostics.messages.len(),
-                    diagnostics.fixed,
-                );
-            } else if !diagnostics.messages.is_empty() {
-                println!("Found {} error(s).", diagnostics.messages.len());
+            let fixed = diagnostics.fixed;
+            let remaining = diagnostics.messages.len();
+            let total = fixed + remaining;
+            if fixed > 0 {
+                println!("Found {total} error(s) ({fixed} fixed, {remaining} remaining).");
+            } else if remaining > 0 {
+                println!("Found {remaining} error(s).");
             }
         }
     }
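This rewrite also corrects the summary arithmetic: `diagnostics.messages.len()` counts only the messages that remain after autofixing, so the reported total is now computed explicitly as fixed plus remaining. The same logic as a standalone sketch:

```rust
/// Summarize a lint run. `remaining` is the number of messages left
/// after autofixing; `fixed` is how many violations were repaired.
fn summary(fixed: usize, remaining: usize) -> Option<String> {
    let total = fixed + remaining;
    if fixed > 0 {
        Some(format!(
            "Found {total} error(s) ({fixed} fixed, {remaining} remaining)."
        ))
    } else if remaining > 0 {
        Some(format!("Found {remaining} error(s)."))
    } else {
        None
    }
}

fn main() {
    assert_eq!(
        summary(2, 3).as_deref(),
        Some("Found 5 error(s) (2 fixed, 3 remaining).")
    );
    assert_eq!(summary(0, 0), None);
}
```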
@@ -21,7 +21,7 @@ pub fn remove_class_def_base(
     bases: &[Expr],
     keywords: &[Keyword],
 ) -> Option<Fix> {
-    let contents = locator.slice_source_code_at(stmt_at);
+    let contents = locator.slice_source_code_at(&stmt_at);
 
     // Case 1: `object` is the only base.
     if bases.len() == 1 && keywords.is_empty() {
@@ -1623,41 +1623,45 @@ pub fn ambiguous_unicode_character(
     for current_char in text.chars() {
         // Search for confusing characters.
         if let Some(representant) = CONFUSABLES.get(&(current_char as u32)) {
-            if let Some(representant) = char::from_u32(*representant) {
-                let col = if row_offset == 0 {
-                    start.column() + col_offset
-                } else {
-                    col_offset
-                };
-                let location = Location::new(start.row() + row_offset, col);
-                let end_location = Location::new(location.row(), location.column() + 1);
-                let mut check = Check::new(
-                    match context {
-                        Context::String => {
-                            CheckKind::AmbiguousUnicodeCharacterString(current_char, representant)
-                        }
-                        Context::Docstring => CheckKind::AmbiguousUnicodeCharacterDocstring(
-                            current_char,
-                            representant,
-                        ),
-                        Context::Comment => {
-                            CheckKind::AmbiguousUnicodeCharacterComment(current_char, representant)
-                        }
-                    },
-                    Range {
-                        location,
-                        end_location,
-                    },
-                );
-                if settings.enabled.contains(check.kind.code()) {
-                    if autofix && settings.fixable.contains(check.kind.code()) {
-                        check.amend(Fix::replacement(
-                            representant.to_string(),
-                            location,
-                            end_location,
-                        ));
-                    }
-                    checks.push(check);
-                }
-            }
+            if !settings.allowed_confusables.contains(&current_char) {
+                if let Some(representant) = char::from_u32(*representant) {
+                    let col = if row_offset == 0 {
+                        start.column() + col_offset
+                    } else {
+                        col_offset
+                    };
+                    let location = Location::new(start.row() + row_offset, col);
+                    let end_location = Location::new(location.row(), location.column() + 1);
+                    let mut check = Check::new(
+                        match context {
+                            Context::String => CheckKind::AmbiguousUnicodeCharacterString(
+                                current_char,
+                                representant,
+                            ),
+                            Context::Docstring => CheckKind::AmbiguousUnicodeCharacterDocstring(
+                                current_char,
+                                representant,
+                            ),
+                            Context::Comment => CheckKind::AmbiguousUnicodeCharacterComment(
+                                current_char,
+                                representant,
+                            ),
+                        },
+                        Range {
+                            location,
+                            end_location,
+                        },
+                    );
+                    if settings.enabled.contains(check.kind.code()) {
+                        if autofix && settings.fixable.contains(check.kind.code()) {
+                            check.amend(Fix::replacement(
+                                representant.to_string(),
+                                location,
+                                end_location,
+                            ));
+                        }
+                        checks.push(check);
+                    }
+                }
+            }
         }
     }
src/rules/mod.rs

@@ -4,30 +4,31 @@ pub mod checks;
 
 #[cfg(test)]
 mod tests {
-    use std::convert::AsRef;
     use std::path::Path;
 
     use anyhow::Result;
-    use test_case::test_case;
+    use rustc_hash::FxHashSet;
 
     use crate::checks::CheckCode;
     use crate::linter::test_path;
     use crate::settings;
 
-    #[test_case(CheckCode::RUF001, Path::new("RUF001.py"); "RUF001")]
-    #[test_case(CheckCode::RUF002, Path::new("RUF002.py"); "RUF002")]
-    #[test_case(CheckCode::RUF003, Path::new("RUF003.py"); "RUF003")]
-    fn checks(check_code: CheckCode, path: &Path) -> Result<()> {
-        let snapshot = format!("{}_{}", check_code.as_ref(), path.to_string_lossy());
+    #[test]
+    fn confusables() -> Result<()> {
         let mut checks = test_path(
-            Path::new("./resources/test/fixtures/ruff")
-                .join(path)
-                .as_path(),
-            &settings::Settings::for_rule(check_code),
+            Path::new("./resources/test/fixtures/ruff/confusables.py"),
+            &settings::Settings {
+                allowed_confusables: FxHashSet::from_iter(['−', 'ρ', '∗']),
+                ..settings::Settings::for_rules(vec![
+                    CheckCode::RUF001,
+                    CheckCode::RUF002,
+                    CheckCode::RUF003,
+                ])
+            },
             true,
         )?;
         checks.sort_by_key(|check| check.location);
-        insta::assert_yaml_snapshot!(snapshot, checks);
+        insta::assert_yaml_snapshot!(checks);
         Ok(())
     }
@@ -1,23 +0,0 @@
----
-source: src/rules/mod.rs
-expression: checks
----
-- kind:
-    AmbiguousUnicodeCharacterString:
-      - 𝐁
-      - B
-  location:
-    row: 1
-    column: 5
-  end_location:
-    row: 1
-    column: 6
-  fix:
-    content: B
-    location:
-      row: 1
-      column: 5
-    end_location:
-      row: 1
-      column: 6
@@ -1,23 +0,0 @@
----
-source: src/rules/mod.rs
-expression: checks
----
-- kind:
-    AmbiguousUnicodeCharacterDocstring:
-      - )
-      - )
-  location:
-    row: 5
-    column: 55
-  end_location:
-    row: 5
-    column: 56
-  fix:
-    content: )
-    location:
-      row: 5
-      column: 55
-    end_location:
-      row: 5
-      column: 56
@@ -1,23 +0,0 @@
----
-source: src/rules/mod.rs
-expression: checks
----
-- kind:
-    AmbiguousUnicodeCharacterComment:
-      - ᜵
-      - /
-  location:
-    row: 6
-    column: 61
-  end_location:
-    row: 6
-    column: 62
-  fix:
-    content: /
-    location:
-      row: 6
-      column: 61
-    end_location:
-      row: 6
-      column: 62
src/rules/snapshots/ruff__rules__tests__confusables.snap (new file, 59 lines)

@@ -0,0 +1,59 @@
+---
+source: src/rules/mod.rs
+expression: checks
+---
+- kind:
+    AmbiguousUnicodeCharacterString:
+      - 𝐁
+      - B
+  location:
+    row: 1
+    column: 5
+  end_location:
+    row: 1
+    column: 6
+  fix:
+    content: B
+    location:
+      row: 1
+      column: 5
+    end_location:
+      row: 1
+      column: 6
+- kind:
+    AmbiguousUnicodeCharacterDocstring:
+      - )
+      - )
+  location:
+    row: 6
+    column: 55
+  end_location:
+    row: 6
+    column: 56
+  fix:
+    content: )
+    location:
+      row: 6
+      column: 55
+    end_location:
+      row: 6
+      column: 56
+- kind:
+    AmbiguousUnicodeCharacterComment:
+      - ᜵
+      - /
+  location:
+    row: 7
+    column: 61
+  end_location:
+    row: 7
+    column: 62
+  fix:
+    content: /
+    location:
+      row: 7
+      column: 61
+    end_location:
+      row: 7
+      column: 62
@@ -8,6 +8,7 @@ use anyhow::{anyhow, Result};
 use once_cell::sync::Lazy;
 use path_absolutize::path_dedot;
 use regex::Regex;
+use rustc_hash::FxHashSet;
 
 use crate::checks_gen::{CheckCodePrefix, CATEGORIES};
 use crate::settings::pyproject::load_options;

@@ -19,6 +20,7 @@ use crate::{
 
 #[derive(Debug)]
 pub struct Configuration {
+    pub allowed_confusables: FxHashSet<char>,
     pub dummy_variable_rgx: Regex,
     pub exclude: Vec<FilePattern>,
     pub extend_exclude: Vec<FilePattern>,

@@ -82,9 +84,12 @@ impl Configuration {
     ) -> Result<Self> {
         let options = load_options(pyproject)?;
         Ok(Configuration {
+            allowed_confusables: FxHashSet::from_iter(
+                options.allowed_confusables.unwrap_or_default(),
+            ),
             dummy_variable_rgx: match options.dummy_variable_rgx {
                 Some(pattern) => Regex::new(&pattern)
-                    .map_err(|e| anyhow!("Invalid dummy-variable-rgx value: {e}"))?,
+                    .map_err(|e| anyhow!("Invalid `dummy-variable-rgx` value: {e}"))?,
                 None => DEFAULT_DUMMY_VARIABLE_RGX.clone(),
             },
             src: options.src.map_or_else(
@@ -28,6 +28,7 @@ pub mod types;
 
 #[derive(Debug)]
 pub struct Settings {
+    pub allowed_confusables: FxHashSet<char>,
     pub dummy_variable_rgx: Regex,
     pub enabled: FxHashSet<CheckCode>,
     pub exclude: GlobSet,

@@ -58,6 +59,7 @@ impl Settings {
         project_root: Option<&PathBuf>,
     ) -> Result<Self> {
         Ok(Self {
+            allowed_confusables: config.allowed_confusables,
             dummy_variable_rgx: config.dummy_variable_rgx,
             enabled: resolve_codes(
                 &config

@@ -95,6 +97,7 @@ impl Settings {
 
     pub fn for_rule(check_code: CheckCode) -> Self {
         Self {
+            allowed_confusables: FxHashSet::from_iter([]),
             dummy_variable_rgx: Regex::new("^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$").unwrap(),
             enabled: FxHashSet::from_iter([check_code.clone()]),
             exclude: GlobSet::empty(),

@@ -121,6 +124,7 @@ impl Settings {
 
     pub fn for_rules(check_codes: Vec<CheckCode>) -> Self {
         Self {
+            allowed_confusables: FxHashSet::from_iter([]),
             dummy_variable_rgx: Regex::new("^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$").unwrap(),
             enabled: FxHashSet::from_iter(check_codes.clone()),
             exclude: GlobSet::empty(),

@@ -149,6 +153,9 @@ impl Settings {
 impl Hash for Settings {
     fn hash<H: Hasher>(&self, state: &mut H) {
         // Add base properties in alphabetical order.
+        for confusable in &self.allowed_confusables {
+            confusable.hash(state);
+        }
         self.dummy_variable_rgx.as_str().hash(state);
         for value in &self.enabled {
             value.hash(state);
@@ -13,6 +13,7 @@ use crate::{
 #[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Default)]
 #[serde(deny_unknown_fields, rename_all = "kebab-case")]
 pub struct Options {
+    pub allowed_confusables: Option<Vec<char>>,
     pub dummy_variable_rgx: Option<String>,
     pub exclude: Option<Vec<String>>,
     pub extend_exclude: Option<Vec<String>>,
@@ -2,7 +2,7 @@
 
 use std::path::{Path, PathBuf};
 
-use anyhow::Result;
+use anyhow::{anyhow, Result};
 use common_path::common_path_all;
 use log::debug;
 use path_absolutize::Absolutize;

@@ -82,7 +82,8 @@ pub fn find_project_root(sources: &[PathBuf]) -> Option<PathBuf> {
 
 pub fn load_options(pyproject: Option<&PathBuf>) -> Result<Options> {
     if let Some(pyproject) = pyproject {
-        Ok(parse_pyproject_toml(pyproject)?
+        Ok(parse_pyproject_toml(pyproject)
+            .map_err(|err| anyhow!("Failed to parse `{}`: {}", pyproject.to_string_lossy(), err))?
             .tool
             .and_then(|tool| tool.ruff)
             .unwrap_or_default())
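The `map_err` change is a common `anyhow` pattern: attach file context to a parse error before propagating it with `?`, so the user sees which file failed rather than a bare TOML error. A minimal sketch of the same wrapping (hypothetical function and message, for illustration):

```rust
use anyhow::{anyhow, Result};

/// Parse an integer, wrapping any failure with the name of the file
/// the text came from (hypothetical helper; mirrors the pattern above).
fn parse_number(text: &str, source: &str) -> Result<i64> {
    text.trim()
        .parse::<i64>()
        .map_err(|err| anyhow!("Failed to parse `{source}`: {err}"))
}

fn main() {
    let err = parse_number("not-a-number", "pyproject.toml").unwrap_err();
    assert!(err.to_string().starts_with("Failed to parse `pyproject.toml`"));
}
```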
@@ -133,6 +134,7 @@ mod tests {
             pyproject.tool,
             Some(Tools {
                 ruff: Some(Options {
+                    allowed_confusables: None,
                     dummy_variable_rgx: None,
                     exclude: None,
                     extend_exclude: None,

@@ -174,6 +176,7 @@ line-length = 79
             pyproject.tool,
             Some(Tools {
                 ruff: Some(Options {
+                    allowed_confusables: None,
                     dummy_variable_rgx: None,
                     exclude: None,
                     extend_exclude: None,

@@ -215,6 +218,7 @@ exclude = ["foo.py"]
             pyproject.tool,
             Some(Tools {
                 ruff: Some(Options {
+                    allowed_confusables: None,
                     line_length: None,
                     fix: None,
                     exclude: Some(vec!["foo.py".to_string()]),

@@ -256,6 +260,7 @@ select = ["E501"]
             pyproject.tool,
             Some(Tools {
                 ruff: Some(Options {
+                    allowed_confusables: None,
                     dummy_variable_rgx: None,
                     exclude: None,
                     extend_exclude: None,

@@ -298,6 +303,7 @@ ignore = ["E501"]
             pyproject.tool,
             Some(Tools {
                 ruff: Some(Options {
+                    allowed_confusables: None,
                     dummy_variable_rgx: None,
                     exclude: None,
                     extend_exclude: None,

@@ -374,6 +380,7 @@ other-attribute = 1
         assert_eq!(
             config,
             Options {
+                allowed_confusables: Some(vec!['−', 'ρ', '∗']),
                 line_length: Some(88),
                 fix: None,
                 exclude: None,
@@ -25,7 +25,7 @@ impl<'a> SourceCodeLocator<'a> {
         self.rope.get_or_init(|| Rope::from_str(self.contents))
     }
 
-    pub fn slice_source_code_at(&self, location: Location) -> Cow<'_, str> {
+    pub fn slice_source_code_at(&self, location: &Location) -> Cow<'_, str> {
         let rope = self.get_or_init_rope();
         let offset = rope.line_to_char(location.row() - 1) + location.column();
         Cow::from(rope.slice(offset..))
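Beyond the by-reference signature change, the locator's slicing is worth a note: it lazily builds a rope over the source so that a (row, column) `Location` can be converted to a character offset cheaply. A minimal sketch of equivalent slicing with the `ropey` crate, assuming 1-indexed rows as above:

```rust
use ropey::Rope;

/// Return everything from (row, column) to the end of the source.
/// `row` is 1-indexed and `column` is a character offset within the
/// row, matching the `Location` convention in the diff above.
fn slice_at(rope: &Rope, row: usize, column: usize) -> String {
    // `line_to_char` converts a 0-indexed line number to the char
    // offset of that line's start.
    let offset = rope.line_to_char(row - 1) + column;
    rope.slice(offset..).to_string()
}

fn main() {
    let rope = Rope::from_str("import os\nimport sys\n");
    assert_eq!(slice_at(&rope, 2, 0), "import sys\n");
}
```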