Compare commits

...

21 Commits

Author SHA1 Message Date
Charlie Marsh
1c41789c2a Bump version to 0.0.252 (#3142) 2023-02-22 14:50:14 -05:00
Charlie Marsh
2f9de335db Upgrade RustPython to match new flattened exports (#3141) 2023-02-22 19:36:13 +00:00
Ran Benita
ba61bb6a6c Fix isort no-lines-before preceded by an empty section (#3139)
Fix isort no-lines-before preceded by an empty section

Fix #3138.
2023-02-22 14:35:53 -05:00
Charlie Marsh
17ab71ff75 Include match in nested block check (#3137) 2023-02-22 14:32:08 -05:00
Charlie Marsh
4ad4e3e091 Avoid useless-else-on-loop for break within match (#3136) 2023-02-22 19:12:44 +00:00
Florian Best
6ced5122e4 refactor(use-from-import): build fixed variant via AST (#3132) 2023-02-22 13:17:37 -05:00
Marijn Valk
7d55b417f7 add delta-rs to list of users (#3133) 2023-02-22 13:07:58 -05:00
Charlie Marsh
f0e0efc46f Upgrade RustPython to handle trailing commas in map patterns (#3130) 2023-02-22 11:17:13 -05:00
Charlie Marsh
1efa2e07ad Avoid match statement misidentification in token rules (#3129) 2023-02-22 15:44:45 +00:00
Charlie Marsh
df3932f750 Use file-specific quote for C408 (#3128) 2023-02-22 15:26:46 +00:00
Rupert Tombs
817d0b4902 Fix =/== error in ManualDictLookup (#3117) 2023-02-22 15:14:30 +00:00
Micha Reiser
ffd8e958fc chore: Upgrade Rust to 1.67.0 (#3125) 2023-02-22 10:03:17 -05:00
Micha Reiser
ed33b75bad test(ruff_python_formatter): Run all Black tests (#2993)
This PR changes the testing infrastructure to run all Black tests and:

* Pass if Ruff and Black generate the same formatting
* Fail and write a markdown snapshot that shows the input code, the differences between Black and Ruff, Ruff's output, and Black's output

This is achieved by introducing a new `fixture` macro (open to better name suggestions) that "duplicates" the attributed test for every file that matches the specified glob pattern. Creating a new test for each file over having a test that iterates over all files has the advantage that you can run a single test, and that test failures indicate which case is failing. 

The `fixture` macro also makes it straightforward to e.g. setup our own spec tests that test very specific formatting by creating a new folder and use insta to assert the formatted output.
2023-02-22 09:25:06 -05:00
Micha Reiser
262e768fd3 refactor(ruff): Implement doc_lines_from_tokens as iterator (#3124)
This is a nit refactor... It implements the extraction of document lines as an iterator instead of a Vector to avoid the extra allocation.
2023-02-22 09:22:06 -05:00
Ran Benita
bc3a9ce003 Mark typing.assert_never as no return (#3121)
This function always raises, so RET503 shouldn't trigger for it.
2023-02-22 09:15:39 -05:00
Charlie Marsh
48005d87f8 Add missing backticks from rustdoc (#3112) 2023-02-22 05:03:06 +00:00
Charlie Marsh
e37e9c2ca3 Skip EXE001 and EXE002 rules on Windows (#3111) 2023-02-21 23:39:56 -05:00
Matthieu Devlin
8fde63b323 [pylint] Implement E1205 and E1206 (#3084) 2023-02-21 22:53:11 -05:00
Matthew Lloyd
97338e4cd6 [pylint] redefined-loop-name (W2901) (#3022)
Slightly broadens W2901 to cover `with` statements too.

Closes #2972.
2023-02-22 03:23:47 +00:00
Charlie Marsh
9645790a8b Support shell expansion for --config argument (#3107) 2023-02-21 23:33:41 +00:00
Charlie Marsh
18800c6884 Include file permissions in cache key (#3104) 2023-02-21 18:20:06 -05:00
216 changed files with 14547 additions and 1398 deletions

38
Cargo.lock generated
View File

@@ -753,7 +753,7 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
[[package]]
name = "flake8-to-ruff"
version = "0.0.251"
version = "0.0.252"
dependencies = [
"anyhow",
"clap 4.1.6",
@@ -1035,15 +1035,6 @@ dependencies = [
"windows-sys 0.45.0",
]
[[package]]
name = "is_executable"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa9acdc6d67b75e626ad644734e8bc6df893d9cd2a834129065d3dd6158ea9c8"
dependencies = [
"winapi",
]
[[package]]
name = "itertools"
version = "0.10.5"
@@ -1927,7 +1918,7 @@ dependencies = [
[[package]]
name = "ruff"
version = "0.0.251"
version = "0.0.252"
dependencies = [
"anyhow",
"bisection",
@@ -1947,7 +1938,6 @@ dependencies = [
"ignore",
"imperative",
"insta",
"is_executable",
"itertools",
"js-sys",
"libcst",
@@ -1983,7 +1973,7 @@ dependencies = [
[[package]]
name = "ruff_cli"
version = "0.0.251"
version = "0.0.252"
dependencies = [
"annotate-snippets 0.9.1",
"anyhow",
@@ -2012,6 +2002,7 @@ dependencies = [
"rustc-hash",
"serde",
"serde_json",
"shellexpand",
"similar",
"strum",
"textwrap",
@@ -2084,13 +2075,26 @@ dependencies = [
"insta",
"once_cell",
"ruff_formatter",
"ruff_testing_macros",
"ruff_text_size",
"rustc-hash",
"rustpython-common",
"rustpython-parser",
"similar",
"test-case",
]
[[package]]
name = "ruff_testing_macros"
version = "0.0.0"
dependencies = [
"glob",
"proc-macro-error",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "ruff_text_size"
version = "0.0.0"
@@ -2146,7 +2150,7 @@ dependencies = [
[[package]]
name = "rustpython-ast"
version = "0.2.0"
source = "git+https://github.com/RustPython/RustPython.git?rev=ddf497623ae56d21aa4166ff1c0725a7db67e955#ddf497623ae56d21aa4166ff1c0725a7db67e955"
source = "git+https://github.com/RustPython/RustPython.git?rev=edf5995a1e4c366976304ca05432dd27c913054e#edf5995a1e4c366976304ca05432dd27c913054e"
dependencies = [
"num-bigint",
"rustpython-compiler-core",
@@ -2155,7 +2159,7 @@ dependencies = [
[[package]]
name = "rustpython-common"
version = "0.2.0"
source = "git+https://github.com/RustPython/RustPython.git?rev=ddf497623ae56d21aa4166ff1c0725a7db67e955#ddf497623ae56d21aa4166ff1c0725a7db67e955"
source = "git+https://github.com/RustPython/RustPython.git?rev=edf5995a1e4c366976304ca05432dd27c913054e#edf5995a1e4c366976304ca05432dd27c913054e"
dependencies = [
"ascii",
"bitflags",
@@ -2180,7 +2184,7 @@ dependencies = [
[[package]]
name = "rustpython-compiler-core"
version = "0.2.0"
source = "git+https://github.com/RustPython/RustPython.git?rev=ddf497623ae56d21aa4166ff1c0725a7db67e955#ddf497623ae56d21aa4166ff1c0725a7db67e955"
source = "git+https://github.com/RustPython/RustPython.git?rev=edf5995a1e4c366976304ca05432dd27c913054e#edf5995a1e4c366976304ca05432dd27c913054e"
dependencies = [
"bincode",
"bitflags",
@@ -2197,7 +2201,7 @@ dependencies = [
[[package]]
name = "rustpython-parser"
version = "0.2.0"
source = "git+https://github.com/RustPython/RustPython.git?rev=ddf497623ae56d21aa4166ff1c0725a7db67e955#ddf497623ae56d21aa4166ff1c0725a7db67e955"
source = "git+https://github.com/RustPython/RustPython.git?rev=edf5995a1e4c366976304ca05432dd27c913054e#edf5995a1e4c366976304ca05432dd27c913054e"
dependencies = [
"ahash",
"anyhow",

View File

@@ -3,7 +3,7 @@ members = ["crates/*"]
[workspace.package]
edition = "2021"
rust-version = "1.65.0"
rust-version = "1.67.0"
[workspace.dependencies]
anyhow = { version = "1.0.66" }
@@ -13,8 +13,8 @@ libcst = { git = "https://github.com/charliermarsh/LibCST", rev = "f2f0b7a487a87
once_cell = { version = "1.16.0" }
regex = { version = "1.6.0" }
rustc-hash = { version = "1.1.0" }
rustpython-common = { git = "https://github.com/RustPython/RustPython.git", rev = "ddf497623ae56d21aa4166ff1c0725a7db67e955" }
rustpython-parser = { features = ["lalrpop"], git = "https://github.com/RustPython/RustPython.git", rev = "ddf497623ae56d21aa4166ff1c0725a7db67e955" }
rustpython-common = { git = "https://github.com/RustPython/RustPython.git", rev = "edf5995a1e4c366976304ca05432dd27c913054e" }
rustpython-parser = { features = ["lalrpop"], git = "https://github.com/RustPython/RustPython.git", rev = "edf5995a1e4c366976304ca05432dd27c913054e" }
schemars = { version = "0.8.11" }
serde = { version = "1.0.147", features = ["derive"] }
serde_json = { version = "1.0.87" }

View File

@@ -168,7 +168,7 @@ Ruff also works with [pre-commit](https://pre-commit.com):
```yaml
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
rev: 'v0.0.251'
rev: 'v0.0.252'
hooks:
- id: ruff
```
@@ -178,7 +178,7 @@ Or, to enable autofix:
```yaml
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
rev: 'v0.0.251'
rev: 'v0.0.252'
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
@@ -742,6 +742,7 @@ Ruff is used in a number of major open-source projects, including:
* [featuretools](https://github.com/alteryx/featuretools)
* [meson-python](https://github.com/mesonbuild/meson-python)
* [ZenML](https://github.com/zenml-io/zenml)
* [delta-rs](https://github.com/delta-io/delta-rs)
## License

View File

@@ -1,6 +1,6 @@
[package]
name = "flake8-to-ruff"
version = "0.0.251"
version = "0.0.252"
edition = { workspace = true }
rust-version = { workspace = true }

View File

@@ -1,6 +1,6 @@
[package]
name = "ruff"
version = "0.0.251"
version = "0.0.252"
authors = ["Charlie Marsh <charlie.r.marsh@gmail.com>"]
edition = { workspace = true }
rust-version = { workspace = true }
@@ -57,7 +57,6 @@ titlecase = { version = "2.2.1" }
toml = { workspace = true }
# https://docs.rs/getrandom/0.2.7/getrandom/#webassembly-support
# For (future) wasm-pack support
[target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dependencies]
getrandom = { version = "0.2.7", features = ["js"] }
console_error_panic_hook = { version = "0.1.7" }
@@ -66,9 +65,6 @@ serde-wasm-bindgen = { version = "0.4" }
js-sys = { version = "0.3.60" }
wasm-bindgen = { version = "0.2.83" }
[target.'cfg(not(target_family = "wasm"))'.dependencies]
is_executable = "1.0.1"
[dev-dependencies]
insta = { version = "1.19.0", features = ["yaml", "redactions"] }
test-case = { version = "2.2.2" }

View File

@@ -3,6 +3,8 @@ import os
import posix
from posix import abort
import sys as std_sys
import typing
import typing_extensions
import _thread
import _winapi
@@ -211,6 +213,18 @@ def noreturn_sys_exit():
std_sys.exit(0)
def noreturn_typing_assert_never():
if x > 0:
return 1
typing.assert_never(0)
def noreturn_typing_extensions_assert_never():
if x > 0:
return 1
typing_extensions.assert_never(0)
def noreturn__thread_exit():
if x > 0:
return 1
@@ -275,3 +289,7 @@ def x(y):
return 1
case 1:
print() # error
def foo(baz: str) -> str:
return baz

View File

@@ -0,0 +1,3 @@
from __future__ import annotations
from typing import Any
from . import my_local_folder_object

View File

@@ -57,3 +57,6 @@ class C: ...; ...
#: E701:2:12
match *0, 1, *2:
case 0,: y = 0
#:
class Foo:
match: Optional[Match] = None

View File

@@ -0,0 +1,12 @@
import logging
logging.warning("Hello %s %s", "World!") # [logging-too-few-args]
# do not handle calls with kwargs (like pylint)
logging.warning("Hello %s", "World!", "again", something="else")
logging.warning("Hello %s", "World!")
import warning
warning.warning("Hello %s %s", "World!")

View File

@@ -0,0 +1,12 @@
import logging
logging.warning("Hello %s", "World!", "again") # [logging-too-many-args]
# do not handle calls with kwargs (like pylint)
logging.warning("Hello %s", "World!", "again", something="else")
logging.warning("Hello %s", "World!")
import warning
warning.warning("Hello %s", "World!", "again")

View File

@@ -0,0 +1,155 @@
# For -> for, variable reused
for i in []:
for i in []: # error
pass
# With -> for, variable reused
with None as i:
for i in []: # error
pass
# For -> with, variable reused
for i in []:
with None as i: # error
pass
# With -> with, variable reused
with None as i:
with None as i: # error
pass
# For -> for, different variable
for i in []:
for j in []: # ok
pass
# With -> with, different variable
with None as i:
with None as j: # ok
pass
# For -> for -> for, doubly nested variable reuse
for i in []:
for j in []:
for i in []: # error
pass
# For -> for -> for -> for, doubly nested variable reuse x2
for i in []:
for j in []:
for i in []: # error
for j in []: # error
pass
# For -> assignment
for i in []:
i = 5 # error
# For -> augmented assignment
for i in []:
i += 5 # error
# For -> annotated assignment
for i in []:
i: int = 5 # error
# Async for -> for, variable reused
async for i in []:
for i in []: # error
pass
# For -> async for, variable reused
for i in []:
async for i in []: # error
pass
# For -> for, outer loop unpacks tuple
for i, j in enumerate([]):
for i in []: # error
pass
# For -> for, inner loop unpacks tuple
for i in []:
for i, j in enumerate([]): # error
pass
# For -> for, both loops unpack tuple
for (i, (j, k)) in []:
for i, j in enumerate([]): # two errors
pass
# For else -> for, variable reused in else
for i in []:
pass
else:
for i in []: # no error
pass
# For -> for, ignore dummy variables
for _ in []:
for _ in []: # no error
pass
# For -> for, outer loop unpacks with asterisk
for i, *j in []:
for j in []: # error
pass
# For -> function definition
for i in []:
def f():
i = 2 # no error
# For -> class definition
for i in []:
class A:
i = 2 # no error
# For -> function definition -> for -> assignment
for i in []:
def f():
for i in []: # no error
i = 2 # error
# For -> class definition -> for -> for
for i in []:
class A:
for i in []: # no error
for i in []: # error
pass
# For -> use in assignment target without actual assignment; subscript
for i in []:
a[i] = 2 # no error
i[a] = 2 # no error
# For -> use in assignment target without actual assignment; attribute
for i in []:
a.i = 2 # no error
i.a = 2 # no error
# For target with subscript -> assignment
for a[0] in []:
a[0] = 2 # error
a[1] = 2 # no error
# For target with subscript -> assignment
for a['i'] in []:
a['i'] = 2 # error
a['j'] = 2 # no error
# For target with attribute -> assignment
for a.i in []:
a.i = 2 # error
a.j = 2 # no error
# For target with double nested attribute -> assignment
for a.i.j in []:
a.i.j = 2 # error
a.j.i = 2 # no error
# For target with attribute -> assignment with different spacing
for a.i in []:
a. i = 2 # error
for a. i in []:
a.i = 2 # error

View File

@@ -124,3 +124,14 @@ def test_break_in_with():
else:
return True
return False
def test_break_in_match():
"""no false positive for break in match"""
for name in ["demo"]:
match name:
case "demo":
break
else:
return True
return False

View File

@@ -9,9 +9,7 @@ use rustpython_parser::ast::{
Arguments, Constant, Excepthandler, ExcepthandlerKind, Expr, ExprKind, Keyword, KeywordData,
Located, Location, MatchCase, Pattern, PatternKind, Stmt, StmtKind,
};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::token::StringKind;
use rustpython_parser::{lexer, Mode, StringKind, Tok};
use smallvec::{smallvec, SmallVec};
use crate::ast::types::{Binding, BindingKind, CallPath, Range};
@@ -655,7 +653,7 @@ pub fn has_comments<T>(located: &Located<T>, locator: &Locator) -> bool {
/// Returns `true` if a [`Range`] includes at least one comment.
pub fn has_comments_in(range: Range, locator: &Locator) -> bool {
for tok in lexer::make_tokenizer(locator.slice(&range)) {
for tok in lexer::lex_located(locator.slice(&range), Mode::Module, range.location) {
match tok {
Ok((_, tok, _)) => {
if matches!(tok, Tok::Comment(..)) {
@@ -870,7 +868,7 @@ pub fn match_parens(start: Location, locator: &Locator) -> Option<Range> {
let mut fix_start = None;
let mut fix_end = None;
let mut count: usize = 0;
for (start, tok, end) in lexer::make_tokenizer_located(contents, start).flatten() {
for (start, tok, end) in lexer::lex_located(contents, Mode::Module, start).flatten() {
if matches!(tok, Tok::Lpar) {
if count == 0 {
fix_start = Some(start);
@@ -902,7 +900,8 @@ pub fn identifier_range(stmt: &Stmt, locator: &Locator) -> Range {
| StmtKind::AsyncFunctionDef { .. }
) {
let contents = locator.slice(&Range::from_located(stmt));
for (start, tok, end) in lexer::make_tokenizer_located(contents, stmt.location).flatten() {
for (start, tok, end) in lexer::lex_located(contents, Mode::Module, stmt.location).flatten()
{
if matches!(tok, Tok::Name { .. }) {
return Range::new(start, end);
}
@@ -928,16 +927,18 @@ pub fn binding_range(binding: &Binding, locator: &Locator) -> Range {
}
// Return the ranges of `Name` tokens within a specified node.
pub fn find_names<T>(located: &Located<T>, locator: &Locator) -> Vec<Range> {
pub fn find_names<'a, T, U>(
located: &'a Located<T, U>,
locator: &'a Locator,
) -> impl Iterator<Item = Range> + 'a {
let contents = locator.slice(&Range::from_located(located));
lexer::make_tokenizer_located(contents, located.location)
lexer::lex_located(contents, Mode::Module, located.location)
.flatten()
.filter(|(_, tok, _)| matches!(tok, Tok::Name { .. }))
.map(|(start, _, end)| Range {
location: start,
end_location: end,
})
.collect()
}
/// Return the `Range` of `name` in `Excepthandler`.
@@ -949,7 +950,7 @@ pub fn excepthandler_name_range(handler: &Excepthandler, locator: &Locator) -> O
(Some(_), Some(type_)) => {
let type_end_location = type_.end_location.unwrap();
let contents = locator.slice(&Range::new(type_end_location, body[0].location));
let range = lexer::make_tokenizer_located(contents, type_end_location)
let range = lexer::lex_located(contents, Mode::Module, type_end_location)
.flatten()
.tuple_windows()
.find(|(tok, next_tok)| {
@@ -976,7 +977,7 @@ pub fn except_range(handler: &Excepthandler, locator: &Locator) -> Range {
location: handler.location,
end_location: end,
});
let range = lexer::make_tokenizer_located(contents, handler.location)
let range = lexer::lex_located(contents, Mode::Module, handler.location)
.flatten()
.find(|(_, kind, _)| matches!(kind, Tok::Except { .. }))
.map(|(location, _, end_location)| Range {
@@ -990,7 +991,7 @@ pub fn except_range(handler: &Excepthandler, locator: &Locator) -> Range {
/// Find f-strings that don't contain any formatted values in a `JoinedStr`.
pub fn find_useless_f_strings(expr: &Expr, locator: &Locator) -> Vec<(Range, Range)> {
let contents = locator.slice(&Range::from_located(expr));
lexer::make_tokenizer_located(contents, expr.location)
lexer::lex_located(contents, Mode::Module, expr.location)
.flatten()
.filter_map(|(location, tok, end_location)| match tok {
Tok::String {
@@ -1044,7 +1045,7 @@ pub fn else_range(stmt: &Stmt, locator: &Locator) -> Option<Range> {
.expect("Expected orelse to be non-empty")
.location,
});
let range = lexer::make_tokenizer_located(contents, body_end)
let range = lexer::lex_located(contents, Mode::Module, body_end)
.flatten()
.find(|(_, kind, _)| matches!(kind, Tok::Else))
.map(|(location, _, end_location)| Range {
@@ -1060,7 +1061,7 @@ pub fn else_range(stmt: &Stmt, locator: &Locator) -> Option<Range> {
/// Return the `Range` of the first `Tok::Colon` token in a `Range`.
pub fn first_colon_range(range: Range, locator: &Locator) -> Option<Range> {
let contents = locator.slice(&range);
let range = lexer::make_tokenizer_located(contents, range.location)
let range = lexer::lex_located(contents, Mode::Module, range.location)
.flatten()
.find(|(_, kind, _)| matches!(kind, Tok::Colon))
.map(|(location, _, end_location)| Range {
@@ -1090,7 +1091,7 @@ pub fn elif_else_range(stmt: &Stmt, locator: &Locator) -> Option<Range> {
_ => return None,
};
let contents = locator.slice(&Range::new(start, end));
let range = lexer::make_tokenizer_located(contents, start)
let range = lexer::lex_located(contents, Mode::Module, start)
.flatten()
.find(|(_, kind, _)| matches!(kind, Tok::Elif | Tok::Else))
.map(|(location, _, end_location)| Range {
@@ -1206,8 +1207,8 @@ pub fn is_logger_candidate(func: &Expr) -> bool {
#[cfg(test)]
mod tests {
use anyhow::Result;
use rustpython_parser as parser;
use rustpython_parser::ast::Location;
use rustpython_parser::parser;
use crate::ast::helpers::{
elif_else_range, else_range, first_colon_range, identifier_range, match_trailing_content,

View File

@@ -0,0 +1,24 @@
pub enum LoggingLevel {
Debug,
Critical,
Error,
Exception,
Info,
Warn,
Warning,
}
impl LoggingLevel {
pub fn from_str(level: &str) -> Option<Self> {
match level {
"debug" => Some(LoggingLevel::Debug),
"critical" => Some(LoggingLevel::Critical),
"error" => Some(LoggingLevel::Error),
"exception" => Some(LoggingLevel::Exception),
"info" => Some(LoggingLevel::Info),
"warn" => Some(LoggingLevel::Warn),
"warning" => Some(LoggingLevel::Warning),
_ => None,
}
}
}

View File

@@ -4,6 +4,7 @@ pub mod comparable;
pub mod function_type;
pub mod hashable;
pub mod helpers;
pub mod logging;
pub mod operations;
pub mod relocate;
pub mod types;

View File

@@ -1,8 +1,7 @@
use bitflags::bitflags;
use rustc_hash::FxHashMap;
use rustpython_parser::ast::{Cmpop, Constant, Expr, ExprKind, Located, Stmt, StmtKind};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::helpers::any_over_expr;
use crate::ast::types::{BindingKind, Scope};
@@ -205,6 +204,7 @@ pub fn in_nested_block<'a>(mut parents: impl Iterator<Item = &'a Stmt>) -> bool
| StmtKind::TryStar { .. }
| StmtKind::If { .. }
| StmtKind::With { .. }
| StmtKind::Match { .. }
)
})
}
@@ -283,7 +283,7 @@ pub type LocatedCmpop<U = ()> = Located<Cmpop, U>;
/// `CPython` doesn't either. This method iterates over the token stream and
/// re-identifies [`Cmpop`] nodes, annotating them with valid ranges.
pub fn locate_cmpops(contents: &str) -> Vec<LocatedCmpop> {
let mut tok_iter = lexer::make_tokenizer(contents).flatten().peekable();
let mut tok_iter = lexer::lex(contents, Mode::Module).flatten().peekable();
let mut ops: Vec<LocatedCmpop> = vec![];
let mut count: usize = 0;
loop {

View File

@@ -29,7 +29,7 @@ impl Range {
}
}
pub fn from_located<T>(located: &Located<T>) -> Self {
pub fn from_located<T, U>(located: &Located<T, U>) -> Self {
Range::new(located.location, located.end_location.unwrap())
}

View File

@@ -4,8 +4,7 @@ use libcst_native::{
Codegen, CodegenState, ImportNames, ParenthesizableWhitespace, SmallStatement, Statement,
};
use rustpython_parser::ast::{ExcepthandlerKind, Expr, Keyword, Location, Stmt, StmtKind};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::helpers;
use crate::ast::helpers::to_absolute;
@@ -371,7 +370,7 @@ pub fn remove_argument(
if n_arguments == 1 {
// Case 1: there is only one argument.
let mut count: usize = 0;
for (start, tok, end) in lexer::make_tokenizer_located(contents, stmt_at).flatten() {
for (start, tok, end) in lexer::lex_located(contents, Mode::Module, stmt_at).flatten() {
if matches!(tok, Tok::Lpar) {
if count == 0 {
fix_start = Some(if remove_parentheses {
@@ -403,7 +402,7 @@ pub fn remove_argument(
{
// Case 2: argument or keyword is _not_ the last node.
let mut seen_comma = false;
for (start, tok, end) in lexer::make_tokenizer_located(contents, stmt_at).flatten() {
for (start, tok, end) in lexer::lex_located(contents, Mode::Module, stmt_at).flatten() {
if seen_comma {
if matches!(tok, Tok::NonLogicalNewline) {
// Also delete any non-logical newlines after the comma.
@@ -426,7 +425,7 @@ pub fn remove_argument(
} else {
// Case 3: argument or keyword is the last node, so we have to find the last
// comma in the stmt.
for (start, tok, _) in lexer::make_tokenizer_located(contents, stmt_at).flatten() {
for (start, tok, _) in lexer::lex_located(contents, Mode::Module, stmt_at).flatten() {
if start == expr_at {
fix_end = Some(expr_end);
break;
@@ -448,8 +447,8 @@ pub fn remove_argument(
#[cfg(test)]
mod tests {
use anyhow::Result;
use rustpython_parser as parser;
use rustpython_parser::ast::Location;
use rustpython_parser::parser;
use crate::autofix::helpers::{next_stmt_break, trailing_semicolon};
use crate::source_code::Locator;

View File

@@ -6,19 +6,18 @@ use std::path::Path;
use itertools::Itertools;
use log::error;
use nohash_hasher::IntMap;
use ruff_python::builtins::{BUILTINS, MAGIC_GLOBALS};
use ruff_python::typing::TYPING_EXTENSIONS;
use rustc_hash::{FxHashMap, FxHashSet};
use rustpython_common::cformat::{CFormatError, CFormatErrorType};
use rustpython_parser as parser;
use rustpython_parser::ast::{
Arg, Arguments, Comprehension, Constant, Excepthandler, ExcepthandlerKind, Expr, ExprContext,
ExprKind, KeywordData, Located, Location, Operator, Pattern, PatternKind, Stmt, StmtKind,
Suite,
};
use rustpython_parser::parser;
use smallvec::smallvec;
use ruff_python::builtins::{BUILTINS, MAGIC_GLOBALS};
use ruff_python::typing::TYPING_EXTENSIONS;
use crate::ast::helpers::{
binding_range, collect_call_path, extract_handler_names, from_relative_import, to_module_path,
};
@@ -342,7 +341,7 @@ where
match &stmt.node {
StmtKind::Global { names } => {
let scope_index = *self.scope_stack.last().expect("No current scope found");
let ranges = helpers::find_names(stmt, self.locator);
let ranges: Vec<Range> = helpers::find_names(stmt, self.locator).collect();
if scope_index != GLOBAL_SCOPE_INDEX {
// Add the binding to the current scope.
let context = self.execution_context();
@@ -372,7 +371,7 @@ where
}
StmtKind::Nonlocal { names } => {
let scope_index = *self.scope_stack.last().expect("No current scope found");
let ranges = helpers::find_names(stmt, self.locator);
let ranges: Vec<Range> = helpers::find_names(stmt, self.locator).collect();
if scope_index != GLOBAL_SCOPE_INDEX {
let context = self.execution_context();
let scope = &mut self.scopes[scope_index];
@@ -1651,6 +1650,9 @@ where
self.current_stmt_parent().map(Into::into),
);
}
if self.settings.rules.enabled(&Rule::RedefinedLoopName) {
pylint::rules::redefined_loop_name(self, &Node::Stmt(stmt));
}
}
StmtKind::While { body, orelse, .. } => {
if self.settings.rules.enabled(&Rule::FunctionUsesLoopVariable) {
@@ -1695,6 +1697,9 @@ where
if self.settings.rules.enabled(&Rule::UselessElseOnLoop) {
pylint::rules::useless_else_on_loop(self, stmt, body, orelse);
}
if self.settings.rules.enabled(&Rule::RedefinedLoopName) {
pylint::rules::redefined_loop_name(self, &Node::Stmt(stmt));
}
if matches!(stmt.node, StmtKind::For { .. }) {
if self.settings.rules.enabled(&Rule::ReimplementedBuiltin) {
flake8_simplify::rules::convert_for_loop_to_any_all(
@@ -2054,8 +2059,8 @@ where
value,
..
} => {
// If we're in a class or module scope, then the annotation needs to be available
// at runtime.
// If we're in a class or module scope, then the annotation needs to be
// available at runtime.
// See: https://docs.python.org/3/reference/simple_stmts.html#annotated-assignment-statements
if !self.annotations_future_enabled
&& matches!(
@@ -2912,6 +2917,13 @@ where
{
flake8_logging_format::rules::logging_call(self, func, args, keywords);
}
// pylint logging checker
if self.settings.rules.enabled(&Rule::LoggingTooFewArgs)
|| self.settings.rules.enabled(&Rule::LoggingTooManyArgs)
{
pylint::rules::logging_call(self, func, args, keywords);
}
}
ExprKind::Dict { keys, values } => {
if self

View File

@@ -152,8 +152,8 @@ pub fn check_logical_lines(
#[cfg(test)]
mod tests {
use rustpython_parser::lexer;
use rustpython_parser::lexer::LexResult;
use rustpython_parser::{lexer, Mode};
use crate::checkers::logical_lines::iter_logical_lines;
use crate::source_code::Locator;
@@ -164,7 +164,7 @@ mod tests {
x = 1
y = 2
z = x + 1"#;
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let actual: Vec<String> = iter_logical_lines(&lxr, &locator)
.into_iter()
@@ -185,7 +185,7 @@ x = [
]
y = 2
z = x + 1"#;
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let actual: Vec<String> = iter_logical_lines(&lxr, &locator)
.into_iter()
@@ -199,7 +199,7 @@ z = x + 1"#;
assert_eq!(actual, expected);
let contents = "x = 'abc'";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let actual: Vec<String> = iter_logical_lines(&lxr, &locator)
.into_iter()
@@ -212,7 +212,7 @@ z = x + 1"#;
def f():
x = 1
f()"#;
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let actual: Vec<String> = iter_logical_lines(&lxr, &locator)
.into_iter()
@@ -227,7 +227,7 @@ def f():
# Comment goes here.
x = 1
f()"#;
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let actual: Vec<String> = iter_logical_lines(&lxr, &locator)
.into_iter()

View File

@@ -1,6 +1,7 @@
//! Lint rules based on token traversal.
use rustpython_parser::lexer::{LexResult, Tok};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use crate::lex::docstring_detection::StateMachine;
use crate::registry::{Diagnostic, Rule};

View File

@@ -126,6 +126,8 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<Rule> {
(Pylint, "E0101") => Rule::ReturnInInit,
(Pylint, "E0604") => Rule::InvalidAllObject,
(Pylint, "E0605") => Rule::InvalidAllFormat,
(Pylint, "E1205") => Rule::LoggingTooManyArgs,
(Pylint, "E1206") => Rule::LoggingTooFewArgs,
(Pylint, "E1307") => Rule::BadStringFormatType,
(Pylint, "E2502") => Rule::BidirectionalUnicode,
(Pylint, "E1310") => Rule::BadStrStripCall,
@@ -146,6 +148,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<Rule> {
(Pylint, "R0913") => Rule::TooManyArguments,
(Pylint, "R0912") => Rule::TooManyBranches,
(Pylint, "R0915") => Rule::TooManyStatements,
(Pylint, "W2901") => Rule::RedefinedLoopName,
// flake8-builtins
(Flake8Builtins, "001") => Rule::BuiltinVariableShadowing,

View File

@@ -3,7 +3,8 @@
use bitflags::bitflags;
use nohash_hasher::{IntMap, IntSet};
use rustpython_parser::ast::Location;
use rustpython_parser::lexer::{LexResult, Tok};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use crate::registry::LintSource;
use crate::settings::Settings;
@@ -150,56 +151,61 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
#[cfg(test)]
mod tests {
use nohash_hasher::{IntMap, IntSet};
use rustpython_parser::lexer;
use rustpython_parser::lexer::LexResult;
use rustpython_parser::{lexer, Mode};
use crate::directives::{extract_isort_directives, extract_noqa_line_for};
#[test]
fn noqa_extraction() {
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
y = 2
z = x + 1",
Mode::Module,
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
"
x = 1
y = 2
z = x + 1",
Mode::Module,
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
y = 2
z = x + 1
",
Mode::Module,
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
y = 2
z = x + 1
",
Mode::Module,
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
"x = '''abc
def
ghi
'''
y = 2
z = x + 1",
Mode::Module,
)
.collect();
assert_eq!(
@@ -207,13 +213,14 @@ z = x + 1",
IntMap::from_iter([(1, 4), (2, 4), (3, 4)])
);
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
y = '''abc
def
ghi
'''
z = 2",
Mode::Module,
)
.collect();
assert_eq!(
@@ -221,12 +228,13 @@ z = 2",
IntMap::from_iter([(2, 5), (3, 5), (4, 5)])
);
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
y = '''abc
def
ghi
'''",
Mode::Module,
)
.collect();
assert_eq!(
@@ -234,17 +242,19 @@ ghi
IntMap::from_iter([(2, 5), (3, 5), (4, 5)])
);
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
r#"x = \
1"#,
Mode::Module,
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::from_iter([(1, 2)]));
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
r#"from foo import \
bar as baz, \
qux as quux"#,
Mode::Module,
)
.collect();
assert_eq!(
@@ -252,7 +262,7 @@ ghi
IntMap::from_iter([(1, 3), (2, 3)])
);
let lxr: Vec<LexResult> = lexer::make_tokenizer(
let lxr: Vec<LexResult> = lexer::lex(
r#"
# Foo
from foo import \
@@ -262,6 +272,7 @@ x = \
1
y = \
2"#,
Mode::Module,
)
.collect();
assert_eq!(
@@ -275,7 +286,7 @@ y = \
let contents = "x = 1
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());
let contents = "# isort: off
@@ -283,7 +294,7 @@ x = 1
y = 2
# isort: on
z = x + 1";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr).exclusions,
IntSet::from_iter([2, 3, 4])
@@ -296,7 +307,7 @@ y = 2
# isort: on
z = x + 1
# isort: on";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr).exclusions,
IntSet::from_iter([2, 3, 4, 5])
@@ -306,7 +317,7 @@ z = x + 1
x = 1
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr).exclusions,
IntSet::from_iter([2, 3, 4])
@@ -316,7 +327,7 @@ z = x + 1";
x = 1
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());
let contents = "# isort: off
@@ -325,7 +336,7 @@ x = 1
y = 2
# isort: skip_file
z = x + 1";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());
}
@@ -334,20 +345,20 @@ z = x + 1";
let contents = "x = 1
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).splits, Vec::<usize>::new());
let contents = "x = 1
y = 2
# isort: split
z = x + 1";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).splits, vec![3]);
let contents = "x = 1
y = 2 # isort: split
z = x + 1";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).splits, vec![2]);
}
}

View File

@@ -1,34 +1,62 @@
//! Doc line extraction. In this context, a doc line is a line consisting of a
//! standalone comment or a constant string statement.
use std::iter::FusedIterator;
use rustpython_parser::ast::{Constant, ExprKind, Stmt, StmtKind, Suite};
use rustpython_parser::lexer::{LexResult, Tok};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use crate::ast::visitor;
use crate::ast::visitor::Visitor;
/// Extract doc lines (standalone comments) from a token sequence.
pub fn doc_lines_from_tokens(lxr: &[LexResult]) -> Vec<usize> {
let mut doc_lines: Vec<usize> = Vec::default();
let mut prev: Option<usize> = None;
for (start, tok, end) in lxr.iter().flatten() {
if matches!(tok, Tok::Indent | Tok::Dedent | Tok::Newline) {
continue;
}
if matches!(tok, Tok::Comment(..)) {
if let Some(prev) = prev {
if start.row() > prev {
doc_lines.push(start.row());
}
} else {
doc_lines.push(start.row());
}
}
prev = Some(end.row());
}
doc_lines
pub fn doc_lines_from_tokens(lxr: &[LexResult]) -> DocLines {
DocLines::new(lxr)
}
pub struct DocLines<'a> {
inner: std::iter::Flatten<core::slice::Iter<'a, LexResult>>,
prev: Option<usize>,
}
impl<'a> DocLines<'a> {
fn new(lxr: &'a [LexResult]) -> Self {
Self {
inner: lxr.iter().flatten(),
prev: None,
}
}
}
impl Iterator for DocLines<'_> {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
loop {
let (start, tok, end) = self.inner.next()?;
match tok {
Tok::Indent | Tok::Dedent | Tok::Newline => continue,
Tok::Comment(..) => {
if let Some(prev) = self.prev {
if start.row() > prev {
break Some(start.row());
}
} else {
break Some(start.row());
}
}
_ => {}
}
self.prev = Some(end.row());
}
}
}
impl FusedIterator for DocLines<'_> {}
#[derive(Default)]
struct StringLinesVisitor {
string_lines: Vec<usize>,

View File

@@ -1,5 +1,3 @@
use std::fs::File;
use std::io::{BufReader, Read};
use std::ops::Deref;
use std::path::{Path, PathBuf};
@@ -88,12 +86,3 @@ pub fn relativize_path(path: impl AsRef<Path>) -> String {
}
format!("{}", path.display())
}
/// Read a file's contents from disk.
pub fn read_file<P: AsRef<Path>>(path: P) -> Result<String> {
let file = File::open(path)?;
let mut buf_reader = BufReader::new(file);
let mut contents = String::new();
buf_reader.read_to_string(&mut contents)?;
Ok(contents)
}

View File

@@ -4,7 +4,7 @@
//!
//! TODO(charlie): Consolidate with the existing AST-based docstring extraction.
use rustpython_parser::lexer::Tok;
use rustpython_parser::Tok;
#[derive(Default)]
enum State {

View File

@@ -5,8 +5,8 @@ use anyhow::{anyhow, Result};
use colored::Colorize;
use log::error;
use rustc_hash::FxHashMap;
use rustpython_parser::error::ParseError;
use rustpython_parser::lexer::LexResult;
use rustpython_parser::ParseError;
use crate::autofix::fix_file;
use crate::checkers::ast::check_ast;
@@ -223,7 +223,7 @@ const MAX_ITERATIONS: usize = 100;
/// Add any missing `# noqa` pragmas to the source code at the given `Path`.
pub fn add_noqa_to_path(path: &Path, package: Option<&Path>, settings: &Settings) -> Result<usize> {
// Read the file from disk.
let contents = fs::read_file(path)?;
let contents = std::fs::read_to_string(path)?;
// Tokenize once.
let tokens: Vec<LexResult> = rustpython_helpers::tokenize(&contents);

View File

@@ -149,6 +149,9 @@ ruff_macros::register_rules!(
rules::pylint::rules::TooManyArguments,
rules::pylint::rules::TooManyBranches,
rules::pylint::rules::TooManyStatements,
rules::pylint::rules::RedefinedLoopName,
rules::pylint::rules::LoggingTooFewArgs,
rules::pylint::rules::LoggingTooManyArgs,
// flake8-builtins
rules::flake8_builtins::rules::BuiltinVariableShadowing,
rules::flake8_builtins::rules::BuiltinArgumentShadowing,

View File

@@ -1,6 +1,7 @@
/// See: [eradicate.py](https://github.com/myint/eradicate/blob/98f199940979c94447a461d50d27862b118b282d/eradicate.py)
use once_cell::sync::Lazy;
use regex::Regex;
use rustpython_parser as parser;
static ALLOWLIST_REGEX: Lazy<Regex> = Lazy::new(|| {
Regex::new(
@@ -77,7 +78,7 @@ pub fn comment_contains_code(line: &str, task_tags: &[String]) -> bool {
}
// Finally, compile the source code.
rustpython_parser::parser::parse_program(&line, "<filename>").is_ok()
parser::parse_program(&line, "<filename>").is_ok()
}
/// Returns `true` if a line is probably part of some multiline code.

View File

@@ -1,7 +1,6 @@
use anyhow::{bail, Result};
use rustpython_parser::ast::Stmt;
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::types::Range;
use crate::fix::Fix;
@@ -16,7 +15,7 @@ pub fn add_return_none_annotation(locator: &Locator, stmt: &Stmt) -> Result<Fix>
let mut seen_lpar = false;
let mut seen_rpar = false;
let mut count: usize = 0;
for (start, tok, ..) in lexer::make_tokenizer_located(contents, range.location).flatten() {
for (start, tok, ..) in lexer::lex_located(contents, Mode::Module, range.location).flatten() {
if seen_lpar && seen_rpar {
if matches!(tok, Tok::Colon) {
return Ok(Fix::insertion(" -> None".to_string(), start));

View File

@@ -1,7 +1,7 @@
use itertools::Itertools;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::lexer::{LexResult, Spanned};
use rustpython_parser::token::Tok;
use rustpython_parser::Tok;
use crate::ast::types::Range;
use crate::fix::Fix;

View File

@@ -522,11 +522,13 @@ pub fn fix_unnecessary_collection_call(
// Quote each argument.
for arg in &call.args {
let quoted = format!(
"\"{}\"",
"{}{}{}",
stylist.quote(),
arg.keyword
.as_ref()
.expect("Expected dictionary argument to be kwarg")
.value
.value,
stylist.quote(),
);
arena.push(quoted);
}

View File

@@ -1,3 +1,8 @@
#[cfg(target_family = "unix")]
use std::os::unix::fs::PermissionsExt;
#[cfg(target_family = "unix")]
use std::path::Path;
use once_cell::sync::Lazy;
use regex::Regex;
@@ -33,6 +38,17 @@ pub fn extract_shebang(line: &str) -> ShebangDirective {
}
}
#[cfg(target_family = "unix")]
pub fn is_executable(filepath: &Path) -> bool {
{
let Ok(metadata) = filepath.metadata() else {
return false;
};
let permissions = metadata.permissions();
permissions.mode() & 0o111 != 0
}
}
#[cfg(test)]
mod tests {
use crate::rules::flake8_executable::helpers::{

View File

@@ -1,12 +1,13 @@
#![allow(unused_imports)]
use std::path::Path;
#[cfg(not(target_family = "wasm"))]
use is_executable::IsExecutable;
use ruff_macros::{define_violation, derive_message_formats};
#[cfg(not(target_family = "wasm"))]
use crate::ast::types::Range;
use crate::registry::Diagnostic;
#[cfg(target_family = "unix")]
use crate::rules::flake8_executable::helpers::is_executable;
use crate::violation::Violation;
define_violation!(
@@ -20,9 +21,9 @@ impl Violation for ShebangMissingExecutableFile {
}
/// EXE002
#[cfg(not(target_family = "wasm"))]
#[cfg(target_family = "unix")]
pub fn shebang_missing(filepath: &Path) -> Option<Diagnostic> {
if filepath.is_executable() {
if is_executable(filepath) {
let diagnostic = Diagnostic::new(ShebangMissingExecutableFile, Range::default());
Some(diagnostic)
} else {
@@ -30,7 +31,7 @@ pub fn shebang_missing(filepath: &Path) -> Option<Diagnostic> {
}
}
#[cfg(target_family = "wasm")]
#[cfg(not(target_family = "unix"))]
pub fn shebang_missing(_filepath: &Path) -> Option<Diagnostic> {
None
}

View File

@@ -1,14 +1,15 @@
#![allow(unused_imports)]
use std::path::Path;
#[cfg(not(target_family = "wasm"))]
use is_executable::IsExecutable;
use ruff_macros::{define_violation, derive_message_formats};
#[cfg(not(target_family = "wasm"))]
use rustpython_parser::ast::Location;
#[cfg(not(target_family = "wasm"))]
use ruff_macros::{define_violation, derive_message_formats};
use crate::ast::types::Range;
use crate::registry::Diagnostic;
#[cfg(target_family = "unix")]
use crate::rules::flake8_executable::helpers::is_executable;
use crate::rules::flake8_executable::helpers::ShebangDirective;
use crate::violation::Violation;
@@ -23,14 +24,14 @@ impl Violation for ShebangNotExecutable {
}
/// EXE001
#[cfg(not(target_family = "wasm"))]
#[cfg(target_family = "unix")]
pub fn shebang_not_executable(
filepath: &Path,
lineno: usize,
shebang: &ShebangDirective,
) -> Option<Diagnostic> {
if let ShebangDirective::Match(_, start, end, _) = shebang {
if filepath.is_executable() {
if is_executable(filepath) {
None
} else {
let diagnostic = Diagnostic::new(
@@ -47,7 +48,7 @@ pub fn shebang_not_executable(
}
}
#[cfg(target_family = "wasm")]
#[cfg(not(target_family = "unix"))]
pub fn shebang_not_executable(
_filepath: &Path,
_lineno: usize,

View File

@@ -1,7 +1,8 @@
use itertools::Itertools;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::ast::{Constant, Expr, ExprKind, Operator};
use rustpython_parser::lexer::{LexResult, Tok};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use crate::ast::types::Range;
use crate::registry::Diagnostic;

View File

@@ -1,6 +1,7 @@
use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword, Location, Operator};
use crate::ast::helpers::{find_keyword, is_logger_candidate, SimpleCallArgs};
use crate::ast::logging::LoggingLevel;
use crate::ast::types::Range;
use crate::checkers::ast::Checker;
use crate::fix::Fix;
@@ -10,31 +11,6 @@ use crate::rules::flake8_logging_format::violations::{
LoggingRedundantExcInfo, LoggingStringConcat, LoggingStringFormat, LoggingWarn,
};
enum LoggingLevel {
Debug,
Critical,
Error,
Exception,
Info,
Warn,
Warning,
}
impl LoggingLevel {
fn from_str(level: &str) -> Option<Self> {
match level {
"debug" => Some(LoggingLevel::Debug),
"critical" => Some(LoggingLevel::Critical),
"error" => Some(LoggingLevel::Error),
"exception" => Some(LoggingLevel::Exception),
"info" => Some(LoggingLevel::Info),
"warn" => Some(LoggingLevel::Warn),
"warning" => Some(LoggingLevel::Warning),
_ => None,
}
}
}
const RESERVED_ATTRS: &[&str; 22] = &[
"args",
"asctime",

View File

@@ -1,6 +1,7 @@
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::ast::Location;
use rustpython_parser::lexer::{LexResult, Tok};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use super::settings::Quote;
use crate::ast::types::Range;

View File

@@ -179,6 +179,8 @@ const NORETURN_FUNCS: &[&[&str]] = &[
&["posix", "_exit"],
&["posix", "abort"],
&["sys", "exit"],
&["typing", "assert_never"],
&["typing_extensions", "assert_never"],
&["_thread", "exit"],
&["_winapi", "ExitProcess"],
// third-party modules

View File

@@ -5,239 +5,239 @@ expression: diagnostics
- kind:
ImplicitReturn: ~
location:
row: 18
row: 20
column: 4
end_location:
row: 19
row: 21
column: 16
fix:
content: "\n return None"
location:
row: 19
row: 21
column: 16
end_location:
row: 19
row: 21
column: 16
parent: ~
- kind:
ImplicitReturn: ~
location:
row: 25
row: 27
column: 8
end_location:
row: 25
row: 27
column: 15
fix:
content: "\n return None"
location:
row: 25
row: 27
column: 15
end_location:
row: 25
row: 27
column: 15
parent: ~
- kind:
ImplicitReturn: ~
location:
row: 34
row: 36
column: 4
end_location:
row: 34
row: 36
column: 11
fix:
content: "\n return None"
location:
row: 34
row: 36
column: 11
end_location:
row: 34
row: 36
column: 11
parent: ~
- kind:
ImplicitReturn: ~
location:
row: 39
row: 41
column: 4
end_location:
row: 41
row: 43
column: 20
fix:
content: "\n return None"
location:
row: 41
row: 43
column: 20
end_location:
row: 41
row: 43
column: 20
parent: ~
- kind:
ImplicitReturn: ~
location:
row: 50
row: 52
column: 8
end_location:
row: 50
row: 52
column: 15
fix:
content: "\n return None"
location:
row: 50
row: 52
column: 15
end_location:
row: 50
row: 52
column: 15
parent: ~
- kind:
ImplicitReturn: ~
location:
row: 57
row: 59
column: 4
end_location:
row: 57
row: 59
column: 22
fix:
content: "\n return None"
location:
row: 57
row: 59
column: 22
end_location:
row: 57
row: 59
column: 22
parent: ~
- kind:
ImplicitReturn: ~
location:
row: 64
row: 66
column: 4
end_location:
row: 64
row: 66
column: 21
fix:
content: "\n return None"
location:
row: 64
row: 66
column: 21
end_location:
row: 64
row: 66
column: 21
parent: ~
- kind:
ImplicitReturn: ~
location:
row: 80
row: 82
column: 4
end_location:
row: 83
row: 85
column: 14
fix:
content: "\n return None"
location:
row: 83
row: 85
column: 14
end_location:
row: 83
row: 85
column: 14
parent: ~
- kind:
ImplicitReturn: ~
location:
row: 111
row: 113
column: 4
end_location:
row: 114
row: 116
column: 16
fix:
content: "\n return None"
location:
row: 114
row: 116
column: 16
end_location:
row: 114
row: 116
column: 16
parent: ~
- kind:
ImplicitReturn: ~
location:
row: 118
row: 120
column: 4
end_location:
row: 124
row: 126
column: 19
fix:
content: "\n return None"
location:
row: 124
row: 126
column: 19
end_location:
row: 124
row: 126
column: 19
parent: ~
- kind:
ImplicitReturn: ~
location:
row: 128
row: 130
column: 4
end_location:
row: 131
row: 133
column: 16
fix:
content: "\n return None"
location:
row: 131
row: 133
column: 16
end_location:
row: 131
row: 133
column: 16
parent: ~
- kind:
ImplicitReturn: ~
location:
row: 135
row: 137
column: 4
end_location:
row: 141
row: 143
column: 19
fix:
content: "\n return None"
location:
row: 141
row: 143
column: 19
end_location:
row: 141
row: 143
column: 19
parent: ~
- kind:
ImplicitReturn: ~
location:
row: 260
row: 274
column: 4
end_location:
row: 261
row: 275
column: 20
fix:
content: "\n return None"
location:
row: 261
row: 275
column: 20
end_location:
row: 261
row: 275
column: 20
parent: ~
- kind:
ImplicitReturn: ~
location:
row: 277
row: 291
column: 12
end_location:
row: 277
row: 291
column: 19
fix:
content: "\n return None"
location:
row: 277
row: 291
column: 19
end_location:
row: 277
row: 291
column: 19
parent: ~

View File

@@ -35,6 +35,7 @@ define_violation!(
/// ```python
/// if isinstance(obj, (int, float)):
/// pass
/// ```
///
/// ## References
/// * [Python: "isinstance"](https://docs.python.org/3/library/functions.html#isinstance)

View File

@@ -91,9 +91,9 @@ define_violation!(
///
/// ### Example
/// ```python
/// if x = 1:
/// if x == 1:
/// return "Hello"
/// elif x = 2:
/// elif x == 2:
/// return "Goodbye"
/// else:
/// return "Goodnight"

View File

@@ -6,12 +6,13 @@ use log::debug;
use ruff_python::sys::KNOWN_STANDARD_LIBRARY;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use strum_macros::EnumIter;
use super::types::{ImportBlock, Importable};
use crate::settings::types::PythonVersion;
#[derive(
Debug, PartialOrd, Ord, PartialEq, Eq, Clone, Serialize, Deserialize, JsonSchema, Hash,
Debug, PartialOrd, Ord, PartialEq, Eq, Clone, Serialize, Deserialize, JsonSchema, Hash, EnumIter,
)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
pub enum ImportType {

View File

@@ -1,8 +1,7 @@
use std::borrow::Cow;
use rustpython_parser::ast::Location;
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::types::Range;
use crate::source_code::Locator;
@@ -17,7 +16,7 @@ pub struct Comment<'a> {
/// Collect all comments in an import block.
pub fn collect_comments<'a>(range: &Range, locator: &'a Locator) -> Vec<Comment<'a>> {
let contents = locator.slice(range);
lexer::make_tokenizer_located(contents, range.location)
lexer::lex_located(contents, Mode::Module, range.location)
.flatten()
.filter_map(|(start, tok, end)| {
if let Tok::Comment(value) = tok {

View File

@@ -1,6 +1,5 @@
use rustpython_parser::ast::{Location, Stmt};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::{lexer, Mode, Tok};
use super::types::TrailingComma;
use crate::ast::helpers::is_docstring_stmt;
@@ -13,7 +12,7 @@ pub fn trailing_comma(stmt: &Stmt, locator: &Locator) -> TrailingComma {
let contents = locator.slice(&Range::from_located(stmt));
let mut count: usize = 0;
let mut trailing_comma = TrailingComma::Absent;
for (_, tok, _) in lexer::make_tokenizer(contents).flatten() {
for (_, tok, _) in lexer::lex_located(contents, Mode::Module, stmt.location).flatten() {
if matches!(tok, Tok::Lpar) {
count += 1;
}
@@ -110,7 +109,7 @@ pub fn find_splice_location(body: &[Stmt], locator: &Locator) -> Location {
// Find the first token that isn't a comment or whitespace.
let contents = locator.skip(splice);
for (.., tok, end) in lexer::make_tokenizer_located(contents, splice).flatten() {
for (.., tok, end) in lexer::lex_located(contents, Mode::Module, splice).flatten() {
if matches!(tok, Tok::Comment(..) | Tok::Newline) {
splice = end;
} else {
@@ -124,8 +123,8 @@ pub fn find_splice_location(body: &[Stmt], locator: &Locator) -> Location {
#[cfg(test)]
mod tests {
use anyhow::Result;
use rustpython_parser as parser;
use rustpython_parser::ast::Location;
use rustpython_parser::parser;
use super::find_splice_location;
use crate::source_code::Locator;

View File

@@ -12,6 +12,7 @@ use normalize::normalize_imports;
use order::order_imports;
use settings::RelativeImportsOrder;
use sorting::cmp_either_import;
use strum::IntoEnumIterator;
use track::{Block, Trailer};
use types::EitherImport::{Import, ImportFrom};
use types::{AliasData, CommentSet, EitherImport, OrderedImportBlock, TrailingComma};
@@ -232,7 +233,7 @@ fn format_import_block(
target_version: PythonVersion,
) -> String {
// Categorize by type (e.g., first-party vs. third-party).
let block_by_type = categorize_imports(
let mut block_by_type = categorize_imports(
block,
src,
package,
@@ -247,7 +248,17 @@ fn format_import_block(
// Generate replacement source code.
let mut is_first_block = true;
for (import_type, import_block) in block_by_type {
let mut pending_lines_before = false;
for import_type in ImportType::iter() {
let import_block = block_by_type.remove(&import_type);
if !no_lines_before.contains(&import_type) {
pending_lines_before = true;
}
let Some(import_block) = import_block else {
continue;
};
let mut imports = order_imports(
import_block,
order_by_type,
@@ -280,8 +291,10 @@ fn format_import_block(
// Add a blank line between every section.
if is_first_block {
is_first_block = false;
} else if !no_lines_before.contains(&import_type) {
pending_lines_before = false;
} else if pending_lines_before {
output.push_str(stylist.line_ending());
pending_lines_before = false;
}
let mut lines_inserted = false;
@@ -791,6 +804,31 @@ mod tests {
Ok(())
}
#[test_case(Path::new("no_lines_before_with_empty_sections.py"))]
fn no_lines_before_with_empty_sections(path: &Path) -> Result<()> {
let snapshot = format!(
"no_lines_before_with_empty_sections.py_{}",
path.to_string_lossy()
);
let mut diagnostics = test_path(
Path::new("isort").join(path).as_path(),
&Settings {
isort: super::settings::Settings {
no_lines_before: BTreeSet::from([
ImportType::StandardLibrary,
ImportType::LocalFolder,
]),
..super::settings::Settings::default()
},
src: vec![test_resource_path("fixtures/isort")],
..Settings::for_rule(Rule::UnsortedImports)
},
)?;
diagnostics.sort_by_key(|diagnostic| diagnostic.location);
assert_yaml_snapshot!(snapshot, diagnostics);
Ok(())
}
#[test_case(Path::new("lines_after_imports_nothing_after.py"))]
#[test_case(Path::new("lines_after_imports_func_after.py"))]
#[test_case(Path::new("lines_after_imports_class_after.py"))]

View File

@@ -2,6 +2,7 @@ use std::fmt;
use log::error;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser as parser;
use rustpython_parser::ast::{Location, StmtKind, Suite};
use super::super::helpers;
@@ -16,13 +17,15 @@ use crate::violation::AlwaysAutofixableViolation;
define_violation!(
/// ## What it does
/// Adds any required imports, as specified by the user, to the top of the file.
/// Adds any required imports, as specified by the user, to the top of the
/// file.
///
/// ## Why is this bad?
/// In some projects, certain imports are required to be present in all files. For
/// example, some projects assume that `from __future__ import annotations` is enabled,
/// and thus require that import to be present in all files. Omitting a "required" import
/// (as specified by the user) can cause errors or unexpected behavior.
/// In some projects, certain imports are required to be present in all
/// files. For example, some projects assume that `from __future__
/// import annotations` is enabled, and thus require that import to be
/// present in all files. Omitting a "required" import (as specified by
/// the user) can cause errors or unexpected behavior.
///
/// ## Example
/// ```python
@@ -210,18 +213,26 @@ pub fn add_required_imports(
.required_imports
.iter()
.flat_map(|required_import| {
let Ok(body) = rustpython_parser::parser::parse_program(required_import, "<filename>") else {
let Ok(body) = parser::parse_program(required_import, "<filename>") else {
error!("Failed to parse required import: `{}`", required_import);
return vec![];
};
if body.is_empty() || body.len() > 1 {
error!("Expected require import to contain a single statement: `{}`", required_import);
error!(
"Expected require import to contain a single statement: `{}`",
required_import
);
return vec![];
}
match &body[0].node {
StmtKind::ImportFrom { module, names, level } => {
names.iter().filter_map(|name| {
StmtKind::ImportFrom {
module,
names,
level,
} => names
.iter()
.filter_map(|name| {
add_required_import(
&AnyImport::ImportFrom(ImportFrom {
module: module.as_ref().map(String::as_str),
@@ -238,10 +249,11 @@ pub fn add_required_imports(
settings,
autofix,
)
}).collect()
}
StmtKind::Import { names } => {
names.iter().filter_map(|name| {
})
.collect(),
StmtKind::Import { names } => names
.iter()
.filter_map(|name| {
add_required_import(
&AnyImport::Import(Import {
name: Alias {
@@ -256,10 +268,13 @@ pub fn add_required_imports(
settings,
autofix,
)
}).collect()
}
})
.collect(),
_ => {
error!("Expected required import to be in import-from style: `{}`", required_import);
error!(
"Expected required import to be in import-from style: `{}`",
required_import
);
vec![]
}
}

View File

@@ -0,0 +1,22 @@
---
source: crates/ruff/src/rules/isort/mod.rs
expression: diagnostics
---
- kind:
UnsortedImports: ~
location:
row: 1
column: 0
end_location:
row: 4
column: 0
fix:
content: "from __future__ import annotations\nfrom typing import Any\n\nfrom . import my_local_folder_object\n"
location:
row: 1
column: 0
end_location:
row: 4
column: 0
parent: ~

View File

@@ -10,10 +10,11 @@ define_violation!(
/// ## What it does
/// Checks for functions with a high `McCabe` complexity.
///
/// The `McCabe` complexity of a function is a measure of the complexity of the
/// control flow graph of the function. It is calculated by adding one to the
/// number of decision points in the function. A decision point is a place in
/// the code where the program has a choice of two or more paths to follow.
/// The `McCabe` complexity of a function is a measure of the complexity of
/// the control flow graph of the function. It is calculated by adding
/// one to the number of decision points in the function. A decision
/// point is a place in the code where the program has a choice of two
/// or more paths to follow.
///
/// ## Why is this bad?
/// Functions with a high complexity are hard to understand and maintain.
@@ -147,7 +148,7 @@ pub fn function_is_too_complex(
#[cfg(test)]
mod tests {
use anyhow::Result;
use rustpython_parser::parser;
use rustpython_parser as parser;
use super::get_complexity_number;

View File

@@ -1,6 +1,7 @@
use bitflags::bitflags;
use rustpython_parser::ast::Location;
use rustpython_parser::lexer::{LexResult, Tok};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use crate::ast::types::Range;
use crate::source_code::Locator;

View File

@@ -1,6 +1,6 @@
use rustpython_parser::lexer::{LexResult, Tok};
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use crate::ast::types::Range;
use crate::fix::Fix;

View File

@@ -1,5 +1,5 @@
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::error::ParseError;
use rustpython_parser::ParseError;
use crate::ast::types::Range;
use crate::registry::Diagnostic;

View File

@@ -1,9 +1,8 @@
#![allow(dead_code)]
use rustpython_parser::ast::Location;
use rustpython_parser::lexer::Tok;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::ast::Location;
use rustpython_parser::Tok;
use crate::ast::types::Range;
use crate::registry::DiagnosticKind;

View File

@@ -2,8 +2,7 @@ use anyhow::{bail, Result};
use libcst_native::{Call, Codegen, CodegenState, Dict, DictElement, Expression};
use ruff_python::string::strip_quotes_and_prefixes;
use rustpython_parser::ast::{Excepthandler, Expr};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::types::Range;
use crate::cst::matchers::{match_expr, match_module};
@@ -122,7 +121,7 @@ pub fn remove_exception_handler_assignment(
// End of the token just before the `as` to the semicolon.
let mut prev = None;
for (start, tok, end) in
lexer::make_tokenizer_located(contents, excepthandler.location).flatten()
lexer::lex_located(contents, Mode::Module, excepthandler.location).flatten()
{
if matches!(tok, Tok::As) {
fix_start = prev;

View File

@@ -1,10 +1,8 @@
use itertools::Itertools;
use log::error;
use rustpython_parser::ast::{ExprKind, Located, Stmt, StmtKind};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::ast::{ExprKind, Located, Stmt, StmtKind};
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::helpers::contains_effect;
use crate::ast::types::{BindingKind, Range, RefEquality, ScopeKind};
@@ -20,8 +18,8 @@ define_violation!(
/// Checks for the presence of unused variables in function scopes.
///
/// ## Why is this bad?
/// A variable that is defined but not used is likely a mistake, and should be
/// removed to avoid confusion.
/// A variable that is defined but not used is likely a mistake, and should
/// be removed to avoid confusion.
///
/// If a variable is intentionally defined-but-not-used, it should be
/// prefixed with an underscore, or some other value that adheres to the
@@ -61,8 +59,8 @@ impl AlwaysAutofixableViolation for UnusedVariable {
}
}
/// Return the start and end [`Location`] of the token after the next match of the predicate,
/// skipping over any bracketed expressions.
/// Return the start and end [`Location`] of the token after the next match of
/// the predicate, skipping over any bracketed expressions.
fn match_token_after<F, T>(located: &Located<T>, locator: &Locator, f: F) -> Range
where
F: Fn(Tok) -> bool,
@@ -74,9 +72,10 @@ where
let mut sqb_count = 0;
let mut brace_count = 0;
for ((_, tok, _), (start, _, end)) in lexer::make_tokenizer_located(contents, located.location)
.flatten()
.tuple_windows()
for ((_, tok, _), (start, _, end)) in
lexer::lex_located(contents, Mode::Module, located.location)
.flatten()
.tuple_windows()
{
match tok {
Tok::Lpar => {
@@ -123,8 +122,8 @@ where
unreachable!("No token after matched");
}
/// Return the start and end [`Location`] of the token matching the predicate, skipping over
/// any bracketed expressions.
/// Return the start and end [`Location`] of the token matching the predicate,
/// skipping over any bracketed expressions.
fn match_token<F, T>(located: &Located<T>, locator: &Locator, f: F) -> Range
where
F: Fn(Tok) -> bool,
@@ -136,7 +135,8 @@ where
let mut sqb_count = 0;
let mut brace_count = 0;
for (start, tok, end) in lexer::make_tokenizer_located(contents, located.location).flatten() {
for (start, tok, end) in lexer::lex_located(contents, Mode::Module, located.location).flatten()
{
match tok {
Tok::Lpar => {
par_count += 1;

View File

@@ -17,6 +17,8 @@ mod tests {
use crate::settings::Settings;
use crate::test::test_path;
#[test_case(Rule::LoggingTooManyArgs, Path::new("logging_too_many_args.py"); "PLE1205")]
#[test_case(Rule::LoggingTooFewArgs, Path::new("logging_too_few_args.py"); "PLE1206")]
#[test_case(Rule::ReturnInInit, Path::new("return_in_init.py"); "PLE0101")]
#[test_case(Rule::UselessImportAlias, Path::new("import_aliasing.py"); "PLC0414")]
#[test_case(Rule::UnnecessaryDirectLambdaCall, Path::new("unnecessary_direct_lambda_call.py"); "PLC3002")]
@@ -47,6 +49,7 @@ mod tests {
#[test_case(Rule::BidirectionalUnicode, Path::new("bidirectional_unicode.py"); "PLE2502")]
#[test_case(Rule::BadStrStripCall, Path::new("bad_str_strip_call.py"); "PLE01310")]
#[test_case(Rule::YieldInInit, Path::new("yield_in_init.py"); "PLE0100")]
#[test_case(Rule::RedefinedLoopName, Path::new("redefined_loop_name.py"); "PLW2901")]
fn rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
let diagnostics = test_path(

View File

@@ -1,12 +1,10 @@
use std::str::FromStr;
use ruff_macros::{define_violation, derive_message_formats};
use rustc_hash::FxHashMap;
use rustpython_common::cformat::{CFormatPart, CFormatSpec, CFormatStrOrBytes, CFormatString};
use rustpython_parser::ast::{Constant, Expr, ExprKind, Location, Operator};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::types::Range;
use crate::checkers::ast::Checker;
@@ -247,7 +245,7 @@ pub fn bad_string_format_type(checker: &mut Checker, expr: &Expr, right: &Expr)
// Grab each string segment (in case there's an implicit concatenation).
let content = checker.locator.slice(&Range::from_located(expr));
let mut strings: Vec<(Location, Location)> = vec![];
for (start, tok, end) in lexer::make_tokenizer_located(content, expr.location).flatten() {
for (start, tok, end) in lexer::lex_located(content, Mode::Module, expr.location).flatten() {
if matches!(tok, Tok::String { .. }) {
strings.push((start, end));
} else if matches!(tok, Tok::Percent) {

View File

@@ -0,0 +1,139 @@
use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword};
use ruff_macros::{define_violation, derive_message_formats};
use crate::ast::helpers::{is_logger_candidate, SimpleCallArgs};
use crate::ast::logging::LoggingLevel;
use crate::ast::types::Range;
use crate::checkers::ast::Checker;
use crate::registry::{Diagnostic, Rule};
use crate::rules::pyflakes::cformat::CFormatSummary;
use crate::violation::Violation;
// Declares the PLE1206 violation type; enforced by `logging_call` below.
define_violation!(
    /// ## What it does
    /// Checks for too few positional arguments for a `logging` format string.
    ///
    /// ## Why is this bad?
    /// A `TypeError` will be raised if the statement is run.
    ///
    /// ## Example
    /// ```python
    /// import logging
    ///
    /// try:
    ///     function()
    /// except Exception as e:
    ///     logging.error('%s error occurred: %s', e)  # [logging-too-few-args]
    ///     raise
    /// ```
    ///
    /// Use instead:
    /// ```python
    /// import logging
    ///
    /// try:
    ///     function()
    /// except Exception as e:
    ///     logging.error('%s error occurred: %s', type(e), e)
    ///     raise
    /// ```
    pub struct LoggingTooFewArgs;
);
impl Violation for LoggingTooFewArgs {
    /// Diagnostic text for PLE1206. The body must be a `format!` call so that
    /// `derive_message_formats` can extract the message template.
    #[derive_message_formats]
    fn message(&self) -> String {
        format!("Not enough arguments for `logging` format string")
    }
}
// Declares the PLE1205 violation type; enforced by `logging_call` below.
define_violation!(
    /// ## What it does
    /// Checks for too many positional arguments for a `logging` format string.
    ///
    /// ## Why is this bad?
    /// A `TypeError` will be raised if the statement is run.
    ///
    /// ## Example
    /// ```python
    /// import logging
    ///
    /// try:
    ///     function()
    /// except Exception as e:
    ///     logging.error('Error occurred: %s', type(e), e)  # [logging-too-many-args]
    ///     raise
    /// ```
    ///
    /// Use instead:
    /// ```python
    /// import logging
    ///
    /// try:
    ///     function()
    /// except Exception as e:
    ///     logging.error('%s error occurred: %s', type(e), e)
    ///     raise
    /// ```
    pub struct LoggingTooManyArgs;
);
impl Violation for LoggingTooManyArgs {
    /// Diagnostic text for PLE1205. The body must be a `format!` call so that
    /// `derive_message_formats` can extract the message template.
    #[derive_message_formats]
    fn message(&self) -> String {
        format!("Too many arguments for `logging` format string")
    }
}
/// Check a `logging` call for PLE1205/PLE1206: the number of positional
/// arguments passed after the format string must match the number of
/// `%`-style placeholders in it.
pub fn logging_call(checker: &mut Checker, func: &Expr, args: &[Expr], keywords: &[Keyword]) {
    // Only consider calls on objects that look like `logging` loggers.
    if !is_logger_candidate(func) {
        return;
    }
    if let ExprKind::Attribute { attr, .. } = &func.node {
        // Only the level methods (`.error`, `.warning`, ...) take a format
        // string; other logger attributes are out of scope.
        if LoggingLevel::from_str(attr.as_str()).is_some() {
            let call_args = SimpleCallArgs::new(args, keywords);
            // E1205 - E1206
            if let Some(msg) = call_args.get_argument("msg", Some(0)) {
                // Only string literals can be analyzed statically.
                if let ExprKind::Constant {
                    value: Constant::Str(value),
                    ..
                } = &msg.node
                {
                    if let Ok(summary) = CFormatSummary::try_from(value.as_str()) {
                        if summary.starred {
                            // `*args`-style expansion: the placeholder count
                            // can't be checked statically.
                            return;
                        }
                        if !call_args.kwargs.is_empty() {
                            // Keyword checking on logging strings is complicated by
                            // special keywords - out of scope.
                            return;
                        }
                        // Positional arguments beyond the format string itself.
                        // NOTE(review): assumes `msg` was found positionally here
                        // (a keyword `msg` would have made `kwargs` non-empty and
                        // hit the early return above), so `args` is non-empty and
                        // this subtraction cannot underflow — confirm against
                        // `SimpleCallArgs::get_argument`.
                        let message_args = call_args.args.len() - 1;
                        if checker.settings.rules.enabled(&Rule::LoggingTooManyArgs)
                            && summary.num_positional < message_args
                        {
                            checker.diagnostics.push(Diagnostic::new(
                                LoggingTooManyArgs,
                                Range::from_located(func),
                            ));
                        }
                        if checker.settings.rules.enabled(&Rule::LoggingTooFewArgs)
                            && summary.num_positional > message_args
                        {
                            checker.diagnostics.push(Diagnostic::new(
                                LoggingTooFewArgs,
                                Range::from_located(func),
                            ));
                        }
                    }
                }
            }
        }
    }
}

View File

@@ -7,10 +7,12 @@ pub use consider_using_sys_exit::{consider_using_sys_exit, ConsiderUsingSysExit}
pub use global_variable_not_assigned::GlobalVariableNotAssigned;
pub use invalid_all_format::{invalid_all_format, InvalidAllFormat};
pub use invalid_all_object::{invalid_all_object, InvalidAllObject};
pub use logging::{logging_call, LoggingTooFewArgs, LoggingTooManyArgs};
pub use magic_value_comparison::{magic_value_comparison, MagicValueComparison};
pub use merge_isinstance::{merge_isinstance, ConsiderMergingIsinstance};
pub use nonlocal_without_binding::NonlocalWithoutBinding;
pub use property_with_parameters::{property_with_parameters, PropertyWithParameters};
pub use redefined_loop_name::{redefined_loop_name, RedefinedLoopName};
pub use return_in_init::{return_in_init, ReturnInInit};
pub use too_many_arguments::{too_many_arguments, TooManyArguments};
pub use too_many_branches::{too_many_branches, TooManyBranches};
@@ -36,10 +38,12 @@ mod consider_using_sys_exit;
mod global_variable_not_assigned;
mod invalid_all_format;
mod invalid_all_object;
mod logging;
mod magic_value_comparison;
mod merge_isinstance;
mod nonlocal_without_binding;
mod property_with_parameters;
mod redefined_loop_name;
mod return_in_init;
mod too_many_arguments;
mod too_many_branches;

View File

@@ -0,0 +1,302 @@
use crate::ast::comparable::ComparableExpr;
use crate::ast::helpers::unparse_expr;
use crate::ast::types::{Node, Range};
use crate::ast::visitor;
use crate::ast::visitor::Visitor;
use crate::checkers::ast::Checker;
use crate::registry::Diagnostic;
use crate::settings::hashable::HashableRegex;
use crate::violation::Violation;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::ast::{Expr, ExprContext, ExprKind, Stmt, StmtKind, Withitem};
use serde::{Deserialize, Serialize};
use std::{fmt, iter};
/// The kind of construct that binds a target name; rendered via `Display`
/// in the PLW2901 diagnostic message for both the outer and inner binding.
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone, Copy)]
pub enum BindingKind {
    For,
    With,
    Assignment,
}
impl fmt::Display for BindingKind {
    /// Render the binding kind as the human-readable construct name used in
    /// the diagnostic message.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let label = match self {
            BindingKind::For => "for loop",
            BindingKind::With => "with statement",
            BindingKind::Assignment => "assignment",
        };
        fmt.write_str(label)
    }
}
// Declares the PLW2901 violation type; enforced by `redefined_loop_name` below.
define_violation!(
    /// ## What it does
    /// Checks for variables defined in `for` loops and `with` statements that
    /// get overwritten within the body, for example by another `for` loop or
    /// `with` statement or by direct assignment.
    ///
    /// ## Why is this bad?
    /// Redefinition of a loop variable inside the loop's body causes its value
    /// to differ from the original loop iteration for the remainder of the
    /// block, in a way that will likely cause bugs.
    ///
    /// In Python, unlike many other languages, `for` loops and `with`
    /// statements don't define their own scopes. Therefore, a nested loop that
    /// uses the same target variable name as an outer loop will reuse the same
    /// actual variable, and the value from the last iteration will "leak out"
    /// into the remainder of the enclosing loop.
    ///
    /// While this mistake is easy to spot in small examples, it can be hidden
    /// in larger blocks of code where the definition and redefinition of the
    /// variable may not be visible at the same time.
    ///
    /// ## Example
    /// ```python
    /// for i in range(10):
    ///     i = 9
    ///     print(i)  # prints 9 every iteration
    ///
    /// for i in range(10):
    ///     for i in range(10):  # original value overwritten
    ///         pass
    ///     print(i)  # also prints 9 every iteration
    ///
    /// with path1.open() as f:
    ///     with path2.open() as f:
    ///         f = path2.open()
    ///     print(f.readline())  # prints a line from path2
    /// ```
    pub struct RedefinedLoopName {
        pub name: String,
        pub outer_kind: BindingKind,
        pub inner_kind: BindingKind,
    }
);
impl Violation for RedefinedLoopName {
    /// Diagnostic text for PLW2901, e.g.
    /// "Outer for loop variable `i` overwritten by inner with statement target".
    /// The body must be a `format!` call so `derive_message_formats` can
    /// extract the message template.
    #[derive_message_formats]
    fn message(&self) -> String {
        let RedefinedLoopName {
            name,
            outer_kind,
            inner_kind,
        } = self;
        format!("Outer {outer_kind} variable `{name}` overwritten by inner {inner_kind} target")
    }
}
/// An assignment target together with the kind of construct that bound it.
struct ExprWithBindingKind<'a> {
    // The target expression, e.g. the `i` in `for i in ...`.
    expr: &'a Expr,
    // Whether the target came from a `for`, `with`, or plain assignment.
    binding_kind: BindingKind,
}

/// Visitor state for collecting assignment targets inside the body of an
/// outer `for`/`with` statement.
struct InnerForWithAssignTargetsVisitor<'a> {
    // Targets matching this regex are treated as intentional dummies
    // (e.g. `_`) and skipped.
    dummy_variable_rgx: &'a HashableRegex,
    // Accumulated targets, in visit order.
    assignment_targets: Vec<ExprWithBindingKind<'a>>,
}
// Collects assignment targets from the *body* of an outer `for`/`with`
// statement so they can later be compared against the outer targets.
impl<'a, 'b> Visitor<'b> for InnerForWithAssignTargetsVisitor<'a>
where
    'b: 'a,
{
    fn visit_stmt(&mut self, stmt: &'b Stmt) {
        // Collect target expressions.
        match &stmt.node {
            // For and async for.
            StmtKind::For { target, .. } | StmtKind::AsyncFor { target, .. } => {
                self.assignment_targets.extend(
                    assignment_targets_from_expr(target, self.dummy_variable_rgx).map(|expr| {
                        ExprWithBindingKind {
                            expr,
                            binding_kind: BindingKind::For,
                        }
                    }),
                );
            }
            // With.
            StmtKind::With { items, .. } => {
                self.assignment_targets.extend(
                    assignment_targets_from_with_items(items, self.dummy_variable_rgx).map(
                        |expr| ExprWithBindingKind {
                            expr,
                            binding_kind: BindingKind::With,
                        },
                    ),
                );
            }
            // Assignment, augmented assignment, and annotated assignment.
            StmtKind::Assign { targets, .. } => {
                self.assignment_targets.extend(
                    assignment_targets_from_assign_targets(targets, self.dummy_variable_rgx).map(
                        |expr| ExprWithBindingKind {
                            expr,
                            binding_kind: BindingKind::Assignment,
                        },
                    ),
                );
            }
            StmtKind::AugAssign { target, .. } | StmtKind::AnnAssign { target, .. } => {
                self.assignment_targets.extend(
                    assignment_targets_from_expr(target, self.dummy_variable_rgx).map(|expr| {
                        ExprWithBindingKind {
                            expr,
                            binding_kind: BindingKind::Assignment,
                        }
                    }),
                );
            }
            _ => {}
        }
        // Decide whether to recurse.
        match &stmt.node {
            // Don't recurse into blocks that create a new scope: class bodies
            // and function bodies (both sync and async) scope their own
            // bindings, so targets inside them don't shadow the outer loop
            // variable. The original code only skipped `FunctionDef`, which
            // caused false positives for targets inside nested `async def`s.
            StmtKind::ClassDef { .. } => {}
            StmtKind::FunctionDef { .. } | StmtKind::AsyncFunctionDef { .. } => {}
            // Otherwise, do recurse.
            _ => {
                visitor::walk_stmt(self, stmt);
            }
        }
    }
}
/// Expand an assignment-target expression into the individual expressions it
/// binds, skipping names that match the dummy-variable regex.
fn assignment_targets_from_expr<'a, U>(
    expr: &'a Expr<U>,
    dummy_variable_rgx: &'a HashableRegex,
) -> Box<dyn Iterator<Item = &'a Expr<U>> + 'a> {
    // The Box is necessary to ensure the match arms have the same return type - we can't use
    // a cast to "impl Iterator", since at the time of writing that is only allowed for
    // return types and argument types.
    match &expr.node {
        // Attribute and subscript stores (`a.b = ...`, `a[0] = ...`) are
        // targets in their own right.
        ExprKind::Attribute {
            ctx: ExprContext::Store,
            ..
        }
        | ExprKind::Subscript {
            ctx: ExprContext::Store,
            ..
        } => Box::new(iter::once(expr)),
        // A starred target (`*rest = ...`) binds its inner expression.
        ExprKind::Starred {
            ctx: ExprContext::Store,
            value,
            ..
        } => Box::new(iter::once(&**value)),
        // Plain names are targets unless they look like intentional dummies.
        ExprKind::Name {
            ctx: ExprContext::Store,
            id,
            ..
        } => {
            if dummy_variable_rgx.is_match(id) {
                Box::new(iter::empty())
            } else {
                Box::new(iter::once(expr))
            }
        }
        // List and tuple targets recurse into their elements.
        ExprKind::List {
            ctx: ExprContext::Store,
            elts,
            ..
        }
        | ExprKind::Tuple {
            ctx: ExprContext::Store,
            elts,
            ..
        } => Box::new(
            elts.iter()
                .flat_map(|elt| assignment_targets_from_expr(elt, dummy_variable_rgx)),
        ),
        // Anything else (loads, literals, calls) binds nothing.
        _ => Box::new(iter::empty()),
    }
}
/// Expand the targets bound by a `with` statement's items (`with x as t:`).
/// Items without an `as` clause bind nothing and are skipped.
fn assignment_targets_from_with_items<'a, U>(
    items: &'a [Withitem<U>],
    dummy_variable_rgx: &'a HashableRegex,
) -> impl Iterator<Item = &'a Expr<U>> + 'a {
    items
        .iter()
        .filter_map(|item| item.optional_vars.as_deref())
        .flat_map(move |expr| assignment_targets_from_expr(expr, dummy_variable_rgx))
}
/// Expand the targets of an assignment statement. A single assignment may
/// chain multiple targets (`a = b = ...`); each is expanded recursively.
fn assignment_targets_from_assign_targets<'a, U>(
    targets: &'a [Expr<U>],
    dummy_variable_rgx: &'a HashableRegex,
) -> impl Iterator<Item = &'a Expr<U>> + 'a {
    targets
        .iter()
        .flat_map(|target| assignment_targets_from_expr(target, dummy_variable_rgx))
}
/// PLW2901: flag targets of an outer `for`/`with` statement that are
/// overwritten inside its body by another `for`, `with`, or assignment.
///
/// Panics if `node` is not a `With`, `For`, or `AsyncFor` statement — callers
/// are expected to dispatch only those node kinds here.
pub fn redefined_loop_name<'a, 'b>(checker: &'a mut Checker<'b>, node: &Node<'b>) {
    // Gather (1) the targets bound by the outer statement itself and
    // (2) every target re-bound anywhere inside its body.
    let (outer_assignment_targets, inner_assignment_targets) = match node {
        Node::Stmt(stmt) => match &stmt.node {
            // With.
            StmtKind::With { items, body, .. } => {
                let outer_assignment_targets: Vec<ExprWithBindingKind<'a>> =
                    assignment_targets_from_with_items(items, &checker.settings.dummy_variable_rgx)
                        .map(|expr| ExprWithBindingKind {
                            expr,
                            binding_kind: BindingKind::With,
                        })
                        .collect();
                let mut visitor = InnerForWithAssignTargetsVisitor {
                    dummy_variable_rgx: &checker.settings.dummy_variable_rgx,
                    assignment_targets: vec![],
                };
                for stmt in body {
                    visitor.visit_stmt(stmt);
                }
                (outer_assignment_targets, visitor.assignment_targets)
            }
            // For and async for.
            StmtKind::For { target, body, .. } | StmtKind::AsyncFor { target, body, .. } => {
                let outer_assignment_targets: Vec<ExprWithBindingKind<'a>> =
                    assignment_targets_from_expr(target, &checker.settings.dummy_variable_rgx)
                        .map(|expr| ExprWithBindingKind {
                            expr,
                            binding_kind: BindingKind::For,
                        })
                        .collect();
                let mut visitor = InnerForWithAssignTargetsVisitor {
                    dummy_variable_rgx: &checker.settings.dummy_variable_rgx,
                    assignment_targets: vec![],
                };
                for stmt in body {
                    visitor.visit_stmt(stmt);
                }
                (outer_assignment_targets, visitor.assignment_targets)
            }
            _ => panic!(
                "redefined_loop_name called on Statement that is not a With, For, or AsyncFor"
            ),
        },
        Node::Expr(_) => panic!("redefined_loop_name called on Node that is not a Statement"),
    };
    // Report every inner target that structurally matches an outer target,
    // anchoring the diagnostic at the inner (re-binding) expression.
    for outer_assignment_target in &outer_assignment_targets {
        for inner_assignment_target in &inner_assignment_targets {
            // Compare the targets structurally.
            if ComparableExpr::from(outer_assignment_target.expr)
                .eq(&(ComparableExpr::from(inner_assignment_target.expr)))
            {
                checker.diagnostics.push(Diagnostic::new(
                    RedefinedLoopName {
                        name: unparse_expr(outer_assignment_target.expr, checker.stylist),
                        outer_kind: outer_assignment_target.binding_kind,
                        inner_kind: inner_assignment_target.binding_kind,
                    },
                    Range::from_located(inner_assignment_target.expr),
                ));
            }
        }
    }
}

View File

@@ -120,7 +120,7 @@ pub fn too_many_branches(
#[cfg(test)]
mod tests {
use anyhow::Result;
use rustpython_parser::parser;
use rustpython_parser as parser;
use super::num_branches;

View File

@@ -55,7 +55,7 @@ pub fn too_many_return_statements(
#[cfg(test)]
mod tests {
use anyhow::Result;
use rustpython_parser::parser;
use rustpython_parser as parser;
use super::num_returns;

View File

@@ -123,7 +123,7 @@ pub fn too_many_statements(
#[cfg(test)]
mod tests {
use anyhow::Result;
use rustpython_parser::parser;
use rustpython_parser as parser;
use super::num_statements;

View File

@@ -1,6 +1,7 @@
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::ast::{Alias, Stmt};
use rustpython_parser::ast::{Alias, AliasData, Located, Stmt, StmtKind};
use crate::ast::helpers::{create_stmt, unparse_stmt};
use crate::ast::types::Range;
use crate::checkers::ast::Checker;
use crate::fix::Fix;
@@ -58,7 +59,21 @@ pub fn use_from_import(checker: &mut Checker, stmt: &Stmt, alias: &Alias, names:
);
if fixable && checker.patch(diagnostic.kind.rule()) {
diagnostic.amend(Fix::replacement(
format!("from {module} import {asname}"),
unparse_stmt(
&create_stmt(StmtKind::ImportFrom {
module: Some(module.to_string()),
names: vec![Located::new(
stmt.location,
stmt.end_location.unwrap(),
AliasData {
name: asname.into(),
asname: None,
},
)],
level: Some(0),
}),
checker.stylist,
),
stmt.location,
stmt.end_location.unwrap(),
));

View File

@@ -1,5 +1,5 @@
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::ast::{ExcepthandlerKind, Stmt, StmtKind};
use rustpython_parser::ast::{ExcepthandlerKind, MatchCase, Stmt, StmtKind};
use crate::ast::helpers;
use crate::checkers::ast::Checker;
@@ -23,6 +23,9 @@ fn loop_exits_early(body: &[Stmt]) -> bool {
body.iter().any(|stmt| match &stmt.node {
StmtKind::If { body, orelse, .. } => loop_exits_early(body) || loop_exits_early(orelse),
StmtKind::With { body, .. } | StmtKind::AsyncWith { body, .. } => loop_exits_early(body),
StmtKind::Match { cases, .. } => cases
.iter()
.any(|MatchCase { body, .. }| loop_exits_early(body)),
StmtKind::Try {
body,
handlers,

View File

@@ -0,0 +1,15 @@
---
source: crates/ruff/src/rules/pylint/mod.rs
expression: diagnostics
---
- kind:
LoggingTooManyArgs: ~
location:
row: 3
column: 0
end_location:
row: 3
column: 15
fix: ~
parent: ~

View File

@@ -0,0 +1,15 @@
---
source: crates/ruff/src/rules/pylint/mod.rs
expression: diagnostics
---
- kind:
LoggingTooFewArgs: ~
location:
row: 3
column: 0
end_location:
row: 3
column: 15
fix: ~
parent: ~

View File

@@ -0,0 +1,330 @@
---
source: crates/ruff/src/rules/pylint/mod.rs
expression: diagnostics
---
- kind:
RedefinedLoopName:
name: i
outer_kind: For
inner_kind: For
location:
row: 3
column: 8
end_location:
row: 3
column: 9
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: i
outer_kind: With
inner_kind: For
location:
row: 8
column: 8
end_location:
row: 8
column: 9
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: i
outer_kind: For
inner_kind: With
location:
row: 13
column: 17
end_location:
row: 13
column: 18
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: i
outer_kind: With
inner_kind: With
location:
row: 18
column: 17
end_location:
row: 18
column: 18
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: i
outer_kind: For
inner_kind: For
location:
row: 34
column: 12
end_location:
row: 34
column: 13
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: i
outer_kind: For
inner_kind: For
location:
row: 40
column: 12
end_location:
row: 40
column: 13
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: j
outer_kind: For
inner_kind: For
location:
row: 41
column: 16
end_location:
row: 41
column: 17
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: i
outer_kind: For
inner_kind: Assignment
location:
row: 46
column: 4
end_location:
row: 46
column: 5
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: i
outer_kind: For
inner_kind: Assignment
location:
row: 50
column: 4
end_location:
row: 50
column: 5
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: i
outer_kind: For
inner_kind: Assignment
location:
row: 54
column: 4
end_location:
row: 54
column: 5
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: i
outer_kind: For
inner_kind: For
location:
row: 58
column: 8
end_location:
row: 58
column: 9
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: i
outer_kind: For
inner_kind: For
location:
row: 63
column: 14
end_location:
row: 63
column: 15
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: i
outer_kind: For
inner_kind: For
location:
row: 68
column: 8
end_location:
row: 68
column: 9
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: i
outer_kind: For
inner_kind: For
location:
row: 73
column: 8
end_location:
row: 73
column: 9
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: i
outer_kind: For
inner_kind: For
location:
row: 78
column: 8
end_location:
row: 78
column: 9
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: j
outer_kind: For
inner_kind: For
location:
row: 78
column: 11
end_location:
row: 78
column: 12
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: j
outer_kind: For
inner_kind: For
location:
row: 95
column: 8
end_location:
row: 95
column: 9
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: i
outer_kind: For
inner_kind: Assignment
location:
row: 112
column: 12
end_location:
row: 112
column: 13
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: i
outer_kind: For
inner_kind: For
location:
row: 118
column: 16
end_location:
row: 118
column: 17
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: "a[0]"
outer_kind: For
inner_kind: Assignment
location:
row: 133
column: 4
end_location:
row: 133
column: 8
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: "a['i']"
outer_kind: For
inner_kind: Assignment
location:
row: 138
column: 4
end_location:
row: 138
column: 10
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: a.i
outer_kind: For
inner_kind: Assignment
location:
row: 143
column: 4
end_location:
row: 143
column: 7
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: a.i.j
outer_kind: For
inner_kind: Assignment
location:
row: 148
column: 4
end_location:
row: 148
column: 9
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: a.i
outer_kind: For
inner_kind: Assignment
location:
row: 153
column: 4
end_location:
row: 153
column: 8
fix: ~
parent: ~
- kind:
RedefinedLoopName:
name: a.i
outer_kind: For
inner_kind: Assignment
location:
row: 155
column: 4
end_location:
row: 155
column: 7
fix: ~
parent: ~

View File

@@ -4,8 +4,7 @@ use libcst_native::{
SmallStatement, Statement, Suite,
};
use rustpython_parser::ast::{Expr, Keyword, Location};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::types::Range;
use crate::autofix::helpers::remove_argument;
@@ -110,7 +109,7 @@ pub fn remove_import_members(contents: &str, members: &[&str]) -> String {
// Find all Tok::Name tokens that are not preceded by Tok::As, and all
// Tok::Comma tokens.
let mut prev_tok = None;
for (start, tok, end) in lexer::make_tokenizer(contents)
for (start, tok, end) in lexer::lex(contents, Mode::Module)
.flatten()
.skip_while(|(_, tok, _)| !matches!(tok, Tok::Import))
{

View File

@@ -1,5 +1,6 @@
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::lexer::{LexResult, Tok};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use crate::ast::types::Range;
use crate::fix::Fix;

View File

@@ -4,8 +4,7 @@ use rustpython_common::format::{
FieldName, FieldNamePart, FieldType, FormatPart, FormatString, FromTemplate,
};
use rustpython_parser::ast::{Constant, Expr, ExprKind, KeywordData};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::types::Range;
use crate::checkers::ast::Checker;
@@ -129,7 +128,7 @@ fn try_convert_to_f_string(checker: &Checker, expr: &Expr) -> Option<String> {
let contents = checker.locator.slice(&Range::from_located(value));
// Tokenize: we need to avoid trying to fix implicit string concatenations.
if lexer::make_tokenizer(contents)
if lexer::lex(contents, Mode::Module)
.flatten()
.filter(|(_, tok, _)| matches!(tok, Tok::String { .. }))
.count()

View File

@@ -2,8 +2,7 @@ use std::fmt;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::{lexer, Mode, Tok};
use serde::{Deserialize, Serialize};
use crate::ast::types::Range;
@@ -118,7 +117,7 @@ pub fn native_literals(
// safely remove the outer call in this situation. We're following pyupgrade
// here and skip.
let arg_code = checker.locator.slice(&Range::from_located(arg));
if lexer::make_tokenizer(arg_code)
if lexer::lex_located(arg_code, Mode::Module, arg.location)
.flatten()
.filter(|(_, tok, _)| matches!(tok, Tok::String { .. }))
.count()

View File

@@ -4,8 +4,7 @@ use log::error;
use num_bigint::{BigInt, Sign};
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::ast::{Cmpop, Constant, Expr, ExprKind, Located, Location, Stmt};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::types::{Range, RefEquality};
use crate::ast::whitespace::indentation;
@@ -67,7 +66,7 @@ fn metadata<T>(locator: &Locator, located: &Located<T>) -> Option<BlockMetadata>
let mut else_ = None;
for (start, tok, _) in
lexer::make_tokenizer_located(text, Location::new(located.location.row(), 0))
lexer::lex_located(text, Mode::Module, Location::new(located.location.row(), 0))
.flatten()
.filter(|(_, tok, _)| {
!matches!(

View File

@@ -7,8 +7,7 @@ use rustpython_common::cformat::{
CConversionFlags, CFormatPart, CFormatPrecision, CFormatQuantity, CFormatString,
};
use rustpython_parser::ast::{Constant, Expr, ExprKind, Location};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::types::Range;
use crate::ast::whitespace::indentation;
@@ -319,8 +318,9 @@ pub(crate) fn printf_string_formatting(
// Grab each string segment (in case there's an implicit concatenation).
let mut strings: Vec<(Location, Location)> = vec![];
let mut extension = None;
for (start, tok, end) in lexer::make_tokenizer_located(
for (start, tok, end) in lexer::lex_located(
checker.locator.slice(&Range::from_located(expr)),
Mode::Module,
expr.location,
)
.flatten()

View File

@@ -4,8 +4,7 @@ use anyhow::{anyhow, Result};
use log::error;
use ruff_macros::{define_violation, derive_message_formats};
use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword, Location};
use rustpython_parser::lexer;
use rustpython_parser::token::Tok;
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::helpers::find_keyword;
use crate::ast::types::Range;
@@ -142,7 +141,7 @@ fn create_remove_param_fix(locator: &Locator, expr: &Expr, mode_param: &Expr) ->
let mut fix_end: Option<Location> = None;
let mut is_first_arg: bool = false;
let mut delete_first_arg: bool = false;
for (start, tok, end) in lexer::make_tokenizer_located(content, expr.location).flatten() {
for (start, tok, end) in lexer::lex_located(content, Mode::Module, expr.location).flatten() {
if start == mode_param.location {
if is_first_arg {
delete_first_arg = true;

View File

@@ -1,13 +1,12 @@
use rustpython_parser as parser;
use rustpython_parser::ast::{Mod, Suite};
use rustpython_parser::error::ParseError;
use rustpython_parser::lexer::LexResult;
use rustpython_parser::mode::Mode;
use rustpython_parser::{lexer, parser};
use rustpython_parser::{lexer, Mode, ParseError};
/// Collect tokens up to and including the first error.
pub fn tokenize(contents: &str) -> Vec<LexResult> {
let mut tokens: Vec<LexResult> = vec![];
for tok in lexer::make_tokenizer(contents) {
for tok in lexer::lex(contents, Mode::Module) {
let is_err = tok.is_err();
tokens.push(tok);
if is_err {

View File

@@ -5,7 +5,6 @@ use std::path::{Path, PathBuf};
use anyhow::{anyhow, Result};
use serde::{Deserialize, Serialize};
use crate::fs;
use crate::settings::options::Options;
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
@@ -30,13 +29,13 @@ impl Pyproject {
/// Parse a `ruff.toml` file.
fn parse_ruff_toml<P: AsRef<Path>>(path: P) -> Result<Options> {
let contents = fs::read_file(path)?;
let contents = std::fs::read_to_string(path)?;
toml::from_str(&contents).map_err(Into::into)
}
/// Parse a `pyproject.toml` file.
fn parse_pyproject_toml<P: AsRef<Path>>(path: P) -> Result<Pyproject> {
let contents = fs::read_file(path)?;
let contents = std::fs::read_to_string(path)?;
toml::from_str(&contents).map_err(Into::into)
}

View File

@@ -1258,7 +1258,7 @@ impl<'a> Generator<'a> {
#[cfg(test)]
mod tests {
use rustpython_parser::parser;
use rustpython_parser as parser;
use crate::source_code::stylist::{Indentation, LineEnding, Quote};
use crate::source_code::Generator;

View File

@@ -2,7 +2,8 @@
//! are omitted from the AST (e.g., commented lines).
use rustpython_parser::ast::Location;
use rustpython_parser::lexer::{LexResult, Tok};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
pub struct Indexer {
commented_lines: Vec<usize>,
@@ -49,15 +50,15 @@ impl From<&[LexResult]> for Indexer {
#[cfg(test)]
mod tests {
use rustpython_parser::lexer;
use rustpython_parser::lexer::LexResult;
use rustpython_parser::{lexer, Mode};
use crate::source_code::Indexer;
#[test]
fn continuation() {
let contents = r#"x = 1"#;
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let indexer: Indexer = lxr.as_slice().into();
assert_eq!(indexer.continuation_lines(), Vec::<usize>::new().as_slice());
@@ -69,7 +70,7 @@ x = 1
y = 2
"#
.trim();
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let indexer: Indexer = lxr.as_slice().into();
assert_eq!(indexer.continuation_lines(), Vec::<usize>::new().as_slice());
@@ -89,7 +90,7 @@ if True:
)
"#
.trim();
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let indexer: Indexer = lxr.as_slice().into();
assert_eq!(indexer.continuation_lines(), [1, 5, 6, 11]);
@@ -109,7 +110,7 @@ x = 1; \
import os
"#
.trim();
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let indexer: Indexer = lxr.as_slice().into();
assert_eq!(indexer.continuation_lines(), [9, 12]);
}

View File

@@ -6,8 +6,8 @@ mod stylist;
pub(crate) use generator::Generator;
pub(crate) use indexer::Indexer;
pub(crate) use locator::Locator;
use rustpython_parser::error::ParseError;
use rustpython_parser::parser;
use rustpython_parser as parser;
use rustpython_parser::ParseError;
pub(crate) use stylist::{LineEnding, Stylist};
/// Run round-trip source code generation on a given Python code.

View File

@@ -5,8 +5,7 @@ use std::ops::Deref;
use once_cell::unsync::OnceCell;
use rustpython_parser::ast::Location;
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use rustpython_parser::{lexer, Mode, Tok};
use crate::ast::types::Range;
use crate::rules::pydocstyle::helpers::leading_quote;
@@ -165,7 +164,7 @@ impl Deref for LineEnding {
/// Detect the indentation style of the given tokens.
fn detect_indentation(contents: &str, locator: &Locator) -> Option<Indentation> {
for (_start, tok, end) in lexer::make_tokenizer(contents).flatten() {
for (_start, tok, end) in lexer::lex(contents, Mode::Module).flatten() {
if let Tok::Indent { .. } = tok {
let start = Location::new(end.row(), 0);
let whitespace = locator.slice(&Range::new(start, end));
@@ -177,7 +176,7 @@ fn detect_indentation(contents: &str, locator: &Locator) -> Option<Indentation>
/// Detect the quotation style of the given tokens.
fn detect_quote(contents: &str, locator: &Locator) -> Option<Quote> {
for (start, tok, end) in lexer::make_tokenizer(contents).flatten() {
for (start, tok, end) in lexer::lex(contents, Mode::Module).flatten() {
if let Tok::String { .. } = tok {
let content = locator.slice(&Range::new(start, end));
if let Some(pattern) = leading_quote(content) {

View File

@@ -12,7 +12,7 @@ use crate::packaging::detect_package_root;
use crate::registry::Diagnostic;
use crate::settings::{flags, Settings};
use crate::source_code::{Indexer, Locator, Stylist};
use crate::{directives, fs, rustpython_helpers};
use crate::{directives, rustpython_helpers};
pub fn test_resource_path(path: impl AsRef<Path>) -> std::path::PathBuf {
Path::new("./resources/test/").join(path)
@@ -22,7 +22,7 @@ pub fn test_resource_path(path: impl AsRef<Path>) -> std::path::PathBuf {
/// asserts that autofixes converge after 10 iterations.
pub fn test_path(path: &Path, settings: &Settings) -> Result<Vec<Diagnostic>> {
let path = test_resource_path("fixtures").join(path);
let contents = fs::read_file(&path)?;
let contents = std::fs::read_to_string(&path)?;
let tokens: Vec<LexResult> = rustpython_helpers::tokenize(&contents);
let locator = Locator::new(&contents);
let stylist = Stylist::from_contents(&contents, &locator);

View File

@@ -1,6 +1,6 @@
[package]
name = "ruff_cli"
version = "0.0.251"
version = "0.0.252"
authors = ["Charlie Marsh <charlie.r.marsh@gmail.com>"]
edition = { workspace = true }
rust-version = { workspace = true }
@@ -50,6 +50,7 @@ regex = { workspace = true }
rustc-hash = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
shellexpand = { version = "3.0.0" }
similar = { version = "2.2.1" }
strum = { version = "0.24.1" }
textwrap = { version = "0.16.0" }

View File

@@ -11,23 +11,18 @@ use path_absolutize::Absolutize;
use ruff::message::Message;
use ruff::settings::{flags, AllSettings, Settings};
use serde::{Deserialize, Serialize};
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;
const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION");
#[derive(Serialize, Deserialize)]
struct CacheMetadata {
mtime: i64,
}
#[derive(Serialize)]
struct CheckResultRef<'a> {
metadata: &'a CacheMetadata,
messages: &'a [Message],
}
#[derive(Deserialize)]
struct CheckResult {
metadata: CacheMetadata,
messages: Vec<Message>,
}
@@ -38,6 +33,7 @@ fn content_dir() -> &'static Path {
fn cache_key<P: AsRef<Path>>(
path: P,
package: Option<&P>,
metadata: &fs::Metadata,
settings: &Settings,
autofix: flags::Autofix,
) -> u64 {
@@ -48,6 +44,9 @@ fn cache_key<P: AsRef<Path>>(
.as_ref()
.map(|path| path.as_ref().absolutize().unwrap())
.hash(&mut hasher);
FileTime::from_last_modification_time(metadata).hash(&mut hasher);
#[cfg(unix)]
metadata.permissions().mode().hash(&mut hasher);
settings.hash(&mut hasher);
autofix.hash(&mut hasher);
hasher.finish()
@@ -99,23 +98,16 @@ pub fn get<P: AsRef<Path>>(
) -> Option<Vec<Message>> {
let encoded = read_sync(
&settings.cli.cache_dir,
cache_key(path, package, &settings.lib, autofix),
cache_key(path, package, metadata, &settings.lib, autofix),
)
.ok()?;
let (mtime, messages) = match bincode::deserialize::<CheckResult>(&encoded[..]) {
Ok(CheckResult {
metadata: CacheMetadata { mtime },
messages,
}) => (mtime, messages),
match bincode::deserialize::<CheckResult>(&encoded[..]) {
Ok(CheckResult { messages }) => Some(messages),
Err(e) => {
error!("Failed to deserialize encoded cache entry: {e:?}");
return None;
None
}
};
if FileTime::from_last_modification_time(metadata).unix_seconds() != mtime {
return None;
}
Some(messages)
}
/// Set a value in the cache.
@@ -127,15 +119,10 @@ pub fn set<P: AsRef<Path>>(
autofix: flags::Autofix,
messages: &[Message],
) {
let check_result = CheckResultRef {
metadata: &CacheMetadata {
mtime: FileTime::from_last_modification_time(metadata).unix_seconds(),
},
messages,
};
let check_result = CheckResultRef { messages };
if let Err(e) = write_sync(
&settings.cli.cache_dir,
cache_key(path, package, &settings.lib, autofix),
cache_key(path, package, metadata, &settings.lib, autofix),
&bincode::serialize(&check_result).unwrap(),
) {
error!("Failed to write to cache: {e:?}");
@@ -146,11 +133,12 @@ pub fn set<P: AsRef<Path>>(
pub fn del<P: AsRef<Path>>(
path: P,
package: Option<&P>,
metadata: &fs::Metadata,
settings: &AllSettings,
autofix: flags::Autofix,
) {
drop(del_sync(
&settings.cli.cache_dir,
cache_key(path, package, &settings.lib, autofix),
cache_key(path, package, metadata, &settings.lib, autofix),
));
}

View File

@@ -81,7 +81,7 @@ pub fn lint_path(
};
// Read the file from disk.
let contents = fs::read_file(path)?;
let contents = std::fs::read_to_string(path)?;
// Lint the file.
let (
@@ -129,7 +129,9 @@ pub fn lint_path(
);
// Purge the cache.
cache::del(path, package.as_ref(), settings, autofix.into());
if let Some(metadata) = metadata {
cache::del(path, package.as_ref(), &metadata, settings, autofix.into());
}
} else {
// Re-populate the cache.
if let Some(metadata) = metadata {

View File

@@ -1,7 +1,8 @@
use std::path::Path;
use std::path::{Path, PathBuf};
use anyhow::Result;
use path_absolutize::path_dedot;
use ruff::resolver::{
resolve_settings_with_processor, ConfigProcessor, PyprojectDiscovery, Relativity,
};
@@ -29,8 +30,12 @@ pub fn resolve(
// Second priority: the user specified a `pyproject.toml` file. Use that
// `pyproject.toml` for _all_ configuration, and resolve paths relative to the
// current working directory. (This matches ESLint's behavior.)
if let Some(pyproject) = config {
let settings = resolve_settings_with_processor(pyproject, &Relativity::Cwd, overrides)?;
if let Some(pyproject) = config
.map(|config| config.display().to_string())
.map(|config| shellexpand::full(&config).map(|config| PathBuf::from(config.as_ref())))
.transpose()?
{
let settings = resolve_settings_with_processor(&pyproject, &Relativity::Cwd, overrides)?;
return Ok(PyprojectDiscovery::Fixed(settings));
}

View File

@@ -5,7 +5,7 @@ use std::fs;
use std::path::PathBuf;
use anyhow::Result;
use rustpython_parser::parser;
use rustpython_parser as parser;
#[derive(clap::Args)]
pub struct Args {

View File

@@ -5,7 +5,7 @@ use std::fs;
use std::path::PathBuf;
use anyhow::Result;
use rustpython_parser::lexer;
use rustpython_parser::{lexer, Mode};
#[derive(clap::Args)]
pub struct Args {
@@ -16,7 +16,7 @@ pub struct Args {
pub fn main(args: &Args) -> Result<()> {
let contents = fs::read_to_string(&args.file)?;
for (_, tok, _) in lexer::make_tokenizer(&contents).flatten() {
for (_, tok, _) in lexer::lex(&contents, Mode::Module).flatten() {
println!("{tok:#?}");
}
Ok(())

View File

@@ -333,7 +333,7 @@ impl std::fmt::Debug for StaticTextSlice {
}
fn debug_assert_no_newlines(text: &str) {
debug_assert!(!text.contains('\r'), "The content '{}' contains an unsupported '\\r' line terminator character but text must only use line feeds '\\n' as line separator. Use '\\n' instead of '\\r' and '\\r\\n' to insert a line break in strings.", text);
debug_assert!(!text.contains('\r'), "The content '{text}' contains an unsupported '\\r' line terminator character but text must only use line feeds '\\n' as line separator. Use '\\n' instead of '\\r' and '\\r\\n' to insert a line break in strings.");
}
/// Pushes some content to the end of the current line

View File

@@ -101,7 +101,7 @@ impl std::fmt::Display for IndentStyle {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
IndentStyle::Tab => std::write!(f, "Tab"),
IndentStyle::Space(size) => std::write!(f, "Spaces, size: {}", size),
IndentStyle::Space(size) => std::write!(f, "Spaces, size: {size}"),
}
}
}

View File

@@ -18,3 +18,5 @@ rustpython-parser = { workspace = true }
[dev-dependencies]
insta = { version = "1.19.0", features = [] }
test-case = { version = "2.2.2" }
ruff_testing_macros = { path = "../ruff_testing_macros" }
similar = "2.2.1"

View File

@@ -0,0 +1,6 @@
# Check http://editorconfig.org for more information
# This is the main config file for this project:
root = true
[*.py.expect]
insert_final_newline = false

View File

@@ -1,8 +1,3 @@
---
source: src/source_code/mod.rs
assertion_line: 0
expression: formatted
---
x = (123456789).bit_count()
x = (123456).__abs__()
x = (0.1).is_integer()
@@ -24,5 +19,4 @@ if (10).real:
...
y = 100[no]
y = 100(no)
y = 100(no)

View File

@@ -0,0 +1 @@
print("hello, world")

View File

@@ -0,0 +1,4 @@
for ((x in {}) or {})["a"] in x:
pass
pem_spam = lambda l, spam={"x": 3}: not spam.get(l.strip())
lambda x=lambda y={1: 3}: y["x" : lambda y: {1: 2}]: x

View File

@@ -1,8 +1,3 @@
---
source: src/source_code/mod.rs
assertion_line: 0
expression: formatted
---
class SimpleClassWithBlankParentheses:
pass
@@ -32,5 +27,4 @@ def class_under_the_func_with_blank_parentheses():
class NormalClass:
def func_for_testing(self, first, second):
sum = first + second
return sum
return sum

View File

@@ -1,8 +1,3 @@
---
source: src/source_code/mod.rs
assertion_line: 0
expression: formatted
---
class ClassSimplest:
pass
@@ -167,5 +162,4 @@ class ClassWithDecoInitAndVarsAndDocstringWithInner2:
@deco
def __init__(self):
pass
pass

View File

@@ -1,8 +1,3 @@
---
source: src/source_code/mod.rs
assertion_line: 0
expression: formatted
---
import core, time, a
from . import A, B, C
@@ -101,5 +96,4 @@ if True:
WaiterConfig={
"Delay": 5,
},
)
)

View File

@@ -0,0 +1,6 @@
def bob(): # pylint: disable=W9016
pass
def bobtwo(): # some comment here
pass

View File

@@ -1,8 +1,3 @@
---
source: src/source_code/mod.rs
assertion_line: 0
expression: formatted
---
#!/usr/bin/env python3
# fmt: on
# Some license here.
@@ -98,5 +93,4 @@ async def wat():
# Some closing comments.
# Maybe Vim or Emacs directives for formatting.
# Who knows.
# Who knows.

Some files were not shown because too many files have changed in this diff Show More