Compare commits

..

5 Commits

Author SHA1 Message Date
Micha Reiser
ca4c006f7d Experiment with Located trait 2024-10-26 13:10:14 +02:00
Micha Reiser
6aaf1d9446 [red-knot] Remove lint-phase (#13922)
Co-authored-by: Alex Waygood <Alex.Waygood@Gmail.com>
2024-10-25 18:40:52 +00:00
Alex Waygood
5eb87aa56e [red-knot] Infer Todo, not Unknown, for PEP-604 unions in annotations (#13908) 2024-10-25 18:21:31 +00:00
David Peter
085a43a262 [red-knot] knot benchmark: fix --knot-path arg (#13923)
## Summary

Previously, this would fail with

```
AttributeError: 'str' object has no attribute 'is_file'
```

if I tried to use the `--knot-path` option. I wish we had a type checker
for Python*.

## Test Plan

```sh
uv run benchmark --knot-path ~/.cargo-target/release/red_knot
```

\* to be fair, this would probably require special handling for
`argparse` in the typechecker.
2024-10-25 11:43:39 +02:00
Micha Reiser
32b57b2ee4 Enable nursery rules: 'redundant_clone', 'debug_assert_with_mut_call', and 'unused_peekable' (#13920) 2024-10-25 09:46:30 +02:00
133 changed files with 1353 additions and 1020 deletions

View File

@@ -202,6 +202,10 @@ get_unwrap = "warn"
rc_buffer = "warn"
rc_mutex = "warn"
rest_pat_in_fully_bound_structs = "warn"
# nursery rules
redundant_clone = "warn"
debug_assert_with_mut_call = "warn"
unused_peekable = "warn"
[profile.release]
# Note that we set these explicitly, and these values

View File

@@ -144,7 +144,7 @@ pub fn main() -> ExitStatus {
}
fn run() -> anyhow::Result<ExitStatus> {
let args = Args::parse_from(std::env::args().collect::<Vec<_>>());
let args = Args::parse_from(std::env::args());
if matches!(args.command, Some(Command::Server)) {
return run_server().map(|()| ExitStatus::Success);

View File

@@ -22,3 +22,13 @@ x: int = "foo" # error: [invalid-assignment] "Object of type `Literal["foo"]` i
x: int
x = "foo" # error: [invalid-assignment] "Object of type `Literal["foo"]` is not assignable to `int`"
```
## PEP-604 annotations not yet supported
```py
def f() -> str | None:
return None
# TODO: should be `str | None` (but Todo is better than `Unknown`)
reveal_type(f()) # revealed: @Todo
```

View File

@@ -3,9 +3,12 @@
## Unbound
```py
x = foo
x = foo # error: [unresolved-reference] "Name `foo` used when not defined"
foo = 1
reveal_type(x) # revealed: Unbound
# error: [unresolved-reference]
# revealed: Unbound
reveal_type(x)
```
## Unbound class variable
@@ -13,6 +16,10 @@ reveal_type(x) # revealed: Unbound
Name lookups within a class scope fall back to globals, but lookups of class attributes don't.
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
x = 1
class C:

View File

@@ -3,6 +3,11 @@
## Union of attributes
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
if flag:
class C:
x = 1

View File

@@ -3,6 +3,11 @@
## Union of return types
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
if flag:
def f() -> int:
@@ -21,6 +26,11 @@ reveal_type(f()) # revealed: int | str
```py
from nonexistent import f # error: [unresolved-import] "Cannot resolve import `nonexistent`"
def bool_instance() -> bool:
return True
flag = bool_instance()
if flag:
def f() -> int:
@@ -34,6 +44,11 @@ reveal_type(f()) # revealed: Unknown | int
Calling a union with a non-callable element should emit a diagnostic.
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
if flag:
f = 1
else:
@@ -50,6 +65,11 @@ reveal_type(x) # revealed: Unknown | int
Calling a union with multiple non-callable elements should mention all of them in the diagnostic.
```py
def bool_instance() -> bool:
return True
flag, flag2 = bool_instance(), bool_instance()
if flag:
f = 1
elif flag2:
@@ -69,6 +89,11 @@ reveal_type(f())
Calling a union with no callable elements can emit a simpler diagnostic.
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
if flag:
f = 1
else:

View File

@@ -5,6 +5,10 @@
Comparisons on union types need to consider all possible cases:
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
one_or_two = 1 if flag else 2
reveal_type(one_or_two <= 2) # revealed: Literal[True]
@@ -52,6 +56,10 @@ With unions on both sides, we need to consider the full cross product of
options when building the resulting (union) type:
```py
def bool_instance() -> bool:
return True
flag_s, flag_l = bool_instance(), bool_instance()
small = 1 if flag_s else 2
large = 2 if flag_l else 3
@@ -69,6 +77,10 @@ unsupported. For now, we fall back to `bool` for the result type instead of
trying to infer something more precise from the other (supported) variants:
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
x = [1, 2] if flag else 1
result = 1 in x # error: "Operator `in` is not supported"

View File

@@ -1,6 +1,9 @@
# Comparison: Unsupported operators
```py
def bool_instance() -> bool:
return True
a = 1 in 7 # error: "Operator `in` is not supported for types `Literal[1]` and `Literal[7]`"
reveal_type(a) # revealed: bool
@@ -15,6 +18,7 @@ d = 5 < object()
# TODO: should be `Unknown`
reveal_type(d) # revealed: bool
flag = bool_instance()
int_literal_or_str_literal = 1 if flag else "foo"
# error: "Operator `in` is not supported for types `Literal[42]` and `Literal[1]`, in comparing `Literal[42]` with `Literal[1] | Literal["foo"]`"
e = 42 in int_literal_or_str_literal

View File

@@ -3,6 +3,10 @@
## Simple if-expression
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
x = 1 if flag else 2
reveal_type(x) # revealed: Literal[1, 2]
```
@@ -10,6 +14,10 @@ reveal_type(x) # revealed: Literal[1, 2]
## If-expression with walrus operator
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
y = 0
z = 0
x = (y := 1) if flag else (z := 2)
@@ -21,6 +29,10 @@ reveal_type(z) # revealed: Literal[0, 2]
## Nested if-expression
```py
def bool_instance() -> bool:
return True
flag, flag2 = bool_instance(), bool_instance()
x = 1 if flag else 2 if flag2 else 3
reveal_type(x) # revealed: Literal[1, 2, 3]
```
@@ -28,6 +40,10 @@ reveal_type(x) # revealed: Literal[1, 2, 3]
## None
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
x = 1 if flag else None
reveal_type(x) # revealed: Literal[1] | None
```

View File

@@ -3,6 +3,10 @@
## Simple if
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
y = 1
y = 2
@@ -15,6 +19,10 @@ reveal_type(y) # revealed: Literal[2, 3]
## Simple if-elif-else
```py
def bool_instance() -> bool:
return True
flag, flag2 = bool_instance(), bool_instance()
y = 1
y = 2
if flag:
@@ -28,13 +36,24 @@ else:
x = y
reveal_type(x) # revealed: Literal[3, 4, 5]
reveal_type(r) # revealed: Unbound | Literal[2]
reveal_type(s) # revealed: Unbound | Literal[5]
# revealed: Unbound | Literal[2]
# error: [possibly-unresolved-reference]
reveal_type(r)
# revealed: Unbound | Literal[5]
# error: [possibly-unresolved-reference]
reveal_type(s)
```
## Single symbol across if-elif-else
```py
def bool_instance() -> bool:
return True
flag, flag2 = bool_instance(), bool_instance()
if flag:
y = 1
elif flag2:
@@ -47,6 +66,10 @@ reveal_type(y) # revealed: Literal[1, 2, 3]
## if-elif-else without else assignment
```py
def bool_instance() -> bool:
return True
flag, flag2 = bool_instance(), bool_instance()
y = 0
if flag:
y = 1
@@ -60,6 +83,10 @@ reveal_type(y) # revealed: Literal[0, 1, 2]
## if-elif-else with intervening assignment
```py
def bool_instance() -> bool:
return True
flag, flag2 = bool_instance(), bool_instance()
y = 0
if flag:
y = 1
@@ -74,6 +101,10 @@ reveal_type(y) # revealed: Literal[0, 1, 2]
## Nested if statement
```py
def bool_instance() -> bool:
return True
flag, flag2 = bool_instance(), bool_instance()
y = 0
if flag:
if flag2:
@@ -84,6 +115,10 @@ reveal_type(y) # revealed: Literal[0, 1]
## if-elif without else
```py
def bool_instance() -> bool:
return True
flag, flag2 = bool_instance(), bool_instance()
y = 1
y = 2
if flag:

View File

@@ -21,7 +21,9 @@ match 0:
case 2:
y = 3
reveal_type(y) # revealed: Unbound | Literal[2, 3]
# revealed: Unbound | Literal[2, 3]
# error: [possibly-unresolved-reference]
reveal_type(y)
```
## Basic match

View File

@@ -10,6 +10,10 @@ x: str # error: [invalid-declaration] "Cannot declare type `str` for inferred t
## Incompatible declarations
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
if flag:
x: str
else:
@@ -20,6 +24,10 @@ x = 1 # error: [conflicting-declarations] "Conflicting declared types for `x`:
## Partial declarations
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
if flag:
x: int
x = 1 # error: [conflicting-declarations] "Conflicting declared types for `x`: Unknown, int"
@@ -28,6 +36,10 @@ x = 1 # error: [conflicting-declarations] "Conflicting declared types for `x`:
## Incompatible declarations with bad assignment
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
if flag:
x: str
else:

View File

@@ -6,7 +6,7 @@
import re
try:
x
help()
except NameError as e:
reveal_type(e) # revealed: NameError
except re.error as f:
@@ -19,7 +19,7 @@ except re.error as f:
from nonexistent_module import foo # error: [unresolved-import]
try:
x
help()
except foo as e:
reveal_type(foo) # revealed: Unknown
reveal_type(e) # revealed: Unknown
@@ -31,7 +31,7 @@ except foo as e:
EXCEPTIONS = (AttributeError, TypeError)
try:
x
help()
except (RuntimeError, OSError) as e:
reveal_type(e) # revealed: RuntimeError | OSError
except EXCEPTIONS as f:
@@ -43,7 +43,7 @@ except EXCEPTIONS as f:
```py
def foo(x: type[AttributeError], y: tuple[type[OSError], type[RuntimeError]], z: tuple[type[BaseException], ...]):
try:
w
help()
except x as e:
# TODO: should be `AttributeError`
reveal_type(e) # revealed: @Todo

View File

@@ -4,7 +4,7 @@
```py
try:
x
help()
except* BaseException as e:
reveal_type(e) # revealed: BaseExceptionGroup
```
@@ -13,7 +13,7 @@ except* BaseException as e:
```py
try:
x
help()
except* OSError as e:
# TODO(Alex): more precise would be `ExceptionGroup[OSError]`
reveal_type(e) # revealed: BaseExceptionGroup
@@ -23,7 +23,7 @@ except* OSError as e:
```py
try:
x
help()
except* (TypeError, AttributeError) as e:
# TODO(Alex): more precise would be `ExceptionGroup[TypeError | AttributeError]`.
reveal_type(e) # revealed: BaseExceptionGroup

View File

@@ -6,9 +6,13 @@ Basic PEP 695 generics
```py
class MyBox[T]:
# TODO: `T` is defined here
# error: [unresolved-reference] "Name `T` used when not defined"
data: T
box_model_number = 695
# TODO: `T` is defined here
# error: [unresolved-reference] "Name `T` used when not defined"
def __init__(self, data: T):
self.data = data
@@ -26,13 +30,19 @@ reveal_type(MyBox.box_model_number) # revealed: Literal[695]
```py
class MyBox[T]:
# TODO: `T` is defined here
# error: [unresolved-reference] "Name `T` used when not defined"
data: T
# TODO: `T` is defined here
# error: [unresolved-reference] "Name `T` used when not defined"
def __init__(self, data: T):
self.data = data
# TODO not error on the subscripting
class MySecureBox[T](MyBox[T]): ... # error: [non-subscriptable]
# TODO not error on the subscripting or the use of type param
# error: [unresolved-reference] "Name `T` used when not defined"
# error: [non-subscriptable]
class MySecureBox[T](MyBox[T]): ...
secure_box: MySecureBox[int] = MySecureBox(5)
reveal_type(secure_box) # revealed: MySecureBox

View File

@@ -3,11 +3,22 @@
## Maybe unbound
```py path=maybe_unbound.py
def bool_instance() -> bool:
return True
flag = bool_instance()
if flag:
y = 3
x = y
reveal_type(x) # revealed: Unbound | Literal[3]
reveal_type(y) # revealed: Unbound | Literal[3]
x = y # error: [possibly-unresolved-reference]
# revealed: Unbound | Literal[3]
# error: [possibly-unresolved-reference]
reveal_type(x)
# revealed: Unbound | Literal[3]
# error: [possibly-unresolved-reference]
reveal_type(y)
```
```py
@@ -20,11 +31,22 @@ reveal_type(y) # revealed: Literal[3]
## Maybe unbound annotated
```py path=maybe_unbound_annotated.py
def bool_instance() -> bool:
return True
flag = bool_instance()
if flag:
y: int = 3
x = y
reveal_type(x) # revealed: Unbound | Literal[3]
reveal_type(y) # revealed: Unbound | Literal[3]
x = y # error: [possibly-unresolved-reference]
# revealed: Unbound | Literal[3]
# error: [possibly-unresolved-reference]
reveal_type(x)
# revealed: Unbound | Literal[3]
# error: [possibly-unresolved-reference]
reveal_type(y)
```
Importing an annotated name prefers the declared type over the inferred type:
@@ -43,6 +65,10 @@ def f(): ...
```
```py path=b.py
def bool_instance() -> bool:
return True
flag = bool_instance()
if flag:
from c import f
else:
@@ -67,6 +93,10 @@ x: int
```
```py path=b.py
def bool_instance() -> bool:
return True
flag = bool_instance()
if flag:
from c import x
else:

View File

@@ -102,7 +102,7 @@ reveal_type(X) # revealed: Literal[42]
```
```py path=package/foo.py
x
x # error: [unresolved-reference]
```
```py path=package/bar.py

View File

@@ -17,8 +17,10 @@ async def foo():
async for x in Iterator():
pass
# TODO
reveal_type(x) # revealed: Unbound | @Todo
# TODO: should reveal `Unbound | Unknown` because `__aiter__` is not defined
# revealed: Unbound | @Todo
# error: [possibly-unresolved-reference]
reveal_type(x)
```
## Basic async for loop
@@ -37,5 +39,7 @@ async def foo():
async for x in IntAsyncIterable():
pass
reveal_type(x) # revealed: Unbound | @Todo
# error: [possibly-unresolved-reference]
# revealed: Unbound | @Todo
reveal_type(x)
```

View File

@@ -14,7 +14,9 @@ class IntIterable:
for x in IntIterable():
pass
reveal_type(x) # revealed: Unbound | int
# revealed: Unbound | int
# error: [possibly-unresolved-reference]
reveal_type(x)
```
## With previous definition
@@ -85,7 +87,9 @@ class OldStyleIterable:
for x in OldStyleIterable():
pass
reveal_type(x) # revealed: Unbound | int
# revealed: Unbound | int
# error: [possibly-unresolved-reference]
reveal_type(x)
```
## With heterogeneous tuple
@@ -94,12 +98,19 @@ reveal_type(x) # revealed: Unbound | int
for x in (1, "a", b"foo"):
pass
reveal_type(x) # revealed: Unbound | Literal[1] | Literal["a"] | Literal[b"foo"]
# revealed: Unbound | Literal[1] | Literal["a"] | Literal[b"foo"]
# error: [possibly-unresolved-reference]
reveal_type(x)
```
## With non-callable iterator
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
class NotIterable:
if flag:
__iter__ = 1
@@ -109,7 +120,9 @@ class NotIterable:
for x in NotIterable(): # error: "Object of type `NotIterable` is not iterable"
pass
reveal_type(x) # revealed: Unbound | Unknown
# revealed: Unbound | Unknown
# error: [possibly-unresolved-reference]
reveal_type(x)
```
## Invalid iterable

View File

@@ -3,6 +3,10 @@
## Basic While Loop
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
x = 1
while flag:
x = 2
@@ -13,6 +17,10 @@ reveal_type(x) # revealed: Literal[1, 2]
## While with else (no break)
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
x = 1
while flag:
x = 2
@@ -26,6 +34,10 @@ reveal_type(x) # revealed: Literal[3]
## While with Else (may break)
```py
def bool_instance() -> bool:
return True
flag, flag2 = bool_instance(), bool_instance()
x = 1
y = 0
while flag:

View File

@@ -3,6 +3,10 @@
## `is None`
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
x = None if flag else 1
if x is None:
@@ -14,6 +18,11 @@ reveal_type(x) # revealed: None | Literal[1]
## `is` for other types
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
class A: ...
x = A()
@@ -28,6 +37,10 @@ reveal_type(y) # revealed: A | None
## `is` in chained comparisons
```py
def bool_instance() -> bool:
return True
x_flag, y_flag = bool_instance(), bool_instance()
x = True if x_flag else False
y = True if y_flag else False

View File

@@ -5,6 +5,10 @@
The type guard removes `None` from the union type:
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
x = None if flag else 1
if x is not None:
@@ -16,6 +20,10 @@ reveal_type(x) # revealed: None | Literal[1]
## `is not` for other singleton types
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
x = True if flag else False
reveal_type(x) # revealed: bool
@@ -42,6 +50,10 @@ if x is not y:
The type guard removes `False` from the union type of the tested value only.
```py
def bool_instance() -> bool:
return True
x_flag, y_flag = bool_instance(), bool_instance()
x = True if x_flag else False
y = True if y_flag else False

View File

@@ -16,6 +16,10 @@ if x != 1:
## Multiple negative contributions with simplification
```py
def bool_instance() -> bool:
return True
flag1, flag2 = bool_instance(), bool_instance()
x = 1 if flag1 else 2 if flag2 else 3
if x != 1:

View File

@@ -3,6 +3,10 @@
## `x != None`
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
x = None if flag else 1
if x != None:
@@ -12,6 +16,10 @@ if x != None:
## `!=` for other singleton types
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
x = True if flag else False
if x != False:
@@ -21,6 +29,10 @@ if x != False:
## `x != y` where `y` is of literal type
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
x = 1 if flag else 2
if x != 1:
@@ -30,6 +42,11 @@ if x != 1:
## `x != y` where `y` is a single-valued type
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
class A: ...
class B: ...
@@ -44,8 +61,13 @@ if C != A:
Only single-valued types should narrow the type:
```py
def int_instance() -> int: ...
def bool_instance() -> bool:
return True
def int_instance() -> int:
return 42
flag = bool_instance()
x = int_instance() if flag else None
y = int_instance()

View File

@@ -5,6 +5,11 @@ Narrowing for `isinstance(object, classinfo)` expressions.
## `classinfo` is a single type
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
x = 1 if flag else "a"
if isinstance(x, int):
@@ -26,6 +31,11 @@ Note: `isinstance(x, (int, str))` should not be confused with
`isinstance(x, int | str)`:
```py
def bool_instance() -> bool:
return True
flag, flag1, flag2 = bool_instance(), bool_instance(), bool_instance()
x = 1 if flag else "a"
if isinstance(x, (int, str)):
@@ -56,6 +66,11 @@ if isinstance(y, (str, bytes)):
## `classinfo` is a nested tuple of types
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
x = 1 if flag else "a"
if isinstance(x, (bool, (bytes, int))):
@@ -81,6 +96,11 @@ if isinstance(x, A):
## No narrowing for instances of `builtins.type`
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
t = type("t", (), {})
# This isn't testing what we want it to test if we infer anything more precise here:
@@ -94,6 +114,11 @@ if isinstance(x, t):
## Do not use custom `isinstance` for narrowing
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
def isinstance(x, t):
return True
@@ -105,6 +130,11 @@ if isinstance(x, int):
## Do support narrowing if `isinstance` is aliased
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
isinstance_alias = isinstance
x = 1 if flag else "a"
@@ -117,6 +147,10 @@ if isinstance_alias(x, int):
```py
from builtins import isinstance as imported_isinstance
def bool_instance() -> bool:
return True
flag = bool_instance()
x = 1 if flag else "a"
if imported_isinstance(x, int):
reveal_type(x) # revealed: Literal[1]
@@ -125,6 +159,10 @@ if imported_isinstance(x, int):
## Do not narrow if second argument is not a type
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
x = 1 if flag else "a"
# TODO: this should cause us to emit a diagnostic during
@@ -141,6 +179,10 @@ if isinstance(x, "int"):
## Do not narrow if there are keyword arguments
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
x = 1 if flag else "a"
# TODO: this should cause us to emit a diagnostic

View File

@@ -3,6 +3,11 @@
## Single `match` pattern
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
x = None if flag else 1
reveal_type(x) # revealed: None | Literal[1]

View File

@@ -3,6 +3,11 @@
## Shadow after incompatible declarations is OK
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
if flag:
x: str
else:

View File

@@ -37,6 +37,11 @@ y = 1
## Union
```py
def bool_instance() -> bool:
return True
flag = bool_instance()
if flag:
p = 1
q = 3.3

View File

@@ -132,7 +132,7 @@ mod tests {
#[test]
fn inequality() {
let parsed_raw = parse_unchecked_source("1 + 2", PySourceType::Python);
let parsed = ParsedModule::new(parsed_raw.clone());
let parsed = ParsedModule::new(parsed_raw);
let stmt = &parsed.syntax().body[0];
let node = unsafe { AstNodeRef::new(parsed.clone(), stmt) };
@@ -150,7 +150,7 @@ mod tests {
#[allow(unsafe_code)]
fn debug() {
let parsed_raw = parse_unchecked_source("1 + 2", PySourceType::Python);
let parsed = ParsedModule::new(parsed_raw.clone());
let parsed = ParsedModule::new(parsed_raw);
let stmt = &parsed.syntax().body[0];

View File

@@ -1294,7 +1294,7 @@ mod tests {
search_paths: SearchPathSettings {
extra_paths: vec![],
src_root: src.clone(),
custom_typeshed: Some(custom_typeshed.clone()),
custom_typeshed: Some(custom_typeshed),
site_packages: SitePackages::Known(vec![site_packages]),
},
},
@@ -1445,7 +1445,7 @@ mod tests {
assert_function_query_was_not_run(
&db,
resolve_module_query,
ModuleNameIngredient::new(&db, functools_module_name.clone()),
ModuleNameIngredient::new(&db, functools_module_name),
&events,
);
assert_eq!(functools_module.search_path(), &stdlib);

View File

@@ -296,7 +296,7 @@ impl DefinitionNodeRef<'_> {
handler,
is_star,
}) => DefinitionKind::ExceptHandler(ExceptHandlerDefinitionKind {
handler: AstNodeRef::new(parsed.clone(), handler),
handler: AstNodeRef::new(parsed, handler),
is_star,
}),
}

View File

@@ -211,7 +211,13 @@ fn declarations_ty<'db>(
let declared_ty = if let Some(second) = all_types.next() {
let mut builder = UnionBuilder::new(db).add(first);
for other in [second].into_iter().chain(all_types) {
if !first.is_equivalent_to(db, other) {
// Make sure not to emit spurious errors relating to `Type::Todo`,
// since we only infer this type due to a limitation in our current model.
//
// `Unknown` is different here, since we might infer `Unknown`
// for one of these due to a variable being defined in one possible
// control-flow branch but not another one.
if !first.is_equivalent_to(db, other) && !first.is_todo() && !other.is_todo() {
conflicting.push(other);
}
builder = builder.add(other);
@@ -292,6 +298,10 @@ impl<'db> Type<'db> {
matches!(self, Type::Never)
}
pub const fn is_todo(&self) -> bool {
matches!(self, Type::Todo)
}
pub const fn into_class_literal_type(self) -> Option<ClassType<'db>> {
match self {
Type::ClassLiteral(class_type) => Some(class_type),

View File

@@ -320,9 +320,7 @@ impl<'db> TypeInferenceBuilder<'db> {
db,
index,
region,
file,
types: TypeInference::empty(scope),
}
}
@@ -2418,7 +2416,23 @@ impl<'db> TypeInferenceBuilder<'db> {
None
};
bindings_ty(self.db, definitions, unbound_ty)
let ty = bindings_ty(self.db, definitions, unbound_ty);
if ty.is_unbound() {
self.add_diagnostic(
name.into(),
"unresolved-reference",
format_args!("Name `{id}` used when not defined"),
);
} else if ty.may_be_unbound(self.db) {
self.add_diagnostic(
name.into(),
"possibly-unresolved-reference",
format_args!("Name `{id}` used when possibly not defined"),
);
}
ty
}
ExprContext::Store | ExprContext::Del => Type::None,
ExprContext::Invalid => Type::Unknown,
@@ -3471,6 +3485,17 @@ impl<'db> TypeInferenceBuilder<'db> {
Type::Todo
}
// TODO PEP-604 unions
ast::Expr::BinOp(binary) => {
self.infer_binary_expression(binary);
match binary.op {
// PEP-604 unions are okay
ast::Operator::BitOr => Type::Todo,
// anything else is an invalid annotation:
_ => Type::Unknown,
}
}
// Forms which are invalid in the context of annotation expressions: we infer their
// nested expressions as normal expressions, but the type of the top-level expression is
// always `Type::Unknown` in these cases.
@@ -3482,10 +3507,6 @@ impl<'db> TypeInferenceBuilder<'db> {
self.infer_named_expression(named);
Type::Unknown
}
ast::Expr::BinOp(binary) => {
self.infer_binary_expression(binary);
Type::Unknown
}
ast::Expr::UnaryOp(unary) => {
self.infer_unary_expression(unary);
Type::Unknown
@@ -3801,6 +3822,7 @@ mod tests {
Ok(db)
}
#[track_caller]
fn assert_public_ty(db: &TestDb, file_name: &str, symbol_name: &str, expected: &str) {
let file = system_path_to_file(db, file_name).expect("file to exist");
@@ -3812,6 +3834,7 @@ mod tests {
);
}
#[track_caller]
fn assert_scope_ty(
db: &TestDb,
file_name: &str,
@@ -3837,6 +3860,7 @@ mod tests {
assert_eq!(ty.display(db).to_string(), expected);
}
#[track_caller]
fn assert_diagnostic_messages(diagnostics: &TypeCheckDiagnostics, expected: &[&str]) {
let messages: Vec<&str> = diagnostics
.iter()
@@ -3845,6 +3869,7 @@ mod tests {
assert_eq!(&messages, expected);
}
#[track_caller]
fn assert_file_diagnostics(db: &TestDb, filename: &str, expected: &[&str]) {
let file = system_path_to_file(db, filename).unwrap();
let diagnostics = check_types(db, file);
@@ -4432,7 +4457,7 @@ mod tests {
from typing_extensions import reveal_type
try:
x
print
except as e:
reveal_type(e)
",
@@ -4569,7 +4594,10 @@ mod tests {
assert_file_diagnostics(
&db,
"src/a.py",
&["Object of type `Unbound` is not iterable"],
&[
"Name `x` used when not defined",
"Object of type `Unbound` is not iterable",
],
);
Ok(())
@@ -4704,7 +4732,7 @@ mod tests {
assert_scope_ty(&db, "src/a.py", &["foo", "<listcomp>"], "z", "Unbound");
// (There is a diagnostic for invalid syntax that's emitted, but it's not listed by `assert_file_diagnostics`)
assert_file_diagnostics(&db, "src/a.py", &[]);
assert_file_diagnostics(&db, "src/a.py", &["Name `z` used when not defined"]);
Ok(())
}

View File

@@ -18,6 +18,7 @@ impl<I, T: DoubleEndedIterator<Item = I>> PythonSubscript for T {
}
#[cfg(test)]
#[allow(clippy::redundant_clone)]
mod tests {
use super::PythonSubscript;

View File

@@ -37,7 +37,7 @@ impl SyncNotificationHandler for DidOpenNotebookHandler {
params.cell_text_documents,
)
.with_failure_code(ErrorCode::InternalError)?;
session.open_notebook_document(params.notebook_document.uri.clone(), notebook);
session.open_notebook_document(params.notebook_document.uri, notebook);
match path {
AnySystemPath::System(path) => {

View File

@@ -76,7 +76,7 @@ impl InlineFileAssertions {
}
fn is_own_line_comment(&self, ranged_assertion: &AssertionWithRange) -> bool {
CommentRanges::is_own_line(ranged_assertion.start(), &self.locator())
CommentRanges::is_own_line(ranged_assertion.start(), self.source.as_str())
}
}

View File

@@ -110,14 +110,14 @@ impl Workspace {
pub fn check_file(&self, file_id: &FileHandle) -> Result<Vec<String>, Error> {
let result = self.db.check_file(file_id.file).map_err(into_error)?;
Ok(result.clone())
Ok(result)
}
/// Checks all open files
pub fn check(&self) -> Result<Vec<String>, Error> {
let result = self.db.check().map_err(into_error)?;
Ok(result.clone())
Ok(result)
}
/// Returns the parsed AST for `path`

View File

@@ -1,4 +1,3 @@
pub mod db;
pub mod lint;
pub mod watch;
pub mod workspace;

View File

@@ -1,318 +0,0 @@
use std::cell::RefCell;
use std::time::Duration;
use tracing::debug_span;
use red_knot_python_semantic::types::Type;
use red_knot_python_semantic::{HasTy, ModuleName, SemanticModel};
use ruff_db::files::File;
use ruff_db::parsed::{parsed_module, ParsedModule};
use ruff_db::source::{source_text, SourceText};
use ruff_python_ast as ast;
use ruff_python_ast::visitor::{walk_expr, walk_stmt, Visitor};
use ruff_text_size::{Ranged, TextSize};
use crate::db::Db;
/// Workaround query to test for if the computation should be cancelled.
/// Ideally, push for Salsa to expose an API for testing if cancellation was requested.
// The body is intentionally empty: merely invoking a tracked query gives Salsa a
// chance to unwind this computation if a cancellation was requested — presumably
// via Salsa's internal cancellation check on query entry (TODO confirm).
#[salsa::tracked]
#[allow(unused_variables)]
pub(crate) fn unwind_if_cancelled(db: &dyn Db) {}
/// Runs the purely syntactic lints over `file_id` and returns their diagnostics.
///
/// Line-based lints (`lint_lines`) always run; AST-based lints only run when the
/// file parsed without errors. Cached as a Salsa query returning a reference.
#[salsa::tracked(return_ref)]
pub(crate) fn lint_syntax(db: &dyn Db, file_id: File) -> Vec<String> {
    // Optional artificial slowdown (10 x 1s) used to exercise cancellation while a
    // lint is in flight; opt-in via the `RED_KNOT_SLOW_LINT` environment variable.
    #[allow(clippy::print_stdout)]
    if std::env::var("RED_KNOT_SLOW_LINT").is_ok() {
        for i in 0..10 {
            unwind_if_cancelled(db);

            println!("RED_KNOT_SLOW_LINT is set, sleeping for {i}/10 seconds");
            std::thread::sleep(Duration::from_secs(1));
        }
    }

    let mut diagnostics = Vec::new();

    // Line-length style checks work directly on the source text.
    let source = source_text(db.upcast(), file_id);
    lint_lines(&source, &mut diagnostics);

    let parsed = parsed_module(db.upcast(), file_id);

    // AST-based checks are skipped entirely for files with parse errors.
    if parsed.errors().is_empty() {
        let ast = parsed.syntax();

        let mut visitor = SyntaxLintVisitor {
            diagnostics,
            source: &source,
        };
        visitor.visit_body(&ast.body);
        diagnostics = visitor.diagnostics;
    }

    diagnostics
}
/// Appends a diagnostic to `diagnostics` for every line of `source` that is
/// longer than 88 characters. Line numbers in the messages are 1-based.
fn lint_lines(source: &str, diagnostics: &mut Vec<String>) {
    diagnostics.extend(source.lines().enumerate().filter_map(|(index, line)| {
        // Cheap pre-filter: the byte length bounds the character count from
        // above, so a line that is short in bytes can never exceed the limit.
        if line.len() < 88 {
            return None;
        }

        let char_count = line.chars().count();
        (char_count > 88).then(|| {
            format!(
                "Line {} is too long ({} characters)",
                index + 1,
                char_count
            )
        })
    }));
}
/// Runs the semantic (type-aware) lints over `file_id` and returns their diagnostics.
///
/// Returns an empty list for files that did not parse cleanly, since the semantic
/// checks assume a valid module. Cached as a Salsa query returning a reference.
#[allow(unreachable_pub)]
#[salsa::tracked(return_ref)]
pub fn lint_semantic(db: &dyn Db, file_id: File) -> Vec<String> {
    let _span = debug_span!("lint_semantic", file=%file_id.path(db)).entered();

    let source = source_text(db.upcast(), file_id);
    let parsed = parsed_module(db.upcast(), file_id);
    let semantic = SemanticModel::new(db.upcast(), file_id);

    // Semantic lints require a syntactically valid module; bail out otherwise.
    if !parsed.is_valid() {
        return vec![];
    }

    let context = SemanticLintContext {
        source,
        parsed,
        semantic,
        diagnostics: RefCell::new(Vec::new()),
    };

    SemanticVisitor { context: &context }.visit_body(parsed.suite());

    // Move the accumulated diagnostics out of the interior-mutability cell.
    context.diagnostics.take()
}
/// Renders a lint message prefixed with its `path:row:column:` location,
/// resolving `start` against the file's line index.
fn format_diagnostic(context: &SemanticLintContext, message: &str, start: TextSize) -> String {
    let semantic = &context.semantic;
    let location = semantic
        .line_index()
        .source_location(start, context.source_text());

    format!(
        "{}:{}:{}: {}",
        semantic.file_path(),
        location.row,
        location.column,
        message,
    )
}
/// Emits a diagnostic when a loaded name is unbound (or possibly unbound) at
/// this use site, based on its inferred type.
fn lint_maybe_undefined(context: &SemanticLintContext, name: &ast::ExprName) {
    // Only loads can read an unbound name; stores and deletes are irrelevant here.
    let ast::ExprContext::Load = name.ctx else {
        return;
    };

    let semantic = &context.semantic;
    let ty = name.ty(semantic);

    // Definitely-unbound takes priority over possibly-unbound.
    let message = if ty.is_unbound() {
        format!("Name `{}` used when not defined", &name.id)
    } else if ty.may_be_unbound(semantic.db()) {
        format!("Name `{}` used when possibly not defined", &name.id)
    } else {
        return;
    };

    context.push_diagnostic(format_diagnostic(context, &message, name.start()));
}
/// Lints a class definition for methods decorated with `typing.override` that
/// do not actually override any member inherited from a base class.
fn lint_bad_override(context: &SemanticLintContext, class: &ast::StmtClassDef) {
    let semantic = &context.semantic;

    // TODO we should have a special marker on the real typing module (from typeshed) so if you
    // have your own "typing" module in your project, we don't consider it THE typing module (and
    // same for other stdlib modules that our lint rules care about)
    let Some(typing) = semantic.resolve_module(&ModuleName::new("typing").unwrap()) else {
        return;
    };

    let override_ty = semantic.global_symbol_ty(&typing, "override");

    // Only class literals expose `inherited_class_member`; anything else is skipped.
    let Type::ClassLiteral(class_ty) = class.ty(semantic) else {
        return;
    };

    for function in class
        .body
        .iter()
        .filter_map(|stmt| stmt.as_function_def_stmt())
    {
        // NOTE(review): this `return` aborts the loop — and thus the check for all
        // remaining methods — as soon as one method's type is not a function literal
        // (e.g. presumably when a decorator changes its type). `continue` may have
        // been intended here; confirm before relying on this behavior.
        let Type::FunctionLiteral(ty) = function.ty(semantic) else {
            return;
        };

        // TODO this shouldn't make direct use of the Db; see comment on SemanticModel::db
        let db = semantic.db();

        if ty.has_decorator(db, override_ty) {
            let method_name = ty.name(db);
            if class_ty
                .inherited_class_member(db, method_name)
                .is_unbound()
            {
                // TODO should have a qualname() method to support nested classes
                context.push_diagnostic(
                    format!(
                        "Method {}.{} is decorated with `typing.override` but does not override any base class method",
                        class_ty.name(db),
                        method_name,
                    ));
            }
        }
    }
}
/// Shared state for one `lint_semantic` run over a single file: the inputs the
/// lints read and the diagnostics sink they write into.
pub(crate) struct SemanticLintContext<'a> {
    // Source text of the file being linted.
    source: SourceText,
    // Parsed module for the same file.
    parsed: &'a ParsedModule,
    // Semantic model used for type and module lookups.
    semantic: SemanticModel<'a>,
    // Accumulated diagnostics; RefCell so lints can push through a shared `&self`.
    diagnostics: RefCell<Vec<String>>,
}
impl<'db> SemanticLintContext<'db> {
    /// Returns the source text of the file being linted.
    #[allow(unused)]
    pub(crate) fn source_text(&self) -> &str {
        self.source.as_str()
    }

    /// Returns the parsed module AST for the file being linted.
    #[allow(unused)]
    pub(crate) fn ast(&self) -> &'db ast::ModModule {
        self.parsed.syntax()
    }

    /// Records a single diagnostic; takes `&self` thanks to the interior
    /// mutability of the diagnostics cell.
    pub(crate) fn push_diagnostic(&self, diagnostic: String) {
        self.diagnostics.borrow_mut().push(diagnostic);
    }

    /// Records a batch of diagnostics; requires exclusive access, so it can
    /// bypass the runtime borrow check via `get_mut`.
    #[allow(unused)]
    pub(crate) fn extend_diagnostics(&mut self, diagnostics: impl IntoIterator<Item = String>) {
        self.diagnostics.get_mut().extend(diagnostics);
    }
}
/// AST visitor for the purely syntactic lints; collects diagnostics while
/// walking the module body.
#[derive(Debug)]
struct SyntaxLintVisitor<'a> {
    // Diagnostics gathered so far (moved back out by `lint_syntax`).
    diagnostics: Vec<String>,
    // Raw source text, used to inspect the original token text of nodes.
    source: &'a str,
}
impl Visitor<'_> for SyntaxLintVisitor<'_> {
    fn visit_string_literal(&mut self, string_literal: &'_ ast::StringLiteral) {
        // Very naive "prefer double quotes" rule: flag any string literal
        // whose raw source text begins with a single quote.
        let raw = &self.source[string_literal.range];
        if raw.starts_with('\'') {
            self.diagnostics
                .push(String::from("Use double quotes for strings"));
        }
    }
}
/// AST visitor that drives the semantic lint rules via the shared context.
struct SemanticVisitor<'a> {
    /// Context holding the semantic model and the diagnostics sink.
    context: &'a SemanticLintContext<'a>,
}
impl Visitor<'_> for SemanticVisitor<'_> {
    fn visit_stmt(&mut self, stmt: &ast::Stmt) {
        // Run the class-level rules on the node itself before descending
        // into its children.
        match stmt {
            ast::Stmt::ClassDef(class) => lint_bad_override(self.context, class),
            _ => {}
        }

        walk_stmt(self, stmt);
    }

    fn visit_expr(&mut self, expr: &ast::Expr) {
        // Only loads of a name count as uses; stores and deletes are
        // definitions, not "used before defined" candidates.
        if let ast::Expr::Name(name) = expr {
            if matches!(name.ctx, ast::ExprContext::Load) {
                lint_maybe_undefined(self.context, name);
            }
        }

        walk_expr(self, expr);
    }
}
#[cfg(test)]
mod tests {
    use red_knot_python_semantic::{Program, ProgramSettings, PythonVersion, SearchPathSettings};
    use ruff_db::files::system_path_to_file;
    use ruff_db::system::{DbWithTestSystem, SystemPathBuf};

    use crate::db::tests::TestDb;

    use super::lint_semantic;

    /// Builds a test database rooted at `/src`.
    fn setup_db() -> TestDb {
        setup_db_with_root(SystemPathBuf::from("/src"))
    }

    /// Builds a test database using `src_root` as the first-party search path.
    fn setup_db_with_root(src_root: SystemPathBuf) -> TestDb {
        let db = TestDb::new();
        db.memory_file_system()
            .create_directory_all(&src_root)
            .unwrap();

        let settings = ProgramSettings {
            target_version: PythonVersion::default(),
            search_paths: SearchPathSettings::new(src_root),
        };
        Program::from_settings(&db, &settings).expect("Valid program settings");

        db
    }

    #[test]
    fn undefined_variable() {
        let mut db = setup_db();

        db.write_dedented(
            "/src/a.py",
            "
            x = int
            if flag:
                y = x
            y
            ",
        )
        .unwrap();

        let file = system_path_to_file(&db, "/src/a.py").expect("file to exist");
        let messages = lint_semantic(&db, file);

        // The lint must produce something before we compare exact output.
        assert_ne!(messages, &[] as &[String], "expected some diagnostics");

        let expected = if cfg!(windows) {
            vec![
                "\\src\\a.py:3:4: Name `flag` used when not defined",
                "\\src\\a.py:5:1: Name `y` used when possibly not defined",
            ]
        } else {
            vec![
                "/src/a.py:3:4: Name `flag` used when not defined",
                "/src/a.py:5:1: Name `y` used when possibly not defined",
            ]
        };
        assert_eq!(*messages, expected);
    }
}

View File

@@ -15,12 +15,9 @@ use ruff_db::{
use ruff_python_ast::{name::Name, PySourceType};
use ruff_text_size::Ranged;
use crate::db::Db;
use crate::db::RootDatabase;
use crate::workspace::files::{Index, Indexed, IndexedIter, PackageFiles};
use crate::{
db::Db,
lint::{lint_semantic, lint_syntax},
};
mod files;
mod metadata;
@@ -423,8 +420,6 @@ pub(super) fn check_file(db: &dyn Db, file: File) -> Vec<String> {
));
}
diagnostics.extend_from_slice(lint_syntax(db, file));
diagnostics.extend_from_slice(lint_semantic(db, file));
diagnostics
}
@@ -540,17 +535,17 @@ impl Iterator for WorkspaceFilesIter<'_> {
#[cfg(test)]
mod tests {
use red_knot_python_semantic::types::check_types;
use ruff_db::files::system_path_to_file;
use ruff_db::source::source_text;
use ruff_db::system::{DbWithTestSystem, SystemPath};
use ruff_db::testing::assert_function_query_was_not_run;
use crate::db::tests::TestDb;
use crate::lint::lint_syntax;
use crate::workspace::check_file;
#[test]
fn check_file_skips_linting_when_file_cant_be_read() -> ruff_db::system::Result<()> {
fn check_file_skips_type_checking_when_file_cant_be_read() -> ruff_db::system::Result<()> {
let mut db = TestDb::new();
let path = SystemPath::new("test.py");
@@ -568,7 +563,7 @@ mod tests {
);
let events = db.take_salsa_events();
assert_function_query_was_not_run(&db, lint_syntax, file, &events);
assert_function_query_was_not_run(&db, check_types, file, &events);
// The user now creates a new file with an empty text. The source text
// content returned by `source_text` remains unchanged, but the diagnostics should get updated.

View File

@@ -28,30 +28,18 @@ static EXPECTED_DIAGNOSTICS: &[&str] = &[
// We don't support `*` imports yet:
"/src/tomllib/_parser.py:7:29: Module `collections.abc` has no member `Iterable`",
// We don't support terminal statements in control flow yet:
"/src/tomllib/_parser.py:353:5: Method `__getitem__` of type `Unbound | @Todo` is not callable on object of type `Unbound | @Todo`",
"/src/tomllib/_parser.py:455:9: Method `__getitem__` of type `Unbound | @Todo` is not callable on object of type `Unbound | @Todo`",
// True positives!
"Line 69 is too long (89 characters)",
"Use double quotes for strings",
"Use double quotes for strings",
"Use double quotes for strings",
"Use double quotes for strings",
"Use double quotes for strings",
"Use double quotes for strings",
"Use double quotes for strings",
// We don't support terminal statements in control flow yet:
"/src/tomllib/_parser.py:66:18: Name `s` used when possibly not defined",
"/src/tomllib/_parser.py:98:12: Name `char` used when possibly not defined",
"/src/tomllib/_parser.py:101:12: Name `char` used when possibly not defined",
"/src/tomllib/_parser.py:104:14: Name `char` used when possibly not defined",
"/src/tomllib/_parser.py:104:14: Name `char` used when possibly not defined",
"/src/tomllib/_parser.py:115:14: Name `char` used when possibly not defined",
"/src/tomllib/_parser.py:115:14: Name `char` used when possibly not defined",
"/src/tomllib/_parser.py:126:12: Name `char` used when possibly not defined",
"/src/tomllib/_parser.py:348:20: Name `nest` used when possibly not defined",
"/src/tomllib/_parser.py:353:5: Name `nest` used when possibly not defined",
"/src/tomllib/_parser.py:353:5: Method `__getitem__` of type `Unbound | @Todo` is not callable on object of type `Unbound | @Todo`",
"/src/tomllib/_parser.py:453:24: Name `nest` used when possibly not defined",
"/src/tomllib/_parser.py:455:9: Name `nest` used when possibly not defined",
"/src/tomllib/_parser.py:455:9: Method `__getitem__` of type `Unbound | @Todo` is not callable on object of type `Unbound | @Todo`",
"/src/tomllib/_parser.py:482:16: Name `char` used when possibly not defined",
"/src/tomllib/_parser.py:566:12: Name `char` used when possibly not defined",
"/src/tomllib/_parser.py:573:12: Name `char` used when possibly not defined",

View File

@@ -26,11 +26,11 @@ pub(crate) fn bindings(checker: &mut Checker) {
&& !checker
.settings
.dummy_variable_rgx
.is_match(binding.name(checker.locator))
.is_match(binding.name(checker.source()))
{
let mut diagnostic = Diagnostic::new(
pyflakes::rules::UnusedVariable {
name: binding.name(checker.locator).to_string(),
name: binding.name(checker.source()).to_string(),
},
binding.range(),
);
@@ -52,7 +52,7 @@ pub(crate) fn bindings(checker: &mut Checker) {
}
}
if checker.enabled(Rule::NonAsciiName) {
if let Some(diagnostic) = pylint::rules::non_ascii_name(binding, checker.locator) {
if let Some(diagnostic) = pylint::rules::non_ascii_name(binding, checker.source()) {
checker.diagnostics.push(diagnostic);
}
}

View File

@@ -17,7 +17,7 @@ pub(crate) fn unresolved_references(checker: &mut Checker) {
if checker.enabled(Rule::UndefinedLocalWithImportStarUsage) {
checker.diagnostics.push(Diagnostic::new(
pyflakes::rules::UndefinedLocalWithImportStarUsage {
name: reference.name(checker.locator).to_string(),
name: reference.name(checker.source()).to_string(),
},
reference.range(),
));
@@ -31,12 +31,12 @@ pub(crate) fn unresolved_references(checker: &mut Checker) {
// Allow __path__.
if checker.path.ends_with("__init__.py") {
if reference.name(checker.locator) == "__path__" {
if reference.name(checker.source()) == "__path__" {
continue;
}
}
let symbol_name = reference.name(checker.locator);
let symbol_name = reference.name(checker.source());
checker.diagnostics.push(Diagnostic::new(
pyflakes::rules::UndefinedName {

View File

@@ -352,6 +352,10 @@ impl<'a> Checker<'a> {
self.locator
}
pub(crate) const fn source(&self) -> &'a str {
self.locator.contents()
}
/// The [`Stylist`] for the current file, which detects the current line ending, quote, and
/// indentation style.
pub(crate) const fn stylist(&self) -> &'a Stylist<'a> {

View File

@@ -106,8 +106,8 @@ mod tests {
let line = "'\u{4e9c}' * 2"; // 7 in UTF-32, 9 in UTF-8.
let locator = Locator::new(line);
let parsed = parse_module(line).unwrap();
let indexer = Indexer::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let indexer = Indexer::from_tokens(parsed.tokens(), line);
let stylist = Stylist::from_tokens(parsed.tokens(), line);
let check_with_max_line_length = |line_length: LineLength| {
check_physical_lines(

View File

@@ -377,7 +377,7 @@ mod tests {
fn noqa_mappings(contents: &str) -> NoqaMapping {
let parsed = parse_module(contents).unwrap();
let locator = Locator::new(contents);
let indexer = Indexer::from_tokens(parsed.tokens(), &locator);
let indexer = Indexer::from_tokens(parsed.tokens(), contents);
extract_noqa_line_for(parsed.tokens(), &locator, &indexer)
}

View File

@@ -48,9 +48,11 @@ pub(crate) fn delete_stmt(
if let Some(semicolon) = trailing_semicolon(stmt.end(), locator) {
let next = next_stmt_break(semicolon, locator);
Edit::deletion(stmt.start(), next)
} else if has_leading_content(stmt.start(), locator) {
} else if has_leading_content(stmt.start(), locator.contents()) {
Edit::range_deletion(stmt.range())
} else if let Some(start) = indexer.preceded_by_continuations(stmt.start(), locator) {
} else if let Some(start) =
indexer.preceded_by_continuations(stmt.start(), locator.contents())
{
Edit::deletion(start, stmt.end())
} else {
let range = locator.full_lines_range(stmt.range());
@@ -726,7 +728,7 @@ x = 1 \
let locator = Locator::new(raw);
let edits = {
let parsed = parse_expression(raw)?;
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), raw);
add_to_dunder_all(names.iter().copied(), parsed.expr(), &stylist)
};
let diag = {

View File

@@ -329,7 +329,7 @@ mod tests {
fn insert(contents: &str) -> Result<Insertion> {
let parsed = parse_module(contents)?;
let locator = Locator::new(contents);
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
Ok(Insertion::start_of_file(parsed.suite(), &locator, &stylist))
}
@@ -440,7 +440,7 @@ x = 1
fn insert(contents: &str, offset: TextSize) -> Insertion {
let parsed = parse_module(contents).unwrap();
let locator = Locator::new(contents);
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
Insertion::start_of_block(offset, &locator, &stylist, parsed.tokens())
}

View File

@@ -206,40 +206,53 @@ pub fn check_path(
}
let diagnostic = match test_rule {
Rule::StableTestRule => {
test_rules::StableTestRule::diagnostic(locator, comment_ranges)
}
Rule::StableTestRuleSafeFix => {
test_rules::StableTestRuleSafeFix::diagnostic(locator, comment_ranges)
}
Rule::StableTestRuleUnsafeFix => {
test_rules::StableTestRuleUnsafeFix::diagnostic(locator, comment_ranges)
test_rules::StableTestRule::diagnostic(locator.contents(), comment_ranges)
}
Rule::StableTestRuleSafeFix => test_rules::StableTestRuleSafeFix::diagnostic(
locator.contents(),
comment_ranges,
),
Rule::StableTestRuleUnsafeFix => test_rules::StableTestRuleUnsafeFix::diagnostic(
locator.contents(),
comment_ranges,
),
Rule::StableTestRuleDisplayOnlyFix => {
test_rules::StableTestRuleDisplayOnlyFix::diagnostic(locator, comment_ranges)
test_rules::StableTestRuleDisplayOnlyFix::diagnostic(
locator.contents(),
comment_ranges,
)
}
Rule::PreviewTestRule => {
test_rules::PreviewTestRule::diagnostic(locator, comment_ranges)
test_rules::PreviewTestRule::diagnostic(locator.contents(), comment_ranges)
}
Rule::DeprecatedTestRule => {
test_rules::DeprecatedTestRule::diagnostic(locator, comment_ranges)
test_rules::DeprecatedTestRule::diagnostic(locator.contents(), comment_ranges)
}
Rule::AnotherDeprecatedTestRule => {
test_rules::AnotherDeprecatedTestRule::diagnostic(locator, comment_ranges)
test_rules::AnotherDeprecatedTestRule::diagnostic(
locator.contents(),
comment_ranges,
)
}
Rule::RemovedTestRule => {
test_rules::RemovedTestRule::diagnostic(locator, comment_ranges)
}
Rule::AnotherRemovedTestRule => {
test_rules::AnotherRemovedTestRule::diagnostic(locator, comment_ranges)
test_rules::RemovedTestRule::diagnostic(locator.contents(), comment_ranges)
}
Rule::AnotherRemovedTestRule => test_rules::AnotherRemovedTestRule::diagnostic(
locator.contents(),
comment_ranges,
),
Rule::RedirectedToTestRule => {
test_rules::RedirectedToTestRule::diagnostic(locator, comment_ranges)
}
Rule::RedirectedFromTestRule => {
test_rules::RedirectedFromTestRule::diagnostic(locator, comment_ranges)
test_rules::RedirectedToTestRule::diagnostic(locator.contents(), comment_ranges)
}
Rule::RedirectedFromTestRule => test_rules::RedirectedFromTestRule::diagnostic(
locator.contents(),
comment_ranges,
),
Rule::RedirectedFromPrefixTestRule => {
test_rules::RedirectedFromPrefixTestRule::diagnostic(locator, comment_ranges)
test_rules::RedirectedFromPrefixTestRule::diagnostic(
locator.contents(),
comment_ranges,
)
}
_ => unreachable!("All test rules must have an implementation"),
};
@@ -335,10 +348,10 @@ pub fn add_noqa_to_path(
let locator = Locator::new(source_kind.source_code());
// Detect the current code style (lazily).
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), source_kind.source_code());
// Extra indices from the code.
let indexer = Indexer::from_tokens(parsed.tokens(), &locator);
let indexer = Indexer::from_tokens(parsed.tokens(), source_kind.source_code());
// Extract the `# noqa` and `# isort: skip` directives from the source.
let directives = directives::extract_directives(
@@ -393,10 +406,10 @@ pub fn lint_only(
let locator = Locator::new(source_kind.source_code());
// Detect the current code style (lazily).
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), source_kind.source_code());
// Extra indices from the code.
let indexer = Indexer::from_tokens(parsed.tokens(), &locator);
let indexer = Indexer::from_tokens(parsed.tokens(), source_kind.source_code());
// Extract the `# noqa` and `# isort: skip` directives from the source.
let directives = directives::extract_directives(
@@ -495,10 +508,10 @@ pub fn lint_fix<'a>(
let locator = Locator::new(transformed.source_code());
// Detect the current code style (lazily).
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), transformed.source_code());
// Extra indices from the code.
let indexer = Indexer::from_tokens(parsed.tokens(), &locator);
let indexer = Indexer::from_tokens(parsed.tokens(), transformed.source_code());
// Extract the `# noqa` and `# isort: skip` directives from the source.
let directives = directives::extract_directives(

View File

@@ -369,7 +369,7 @@ impl<'a> FileNoqaDirectives<'a> {
warn!("Invalid `# ruff: noqa` directive at {path_display}:{line}: {err}");
}
Ok(Some(exemption)) => {
if indentation_at_offset(range.start(), locator).is_none() {
if indentation_at_offset(range.start(), contents).is_none() {
#[allow(deprecated)]
let line = locator.compute_line_index(range.start());
let path_display = relativize_path(path);

View File

@@ -88,7 +88,7 @@ where
let line_end = locator.full_line_end(script_start.end());
let rest = locator.after(line_end);
let mut end_offset = None;
let mut lines = UniversalNewlineIterator::with_offset(rest, line_end).peekable();
let mut lines = UniversalNewlineIterator::with_offset(rest, line_end);
while let Some(line) = lines.next() {
let Some(content) = script_line_content(&line) else {

View File

@@ -145,7 +145,7 @@ fn move_initialization(
// Avoid attempting to fix single-line functions.
let statement = body.peek()?;
if indexer.preceded_by_multi_statement_line(statement, locator) {
if indexer.preceded_by_multi_statement_line(statement, locator.contents()) {
return None;
}
@@ -170,7 +170,7 @@ fn move_initialization(
content.push_str(stylist.line_ending().as_str());
// Determine the indentation depth of the function body.
let indentation = indentation_at_offset(statement.start(), locator)?;
let indentation = indentation_at_offset(statement.start(), locator.contents())?;
// Indent the edit to match the body indentation.
let mut content = textwrap::indent(&content, indentation).to_string();
@@ -186,7 +186,7 @@ fn move_initialization(
if let Some(next) = body.peek() {
// If there's a second statement, insert _before_ it, but ensure this isn't a
// multi-statement line.
if indexer.in_multi_statement_line(statement, locator) {
if indexer.in_multi_statement_line(statement, locator.contents()) {
continue;
}
pos = locator.line_start(next.start());

View File

@@ -190,7 +190,7 @@ pub(crate) fn string_in_exception(checker: &mut Checker, stmt: &Stmt, exc: &Expr
let mut diagnostic =
Diagnostic::new(RawStringInException, first.range());
if let Some(indentation) =
whitespace::indentation(checker.locator(), stmt)
whitespace::indentation(checker.source(), stmt)
{
diagnostic.set_fix(generate_fix(
stmt,
@@ -208,8 +208,7 @@ pub(crate) fn string_in_exception(checker: &mut Checker, stmt: &Stmt, exc: &Expr
Expr::FString(_) => {
if checker.enabled(Rule::FStringInException) {
let mut diagnostic = Diagnostic::new(FStringInException, first.range());
if let Some(indentation) = whitespace::indentation(checker.locator(), stmt)
{
if let Some(indentation) = whitespace::indentation(checker.source(), stmt) {
diagnostic.set_fix(generate_fix(
stmt,
first,
@@ -231,7 +230,7 @@ pub(crate) fn string_in_exception(checker: &mut Checker, stmt: &Stmt, exc: &Expr
let mut diagnostic =
Diagnostic::new(DotFormatInException, first.range());
if let Some(indentation) =
whitespace::indentation(checker.locator(), stmt)
whitespace::indentation(checker.source(), stmt)
{
diagnostic.set_fix(generate_fix(
stmt,

View File

@@ -65,7 +65,7 @@ pub(crate) fn unconventional_import_alias(
let qualified_name = import.qualified_name().to_string();
let expected_alias = conventions.get(qualified_name.as_str())?;
let name = binding.name(checker.locator());
let name = binding.name(checker.source());
if name == expected_alias {
return None;
}

View File

@@ -105,7 +105,7 @@ pub(crate) fn unnecessary_placeholder(checker: &mut Checker, body: &[Stmt]) {
};
let mut diagnostic = Diagnostic::new(UnnecessaryPlaceholder { kind }, stmt.range());
let edit = if let Some(index) = trailing_comment_start_offset(stmt, checker.locator()) {
let edit = if let Some(index) = trailing_comment_start_offset(stmt, checker.source()) {
Edit::range_deletion(stmt.range().add_end(index))
} else {
fix::edits::delete_stmt(stmt, None, checker.locator(), checker.indexer())

View File

@@ -73,7 +73,7 @@ pub(crate) fn unaliased_collections_abc_set_import(
return None;
}
let name = binding.name(checker.locator());
let name = binding.name(checker.source());
if name == "AbstractSet" {
return None;
}

View File

@@ -386,7 +386,7 @@ pub(crate) fn unittest_raises_assertion(
);
if !checker
.comment_ranges()
.has_comments(call, checker.locator())
.has_comments(call, checker.source())
{
if let Some(args) = to_pytest_raises_args(checker, attr.as_str(), &call.arguments) {
diagnostic.try_set_fix(|| {
@@ -622,8 +622,8 @@ fn parenthesize<'a>(expression: &Expression<'a>, parent: &Expression<'a>) -> Exp
/// `assert a == "hello"` and `assert b == "world"`.
fn fix_composite_condition(stmt: &Stmt, locator: &Locator, stylist: &Stylist) -> Result<Edit> {
// Infer the indentation of the outer block.
let outer_indent =
whitespace::indentation(locator, stmt).context("Unable to fix multiline statement")?;
let outer_indent = whitespace::indentation(locator.contents(), stmt)
.context("Unable to fix multiline statement")?;
// Extract the module text.
let contents = locator.lines(stmt.range());
@@ -747,7 +747,7 @@ pub(crate) fn composite_condition(
&& !checker.comment_ranges().intersects(stmt.range())
&& !checker
.indexer()
.in_multi_statement_line(stmt, checker.locator())
.in_multi_statement_line(stmt, checker.source())
{
diagnostic.try_set_fix(|| {
fix_composite_condition(stmt, checker.locator(), checker.stylist())

View File

@@ -453,7 +453,7 @@ fn is_noreturn_func(func: &Expr, semantic: &SemanticModel) -> bool {
fn add_return_none(checker: &mut Checker, stmt: &Stmt, range: TextRange) {
let mut diagnostic = Diagnostic::new(ImplicitReturn, range);
if let Some(indent) = indentation(checker.locator(), stmt) {
if let Some(indent) = indentation(checker.source(), stmt) {
let mut content = String::new();
content.push_str(checker.stylist().line_ending().as_str());
content.push_str(indent);
@@ -851,14 +851,14 @@ fn remove_else(
};
// get the indentation of the `else`, since that is the indent level we want to end with
let Some(desired_indentation) = indentation(locator, elif_else) else {
let Some(desired_indentation) = indentation(locator.contents(), elif_else) else {
return Err(anyhow::anyhow!("Compound statement cannot be inlined"));
};
// If the statement is on the same line as the `else`, just remove the `else: `.
// Ex) `else: return True` -> `return True`
if let Some(first) = elif_else.body.first() {
if indexer.preceded_by_multi_statement_line(first, locator) {
if indexer.preceded_by_multi_statement_line(first, locator.contents()) {
return Ok(Fix::safe_edit(Edit::deletion(
elif_else.start(),
first.start(),

View File

@@ -536,7 +536,7 @@ pub(crate) fn compare_with_tuple(checker: &mut Checker, expr: &Expr) {
// Avoid removing comments.
if checker
.comment_ranges()
.has_comments(expr, checker.locator())
.has_comments(expr, checker.source())
{
continue;
}

View File

@@ -292,7 +292,7 @@ pub(super) fn collapse_nested_if(
nested_if: NestedIf,
) -> Result<Edit> {
// Infer the indentation of the outer block.
let Some(outer_indent) = whitespace::indentation(locator, &nested_if) else {
let Some(outer_indent) = whitespace::indentation(locator.contents(), &nested_if) else {
bail!("Unable to fix multiline statement");
};

View File

@@ -18,7 +18,7 @@ pub(crate) fn fix_multiple_with_statements(
with_stmt: &ast::StmtWith,
) -> Result<Edit> {
// Infer the indentation of the outer block.
let Some(outer_indent) = whitespace::indentation(locator, with_stmt) else {
let Some(outer_indent) = whitespace::indentation(locator.contents(), with_stmt) else {
bail!("Unable to fix multiline statement");
};

View File

@@ -211,7 +211,7 @@ pub(crate) fn if_else_block_instead_of_dict_get(checker: &mut Checker, stmt_if:
);
if !checker
.comment_ranges()
.has_comments(stmt_if, checker.locator())
.has_comments(stmt_if, checker.source())
{
diagnostic.set_fix(Fix::unsafe_edit(Edit::range_replacement(
contents,
@@ -300,7 +300,7 @@ pub(crate) fn if_exp_instead_of_dict_get(
);
if !checker
.comment_ranges()
.has_comments(expr, checker.locator())
.has_comments(expr, checker.source())
{
diagnostic.set_fix(Fix::unsafe_edit(Edit::range_replacement(
contents,

View File

@@ -227,7 +227,7 @@ pub(crate) fn if_else_block_instead_of_if_exp(checker: &mut Checker, stmt_if: &a
);
if !checker
.comment_ranges()
.has_comments(stmt_if, checker.locator())
.has_comments(stmt_if, checker.source())
{
diagnostic.set_fix(Fix::unsafe_edit(Edit::range_replacement(
contents,

View File

@@ -200,7 +200,7 @@ pub(crate) fn needless_bool(checker: &mut Checker, stmt: &Stmt) {
// Generate the replacement condition.
let condition = if checker
.comment_ranges()
.has_comments(&range, checker.locator())
.has_comments(&range, checker.source())
{
None
} else {

View File

@@ -127,7 +127,7 @@ pub(crate) fn suppressible_exception(
);
if !checker
.comment_ranges()
.has_comments(stmt, checker.locator())
.has_comments(stmt, checker.source())
{
diagnostic.try_set_fix(|| {
// let range = statement_range(stmt, checker.locator(), checker.indexer());

View File

@@ -1,7 +1,6 @@
use std::path::Path;
use itertools::{EitherOrBoth, Itertools};
use ruff_diagnostics::{Diagnostic, Edit, Fix, FixAvailability, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::whitespace::trailing_lines_end;
@@ -98,8 +97,9 @@ pub(crate) fn organize_imports(
// Special-cases: there's leading or trailing content in the import block. These
// are too hard to get right, and relatively rare, so flag but don't fix.
if indexer.preceded_by_multi_statement_line(block.imports.first().unwrap(), locator)
|| indexer.followed_by_multi_statement_line(block.imports.last().unwrap(), locator)
if indexer.preceded_by_multi_statement_line(block.imports.first().unwrap(), locator.contents())
|| indexer
.followed_by_multi_statement_line(block.imports.last().unwrap(), locator.contents())
{
return Some(Diagnostic::new(UnsortedImports, range));
}
@@ -114,7 +114,7 @@ pub(crate) fn organize_imports(
let trailing_line_end = if block.trailer.is_none() {
locator.full_line_end(range.end())
} else {
trailing_lines_end(block.imports.last().unwrap(), locator)
trailing_lines_end(block.imports.last().unwrap(), locator.contents())
};
// Generate the sorted import block.

View File

@@ -170,7 +170,7 @@ pub(crate) fn compound_statements(
let mut diagnostic = Diagnostic::new(UselessSemicolon, range);
diagnostic.set_fix(Fix::safe_edit(Edit::deletion(
indexer
.preceded_by_continuations(range.start(), locator)
.preceded_by_continuations(range.start(), locator.contents())
.unwrap_or(range.start()),
range.end(),
)));

View File

@@ -79,8 +79,8 @@ pub(crate) fn lambda_assignment(
stmt.range(),
);
if !has_leading_content(stmt.start(), checker.locator())
&& !has_trailing_content(stmt.end(), checker.locator())
if !has_leading_content(stmt.start(), checker.source())
&& !has_trailing_content(stmt.end(), checker.source())
{
let first_line = checker.locator().line(stmt.start());
let indentation = leading_indentation(first_line);

View File

@@ -336,7 +336,7 @@ pub(crate) fn literal_comparisons(checker: &mut Checker, compare: &ast::ExprComp
&compare.comparators,
compare.into(),
checker.comment_ranges(),
checker.locator(),
checker.source(),
);
for diagnostic in &mut diagnostics {
diagnostic.set_fix(Fix::unsafe_edit(Edit::range_replacement(

View File

@@ -68,7 +68,7 @@ fn split_imports(
indexer: &Indexer,
stylist: &Stylist,
) -> Fix {
if indexer.in_multi_statement_line(stmt, locator) {
if indexer.in_multi_statement_line(stmt, locator.contents()) {
// Ex) `x = 1; import os, sys` (convert to `x = 1; import os; import sys`)
let replacement = names
.iter()
@@ -90,7 +90,8 @@ fn split_imports(
Fix::safe_edit(Edit::range_replacement(replacement, stmt.range()))
} else {
// Ex) `import os, sys` (convert to `import os\nimport sys`)
let indentation = indentation_at_offset(stmt.start(), locator).unwrap_or_default();
let indentation =
indentation_at_offset(stmt.start(), locator.contents()).unwrap_or_default();
// Generate newline-delimited imports.
let replacement = names

View File

@@ -105,7 +105,7 @@ pub(crate) fn not_tests(checker: &mut Checker, unary_op: &ast::ExprUnaryOp) {
comparators,
unary_op.into(),
checker.comment_ranges(),
checker.locator(),
checker.source(),
),
unary_op.range(),
checker.locator(),
@@ -126,7 +126,7 @@ pub(crate) fn not_tests(checker: &mut Checker, unary_op: &ast::ExprUnaryOp) {
comparators,
unary_op.into(),
checker.comment_ranges(),
checker.locator(),
checker.source(),
),
unary_op.range(),
checker.locator(),

View File

@@ -100,7 +100,7 @@ pub(crate) fn trailing_whitespace(
diagnostic.set_fix(Fix::applicable_edit(
Edit::range_deletion(TextRange::new(
indexer
.preceded_by_continuations(line.start(), locator)
.preceded_by_continuations(line.start(), locator.contents())
.unwrap_or(range.start()),
range.end(),
)),

View File

@@ -240,7 +240,7 @@ pub(crate) fn blank_before_after_class(checker: &mut Checker, docstring: &Docstr
if let Some(first_line) = &first_line {
let trailing = first_line.as_str().trim_whitespace_start();
if let Some(next_statement) = trailing.strip_prefix(';') {
let indentation = indentation_at_offset(docstring.start(), checker.locator())
let indentation = indentation_at_offset(docstring.start(), checker.source())
.expect("Own line docstring must have indentation");
let mut diagnostic = Diagnostic::new(OneBlankLineAfterClass, docstring.range());
let line_ending = checker.stylist().line_ending().as_str();

View File

@@ -1850,7 +1850,7 @@ static GOOGLE_ARGS_REGEX: LazyLock<Regex> =
LazyLock::new(|| Regex::new(r"^\s*(\*?\*?\w+)\s*(\(.*?\))?\s*:(\r\n|\n)?\s*.+").unwrap());
fn args_section(context: &SectionContext) -> FxHashSet<String> {
let mut following_lines = context.following_lines().peekable();
let mut following_lines = context.following_lines();
let Some(first_line) = following_lines.next() else {
return FxHashSet::default();
};

View File

@@ -712,8 +712,8 @@ mod tests {
let parsed =
ruff_python_parser::parse_unchecked_source(source_kind.source_code(), source_type);
let locator = Locator::new(&contents);
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let indexer = Indexer::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), &contents);
let indexer = Indexer::from_tokens(parsed.tokens(), &contents);
let directives = directives::extract_directives(
parsed.tokens(),
directives::Flags::from_settings(&settings),

View File

@@ -295,7 +295,7 @@ pub(crate) fn unused_import(checker: &Checker, scope: &Scope, diagnostics: &mut
continue;
};
let name = binding.name(checker.locator());
let name = binding.name(checker.source());
// If an import is marked as required, avoid treating it as unused, regardless of whether
// it was _actually_ used.

View File

@@ -183,7 +183,7 @@ fn remove_unused_variable(binding: &Binding, checker: &Checker) -> Option<Fix> {
};
}
} else {
let name = binding.name(checker.locator());
let name = binding.name(checker.source());
let renamed = format!("_{name}");
if checker.settings.dummy_variable_rgx.is_match(&renamed) {
let edit = Edit::range_replacement(renamed, binding.range());

View File

@@ -113,7 +113,7 @@ fn convert_to_elif(
let trivia_range = TextRange::new(else_line_end, inner_if_line_start);
// Identify the indentation of the outer clause
let Some(indentation) = indentation(locator, else_clause) else {
let Some(indentation) = indentation(locator.contents(), else_clause) else {
return Err(anyhow::anyhow!("`else` is expected to be on its own line"));
};

View File

@@ -47,7 +47,7 @@ pub(crate) fn empty_comments(
comment_ranges: &CommentRanges,
locator: &Locator,
) {
let block_comments = comment_ranges.block_comments(locator);
let block_comments = comment_ranges.block_comments(locator.contents());
for range in comment_ranges {
// Ignore comments that are part of multi-line "comment blocks".

View File

@@ -157,7 +157,7 @@ pub(crate) fn nested_min_max(
let mut diagnostic = Diagnostic::new(NestedMinMax { func: min_max }, expr.range());
if !checker
.comment_ranges()
.has_comments(expr, checker.locator())
.has_comments(expr, checker.source())
{
let flattened_expr = Expr::Call(ast::ExprCall {
func: Box::new(func.clone()),

View File

@@ -175,7 +175,7 @@ fn get_undecorated_methods(checker: &mut Checker, class_stmt: &Stmt, method_type
TextRange::new(stmt.range().start(), stmt.range().start()),
);
let indentation = indentation_at_offset(stmt.range().start(), checker.locator());
let indentation = indentation_at_offset(stmt.range().start(), checker.source());
match indentation {
Some(indentation) => {

View File

@@ -3,7 +3,6 @@ use std::fmt;
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_semantic::{Binding, BindingKind};
use ruff_source_file::Locator;
use ruff_text_size::Ranged;
/// ## What it does
@@ -43,8 +42,8 @@ impl Violation for NonAsciiName {
}
/// PLC2401
pub(crate) fn non_ascii_name(binding: &Binding, locator: &Locator) -> Option<Diagnostic> {
let name = binding.name(locator);
pub(crate) fn non_ascii_name(binding: &Binding, source: &str) -> Option<Diagnostic> {
let name = binding.name(source);
if name.is_ascii() {
return None;
}

View File

@@ -146,7 +146,7 @@ fn remove_else(
return Err(anyhow::anyhow!("Empty `else` clause"));
};
let start_indentation = indentation(locator, start);
let start_indentation = indentation(locator.contents(), start);
if start_indentation.is_none() {
// Inline `else` block (e.g., `else: x = 1`).
Ok(Fix::safe_edit(Edit::deletion(
@@ -155,7 +155,7 @@ fn remove_else(
)))
} else {
// Identify the indentation of the loop itself (e.g., the `while` or `for`).
let Some(desired_indentation) = indentation(locator, stmt) else {
let Some(desired_indentation) = indentation(locator.contents(), stmt) else {
return Err(anyhow::anyhow!("Compound statement cannot be inlined"));
};

View File

@@ -617,7 +617,7 @@ impl<'a> ImportReplacer<'a> {
let fix = Some(matched);
Some((operation, fix))
} else {
let indentation = indentation(self.locator, self.import_from_stmt);
let indentation = indentation(self.locator.contents(), self.import_from_stmt);
// If we have matched _and_ unmatched names, but the import is not on its own
// line, we can't add a statement after it. For example, if we have

View File

@@ -282,7 +282,7 @@ pub(crate) fn deprecated_mock_import(checker: &mut Checker, stmt: &Stmt) {
.any(|name| &name.name == "mock" || &name.name == "mock.mock")
{
// Generate the fix, if needed, which is shared between all `mock` imports.
let content = if let Some(indent) = indentation(checker.locator(), stmt) {
let content = if let Some(indent) = indentation(checker.source(), stmt) {
match format_import(stmt, indent, checker.locator(), checker.stylist()) {
Ok(content) => Some(content),
Err(e) => {
@@ -330,7 +330,7 @@ pub(crate) fn deprecated_mock_import(checker: &mut Checker, stmt: &Stmt) {
},
stmt.range(),
);
if let Some(indent) = indentation(checker.locator(), stmt) {
if let Some(indent) = indentation(checker.source(), stmt) {
diagnostic.try_set_fix(|| {
format_import_from(stmt, indent, checker.locator(), checker.stylist())
.map(|content| Edit::range_replacement(content, stmt.range()))

View File

@@ -299,7 +299,7 @@ fn fix_always_false_branch(
}) => {
let start = body.first()?;
let end = body.last()?;
if indentation(checker.locator(), start).is_none() {
if indentation(checker.source(), start).is_none() {
// Inline `else` block (e.g., `else: x = 1`).
Some(Fix::unsafe_edit(Edit::range_replacement(
checker
@@ -309,7 +309,7 @@ fn fix_always_false_branch(
stmt_if.range(),
)))
} else {
indentation(checker.locator(), stmt_if)
indentation(checker.source(), stmt_if)
.and_then(|indentation| {
adjust_indentation(
TextRange::new(
@@ -377,7 +377,7 @@ fn fix_always_true_branch(
// the rest.
let start = branch.body.first()?;
let end = branch.body.last()?;
if indentation(checker.locator(), start).is_none() {
if indentation(checker.source(), start).is_none() {
// Inline `if` block (e.g., `if ...: x = 1`).
Some(Fix::unsafe_edit(Edit::range_replacement(
checker
@@ -387,7 +387,7 @@ fn fix_always_true_branch(
stmt_if.range,
)))
} else {
indentation(checker.locator(), &stmt_if)
indentation(checker.source(), &stmt_if)
.and_then(|indentation| {
adjust_indentation(
TextRange::new(checker.locator().line_start(start.start()), end.end()),

View File

@@ -251,7 +251,7 @@ fn clean_params_dictionary(right: &Expr, locator: &Locator, stylist: &Stylist) -
seen.push(key_string.to_str());
if is_multi_line {
if indent.is_none() {
indent = indentation(locator, key);
indent = indentation(locator.contents(), key);
}
}

View File

@@ -75,7 +75,7 @@ pub(crate) fn unnecessary_coding_comment(
// x = 2
// ```
if indexer
.preceded_by_continuations(line_range.start(), locator)
.preceded_by_continuations(line_range.start(), locator.contents())
.is_some()
{
continue;

View File

@@ -84,7 +84,7 @@ pub(crate) fn single_item_membership_test(
&[item.clone()],
expr.into(),
checker.comment_ranges(),
checker.locator(),
checker.source(),
),
expr.range(),
checker.locator(),

View File

@@ -200,7 +200,7 @@ pub(crate) fn collection_literal_concatenation(checker: &mut Checker, expr: &Exp
);
if !checker
.comment_ranges()
.has_comments(expr, checker.locator())
.has_comments(expr, checker.source())
{
// This suggestion could be unsafe if the non-literal expression in the
// expression has overridden the `__add__` (or `__radd__`) magic methods.

View File

@@ -166,11 +166,14 @@ impl<'src, 'loc> UselessSuppressionComments<'src, 'loc> {
{
if following.is_first_statement_in_alternate_body(enclosing) {
// check indentation
let comment_indentation =
comment_indentation_after(preceding, comment.range, self.locator);
let comment_indentation = comment_indentation_after(
preceding,
comment.range,
self.locator.contents(),
);
let preceding_indentation =
indentation_at_offset(preceding.start(), self.locator)
indentation_at_offset(preceding.start(), self.locator.contents())
.unwrap_or_default()
.text_len();
if comment_indentation != preceding_indentation {

View File

@@ -175,7 +175,7 @@ fn use_initvar(
}
};
let indentation = indentation_at_offset(post_init_def.start(), checker.locator())
let indentation = indentation_at_offset(post_init_def.start(), checker.source())
.context("Failed to calculate leading indentation of `__post_init__` method")?;
let content = textwrap::indent(&content, indentation);

View File

@@ -146,9 +146,11 @@ where
// We want `# fmt: on` to be considered a trailing comment of `func(x)` instead of a leading comment
// on `func2(y)`.
if line_position.is_own_line() {
let comment_indent = comment_indentation_after(node, range, self.locator);
let comment_indent =
comment_indentation_after(node, range, self.locator.contents());
let node_indent = TextSize::of(
indentation_at_offset(node.start(), self.locator).unwrap_or_default(),
indentation_at_offset(node.start(), self.locator.contents())
.unwrap_or_default(),
);
if node_indent >= comment_indent {
break;

View File

@@ -16,15 +16,15 @@
use ruff_diagnostics::{Diagnostic, Edit, Fix, FixAvailability, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_trivia::CommentRanges;
use ruff_source_file::Locator;
use ruff_source_file::Located;
use ruff_text_size::TextSize;
use crate::registry::Rule;
/// Check if a comment exists anywhere in a given file
fn comment_exists(text: &str, locator: &Locator, comment_ranges: &CommentRanges) -> bool {
fn comment_exists(text: &str, source: &str, comment_ranges: &CommentRanges) -> bool {
for range in comment_ranges {
let comment_text = locator.slice(range);
let comment_text = source.slice(range);
if text.trim_end() == comment_text {
return true;
}
@@ -48,7 +48,7 @@ pub(crate) const TEST_RULES: &[Rule] = &[
];
pub(crate) trait TestRule {
fn diagnostic(locator: &Locator, comment_ranges: &CommentRanges) -> Option<Diagnostic>;
fn diagnostic(source: &str, comment_ranges: &CommentRanges) -> Option<Diagnostic>;
}
/// ## What it does
@@ -79,7 +79,7 @@ impl Violation for StableTestRule {
}
impl TestRule for StableTestRule {
fn diagnostic(_locator: &Locator, _comment_ranges: &CommentRanges) -> Option<Diagnostic> {
fn diagnostic(_source: &str, _comment_ranges: &CommentRanges) -> Option<Diagnostic> {
Some(Diagnostic::new(
StableTestRule,
ruff_text_size::TextRange::default(),
@@ -115,17 +115,14 @@ impl Violation for StableTestRuleSafeFix {
}
impl TestRule for StableTestRuleSafeFix {
fn diagnostic(locator: &Locator, comment_ranges: &CommentRanges) -> Option<Diagnostic> {
fn diagnostic(source: &str, comment_ranges: &CommentRanges) -> Option<Diagnostic> {
let comment = format!("# fix from stable-test-rule-safe-fix\n");
if comment_exists(&comment, locator, comment_ranges) {
if comment_exists(&comment, source, comment_ranges) {
None
} else {
Some(
Diagnostic::new(StableTestRuleSafeFix, ruff_text_size::TextRange::default())
.with_fix(Fix::safe_edit(Edit::insertion(
comment.to_string(),
TextSize::new(0),
))),
.with_fix(Fix::safe_edit(Edit::insertion(comment, TextSize::new(0)))),
)
}
}
@@ -159,9 +156,9 @@ impl Violation for StableTestRuleUnsafeFix {
}
impl TestRule for StableTestRuleUnsafeFix {
fn diagnostic(locator: &Locator, comment_ranges: &CommentRanges) -> Option<Diagnostic> {
fn diagnostic(source: &str, comment_ranges: &CommentRanges) -> Option<Diagnostic> {
let comment = format!("# fix from stable-test-rule-unsafe-fix\n");
if comment_exists(&comment, locator, comment_ranges) {
if comment_exists(&comment, source, comment_ranges) {
None
} else {
Some(
@@ -169,10 +166,7 @@ impl TestRule for StableTestRuleUnsafeFix {
StableTestRuleUnsafeFix,
ruff_text_size::TextRange::default(),
)
.with_fix(Fix::unsafe_edit(Edit::insertion(
comment.to_string(),
TextSize::new(0),
))),
.with_fix(Fix::unsafe_edit(Edit::insertion(comment, TextSize::new(0)))),
)
}
}
@@ -206,9 +200,9 @@ impl Violation for StableTestRuleDisplayOnlyFix {
}
impl TestRule for StableTestRuleDisplayOnlyFix {
fn diagnostic(locator: &Locator, comment_ranges: &CommentRanges) -> Option<Diagnostic> {
fn diagnostic(source: &str, comment_ranges: &CommentRanges) -> Option<Diagnostic> {
let comment = format!("# fix from stable-test-rule-display-only-fix\n");
if comment_exists(&comment, locator, comment_ranges) {
if comment_exists(&comment, source, comment_ranges) {
None
} else {
Some(
@@ -217,7 +211,7 @@ impl TestRule for StableTestRuleDisplayOnlyFix {
ruff_text_size::TextRange::default(),
)
.with_fix(Fix::display_only_edit(Edit::insertion(
comment.to_string(),
comment,
TextSize::new(0),
))),
)
@@ -253,7 +247,7 @@ impl Violation for PreviewTestRule {
}
impl TestRule for PreviewTestRule {
fn diagnostic(_locator: &Locator, _comment_ranges: &CommentRanges) -> Option<Diagnostic> {
fn diagnostic(_source: &str, _comment_ranges: &CommentRanges) -> Option<Diagnostic> {
Some(Diagnostic::new(
PreviewTestRule,
ruff_text_size::TextRange::default(),
@@ -289,7 +283,7 @@ impl Violation for DeprecatedTestRule {
}
impl TestRule for DeprecatedTestRule {
fn diagnostic(_locator: &Locator, _comment_ranges: &CommentRanges) -> Option<Diagnostic> {
fn diagnostic(_source: &str, _comment_ranges: &CommentRanges) -> Option<Diagnostic> {
Some(Diagnostic::new(
DeprecatedTestRule,
ruff_text_size::TextRange::default(),
@@ -325,7 +319,7 @@ impl Violation for AnotherDeprecatedTestRule {
}
impl TestRule for AnotherDeprecatedTestRule {
fn diagnostic(_locator: &Locator, _comment_ranges: &CommentRanges) -> Option<Diagnostic> {
fn diagnostic(_source: &str, _comment_ranges: &CommentRanges) -> Option<Diagnostic> {
Some(Diagnostic::new(
AnotherDeprecatedTestRule,
ruff_text_size::TextRange::default(),
@@ -361,7 +355,7 @@ impl Violation for RemovedTestRule {
}
impl TestRule for RemovedTestRule {
fn diagnostic(_locator: &Locator, _comment_ranges: &CommentRanges) -> Option<Diagnostic> {
fn diagnostic(_source: &str, _comment_ranges: &CommentRanges) -> Option<Diagnostic> {
Some(Diagnostic::new(
RemovedTestRule,
ruff_text_size::TextRange::default(),
@@ -397,7 +391,7 @@ impl Violation for AnotherRemovedTestRule {
}
impl TestRule for AnotherRemovedTestRule {
fn diagnostic(_locator: &Locator, _comment_ranges: &CommentRanges) -> Option<Diagnostic> {
fn diagnostic(_source: &str, _comment_ranges: &CommentRanges) -> Option<Diagnostic> {
Some(Diagnostic::new(
AnotherRemovedTestRule,
ruff_text_size::TextRange::default(),
@@ -433,7 +427,7 @@ impl Violation for RedirectedFromTestRule {
}
impl TestRule for RedirectedFromTestRule {
fn diagnostic(_locator: &Locator, _comment_ranges: &CommentRanges) -> Option<Diagnostic> {
fn diagnostic(_source: &str, _comment_ranges: &CommentRanges) -> Option<Diagnostic> {
Some(Diagnostic::new(
RedirectedFromTestRule,
ruff_text_size::TextRange::default(),
@@ -469,7 +463,7 @@ impl Violation for RedirectedToTestRule {
}
impl TestRule for RedirectedToTestRule {
fn diagnostic(_locator: &Locator, _comment_ranges: &CommentRanges) -> Option<Diagnostic> {
fn diagnostic(_source: &str, _comment_ranges: &CommentRanges) -> Option<Diagnostic> {
Some(Diagnostic::new(
RedirectedToTestRule,
ruff_text_size::TextRange::default(),
@@ -505,7 +499,7 @@ impl Violation for RedirectedFromPrefixTestRule {
}
impl TestRule for RedirectedFromPrefixTestRule {
fn diagnostic(_locator: &Locator, _comment_ranges: &CommentRanges) -> Option<Diagnostic> {
fn diagnostic(_source: &str, _comment_ranges: &CommentRanges) -> Option<Diagnostic> {
Some(Diagnostic::new(
RedirectedFromPrefixTestRule,
ruff_text_size::TextRange::default(),

View File

@@ -111,8 +111,8 @@ pub(crate) fn test_contents<'a>(
let source_type = PySourceType::from(path);
let parsed = ruff_python_parser::parse_unchecked_source(source_kind.source_code(), source_type);
let locator = Locator::new(source_kind.source_code());
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let indexer = Indexer::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), source_kind.source_code());
let indexer = Indexer::from_tokens(parsed.tokens(), source_kind.source_code());
let directives = directives::extract_directives(
parsed.tokens(),
directives::Flags::from_settings(settings),
@@ -174,8 +174,8 @@ pub(crate) fn test_contents<'a>(
let parsed =
ruff_python_parser::parse_unchecked_source(transformed.source_code(), source_type);
let locator = Locator::new(transformed.source_code());
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let indexer = Indexer::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), transformed.source_code());
let indexer = Indexer::from_tokens(parsed.tokens(), transformed.source_code());
let directives = directives::extract_directives(
parsed.tokens(),
directives::Flags::from_settings(settings),

View File

@@ -60,15 +60,13 @@ pub(crate) fn derive_impl(input: DeriveInput) -> syn::Result<TokenStream> {
}
}
let docs: Vec<&Attribute> = struct_attributes
let docs = struct_attributes
.iter()
.filter(|attr| attr.path().is_ident("doc"))
.collect();
.filter(|attr| attr.path().is_ident("doc"));
// Convert the list of `doc` attributes into a single string.
let doc = dedent(
&docs
.into_iter()
.map(parse_doc)
.collect::<syn::Result<Vec<_>>>()?
.join("\n"),

View File

@@ -4,7 +4,7 @@ use std::path::Path;
use rustc_hash::FxHashMap;
use ruff_python_trivia::{indentation_at_offset, CommentRanges, SimpleTokenKind, SimpleTokenizer};
use ruff_source_file::Locator;
use ruff_source_file::Located;
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use crate::name::{Name, QualifiedName, QualifiedNameBuilder};
@@ -1333,19 +1333,16 @@ pub fn generate_comparison(
comparators: &[Expr],
parent: AnyNodeRef,
comment_ranges: &CommentRanges,
locator: &Locator,
source: &str,
) -> String {
let start = left.start();
let end = comparators.last().map_or_else(|| left.end(), Ranged::end);
let mut contents = String::with_capacity(usize::from(end - start));
// Add the left side of the comparison.
contents.push_str(
locator.slice(
parenthesized_range(left.into(), parent, comment_ranges, locator.contents())
.unwrap_or(left.range()),
),
);
contents.push_str(source.slice(
parenthesized_range(left.into(), parent, comment_ranges, source).unwrap_or(left.range()),
));
for (op, comparator) in ops.iter().zip(comparators) {
// Add the operator.
@@ -1364,14 +1361,9 @@ pub fn generate_comparison(
// Add the right side of the comparison.
contents.push_str(
locator.slice(
parenthesized_range(
comparator.into(),
parent,
comment_ranges,
locator.contents(),
)
.unwrap_or(comparator.range()),
source.slice(
parenthesized_range(comparator.into(), parent, comment_ranges, source)
.unwrap_or(comparator.range()),
),
);
}
@@ -1512,17 +1504,17 @@ pub fn typing_union(elts: &[Expr], binding: Name) -> Expr {
pub fn comment_indentation_after(
preceding: AnyNodeRef,
comment_range: TextRange,
locator: &Locator,
source: &str,
) -> TextSize {
let tokenizer = SimpleTokenizer::new(
locator.contents(),
TextRange::new(locator.full_line_end(preceding.end()), comment_range.end()),
source,
TextRange::new(source.full_line_end(preceding.end()), comment_range.end()),
);
tokenizer
.filter_map(|token| {
if token.kind() == SimpleTokenKind::Comment {
indentation_at_offset(token.start(), locator).map(TextLen::text_len)
indentation_at_offset(token.start(), source).map(TextLen::text_len)
} else {
None
}

View File

@@ -1,35 +1,35 @@
use ruff_python_trivia::{indentation_at_offset, is_python_whitespace, PythonWhitespace};
use ruff_source_file::{Locator, UniversalNewlineIterator};
use ruff_source_file::{Located, UniversalNewlineIterator};
use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::Stmt;
/// Extract the leading indentation from a line.
#[inline]
pub fn indentation<'a, T>(locator: &'a Locator, located: &T) -> Option<&'a str>
pub fn indentation<'a, T>(source: &'a str, ranged: &T) -> Option<&'a str>
where
T: Ranged,
{
indentation_at_offset(located.start(), locator)
indentation_at_offset(ranged.start(), source)
}
/// Return the end offset at which the empty lines following a statement.
pub fn trailing_lines_end(stmt: &Stmt, locator: &Locator) -> TextSize {
let line_end = locator.full_line_end(stmt.end());
UniversalNewlineIterator::with_offset(locator.after(line_end), line_end)
pub fn trailing_lines_end(stmt: &Stmt, source: &str) -> TextSize {
let line_end = source.full_line_end(stmt.end());
UniversalNewlineIterator::with_offset(source.after(line_end), line_end)
.take_while(|line| line.trim_whitespace().is_empty())
.last()
.map_or(line_end, |line| line.full_end())
}
/// If a [`Ranged`] has a trailing comment, return the index of the hash.
pub fn trailing_comment_start_offset<T>(located: &T, locator: &Locator) -> Option<TextSize>
pub fn trailing_comment_start_offset<T>(located: &T, source: &str) -> Option<TextSize>
where
T: Ranged,
{
let line_end = locator.line_end(located.end());
let line_end = source.line_end(located.end());
let trailing = locator.slice(TextRange::new(located.end(), line_end));
let trailing = source.slice(TextRange::new(located.end(), line_end));
for (index, char) in trailing.char_indices() {
if char == '#' {

View File

@@ -3,14 +3,12 @@ mod stylist;
pub use generator::Generator;
use ruff_python_parser::{parse_module, ParseError};
use ruff_source_file::Locator;
pub use stylist::Stylist;
/// Run round-trip source code generation on a given Python code.
pub fn round_trip(code: &str) -> Result<String, ParseError> {
let locator = Locator::new(code);
let parsed = parse_module(code)?;
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), code);
let mut generator: Generator = (&stylist).into();
generator.unparse_suite(parsed.suite());
Ok(generator.generate())

View File

@@ -5,12 +5,12 @@ use std::ops::Deref;
use ruff_python_ast::str::Quote;
use ruff_python_parser::{Token, TokenKind, Tokens};
use ruff_source_file::{find_newline, LineEnding, Locator};
use ruff_source_file::{find_newline, LineEnding, Located};
use ruff_text_size::Ranged;
#[derive(Debug, Clone)]
pub struct Stylist<'a> {
locator: &'a Locator<'a>,
source: &'a str,
indentation: Indentation,
quote: Quote,
line_ending: OnceCell<LineEnding>,
@@ -27,18 +27,17 @@ impl<'a> Stylist<'a> {
pub fn line_ending(&'a self) -> LineEnding {
*self.line_ending.get_or_init(|| {
let contents = self.locator.contents();
find_newline(contents)
find_newline(self.source)
.map(|(_, ending)| ending)
.unwrap_or_default()
})
}
pub fn from_tokens(tokens: &Tokens, locator: &'a Locator<'a>) -> Self {
let indentation = detect_indentation(tokens, locator);
pub fn from_tokens(tokens: &Tokens, source: &'a str) -> Self {
let indentation = detect_indentation(tokens, source);
Self {
locator,
source,
indentation,
quote: detect_quote(tokens),
line_ending: OnceCell::default(),
@@ -59,7 +58,7 @@ fn detect_quote(tokens: &[Token]) -> Quote {
Quote::default()
}
fn detect_indentation(tokens: &[Token], locator: &Locator) -> Indentation {
fn detect_indentation(tokens: &[Token], source: &str) -> Indentation {
let indent_range = tokens.iter().find_map(|token| {
if matches!(token.kind(), TokenKind::Indent) {
Some(token.range())
@@ -69,7 +68,7 @@ fn detect_indentation(tokens: &[Token], locator: &Locator) -> Indentation {
});
if let Some(indent_range) = indent_range {
let mut whitespace = locator.slice(indent_range);
let mut whitespace = source.slice(indent_range);
// https://docs.python.org/3/reference/lexical_analysis.html#indentation
// > A formfeed character may be present at the start of the line; it will be ignored for
// > the indentation calculations above. Formfeed characters occurring elsewhere in the
@@ -98,7 +97,7 @@ fn detect_indentation(tokens: &[Token], locator: &Locator) -> Indentation {
// ```
for token in tokens {
if token.kind() == TokenKind::NonLogicalNewline {
let line = locator.line(token.end());
let line = source.line_str(token.end());
let indent_index = line.find(|c: char| !c.is_whitespace());
if let Some(indent_index) = indent_index {
if indent_index > 0 {
@@ -154,41 +153,36 @@ mod tests {
use ruff_source_file::{find_newline, LineEnding};
use super::{Indentation, Quote, Stylist};
use ruff_source_file::Locator;
#[test]
fn indentation() {
let contents = r"x = 1";
let locator = Locator::new(contents);
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
assert_eq!(stylist.indentation(), &Indentation::default());
let contents = r"
if True:
pass
";
let locator = Locator::new(contents);
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
assert_eq!(stylist.indentation(), &Indentation(" ".to_string()));
let contents = r"
if True:
pass
";
let locator = Locator::new(contents);
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
assert_eq!(stylist.indentation(), &Indentation(" ".to_string()));
let contents = r"
if True:
pass
";
let locator = Locator::new(contents);
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
assert_eq!(stylist.indentation(), &Indentation("\t".to_string()));
let contents = r"
@@ -198,9 +192,8 @@ x = (
3,
)
";
let locator = Locator::new(contents);
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
assert_eq!(stylist.indentation(), &Indentation(" ".to_string()));
// formfeed indent, see `detect_indentation` comment.
@@ -209,9 +202,8 @@ class FormFeedIndent:
def __init__(self, a=[]):
print(a)
";
let locator = Locator::new(contents);
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
assert_eq!(stylist.indentation(), &Indentation(" ".to_string()));
}
@@ -224,10 +216,9 @@ x = (
 3,
)
";
let locator = Locator::new(contents);
let parsed = parse_unchecked(contents, Mode::Module);
assert_eq!(
Stylist::from_tokens(parsed.tokens(), &locator).indentation(),
Stylist::from_tokens(parsed.tokens(), contents).indentation(),
&Indentation(" ".to_string())
);
}
@@ -235,39 +226,33 @@ x = (
#[test]
fn quote() {
let contents = r"x = 1";
let locator = Locator::new(contents);
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
assert_eq!(stylist.quote(), Quote::default());
let contents = r"x = '1'";
let locator = Locator::new(contents);
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
assert_eq!(stylist.quote(), Quote::Single);
let contents = r"x = f'1'";
let locator = Locator::new(contents);
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
assert_eq!(stylist.quote(), Quote::Single);
let contents = r#"x = "1""#;
let locator = Locator::new(contents);
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
assert_eq!(stylist.quote(), Quote::Double);
let contents = r#"x = f"1""#;
let locator = Locator::new(contents);
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
assert_eq!(stylist.quote(), Quote::Double);
let contents = r#"s = "It's done.""#;
let locator = Locator::new(contents);
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
assert_eq!(stylist.quote(), Quote::Double);
// No style if only double quoted docstring (will take default Double)
@@ -276,9 +261,8 @@ def f():
"""Docstring."""
pass
"#;
let locator = Locator::new(contents);
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
assert_eq!(stylist.quote(), Quote::default());
// Detect from string literal appearing after docstring
@@ -287,9 +271,8 @@ def f():
a = 'v'
"#;
let locator = Locator::new(contents);
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
assert_eq!(stylist.quote(), Quote::Single);
let contents = r#"
@@ -297,9 +280,8 @@ a = 'v'
a = "v"
"#;
let locator = Locator::new(contents);
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
assert_eq!(stylist.quote(), Quote::Double);
// Detect from f-string appearing after docstring
@@ -308,9 +290,8 @@ a = "v"
a = f'v'
"#;
let locator = Locator::new(contents);
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
assert_eq!(stylist.quote(), Quote::Single);
let contents = r#"
@@ -318,17 +299,15 @@ a = f'v'
a = f"v"
"#;
let locator = Locator::new(contents);
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
assert_eq!(stylist.quote(), Quote::Double);
let contents = r"
f'''Module docstring.'''
";
let locator = Locator::new(contents);
let parsed = parse_module(contents).unwrap();
let stylist = Stylist::from_tokens(parsed.tokens(), &locator);
let stylist = Stylist::from_tokens(parsed.tokens(), contents);
assert_eq!(stylist.quote(), Quote::Single);
}

Some files were not shown because too many files have changed in this diff Show More