Compare commits

..

3 Commits

Author SHA1 Message Date
Carl Meyer
3140beb6a4 WIP: start on passing context to call 2024-09-17 14:13:50 -07:00
Carl Meyer
09812b3c23 WIP: convert IterationOutcome to use InferenceContext 2024-09-17 14:10:52 -07:00
Carl Meyer
1cb570cdd2 WIP: extracted TypeInferenceContext 2024-09-17 14:10:51 -07:00
99 changed files with 1242 additions and 5501 deletions

View File

@@ -1,5 +1,3 @@
# This file was autogenerated by cargo-dist: https://opensource.axo.dev/cargo-dist/
#
# Copyright 2022-2024, axodotdev
# SPDX-License-Identifier: MIT or Apache-2.0
#
@@ -66,7 +64,7 @@ jobs:
# we specify bash to get pipefail; it guards against the `curl` command
# failing. otherwise `sh` won't catch that `curl` returned non-0
shell: bash
run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.22.1/cargo-dist-installer.sh | sh"
run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.18.0/cargo-dist-installer.sh | sh"
- name: Cache cargo-dist
uses: actions/upload-artifact@v4
with:

View File

@@ -1,31 +1,5 @@
# Changelog
## 0.6.6
### Preview features
- \[`refurb`\] Skip `slice-to-remove-prefix-or-suffix` (`FURB188`) when non-trivial slice steps are present ([#13405](https://github.com/astral-sh/ruff/pull/13405))
- Add a subcommand to generate dependency graphs ([#13402](https://github.com/astral-sh/ruff/pull/13402))
### Formatter
- Fix placement of inline parameter comments ([#13379](https://github.com/astral-sh/ruff/pull/13379))
### Server
- Fix off-by one error in the `LineIndex::offset` calculation ([#13407](https://github.com/astral-sh/ruff/pull/13407))
### Bug fixes
- \[`fastapi`\] Respect FastAPI aliases in route definitions ([#13394](https://github.com/astral-sh/ruff/pull/13394))
- \[`pydocstyle`\] Respect word boundaries when detecting function signature in docs ([#13388](https://github.com/astral-sh/ruff/pull/13388))
### Documentation
- Add backlinks to rule overview linter ([#13368](https://github.com/astral-sh/ruff/pull/13368))
- Fix documentation for editor vim plugin ALE ([#13348](https://github.com/astral-sh/ruff/pull/13348))
- Fix rendering of `FURB188` docs ([#13406](https://github.com/astral-sh/ruff/pull/13406))
## 0.6.5
### Preview features

104
Cargo.lock generated
View File

@@ -161,21 +161,6 @@ version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
[[package]]
name = "assert_fs"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7efdb1fdb47602827a342857666feb372712cbc64b414172bd6b167a02927674"
dependencies = [
"anstyle",
"doc-comment",
"globwalk",
"predicates",
"predicates-core",
"predicates-tree",
"tempfile",
]
[[package]]
name = "autocfg"
version = "1.2.0"
@@ -255,9 +240,6 @@ name = "camino"
version = "1.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3"
dependencies = [
"serde",
]
[[package]]
name = "cast"
@@ -740,12 +722,6 @@ version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8"
[[package]]
name = "difflib"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8"
[[package]]
name = "digest"
version = "0.10.7"
@@ -797,12 +773,6 @@ dependencies = [
"windows-sys 0.48.0",
]
[[package]]
name = "doc-comment"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"
[[package]]
name = "drop_bomb"
version = "0.1.5"
@@ -998,17 +968,6 @@ dependencies = [
"regex-syntax 0.8.3",
]
[[package]]
name = "globwalk"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0bf760ebf69878d9fd8f110c89703d90ce35095324d1f1edcb595c63945ee757"
dependencies = [
"bitflags 2.6.0",
"ignore",
"walkdir",
]
[[package]]
name = "half"
version = "2.4.1"
@@ -1905,33 +1864,6 @@ version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
[[package]]
name = "predicates"
version = "3.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e9086cc7640c29a356d1a29fd134380bee9d8f79a17410aa76e7ad295f42c97"
dependencies = [
"anstyle",
"difflib",
"predicates-core",
]
[[package]]
name = "predicates-core"
version = "1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae8177bee8e75d6846599c6b9ff679ed51e882816914eec639944d7c9aa11931"
[[package]]
name = "predicates-tree"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41b740d195ed3166cd147c8047ec98db0e22ec019eb8eeb76d343b795304fb13"
dependencies = [
"predicates-core",
"termtree",
]
[[package]]
name = "pretty_assertions"
version = "1.4.0"
@@ -2255,11 +2187,10 @@ dependencies = [
[[package]]
name = "ruff"
version = "0.6.6"
version = "0.6.5"
dependencies = [
"anyhow",
"argfile",
"assert_fs",
"bincode",
"bitflags 2.6.0",
"cachedir",
@@ -2269,9 +2200,7 @@ dependencies = [
"clearscreen",
"colored",
"filetime",
"globwalk",
"ignore",
"indoc",
"insta",
"insta-cmd",
"is-macro",
@@ -2283,9 +2212,7 @@ dependencies = [
"rayon",
"regex",
"ruff_cache",
"ruff_db",
"ruff_diagnostics",
"ruff_graph",
"ruff_linter",
"ruff_macros",
"ruff_notebook",
@@ -2368,7 +2295,6 @@ dependencies = [
"ruff_text_size",
"rustc-hash 2.0.0",
"salsa",
"serde",
"tempfile",
"thiserror",
"tracing",
@@ -2444,23 +2370,6 @@ dependencies = [
"unicode-width",
]
[[package]]
name = "ruff_graph"
version = "0.1.0"
dependencies = [
"anyhow",
"clap",
"red_knot_python_semantic",
"ruff_cache",
"ruff_db",
"ruff_linter",
"ruff_macros",
"ruff_python_ast",
"salsa",
"schemars",
"serde",
]
[[package]]
name = "ruff_index"
version = "0.0.0"
@@ -2471,7 +2380,7 @@ dependencies = [
[[package]]
name = "ruff_linter"
version = "0.6.6"
version = "0.6.5"
dependencies = [
"aho-corasick",
"annotate-snippets 0.9.2",
@@ -2791,7 +2700,7 @@ dependencies = [
[[package]]
name = "ruff_wasm"
version = "0.6.6"
version = "0.6.5"
dependencies = [
"console_error_panic_hook",
"console_log",
@@ -2834,7 +2743,6 @@ dependencies = [
"regex",
"ruff_cache",
"ruff_formatter",
"ruff_graph",
"ruff_linter",
"ruff_macros",
"ruff_python_ast",
@@ -3289,12 +3197,6 @@ dependencies = [
"phf_codegen",
]
[[package]]
name = "termtree"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
[[package]]
name = "test-case"
version = "3.3.1"

View File

@@ -17,7 +17,6 @@ ruff_cache = { path = "crates/ruff_cache" }
ruff_db = { path = "crates/ruff_db" }
ruff_diagnostics = { path = "crates/ruff_diagnostics" }
ruff_formatter = { path = "crates/ruff_formatter" }
ruff_graph = { path = "crates/ruff_graph" }
ruff_index = { path = "crates/ruff_index" }
ruff_linter = { path = "crates/ruff_linter" }
ruff_macros = { path = "crates/ruff_macros" }
@@ -43,7 +42,6 @@ red_knot_workspace = { path = "crates/red_knot_workspace" }
aho-corasick = { version = "1.1.3" }
annotate-snippets = { version = "0.9.2", features = ["color"] }
anyhow = { version = "1.0.80" }
assert_fs = { version = "1.1.0" }
argfile = { version = "0.2.0" }
bincode = { version = "1.3.3" }
bitflags = { version = "2.5.0" }
@@ -70,7 +68,6 @@ fern = { version = "0.6.1" }
filetime = { version = "0.2.23" }
glob = { version = "0.3.1" }
globset = { version = "0.4.14" }
globwalk = { version = "0.9.1" }
hashbrown = "0.14.3"
ignore = { version = "0.4.22" }
imara-diff = { version = "0.1.5" }
@@ -233,9 +230,9 @@ inherits = "release"
# Config for 'cargo dist'
[workspace.metadata.dist]
# The preferred cargo-dist version to use in CI (Cargo.toml SemVer syntax)
cargo-dist-version = "0.22.1"
cargo-dist-version = "0.18.0"
# CI backends to support
ci = "github"
ci = ["github"]
# The installers to generate for each app
installers = ["shell", "powershell"]
# The archive format to use for windows builds (defaults .zip)
@@ -266,11 +263,11 @@ targets = [
auto-includes = false
# Whether cargo-dist should create a GitHub Release or use an existing draft
create-release = true
# Which actions to run on pull requests
# Publish jobs to run in CI
pr-run-mode = "skip"
# Whether CI should trigger releases with dispatches instead of tag pushes
dispatch-releases = true
# Which phase cargo-dist should use to create the GitHub release
# The stage during which the GitHub Release should be created
github-release = "announce"
# Whether CI should include auto-generated code to build local artifacts
build-local-artifacts = false
@@ -278,11 +275,9 @@ build-local-artifacts = false
local-artifacts-jobs = ["./build-binaries", "./build-docker"]
# Publish jobs to run in CI
publish-jobs = ["./publish-pypi", "./publish-wasm"]
# Post-announce jobs to run in CI
# Announcement jobs to run in CI
post-announce-jobs = ["./notify-dependents", "./publish-docs", "./publish-playground"]
# Custom permissions for GitHub Jobs
github-custom-job-permissions = { "build-docker" = { packages = "write", contents = "read" }, "publish-wasm" = { contents = "read", id-token = "write", packages = "write" } }
# Whether to install an updater program
install-updater = false
# Path that installers should place binaries in
install-path = "CARGO_HOME"

View File

@@ -136,8 +136,8 @@ curl -LsSf https://astral.sh/ruff/install.sh | sh
powershell -c "irm https://astral.sh/ruff/install.ps1 | iex"
# For a specific version.
curl -LsSf https://astral.sh/ruff/0.6.6/install.sh | sh
powershell -c "irm https://astral.sh/ruff/0.6.6/install.ps1 | iex"
curl -LsSf https://astral.sh/ruff/0.6.5/install.sh | sh
powershell -c "irm https://astral.sh/ruff/0.6.5/install.ps1 | iex"
```
You can also install Ruff via [Homebrew](https://formulae.brew.sh/formula/ruff), [Conda](https://anaconda.org/conda-forge/ruff),
@@ -170,7 +170,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com/) hook via [`ruff
```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.6.6
rev: v0.6.5
hooks:
# Run the linter.
- id: ruff

View File

@@ -38,12 +38,7 @@ test-case = { workspace = true }
[build-dependencies]
path-slash = { workspace = true }
walkdir = { workspace = true }
[target.'cfg(not(target_arch = "powerpc64"))'.build-dependencies]
zip = { workspace = true, features = ["deflate", "zstd"] }
[target.'cfg(target_arch = "powerpc64")'.build-dependencies]
zip = { workspace = true, features = ["deflate"] }
zip = { workspace = true, features = ["zstd", "deflate"] }
[dev-dependencies]
ruff_db = { workspace = true, features = ["os", "testing"] }

View File

@@ -30,17 +30,10 @@ fn zip_dir(directory_path: &str, writer: File) -> ZipResult<File> {
// We can't use `#[cfg(...)]` here because the target-arch in a build script is the
// architecture of the system running the build script and not the architecture of the build-target.
// That's why we use the `TARGET` environment variable here.
#[cfg(target_arch = "powerpc64")]
let method = CompressionMethod::Deflated;
#[cfg(not(target_arch = "powerpc64"))]
let method = {
let target = std::env::var("TARGET").unwrap();
if target.contains("wasm32") || target.contains("powerpc64") {
CompressionMethod::Deflated
} else {
CompressionMethod::Zstd
}
let method = if std::env::var("TARGET").unwrap().contains("wasm32") {
CompressionMethod::Deflated
} else {
CompressionMethod::Zstd
};
let options = FileOptions::default()

View File

@@ -4,9 +4,7 @@ use rustc_hash::FxHasher;
pub use db::Db;
pub use module_name::ModuleName;
pub use module_resolver::{
resolve_module, system_module_search_paths, vendored_typeshed_stubs, Module,
};
pub use module_resolver::{resolve_module, system_module_search_paths, vendored_typeshed_stubs};
pub use program::{Program, ProgramSettings, SearchPathSettings, SitePackages};
pub use python_version::PythonVersion;
pub use semantic_model::{HasTy, SemanticModel};

View File

@@ -1,6 +1,6 @@
use std::iter::FusedIterator;
pub use module::Module;
pub(crate) use module::Module;
pub use resolver::resolve_module;
pub(crate) use resolver::{file_to_module, SearchPaths};
use ruff_db::system::SystemPath;

View File

@@ -54,13 +54,6 @@ impl TryFrom<(&str, &str)> for PythonVersion {
}
}
impl From<(u8, u8)> for PythonVersion {
fn from(value: (u8, u8)) -> Self {
let (major, minor) = value;
Self { major, minor }
}
}
impl fmt::Display for PythonVersion {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let PythonVersion { major, minor } = self;

View File

@@ -115,9 +115,6 @@ pub(crate) struct SemanticIndex<'db> {
/// Note: We should not depend on this map when analysing other files or
/// changing a file invalidates all dependents.
ast_ids: IndexVec<FileScopeId, AstIds>,
/// Flags about the global scope (code usage impacting inference)
has_future_annotations: bool,
}
impl<'db> SemanticIndex<'db> {
@@ -218,12 +215,6 @@ impl<'db> SemanticIndex<'db> {
pub(crate) fn node_scope(&self, node: NodeWithScopeRef) -> FileScopeId {
self.scopes_by_node[&node.node_key()]
}
/// Checks if there is an import of `__future__.annotations` in the global scope, which affects
/// the logic for type inference.
pub(super) fn has_future_annotations(&self) -> bool {
self.has_future_annotations
}
}
pub struct AncestorsIter<'a> {
@@ -1036,7 +1027,7 @@ class C[T]:
}
let TestCase { db, file } = test_case(
r"
r#"
class Test:
def foo():
def bar():
@@ -1045,7 +1036,7 @@ class Test:
pass
def x():
pass",
pass"#,
);
let index = semantic_index(&db, file);

View File

@@ -45,9 +45,6 @@ pub(super) struct SemanticIndexBuilder<'db> {
/// Flow states at each `break` in the current loop.
loop_break_states: Vec<FlowSnapshot>,
/// Flags about the file's global scope
has_future_annotations: bool,
// Semantic Index fields
scopes: IndexVec<FileScopeId, Scope>,
scope_ids_by_scope: IndexVec<FileScopeId, ScopeId<'db>>,
@@ -71,8 +68,6 @@ impl<'db> SemanticIndexBuilder<'db> {
current_match_case: None,
loop_break_states: vec![],
has_future_annotations: false,
scopes: IndexVec::new(),
symbol_tables: IndexVec::new(),
ast_ids: IndexVec::new(),
@@ -455,7 +450,6 @@ impl<'db> SemanticIndexBuilder<'db> {
scopes_by_expression: self.scopes_by_expression,
scopes_by_node: self.scopes_by_node,
use_def_maps,
has_future_annotations: self.has_future_annotations,
}
}
}
@@ -549,16 +543,7 @@ where
&alias.name.id
};
// Look for imports `from __future__ import annotations`, ignore `as ...`
// We intentionally don't enforce the rules about location of `__future__`
// imports here, we assume the user's intent was to apply the `__future__`
// import, so we still check using it (and will also emit a diagnostic about a
// miss-placed `__future__` import.)
self.has_future_annotations |= alias.name.id == "annotations"
&& node.module.as_deref() == Some("__future__");
let symbol = self.add_symbol(symbol_name.clone());
self.add_definition(symbol, ImportFromDefinitionNodeRef { node, alias_index });
}
}

View File

@@ -330,7 +330,7 @@ impl DefinitionCategory {
/// If so, any assignments reached by this definition are in error if they assign a value of a
/// type not assignable to the declared type.
///
/// Annotations establish a declared type. So do function and class definitions, and imports.
/// Annotations establish a declared type. So do function and class definition.
pub(crate) fn is_declaration(self) -> bool {
matches!(
self,
@@ -371,11 +371,10 @@ pub enum DefinitionKind {
impl DefinitionKind {
pub(crate) fn category(&self) -> DefinitionCategory {
match self {
// functions, classes, and imports always bind, and we consider them declarations
DefinitionKind::Function(_)
| DefinitionKind::Class(_)
| DefinitionKind::Import(_)
| DefinitionKind::ImportFrom(_) => DefinitionCategory::DeclarationAndBinding,
// functions and classes always bind a value, and we always consider them declarations
DefinitionKind::Function(_) | DefinitionKind::Class(_) => {
DefinitionCategory::DeclarationAndBinding
}
// a parameter always binds a value, but is only a declaration if annotated
DefinitionKind::Parameter(parameter) => {
if parameter.annotation.is_some() {
@@ -401,7 +400,9 @@ impl DefinitionKind {
}
}
// all of these bind values without declaring a type
DefinitionKind::NamedExpression(_)
DefinitionKind::Import(_)
| DefinitionKind::ImportFrom(_)
| DefinitionKind::NamedExpression(_)
| DefinitionKind::Assignment(_)
| DefinitionKind::AugmentedAssignment(_)
| DefinitionKind::For(_)

View File

@@ -11,7 +11,6 @@ enum CoreStdlibModule {
Builtins,
Types,
Typeshed,
TypingExtensions,
}
impl CoreStdlibModule {
@@ -20,7 +19,6 @@ impl CoreStdlibModule {
Self::Builtins => "builtins",
Self::Types => "types",
Self::Typeshed => "_typeshed",
Self::TypingExtensions => "typing_extensions",
};
ModuleName::new_static(module_name)
.unwrap_or_else(|| panic!("{module_name} should be a valid module name!"))
@@ -64,14 +62,6 @@ pub(crate) fn typeshed_symbol_ty<'db>(db: &'db dyn Db, symbol: &str) -> Type<'db
core_module_symbol_ty(db, CoreStdlibModule::Typeshed, symbol)
}
/// Lookup the type of `symbol` in the `typing_extensions` module namespace.
///
/// Returns `Unbound` if the `typing_extensions` module isn't available for some reason.
#[inline]
pub(crate) fn typing_extensions_symbol_ty<'db>(db: &'db dyn Db, symbol: &str) -> Type<'db> {
core_module_symbol_ty(db, CoreStdlibModule::TypingExtensions, symbol)
}
/// Get the scope of a core stdlib module.
///
/// Can return `None` if a custom typeshed is used that is missing the core module in question.

View File

@@ -1,4 +1,4 @@
use infer::TypeInferenceBuilder;
use infer::TypeInferenceContext;
use ruff_db::files::File;
use ruff_python_ast as ast;
@@ -10,9 +10,7 @@ use crate::semantic_index::{
global_scope, semantic_index, symbol_table, use_def_map, BindingWithConstraints,
BindingWithConstraintsIterator, DeclarationsIterator,
};
use crate::stdlib::{
builtins_symbol_ty, types_symbol_ty, typeshed_symbol_ty, typing_extensions_symbol_ty,
};
use crate::stdlib::{builtins_symbol_ty, types_symbol_ty, typeshed_symbol_ty};
use crate::types::narrow::narrowing_constraint;
use crate::{Db, FxOrderSet};
@@ -53,21 +51,9 @@ fn symbol_ty_by_id<'db>(db: &'db dyn Db, scope: ScopeId<'db>, symbol: ScopedSymb
// on inference from bindings.
if use_def.has_public_declarations(symbol) {
let declarations = use_def.public_declarations(symbol);
// If the symbol is undeclared in some paths, include the inferred type in the public type.
let undeclared_ty = if declarations.may_be_undeclared() {
Some(bindings_ty(
db,
use_def.public_bindings(symbol),
use_def
.public_may_be_unbound(symbol)
.then_some(Type::Unknown),
))
} else {
None
};
// Intentionally ignore conflicting declared types; that's not our problem, it's the
// problem of the module we are importing from.
declarations_ty(db, declarations, undeclared_ty).unwrap_or_else(|(ty, _)| ty)
declarations_ty(db, declarations).unwrap_or_else(|(ty, _)| ty)
} else {
bindings_ty(
db,
@@ -187,21 +173,26 @@ type DeclaredTypeResult<'db> = Result<Type<'db>, (Type<'db>, Box<[Type<'db>]>)>;
/// `Ok(declared_type)`. If there are conflicting declarations, returns
/// `Err((union_of_declared_types, conflicting_declared_types))`.
///
/// If undeclared is a possibility, `undeclared_ty` type will be part of the return type (and may
/// If undeclared is a possibility, `Unknown` type will be part of the return type (and may
/// conflict with other declarations.)
///
/// # Panics
/// Will panic if there are no declarations and no `undeclared_ty` is provided. This is a logic
/// error, as any symbol with zero live declarations clearly must be undeclared, and the caller
/// should provide an `undeclared_ty`.
/// Will panic if there are no declarations and no possibility of undeclared. This is a logic
/// error, as any symbol with zero live declarations clearly must be undeclared.
fn declarations_ty<'db>(
db: &'db dyn Db,
declarations: DeclarationsIterator<'_, 'db>,
undeclared_ty: Option<Type<'db>>,
) -> DeclaredTypeResult<'db> {
let may_be_undeclared = declarations.may_be_undeclared();
let decl_types = declarations.map(|declaration| declaration_ty(db, declaration));
let mut all_types = undeclared_ty.into_iter().chain(decl_types);
let mut all_types = (if may_be_undeclared {
Some(Type::Unknown)
} else {
None
})
.into_iter()
.chain(decl_types);
let first = all_types.next().expect(
"declarations_ty must not be called with zero declarations and no may-be-undeclared.",
@@ -247,8 +238,6 @@ pub enum Type<'db> {
None,
/// a specific function object
Function(FunctionType<'db>),
/// The `typing.reveal_type` function, which has special `__call__` behavior.
RevealTypeFunction(FunctionType<'db>),
/// a specific module object
Module(File),
/// a specific class object
@@ -335,16 +324,14 @@ impl<'db> Type<'db> {
pub const fn into_function_type(self) -> Option<FunctionType<'db>> {
match self {
Type::Function(function_type) | Type::RevealTypeFunction(function_type) => {
Some(function_type)
}
Type::Function(function_type) => Some(function_type),
_ => None,
}
}
pub fn expect_function(self) -> FunctionType<'db> {
self.into_function_type()
.expect("Expected a variant wrapping a FunctionType")
.expect("Expected a Type::Function variant")
}
pub const fn into_int_literal_type(self) -> Option<i64> {
@@ -362,7 +349,7 @@ impl<'db> Type<'db> {
pub fn may_be_unbound(&self, db: &'db dyn Db) -> bool {
match self {
Type::Unbound => true,
Type::Union(union) => union.elements(db).contains(&Type::Unbound),
Type::Union(union) => union.contains(db, Type::Unbound),
// Unbound can't appear in an intersection, because an intersection with Unbound
// simplifies to just Unbound.
_ => false,
@@ -380,28 +367,16 @@ impl<'db> Type<'db> {
}
}
pub fn is_stdlib_symbol(&self, db: &'db dyn Db, module_name: &str, name: &str) -> bool {
match self {
Type::Class(class) => class.is_stdlib_symbol(db, module_name, name),
Type::Function(function) | Type::RevealTypeFunction(function) => {
function.is_stdlib_symbol(db, module_name, name)
}
_ => false,
}
}
/// Return true if this type is a [subtype of] type `target`.
/// Return true if this type is [assignable to] type `target`.
///
/// [subtype of]: https://typing.readthedocs.io/en/latest/spec/concepts.html#subtype-supertype-and-type-equivalence
pub(crate) fn is_subtype_of(self, db: &'db dyn Db, target: Type<'db>) -> bool {
/// [assignable to]: https://typing.readthedocs.io/en/latest/spec/concepts.html#the-assignable-to-or-consistent-subtyping-relation
pub(crate) fn is_assignable_to(self, db: &'db dyn Db, target: Type<'db>) -> bool {
if self.is_equivalent_to(db, target) {
return true;
}
match (self, target) {
(Type::Unknown | Type::Any, _) => false,
(_, Type::Unknown | Type::Any) => false,
(Type::Never, _) => true,
(_, Type::Never) => false,
(Type::Unknown | Type::Any | Type::Never, _) => true,
(_, Type::Unknown | Type::Any) => true,
(Type::IntLiteral(_), Type::Instance(class))
if class.is_stdlib_symbol(db, "builtins", "int") =>
{
@@ -421,30 +396,12 @@ impl<'db> Type<'db> {
(ty, Type::Union(union)) => union
.elements(db)
.iter()
.any(|&elem_ty| ty.is_subtype_of(db, elem_ty)),
(_, Type::Instance(class)) if class.is_stdlib_symbol(db, "builtins", "object") => true,
(Type::Instance(class), _) if class.is_stdlib_symbol(db, "builtins", "object") => false,
.any(|&elem_ty| ty.is_assignable_to(db, elem_ty)),
// TODO
_ => false,
}
}
/// Return true if this type is [assignable to] type `target`.
///
/// [assignable to]: https://typing.readthedocs.io/en/latest/spec/concepts.html#the-assignable-to-or-consistent-subtyping-relation
pub(crate) fn is_assignable_to(self, db: &'db dyn Db, target: Type<'db>) -> bool {
match (self, target) {
(Type::Unknown | Type::Any, _) => true,
(_, Type::Unknown | Type::Any) => true,
(ty, Type::Union(union)) => union
.elements(db)
.iter()
.any(|&elem_ty| ty.is_assignable_to(db, elem_ty)),
// TODO other types containing gradual forms (e.g. generics containing Any/Unknown)
_ => self.is_subtype_of(db, target),
}
}
/// Return true if this type is equivalent to type `other`.
pub(crate) fn is_equivalent_to(self, _db: &'db dyn Db, other: Type<'db>) -> bool {
// TODO equivalent but not identical structural types, differently-ordered unions and
@@ -479,7 +436,7 @@ impl<'db> Type<'db> {
// TODO: attribute lookup on None type
Type::Unknown
}
Type::Function(_) | Type::RevealTypeFunction(_) => {
Type::Function(_) => {
// TODO: attribute lookup on function type
Type::Unknown
}
@@ -525,39 +482,26 @@ impl<'db> Type<'db> {
///
/// Returns `None` if `self` is not a callable type.
#[must_use]
fn call(self, db: &'db dyn Db, arg_types: &[Type<'db>]) -> CallOutcome<'db> {
fn call(&self, db: &'db dyn Db, _context: &mut TypeInferenceContext<'db>) -> Option<Type<'db>> {
match self {
// TODO validate typed call arguments vs callable signature
Type::Function(function_type) => CallOutcome::callable(function_type.return_type(db)),
Type::RevealTypeFunction(function_type) => CallOutcome::revealed(
function_type.return_type(db),
*arg_types.first().unwrap_or(&Type::Unknown),
),
Type::Function(function_type) => Some(function_type.return_type(db)),
// TODO annotated return type on `__new__` or metaclass `__call__`
Type::Class(class) => CallOutcome::callable(Type::Instance(class)),
Type::Class(class) => Some(Type::Instance(*class)),
// TODO: handle classes which implement the `__call__` protocol
Type::Instance(_instance_ty) => CallOutcome::callable(Type::Unknown),
// TODO: handle classes which implement `__call__`
Type::Instance(_instance_ty) => Some(Type::Unknown),
// `Any` is callable, and its return type is also `Any`.
Type::Any => CallOutcome::callable(Type::Any),
Type::Any => Some(Type::Any),
Type::Unknown => CallOutcome::callable(Type::Unknown),
Type::Unknown => Some(Type::Unknown),
Type::Union(union) => CallOutcome::union(
self,
union
.elements(db)
.iter()
.map(|elem| elem.call(db, arg_types))
.collect::<Box<[CallOutcome<'db>]>>(),
),
// TODO: union and intersection types
Type::Union(_) => Some(Type::Unknown),
Type::Intersection(_) => Some(Type::Unknown),
// TODO: intersection types
Type::Intersection(_) => CallOutcome::callable(Type::Unknown),
_ => CallOutcome::not_callable(self),
_ => None,
}
}
@@ -569,11 +513,14 @@ impl<'db> Type<'db> {
/// for y in x:
/// pass
/// ```
fn iterate(self, db: &'db dyn Db) -> IterationOutcome<'db> {
/// Return None and emit a diagnostic if this type is not iterable.
fn iterate(
&self,
db: &'db dyn Db,
context: &mut TypeInferenceContext<'db>,
) -> Option<Type<'db>> {
if let Type::Tuple(tuple_type) = self {
return IterationOutcome::Iterable {
element_ty: UnionType::from_elements(db, &**tuple_type.elements(db)),
};
return Some(UnionType::from_elements(db, &**tuple_type.elements(db)));
}
// `self` represents the type of the iterable;
@@ -582,23 +529,16 @@ impl<'db> Type<'db> {
let dunder_iter_method = iterable_meta_type.member(db, "__iter__");
if !dunder_iter_method.is_unbound() {
let CallOutcome::Callable {
return_ty: iterator_ty,
} = dunder_iter_method.call(db, &[])
else {
return IterationOutcome::NotIterable {
not_iterable_ty: self,
};
let Some(iterator_ty) = dunder_iter_method.call(db, context) else {
context.not_iterable_diagnostic(*self);
return None;
};
let dunder_next_method = iterator_ty.to_meta_type(db).member(db, "__next__");
return dunder_next_method
.call(db, &[])
.return_ty(db)
.map(|element_ty| IterationOutcome::Iterable { element_ty })
.unwrap_or(IterationOutcome::NotIterable {
not_iterable_ty: self,
});
return dunder_next_method.call(db, context).or_else(|| {
context.not_iterable_diagnostic(*self);
None
});
}
// Although it's not considered great practice,
@@ -609,13 +549,10 @@ impl<'db> Type<'db> {
// accepting `int` or `SupportsIndex`
let dunder_get_item_method = iterable_meta_type.member(db, "__getitem__");
dunder_get_item_method
.call(db, &[])
.return_ty(db)
.map(|element_ty| IterationOutcome::Iterable { element_ty })
.unwrap_or(IterationOutcome::NotIterable {
not_iterable_ty: self,
})
dunder_get_item_method.call(db, context).or_else(|| {
context.not_iterable_diagnostic(*self);
None
})
}
#[must_use]
@@ -634,7 +571,6 @@ impl<'db> Type<'db> {
Type::BooleanLiteral(_)
| Type::BytesLiteral(_)
| Type::Function(_)
| Type::RevealTypeFunction(_)
| Type::Instance(_)
| Type::Module(_)
| Type::IntLiteral(_)
@@ -657,7 +593,7 @@ impl<'db> Type<'db> {
Type::BooleanLiteral(_) => builtins_symbol_ty(db, "bool"),
Type::BytesLiteral(_) => builtins_symbol_ty(db, "bytes"),
Type::IntLiteral(_) => builtins_symbol_ty(db, "int"),
Type::Function(_) | Type::RevealTypeFunction(_) => types_symbol_ty(db, "FunctionType"),
Type::Function(_) => types_symbol_ty(db, "FunctionType"),
Type::Module(_) => types_symbol_ty(db, "ModuleType"),
Type::None => typeshed_symbol_ty(db, "NoneType"),
// TODO not accurate if there's a custom metaclass...
@@ -681,195 +617,6 @@ impl<'db> From<&Type<'db>> for Type<'db> {
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
enum CallOutcome<'db> {
Callable {
return_ty: Type<'db>,
},
RevealType {
return_ty: Type<'db>,
revealed_ty: Type<'db>,
},
NotCallable {
not_callable_ty: Type<'db>,
},
Union {
called_ty: Type<'db>,
outcomes: Box<[CallOutcome<'db>]>,
},
}
impl<'db> CallOutcome<'db> {
/// Create a new `CallOutcome::Callable` with given return type.
fn callable(return_ty: Type<'db>) -> CallOutcome {
CallOutcome::Callable { return_ty }
}
/// Create a new `CallOutcome::NotCallable` with given not-callable type.
fn not_callable(not_callable_ty: Type<'db>) -> CallOutcome {
CallOutcome::NotCallable { not_callable_ty }
}
/// Create a new `CallOutcome::RevealType` with given revealed and return types.
fn revealed(return_ty: Type<'db>, revealed_ty: Type<'db>) -> CallOutcome<'db> {
CallOutcome::RevealType {
return_ty,
revealed_ty,
}
}
/// Create a new `CallOutcome::Union` with given wrapped outcomes.
fn union(called_ty: Type<'db>, outcomes: impl Into<Box<[CallOutcome<'db>]>>) -> CallOutcome {
CallOutcome::Union {
called_ty,
outcomes: outcomes.into(),
}
}
/// Get the return type of the call, or `None` if not callable.
fn return_ty(&self, db: &'db dyn Db) -> Option<Type<'db>> {
match self {
Self::Callable { return_ty } => Some(*return_ty),
Self::RevealType {
return_ty,
revealed_ty: _,
} => Some(*return_ty),
Self::NotCallable { not_callable_ty: _ } => None,
Self::Union {
outcomes,
called_ty: _,
} => outcomes
.iter()
// If all outcomes are NotCallable, we return None; if some outcomes are callable
// and some are not, we return a union including Unknown.
.fold(None, |acc, outcome| {
let ty = outcome.return_ty(db);
match (acc, ty) {
(None, None) => None,
(None, Some(ty)) => Some(UnionBuilder::new(db).add(ty)),
(Some(builder), ty) => Some(builder.add(ty.unwrap_or(Type::Unknown))),
}
})
.map(UnionBuilder::build),
}
}
/// Get the return type of the call, emitting diagnostics if needed.
///
/// For a `Union` outcome this walks every wrapped outcome, unions their
/// return types, and emits at most one `call-non-callable` diagnostic that
/// summarizes all non-callable union elements, plus at most one
/// `revealed-type` diagnostic regardless of how many elements revealed.
fn unwrap_with_diagnostic<'a>(
    &self,
    db: &'db dyn Db,
    node: ast::AnyNodeRef,
    builder: &'a mut TypeInferenceBuilder<'db>,
) -> Type<'db> {
    match self {
        Self::Callable { return_ty } => *return_ty,
        Self::RevealType {
            return_ty,
            revealed_ty,
        } => {
            builder.add_diagnostic(
                node,
                "revealed-type",
                format_args!("Revealed type is '{}'.", revealed_ty.display(db)),
            );
            *return_ty
        }
        Self::NotCallable { not_callable_ty } => {
            builder.add_diagnostic(
                node,
                "call-non-callable",
                format_args!(
                    "Object of type '{}' is not callable.",
                    not_callable_ty.display(db)
                ),
            );
            Type::Unknown
        }
        Self::Union {
            outcomes,
            called_ty,
        } => {
            // Non-callable element types collected here so that a single
            // summary diagnostic can be emitted after the loop.
            let mut not_callable = vec![];
            let mut union_builder = UnionBuilder::new(db);
            // Only the first RevealType element emits the `revealed-type`
            // diagnostic; later ones just contribute their return type.
            let mut revealed = false;
            for outcome in &**outcomes {
                let return_ty = match outcome {
                    Self::NotCallable { not_callable_ty } => {
                        // Defer the diagnostic; contribute Unknown to the union.
                        not_callable.push(*not_callable_ty);
                        Type::Unknown
                    }
                    Self::RevealType {
                        return_ty,
                        revealed_ty: _,
                    } => {
                        if revealed {
                            *return_ty
                        } else {
                            revealed = true;
                            outcome.unwrap_with_diagnostic(db, node, builder)
                        }
                    }
                    _ => outcome.unwrap_with_diagnostic(db, node, builder),
                };
                union_builder = union_builder.add(return_ty);
            }
            // Pick the diagnostic wording by how many elements were not
            // callable: none, exactly one, all of them, or several.
            match not_callable[..] {
                [] => {}
                [elem] => builder.add_diagnostic(
                    node,
                    "call-non-callable",
                    format_args!(
                        "Object of type '{}' is not callable (due to union element '{}').",
                        called_ty.display(db),
                        elem.display(db),
                    ),
                ),
                _ if not_callable.len() == outcomes.len() => builder.add_diagnostic(
                    node,
                    "call-non-callable",
                    format_args!(
                        "Object of type '{}' is not callable.",
                        called_ty.display(db)
                    ),
                ),
                _ => builder.add_diagnostic(
                    node,
                    "call-non-callable",
                    format_args!(
                        "Object of type '{}' is not callable (due to union elements {}).",
                        called_ty.display(db),
                        not_callable.display(db),
                    ),
                ),
            }
            union_builder.build()
        }
    }
}
}
/// The outcome of attempting to iterate over a type.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum IterationOutcome<'db> {
    /// The type is iterable; `element_ty` is the type of its elements.
    Iterable { element_ty: Type<'db> },
    /// The type is not iterable; `not_iterable_ty` is the offending type.
    NotIterable { not_iterable_ty: Type<'db> },
}
impl<'db> IterationOutcome<'db> {
fn unwrap_with_diagnostic(
self,
iterable_node: ast::AnyNodeRef,
inference_builder: &mut TypeInferenceBuilder<'db>,
) -> Type<'db> {
match self {
Self::Iterable { element_ty } => element_ty,
Self::NotIterable { not_iterable_ty } => {
inference_builder.not_iterable_diagnostic(iterable_node, not_iterable_ty);
Type::Unknown
}
}
}
}
#[salsa::interned]
pub struct FunctionType<'db> {
/// name of the function at definition
@@ -883,23 +630,6 @@ pub struct FunctionType<'db> {
}
impl<'db> FunctionType<'db> {
/// Return true if this is a standard library function with given module name and name.
pub(crate) fn is_stdlib_symbol(self, db: &'db dyn Db, module_name: &str, name: &str) -> bool {
    // Cheap name comparison first; only then resolve the defining module.
    if name != self.name(db) {
        return false;
    }
    match file_to_module(db, self.definition(db).file(db)) {
        Some(module) => {
            module.search_path().is_standard_library() && module.name() == module_name
        }
        None => false,
    }
}
/// Return true if this is a symbol with given name from `typing` or `typing_extensions`.
///
/// The symbol matches if its name equals `name` and its defining module lies
/// on the standard-library search path and is named `typing` or
/// `typing_extensions`.
pub(crate) fn is_typing_symbol(self, db: &'db dyn Db, name: &str) -> bool {
    name == self.name(db)
        && file_to_module(db, self.definition(db).file(db)).is_some_and(|module| {
            module.search_path().is_standard_library()
                && matches!(&**module.name(), "typing" | "typing_extensions")
        })
}
/// Return true if `decorator` is one of the decorators applied to this function.
pub fn has_decorator(self, db: &dyn Db, decorator: Type<'_>) -> bool {
    self.decorators(db).iter().any(|applied| *applied == decorator)
}
@@ -1004,16 +734,16 @@ impl<'db> ClassType<'db> {
pub struct UnionType<'db> {
/// The union type includes values in any of these types.
#[return_ref]
elements_boxed: Box<[Type<'db>]>,
elements: FxOrderSet<Type<'db>>,
}
impl<'db> UnionType<'db> {
fn elements(self, db: &'db dyn Db) -> &'db [Type<'db>] {
self.elements_boxed(db)
pub fn contains(&self, db: &'db dyn Db, ty: Type<'db>) -> bool {
self.elements(db).contains(&ty)
}
/// Create a union from a list of elements
/// (which may be eagerly simplified into a different variant of [`Type`] altogether).
/// (which may be eagerly simplified into a different variant of [`Type`] altogether)
pub fn from_elements<T: Into<Type<'db>>>(
db: &'db dyn Db,
elements: impl IntoIterator<Item = T>,
@@ -1027,13 +757,13 @@ impl<'db> UnionType<'db> {
}
/// Apply a transformation function to all elements of the union,
/// and create a new union from the resulting set of types.
/// and create a new union from the resulting set of types
pub fn map(
&self,
db: &'db dyn Db,
transform_fn: impl Fn(&Type<'db>) -> Type<'db>,
) -> Type<'db> {
Self::from_elements(db, self.elements(db).iter().map(transform_fn))
Self::from_elements(db, self.elements(db).into_iter().map(transform_fn))
}
}
@@ -1137,8 +867,6 @@ mod tests {
}
}
#[test_case(Ty::BuiltinInstance("str"), Ty::BuiltinInstance("object"))]
#[test_case(Ty::BuiltinInstance("int"), Ty::BuiltinInstance("object"))]
#[test_case(Ty::Unknown, Ty::IntLiteral(1))]
#[test_case(Ty::Any, Ty::IntLiteral(1))]
#[test_case(Ty::Never, Ty::IntLiteral(1))]
@@ -1156,7 +884,6 @@ mod tests {
assert!(from.into_type(&db).is_assignable_to(&db, to.into_type(&db)));
}
#[test_case(Ty::BuiltinInstance("object"), Ty::BuiltinInstance("int"))]
#[test_case(Ty::IntLiteral(1), Ty::BuiltinInstance("str"))]
#[test_case(Ty::BuiltinInstance("int"), Ty::BuiltinInstance("str"))]
#[test_case(Ty::BuiltinInstance("int"), Ty::IntLiteral(1))]
@@ -1165,34 +892,6 @@ mod tests {
assert!(!from.into_type(&db).is_assignable_to(&db, to.into_type(&db)));
}
#[test_case(Ty::BuiltinInstance("str"), Ty::BuiltinInstance("object"))]
#[test_case(Ty::BuiltinInstance("int"), Ty::BuiltinInstance("object"))]
#[test_case(Ty::Never, Ty::IntLiteral(1))]
#[test_case(Ty::IntLiteral(1), Ty::BuiltinInstance("int"))]
#[test_case(Ty::StringLiteral("foo"), Ty::BuiltinInstance("str"))]
#[test_case(Ty::StringLiteral("foo"), Ty::LiteralString)]
#[test_case(Ty::LiteralString, Ty::BuiltinInstance("str"))]
#[test_case(Ty::BytesLiteral("foo"), Ty::BuiltinInstance("bytes"))]
#[test_case(Ty::IntLiteral(1), Ty::Union(vec![Ty::BuiltinInstance("int"), Ty::BuiltinInstance("str")]))]
fn is_subtype_of(from: Ty, to: Ty) {
let db = setup_db();
assert!(from.into_type(&db).is_subtype_of(&db, to.into_type(&db)));
}
#[test_case(Ty::BuiltinInstance("object"), Ty::BuiltinInstance("int"))]
#[test_case(Ty::Unknown, Ty::IntLiteral(1))]
#[test_case(Ty::Any, Ty::IntLiteral(1))]
#[test_case(Ty::IntLiteral(1), Ty::Unknown)]
#[test_case(Ty::IntLiteral(1), Ty::Any)]
#[test_case(Ty::IntLiteral(1), Ty::Union(vec![Ty::Unknown, Ty::BuiltinInstance("str")]))]
#[test_case(Ty::IntLiteral(1), Ty::BuiltinInstance("str"))]
#[test_case(Ty::BuiltinInstance("int"), Ty::BuiltinInstance("str"))]
#[test_case(Ty::BuiltinInstance("int"), Ty::IntLiteral(1))]
fn is_not_subtype_of(from: Ty, to: Ty) {
let db = setup_db();
assert!(!from.into_type(&db).is_subtype_of(&db, to.into_type(&db)));
}
#[test_case(
Ty::Union(vec![Ty::IntLiteral(1), Ty::IntLiteral(2)]),
Ty::Union(vec![Ty::IntLiteral(1), Ty::IntLiteral(2)])

View File

@@ -27,10 +27,10 @@
//! * An intersection containing two non-overlapping types should simplify to [`Type::Never`].
use crate::types::{builtins_symbol_ty, IntersectionType, Type, UnionType};
use crate::{Db, FxOrderSet};
use smallvec::SmallVec;
use ordermap::set::MutableValues;
pub(crate) struct UnionBuilder<'db> {
elements: Vec<Type<'db>>,
elements: FxOrderSet<Type<'db>>,
db: &'db dyn Db,
}
@@ -38,7 +38,7 @@ impl<'db> UnionBuilder<'db> {
pub(crate) fn new(db: &'db dyn Db) -> Self {
Self {
db,
elements: vec![],
elements: FxOrderSet::default(),
}
}
@@ -46,70 +46,47 @@ impl<'db> UnionBuilder<'db> {
pub(crate) fn add(mut self, ty: Type<'db>) -> Self {
match ty {
Type::Union(union) => {
let new_elements = union.elements(self.db);
self.elements.reserve(new_elements.len());
for element in new_elements {
self = self.add(*element);
}
self.elements.extend(union.elements(self.db));
}
Type::Never => {}
_ => {
let bool_pair = if let Type::BooleanLiteral(b) = ty {
Some(Type::BooleanLiteral(!b))
} else {
None
};
let mut to_add = ty;
let mut to_remove = SmallVec::<[usize; 2]>::new();
for (index, element) in self.elements.iter().enumerate() {
if Some(*element) == bool_pair {
to_add = builtins_symbol_ty(self.db, "bool");
to_remove.push(index);
// The type we are adding is a BooleanLiteral, which doesn't have any
// subtypes. And we just found that the union already contained our
// mirror-image BooleanLiteral, so it can't also contain bool or any
// supertype of bool. Therefore, we are done.
break;
}
if ty.is_subtype_of(self.db, *element) {
return self;
} else if element.is_subtype_of(self.db, ty) {
to_remove.push(index);
}
}
match to_remove[..] {
[] => self.elements.push(to_add),
[index] => self.elements[index] = to_add,
_ => {
let mut current_index = 0;
let mut to_remove = to_remove.into_iter();
let mut next_to_remove_index = to_remove.next();
self.elements.retain(|_| {
let retain = if Some(current_index) == next_to_remove_index {
next_to_remove_index = to_remove.next();
false
} else {
true
};
current_index += 1;
retain
});
self.elements.push(to_add);
}
}
self.elements.insert(ty);
}
}
self
}
pub(crate) fn build(self) -> Type<'db> {
/// Performs the following normalizations:
/// - Replaces `Literal[True,False]` with `bool`.
/// - TODO For enums `E` with members `X1`,...,`Xn`, replaces
/// `Literal[E.X1,...,E.Xn]` with `E`.
fn simplify(&mut self) {
if let Some(true_index) = self.elements.get_index_of(&Type::BooleanLiteral(true)) {
if self.elements.contains(&Type::BooleanLiteral(false)) {
*self.elements.get_index_mut2(true_index).unwrap() =
builtins_symbol_ty(self.db, "bool");
self.elements.remove(&Type::BooleanLiteral(false));
}
}
}
pub(crate) fn build(mut self) -> Type<'db> {
match self.elements.len() {
0 => Type::Never,
1 => self.elements[0],
_ => Type::Union(UnionType::new(self.db, self.elements.into())),
_ => {
self.simplify();
match self.elements.len() {
0 => Type::Never,
1 => self.elements[0],
_ => {
self.elements.shrink_to_fit();
Type::Union(UnionType::new(self.db, self.elements))
}
}
}
}
}
}
@@ -303,6 +280,12 @@ mod tests {
use crate::ProgramSettings;
use ruff_db::system::{DbWithTestSystem, SystemPathBuf};
impl<'db> UnionType<'db> {
fn elements_vec(self, db: &'db TestDb) -> Vec<Type<'db>> {
self.elements(db).into_iter().copied().collect()
}
}
fn setup_db() -> TestDb {
let db = TestDb::new();
@@ -330,7 +313,7 @@ mod tests {
let t1 = Type::IntLiteral(1);
let union = UnionType::from_elements(&db, [t0, t1]).expect_union();
assert_eq!(union.elements(&db), &[t0, t1]);
assert_eq!(union.elements_vec(&db), &[t0, t1]);
}
#[test]
@@ -367,10 +350,10 @@ mod tests {
let t3 = Type::IntLiteral(17);
let union = UnionType::from_elements(&db, [t0, t1, t3]).expect_union();
assert_eq!(union.elements(&db), &[t0, t3]);
assert_eq!(union.elements_vec(&db), &[t0, t3]);
let union = UnionType::from_elements(&db, [t0, t1, t2, t3]).expect_union();
assert_eq!(union.elements(&db), &[bool_ty, t3]);
assert_eq!(union.elements_vec(&db), &[bool_ty, t3]);
}
#[test]
@@ -382,44 +365,7 @@ mod tests {
let u1 = UnionType::from_elements(&db, [t0, t1]);
let union = UnionType::from_elements(&db, [u1, t2]).expect_union();
assert_eq!(union.elements(&db), &[t0, t1, t2]);
}
#[test]
fn build_union_simplify_subtype() {
let db = setup_db();
let t0 = builtins_symbol_ty(&db, "str").to_instance(&db);
let t1 = Type::LiteralString;
let u0 = UnionType::from_elements(&db, [t0, t1]);
let u1 = UnionType::from_elements(&db, [t1, t0]);
assert_eq!(u0, t0);
assert_eq!(u1, t0);
}
#[test]
fn build_union_no_simplify_unknown() {
let db = setup_db();
let t0 = builtins_symbol_ty(&db, "str").to_instance(&db);
let t1 = Type::Unknown;
let u0 = UnionType::from_elements(&db, [t0, t1]);
let u1 = UnionType::from_elements(&db, [t1, t0]);
assert_eq!(u0.expect_union().elements(&db), &[t0, t1]);
assert_eq!(u1.expect_union().elements(&db), &[t1, t0]);
}
#[test]
fn build_union_subsume_multiple() {
let db = setup_db();
let str_ty = builtins_symbol_ty(&db, "str").to_instance(&db);
let int_ty = builtins_symbol_ty(&db, "int").to_instance(&db);
let object_ty = builtins_symbol_ty(&db, "object").to_instance(&db);
let unknown_ty = Type::Unknown;
let u0 = UnionType::from_elements(&db, [str_ty, unknown_ty, int_ty, object_ty]);
assert_eq!(u0.expect_union().elements(&db), &[unknown_ty, object_ty]);
assert_eq!(union.elements_vec(&db), &[t0, t1, t2]);
}
impl<'db> IntersectionType<'db> {
@@ -500,7 +446,7 @@ mod tests {
.add_positive(u0)
.build()
.expect_union();
let [Type::Intersection(i0), Type::Intersection(i1)] = union.elements(&db)[..] else {
let [Type::Intersection(i0), Type::Intersection(i1)] = union.elements_vec(&db)[..] else {
panic!("expected a union of two intersections");
};
assert_eq!(i0.pos_vec(&db), &[ta, t0]);

View File

@@ -36,7 +36,6 @@ impl Display for DisplayType<'_> {
| Type::BytesLiteral(_)
| Type::Class(_)
| Type::Function(_)
| Type::RevealTypeFunction(_)
) {
write!(f, "Literal[{representation}]",)
} else {
@@ -73,9 +72,7 @@ impl Display for DisplayRepresentation<'_> {
// TODO functions and classes should display using a fully qualified name
Type::Class(class) => f.write_str(class.name(self.db)),
Type::Instance(class) => f.write_str(class.name(self.db)),
Type::Function(function) | Type::RevealTypeFunction(function) => {
f.write_str(function.name(self.db))
}
Type::Function(function) => f.write_str(function.name(self.db)),
Type::Union(union) => union.display(self.db).fmt(f),
Type::Intersection(intersection) => intersection.display(self.db).fmt(f),
Type::IntLiteral(n) => n.fmt(f),
@@ -194,7 +191,7 @@ impl TryFrom<Type<'_>> for LiteralTypeKind {
fn try_from(value: Type<'_>) -> Result<Self, Self::Error> {
match value {
Type::Class(_) => Ok(Self::Class),
Type::Function(_) | Type::RevealTypeFunction(_) => Ok(Self::Function),
Type::Function(_) => Ok(Self::Function),
Type::IntLiteral(_) => Ok(Self::IntLiteral),
Type::StringLiteral(_) => Ok(Self::StringLiteral),
Type::BytesLiteral(_) => Ok(Self::BytesLiteral),

File diff suppressed because it is too large Load Diff

View File

@@ -1,2 +0,0 @@
with foo() as self.bar:
pass

View File

@@ -114,19 +114,22 @@ fn lint_maybe_undefined(context: &SemanticLintContext, name: &ast::ExprName) {
return;
}
let semantic = &context.semantic;
let ty = name.ty(semantic);
if ty.is_unbound() {
context.push_diagnostic(format_diagnostic(
context,
&format!("Name '{}' used when not defined.", &name.id),
name.start(),
));
} else if ty.may_be_unbound(semantic.db()) {
context.push_diagnostic(format_diagnostic(
context,
&format!("Name '{}' used when possibly not defined.", &name.id),
name.start(),
));
match name.ty(semantic) {
Type::Unbound => {
context.push_diagnostic(format_diagnostic(
context,
&format!("Name '{}' used when not defined.", &name.id),
name.start(),
));
}
Type::Union(union) if union.contains(semantic.db(), Type::Unbound) => {
context.push_diagnostic(format_diagnostic(
context,
&format!("Name '{}' used when possibly not defined.", &name.id),
name.start(),
));
}
_ => {}
}
}

View File

@@ -1,6 +1,6 @@
[package]
name = "ruff"
version = "0.6.6"
version = "0.6.5"
publish = true
authors = { workspace = true }
edition = { workspace = true }
@@ -14,9 +14,7 @@ default-run = "ruff"
[dependencies]
ruff_cache = { workspace = true }
ruff_db = { workspace = true }
ruff_diagnostics = { workspace = true }
ruff_graph = { workspace = true, features = ["serde", "clap"] }
ruff_linter = { workspace = true, features = ["clap"] }
ruff_macros = { workspace = true }
ruff_notebook = { workspace = true }
@@ -38,7 +36,6 @@ clap_complete_command = { workspace = true }
clearscreen = { workspace = true }
colored = { workspace = true }
filetime = { workspace = true }
globwalk = { workspace = true }
ignore = { workspace = true }
is-macro = { workspace = true }
itertools = { workspace = true }
@@ -62,11 +59,8 @@ wild = { workspace = true }
[dev-dependencies]
# Enable test rules during development
ruff_linter = { workspace = true, features = ["clap", "test-rules"] }
assert_fs = { workspace = true }
# Avoid writing colored snapshots when running tests from the terminal
colored = { workspace = true, features = ["no-color"] }
indoc = { workspace = true }
insta = { workspace = true, features = ["filters", "json"] }
insta-cmd = { workspace = true }
tempfile = { workspace = true }

View File

@@ -7,11 +7,13 @@ use std::sync::Arc;
use anyhow::{anyhow, bail};
use clap::builder::{TypedValueParser, ValueParserFactory};
use clap::{command, Parser, Subcommand};
use clap::{command, Parser};
use colored::Colorize;
use path_absolutize::path_dedot;
use regex::Regex;
use ruff_graph::Direction;
use rustc_hash::FxHashMap;
use toml;
use ruff_linter::line_width::LineLength;
use ruff_linter::logging::LogLevel;
use ruff_linter::registry::Rule;
@@ -25,8 +27,6 @@ use ruff_text_size::TextRange;
use ruff_workspace::configuration::{Configuration, RuleSelection};
use ruff_workspace::options::{Options, PycodestyleOptions};
use ruff_workspace::resolver::ConfigurationTransformer;
use rustc_hash::FxHashMap;
use toml;
/// All configuration options that can be passed "globally",
/// i.e., can be passed to all subcommands
@@ -132,9 +132,6 @@ pub enum Command {
Format(FormatCommand),
/// Run the language server.
Server(ServerCommand),
/// Run analysis over Python source code.
#[clap(subcommand)]
Analyze(AnalyzeCommand),
/// Display Ruff's version
Version {
#[arg(long, value_enum, default_value = "text")]
@@ -142,35 +139,6 @@ pub enum Command {
},
}
#[derive(Debug, Subcommand)]
pub enum AnalyzeCommand {
/// Generate a map of Python file dependencies or dependents.
Graph(AnalyzeGraphCommand),
}
#[derive(Clone, Debug, clap::Parser)]
pub struct AnalyzeGraphCommand {
/// List of files or directories to include.
#[clap(help = "List of files or directories to include [default: .]")]
files: Vec<PathBuf>,
/// The direction of the import map. By default, generates a dependency map, i.e., a map from
/// file to files that it depends on. Use `--direction dependents` to generate a map from file
/// to files that depend on it.
#[clap(long, value_enum, default_value_t)]
direction: Direction,
/// Attempt to detect imports from string literals.
#[clap(long)]
detect_string_imports: bool,
/// Enable preview mode. Use `--no-preview` to disable.
#[arg(long, overrides_with("no_preview"))]
preview: bool,
#[clap(long, overrides_with("preview"), hide = true)]
no_preview: bool,
/// The minimum Python version that should be supported.
#[arg(long, value_enum)]
target_version: Option<PythonVersion>,
}
// The `Parser` derive is for ruff_dev, for ruff `Args` would be sufficient
#[derive(Clone, Debug, clap::Parser)]
#[allow(clippy::struct_excessive_bools)]
@@ -732,7 +700,6 @@ impl CheckCommand {
output_format: resolve_output_format(self.output_format)?,
show_fixes: resolve_bool_arg(self.show_fixes, self.no_show_fixes),
extension: self.extension,
..ExplicitConfigOverrides::default()
};
let config_args = ConfigArguments::from_cli_arguments(global_options, cli_overrides)?;
@@ -765,34 +732,8 @@ impl FormatCommand {
target_version: self.target_version,
cache_dir: self.cache_dir,
extension: self.extension,
..ExplicitConfigOverrides::default()
};
let config_args = ConfigArguments::from_cli_arguments(global_options, cli_overrides)?;
Ok((format_arguments, config_args))
}
}
impl AnalyzeGraphCommand {
/// Partition the CLI into command-line arguments and configuration
/// overrides.
pub fn partition(
self,
global_options: GlobalConfigArgs,
) -> anyhow::Result<(AnalyzeGraphArgs, ConfigArguments)> {
let format_arguments = AnalyzeGraphArgs {
files: self.files,
direction: self.direction,
};
let cli_overrides = ExplicitConfigOverrides {
detect_string_imports: if self.detect_string_imports {
Some(true)
} else {
None
},
preview: resolve_bool_arg(self.preview, self.no_preview).map(PreviewMode::from),
target_version: self.target_version,
// Unsupported on the formatter CLI, but required on `Overrides`.
..ExplicitConfigOverrides::default()
};
@@ -955,7 +896,7 @@ A `--config` flag must either be a path to a `.toml` configuration file
// the user was trying to pass in a path to a configuration file
// or some inline TOML.
// We want to display the most helpful error to the user as possible.
if Path::new(value)
if std::path::Path::new(value)
.extension()
.map_or(false, |ext| ext.eq_ignore_ascii_case("toml"))
{
@@ -1215,13 +1156,6 @@ impl LineColumnParseError {
}
}
/// CLI settings that are distinct from configuration (commands, lists of files, etc.).
#[derive(Clone, Debug)]
pub struct AnalyzeGraphArgs {
pub files: Vec<PathBuf>,
pub direction: Direction,
}
/// Configuration overrides provided via dedicated CLI flags:
/// `--line-length`, `--respect-gitignore`, etc.
#[derive(Clone, Default)]
@@ -1253,7 +1187,6 @@ struct ExplicitConfigOverrides {
output_format: Option<OutputFormat>,
show_fixes: Option<bool>,
extension: Option<Vec<ExtensionPair>>,
detect_string_imports: Option<bool>,
}
impl ConfigurationTransformer for ExplicitConfigOverrides {
@@ -1338,9 +1271,6 @@ impl ConfigurationTransformer for ExplicitConfigOverrides {
if let Some(extension) = &self.extension {
config.extension = Some(extension.iter().cloned().collect());
}
if let Some(detect_string_imports) = &self.detect_string_imports {
config.analyze.detect_string_imports = Some(*detect_string_imports);
}
config
}

View File

@@ -10,9 +10,7 @@ use ruff_linter::linter::add_noqa_to_path;
use ruff_linter::source_kind::SourceKind;
use ruff_linter::warn_user_once;
use ruff_python_ast::{PySourceType, SourceType};
use ruff_workspace::resolver::{
match_exclusion, python_files_in_path, PyprojectConfig, ResolvedFile,
};
use ruff_workspace::resolver::{python_files_in_path, PyprojectConfig, ResolvedFile};
use crate::args::ConfigArguments;
@@ -59,15 +57,6 @@ pub(crate) fn add_noqa(
.and_then(|parent| package_roots.get(parent))
.and_then(|package| *package);
let settings = resolver.resolve(path);
if (settings.file_resolver.force_exclude || !resolved_file.is_root())
&& match_exclusion(
resolved_file.path(),
resolved_file.file_name(),
&settings.linter.exclude,
)
{
return None;
}
let source_kind = match SourceKind::from_path(path, source_type) {
Ok(Some(source_kind)) => source_kind,
Ok(None) => return None,

View File

@@ -1,241 +0,0 @@
use crate::args::{AnalyzeGraphArgs, ConfigArguments};
use crate::resolve::resolve;
use crate::{resolve_default_files, ExitStatus};
use anyhow::Result;
use log::{debug, warn};
use path_absolutize::CWD;
use ruff_db::system::{SystemPath, SystemPathBuf};
use ruff_graph::{Direction, ImportMap, ModuleDb, ModuleImports};
use ruff_linter::{warn_user, warn_user_once};
use ruff_python_ast::{PySourceType, SourceType};
use ruff_workspace::resolver::{match_exclusion, python_files_in_path, ResolvedFile};
use rustc_hash::FxHashMap;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
/// Generate an import map.
///
/// Walks the Python files under `args.files`, resolves the imports of each
/// file in parallel (via `rayon`), and prints the resulting map as JSON to
/// stdout, keyed by path relative to the current working directory. The map
/// direction (dependencies vs. dependents) is chosen by `args.direction`.
pub(crate) fn analyze_graph(
    args: AnalyzeGraphArgs,
    config_arguments: &ConfigArguments,
) -> Result<ExitStatus> {
    // Construct the "default" settings. These are used when no `pyproject.toml`
    // files are present, or files are injected from outside the hierarchy.
    let pyproject_config = resolve(config_arguments, None)?;
    if pyproject_config.settings.analyze.preview.is_disabled() {
        warn_user!("`ruff analyze graph` is experimental and may change without warning");
    }

    // Write all paths relative to the current working directory.
    let root =
        SystemPathBuf::from_path_buf(CWD.clone()).expect("Expected a UTF-8 working directory");

    // Find all Python files.
    let files = resolve_default_files(args.files, false);
    let (paths, resolver) = python_files_in_path(&files, &pyproject_config, config_arguments)?;

    if paths.is_empty() {
        warn_user_once!("No Python files found under the given path(s)");
        return Ok(ExitStatus::Success);
    }

    // Resolve all package roots.
    let package_roots = resolver
        .package_roots(
            &paths
                .iter()
                .flatten()
                .map(ResolvedFile::path)
                .collect::<Vec<_>>(),
        )
        .into_iter()
        .map(|(path, package)| (path.to_path_buf(), package.map(Path::to_path_buf)))
        .collect::<FxHashMap<_, _>>();

    // Create a database from the source roots.
    // NOTE(review): source roots are taken as the *parents* of the package
    // roots — presumably so that absolute imports of the packages resolve;
    // confirm against `ModuleDb::from_src_roots`.
    let db = ModuleDb::from_src_roots(
        package_roots
            .values()
            .filter_map(|package| package.as_deref())
            .filter_map(|package| package.parent())
            .map(Path::to_path_buf)
            .filter_map(|path| SystemPathBuf::from_path_buf(path).ok()),
        pyproject_config
            .settings
            .analyze
            .target_version
            .as_tuple()
            .into(),
    )?;

    // Create a cache for resolved globs, shared across worker tasks.
    let glob_resolver = Arc::new(Mutex::new(GlobResolver::default()));

    // Collect and resolve the imports for each file. Each spawned task pushes
    // its `(path, imports)` pair into this shared vector.
    let result = Arc::new(Mutex::new(Vec::new()));
    let inner_result = Arc::clone(&result);

    rayon::scope(move |scope| {
        for resolved_file in paths {
            let Ok(resolved_file) = resolved_file else {
                continue;
            };

            let path = resolved_file.path();
            let package = path
                .parent()
                .and_then(|parent| package_roots.get(parent))
                .and_then(Clone::clone);

            // Resolve the per-file settings.
            let settings = resolver.resolve(path);
            let string_imports = settings.analyze.detect_string_imports;
            let include_dependencies = settings.analyze.include_dependencies.get(path).cloned();

            // Skip excluded files (explicitly-passed roots are only excluded
            // when `force_exclude` is set).
            if (settings.file_resolver.force_exclude || !resolved_file.is_root())
                && match_exclusion(
                    resolved_file.path(),
                    resolved_file.file_name(),
                    &settings.analyze.exclude,
                )
            {
                continue;
            }

            // Ignore non-Python files.
            let source_type = match settings.analyze.extension.get(path) {
                None => match SourceType::from(&path) {
                    SourceType::Python(source_type) => source_type,
                    SourceType::Toml(_) => {
                        debug!("Ignoring TOML file: {}", path.display());
                        continue;
                    }
                },
                Some(language) => PySourceType::from(language),
            };
            if matches!(source_type, PySourceType::Ipynb) {
                debug!("Ignoring Jupyter notebook: {}", path.display());
                continue;
            }

            // Convert to system (UTF-8) paths; skip files whose paths can't
            // be represented, with a warning.
            let Ok(package) = package.map(SystemPathBuf::from_path_buf).transpose() else {
                warn!("Failed to convert package to system path");
                continue;
            };
            let Ok(path) = SystemPathBuf::from_path_buf(resolved_file.into_path()) else {
                warn!("Failed to convert path to system path");
                continue;
            };

            // Per-task clones moved into the spawned closure.
            let db = db.snapshot();
            let glob_resolver = glob_resolver.clone();
            let root = root.clone();
            let result = inner_result.clone();
            scope.spawn(move |_| {
                // Identify any imports via static analysis; on failure, log
                // and fall back to an empty import set (best-effort).
                let mut imports =
                    ModuleImports::detect(&db, &path, package.as_deref(), string_imports)
                        .unwrap_or_else(|err| {
                            warn!("Failed to generate import map for {path}: {err}");
                            ModuleImports::default()
                        });
                debug!("Discovered {} imports for {}", imports.len(), path);

                // Append any imports that were statically defined in the configuration.
                if let Some((root, globs)) = include_dependencies {
                    let mut glob_resolver = glob_resolver.lock().unwrap();
                    imports.extend(glob_resolver.resolve(root, globs));
                }

                // Convert the path (and imports) to be relative to the working directory.
                let path = path
                    .strip_prefix(&root)
                    .map(SystemPath::to_path_buf)
                    .unwrap_or(path);
                let imports = imports.relative_to(&root);

                result.lock().unwrap().push((path, imports));
            });
        }
    });

    // Collect the results; all tasks have completed once the scope returns.
    let imports = Arc::into_inner(result).unwrap().into_inner()?;

    // Generate the import map.
    let import_map = match args.direction {
        Direction::Dependencies => ImportMap::from_iter(imports),
        Direction::Dependents => ImportMap::reverse(imports),
    };

    // Print to JSON.
    println!("{}", serde_json::to_string_pretty(&import_map)?);

    Ok(ExitStatus::Success)
}
/// A resolver for glob sets, memoizing results per `(root, globs)` pair.
#[derive(Default, Debug)]
struct GlobResolver {
    // Cache of previously-resolved glob sets.
    cache: GlobCache,
}
impl GlobResolver {
    /// Resolve a set of globs, anchored at a given root.
    ///
    /// Results are memoized in the cache. Resolution is best-effort: a walker
    /// build failure returns an empty (uncached) result, and individual entry
    /// or path-conversion errors are logged and skipped.
    fn resolve(&mut self, root: PathBuf, globs: Vec<String>) -> Vec<SystemPathBuf> {
        // Fast path: reuse a previous resolution for this exact glob set.
        if let Some(cached) = self.cache.get(&root, &globs) {
            return cached.clone();
        }

        let walker = match globwalk::GlobWalkerBuilder::from_patterns(&root, &globs)
            .file_type(globwalk::FileType::FILE)
            .build()
        {
            Ok(walker) => walker,
            Err(err) => {
                // Note: failures here are intentionally not cached.
                warn!("Failed to read glob walker: {err}");
                return Vec::new();
            }
        };

        let mut paths = Vec::new();
        for entry in walker {
            let entry = match entry {
                Ok(entry) => entry,
                Err(err) => {
                    warn!("Failed to read glob entry: {err}");
                    continue;
                }
            };
            // Matches must be UTF-8 representable to become system paths.
            let path = match SystemPathBuf::from_path_buf(entry.into_path()) {
                Ok(path) => path,
                Err(err) => {
                    warn!("Failed to convert path to system path: {}", err.display());
                    continue;
                }
            };
            paths.push(path);
        }

        self.cache.insert(root, globs, paths.clone());
        paths
    }
}
/// A cache for resolved globs.
///
/// Maps a root directory to, for each glob set resolved under it, the list
/// of matching file paths.
#[derive(Default, Debug)]
struct GlobCache(FxHashMap<PathBuf, FxHashMap<Vec<String>, Vec<SystemPathBuf>>>);
impl GlobCache {
/// Insert a resolved glob.
fn insert(&mut self, root: PathBuf, globs: Vec<String>, paths: Vec<SystemPathBuf>) {
self.0.entry(root).or_default().insert(globs, paths);
}
/// Get a resolved glob.
fn get(&self, root: &Path, globs: &[String]) -> Option<&Vec<SystemPathBuf>> {
self.0.get(root).and_then(|map| map.get(globs))
}
}

View File

@@ -1,5 +1,4 @@
pub(crate) mod add_noqa;
pub(crate) mod analyze_graph;
pub(crate) mod check;
pub(crate) mod check_stdin;
pub(crate) mod clean;

View File

@@ -20,9 +20,7 @@ use ruff_linter::settings::types::OutputFormat;
use ruff_linter::{fs, warn_user, warn_user_once};
use ruff_workspace::Settings;
use crate::args::{
AnalyzeCommand, AnalyzeGraphCommand, Args, CheckCommand, Command, FormatCommand,
};
use crate::args::{Args, CheckCommand, Command, FormatCommand};
use crate::printer::{Flags as PrinterFlags, Printer};
pub mod args;
@@ -188,7 +186,6 @@ pub fn run(
Command::Check(args) => check(args, global_options),
Command::Format(args) => format(args, global_options),
Command::Server(args) => server(args),
Command::Analyze(AnalyzeCommand::Graph(args)) => analyze_graph(args, global_options),
}
}
@@ -202,15 +199,6 @@ fn format(args: FormatCommand, global_options: GlobalConfigArgs) -> Result<ExitS
}
}
fn analyze_graph(
args: AnalyzeGraphCommand,
global_options: GlobalConfigArgs,
) -> Result<ExitStatus> {
let (cli, config_arguments) = args.partition(global_options)?;
commands::analyze_graph::analyze_graph(cli, &config_arguments)
}
fn server(args: ServerCommand) -> Result<ExitStatus> {
let four = NonZeroUsize::new(4).unwrap();

View File

@@ -1,307 +0,0 @@
//! Tests the interaction of the `analyze graph` command.
#![cfg(not(target_arch = "wasm32"))]
#![cfg(not(windows))]
use assert_fs::prelude::*;
use std::process::Command;
use std::str;
use anyhow::Result;
use assert_fs::fixture::ChildPath;
use insta_cmd::{assert_cmd_snapshot, get_cargo_bin};
use tempfile::TempDir;
/// Build a `ruff analyze graph --preview` invocation against the test binary.
fn command() -> Command {
    let mut cmd = Command::new(get_cargo_bin("ruff"));
    cmd.args(["analyze", "graph", "--preview"]);
    cmd
}
// Snapshot filters applied to command output before comparison, so snapshots
// are identical across platforms.
const INSTA_FILTERS: &[(&str, &str)] = &[
    // Rewrite Windows output to Unix output
    (r"\\", "/"),
];
#[test]
fn dependencies() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
import ruff.b
"#})?;
root.child("ruff")
.child("b.py")
.write_str(indoc::indoc! {r#"
from ruff import c
"#})?;
root.child("ruff")
.child("c.py")
.write_str(indoc::indoc! {r#"
from . import d
"#})?;
root.child("ruff")
.child("d.py")
.write_str(indoc::indoc! {r#"
from .e import f
"#})?;
root.child("ruff")
.child("e.py")
.write_str(indoc::indoc! {r#"
def f(): pass
"#})?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [
"ruff/c.py"
],
"ruff/c.py": [
"ruff/d.py"
],
"ruff/d.py": [
"ruff/e.py"
],
"ruff/e.py": []
}
----- stderr -----
"###);
});
Ok(())
}
#[test]
fn dependents() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
import ruff.b
"#})?;
root.child("ruff")
.child("b.py")
.write_str(indoc::indoc! {r#"
from ruff import c
"#})?;
root.child("ruff")
.child("c.py")
.write_str(indoc::indoc! {r#"
from . import d
"#})?;
root.child("ruff")
.child("d.py")
.write_str(indoc::indoc! {r#"
from .e import f
"#})?;
root.child("ruff")
.child("e.py")
.write_str(indoc::indoc! {r#"
def f(): pass
"#})?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().arg("--direction").arg("dependents").current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [],
"ruff/b.py": [
"ruff/a.py"
],
"ruff/c.py": [
"ruff/b.py"
],
"ruff/d.py": [
"ruff/c.py"
],
"ruff/e.py": [
"ruff/d.py"
]
}
----- stderr -----
"###);
});
Ok(())
}
#[test]
fn string_detection() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
import ruff.b
"#})?;
root.child("ruff")
.child("b.py")
.write_str(indoc::indoc! {r#"
import importlib
importlib.import_module("ruff.c")
"#})?;
root.child("ruff").child("c.py").write_str("")?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [],
"ruff/c.py": []
}
----- stderr -----
"###);
});
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().arg("--detect-string-imports").current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [
"ruff/c.py"
],
"ruff/c.py": []
}
----- stderr -----
"###);
});
Ok(())
}
#[test]
fn globs() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff.toml").write_str(indoc::indoc! {r#"
[analyze]
include-dependencies = { "ruff/a.py" = ["ruff/b.py"], "ruff/b.py" = ["ruff/*.py"], "ruff/c.py" = ["*.json"] }
"#})?;
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff").child("a.py").write_str("")?;
root.child("ruff").child("b.py").write_str("")?;
root.child("ruff").child("c.py").write_str("")?;
root.child("ruff").child("d.json").write_str("")?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [
"ruff/__init__.py",
"ruff/a.py",
"ruff/b.py",
"ruff/c.py"
],
"ruff/c.py": [
"ruff/d.json"
]
}
----- stderr -----
"###);
});
Ok(())
}
#[test]
fn exclude() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff.toml").write_str(indoc::indoc! {r#"
[analyze]
exclude = ["ruff/c.py"]
"#})?;
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
import ruff.b
"#})?;
root.child("ruff").child("b.py").write_str("")?;
root.child("ruff").child("c.py").write_str("")?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": []
}
----- stderr -----
"###);
});
Ok(())
}

View File

@@ -326,18 +326,18 @@ fn docstring_options() -> Result<()> {
let ruff_toml = tempdir.path().join("ruff.toml");
fs::write(
&ruff_toml,
r"
r#"
[format]
docstring-code-format = true
docstring-code-line-length = 20
",
"#,
)?;
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(["format", "--config"])
.arg(&ruff_toml)
.arg("-")
.pass_stdin(r"
.pass_stdin(r#"
def f(x):
'''
Something about `f`. And an example:
@@ -357,7 +357,7 @@ def f(x):
>>> foo, bar, quux = this_is_a_long_line(lion, hippo, lemur, bear)
'''
pass
"), @r###"
"#), @r###"
success: true
exit_code: 0
----- stdout -----
@@ -509,9 +509,9 @@ fn syntax_error() -> Result<()> {
fs::write(
tempdir.path().join("main.py"),
r"
r#"
from module import =
",
"#,
)?;
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
@@ -1945,10 +1945,11 @@ fn range_end_only() {
def foo(arg1, arg2,):
print("Should format this" )
"#), @r#"
"#), @r###"
success: true
exit_code: 0
----- stdout -----
def foo(
arg1,
arg2,
@@ -1957,7 +1958,7 @@ def foo(arg1, arg2,):
----- stderr -----
"#);
"###);
}
#[test]

View File

@@ -158,15 +158,15 @@ fn check_default_files() -> Result<()> {
let tempdir = TempDir::new()?;
fs::write(
tempdir.path().join("foo.py"),
r"
r#"
import foo # unused import
",
"#,
)?;
fs::write(
tempdir.path().join("bar.py"),
r"
r#"
import bar # unused import
",
"#,
)?;
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
@@ -906,10 +906,10 @@ fn full_output_preview_config() -> Result<()> {
let pyproject_toml = tempdir.path().join("pyproject.toml");
fs::write(
&pyproject_toml,
r"
r#"
[tool.ruff]
preview = true
",
"#,
)?;
let mut cmd = RuffCheck::default().config(&pyproject_toml).build();
assert_cmd_snapshot!(cmd.pass_stdin("l = 1"), @r###"

View File

@@ -1619,58 +1619,6 @@ print(
Ok(())
}
#[test]
fn add_noqa_exclude() -> Result<()> {
let tempdir = TempDir::new()?;
let ruff_toml = tempdir.path().join("ruff.toml");
fs::write(
&ruff_toml,
r#"
[lint]
exclude = ["excluded.py"]
select = ["RUF015"]
"#,
)?;
let test_path = tempdir.path().join("noqa.py");
fs::write(
&test_path,
r#"
def first_square():
return [x * x for x in range(20)][0]
"#,
)?;
let exclude_path = tempdir.path().join("excluded.py");
fs::write(
&exclude_path,
r#"
def first_square():
return [x * x for x in range(20)][0]
"#,
)?;
insta::with_settings!({
filters => vec![(tempdir_filter(&tempdir).as_str(), "[TMP]/")]
}, {
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.current_dir(tempdir.path())
.args(STDIN_BASE_OPTIONS)
.args(["--add-noqa"]), @r###"
success: true
exit_code: 0
----- stdout -----
----- stderr -----
Added 1 noqa directive.
"###);
});
Ok(())
}
/// Infer `3.11` from `requires-python` in `pyproject.toml`.
#[test]
fn requires_python() -> Result<()> {

View File

@@ -200,7 +200,7 @@ linter.safety_table.forced_unsafe = []
linter.target_version = Py37
linter.preview = disabled
linter.explicit_preview_rules = false
linter.extension = ExtensionMapping({})
linter.extension.mapping = {}
linter.allowed_confusables = []
linter.builtins = []
linter.dummy_variable_rgx = ^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$
@@ -388,12 +388,4 @@ formatter.magic_trailing_comma = respect
formatter.docstring_code_format = disabled
formatter.docstring_code_line_width = dynamic
# Analyze Settings
analyze.exclude = []
analyze.preview = disabled
analyze.target_version = Py37
analyze.detect_string_imports = false
analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {}
----- stderr -----

View File

@@ -26,7 +26,6 @@ filetime = { workspace = true }
ignore = { workspace = true, optional = true }
matchit = { workspace = true }
salsa = { workspace = true }
serde = { workspace = true, optional = true }
path-slash = { workspace = true }
thiserror = { workspace = true }
tracing = { workspace = true }
@@ -34,14 +33,12 @@ tracing-subscriber = { workspace = true, optional = true }
tracing-tree = { workspace = true, optional = true }
rustc-hash = { workspace = true }
[target.'cfg(not(any(target_arch = "wasm32", target_arch = "powerpc64")))'.dependencies]
[target.'cfg(not(target_arch="wasm32"))'.dependencies]
zip = { workspace = true, features = ["zstd"] }
[target.'cfg(any(target_arch = "wasm32", target_arch = "powerpc64"))'.dependencies]
zip = { workspace = true, features = ["deflate"] }
[target.'cfg(target_arch = "wasm32")'.dependencies]
[target.'cfg(target_arch="wasm32")'.dependencies]
web-time = { version = "1.1.0" }
zip = { workspace = true, features = ["deflate"] }
[dev-dependencies]
insta = { workspace = true }
@@ -50,6 +47,5 @@ tempfile = { workspace = true }
[features]
cache = ["ruff_cache"]
os = ["ignore"]
serde = ["dep:serde", "camino/serde1"]
# Exposes testing utilities.
testing = ["tracing-subscriber", "tracing-tree"]

View File

@@ -16,7 +16,7 @@ use super::walk_directory::{
};
/// A system implementation that uses the OS file system.
#[derive(Default, Debug, Clone)]
#[derive(Default, Debug)]
pub struct OsSystem {
inner: Arc<OsSystemInner>,
}

View File

@@ -593,27 +593,6 @@ impl ruff_cache::CacheKey for SystemPathBuf {
}
}
#[cfg(feature = "serde")]
impl serde::Serialize for SystemPath {
fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
self.0.serialize(serializer)
}
}
#[cfg(feature = "serde")]
impl serde::Serialize for SystemPathBuf {
fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
self.0.serialize(serializer)
}
}
#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for SystemPathBuf {
fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
Utf8PathBuf::deserialize(deserializer).map(SystemPathBuf)
}
}
/// A slice of a virtual path on [`System`](super::System) (akin to [`str`]).
#[repr(transparent)]
pub struct SystemVirtualPath(str);

View File

@@ -194,10 +194,6 @@ pub(crate) struct Args {
/// Format the files. Without this flag, the python files are not modified
#[arg(long)]
pub(crate) write: bool,
#[arg(long)]
pub(crate) preview: bool,
/// Control the verbosity of the output
#[arg(long, default_value_t, value_enum)]
pub(crate) format: Format,
@@ -239,8 +235,7 @@ pub(crate) fn main(args: &Args) -> anyhow::Result<ExitCode> {
let all_success = if args.multi_project {
format_dev_multi_project(args, error_file)?
} else {
let result =
format_dev_project(&args.files, args.stability_check, args.write, args.preview)?;
let result = format_dev_project(&args.files, args.stability_check, args.write)?;
let error_count = result.error_count();
if result.error_count() > 0 {
@@ -349,12 +344,7 @@ fn format_dev_multi_project(
for project_path in project_paths {
debug!(parent: None, "Starting {}", project_path.display());
match format_dev_project(
&[project_path.clone()],
args.stability_check,
args.write,
args.preview,
) {
match format_dev_project(&[project_path.clone()], args.stability_check, args.write) {
Ok(result) => {
total_errors += result.error_count();
total_files += result.file_count;
@@ -452,7 +442,6 @@ fn format_dev_project(
files: &[PathBuf],
stability_check: bool,
write: bool,
preview: bool,
) -> anyhow::Result<CheckRepoResult> {
let start = Instant::now();
@@ -488,14 +477,7 @@ fn format_dev_project(
#[cfg(feature = "singlethreaded")]
let iter = { paths.into_iter() };
iter.map(|path| {
let result = format_dir_entry(
path,
stability_check,
write,
preview,
&black_options,
&resolver,
);
let result = format_dir_entry(path, stability_check, write, &black_options, &resolver);
pb_span.pb_inc(1);
result
})
@@ -550,7 +532,6 @@ fn format_dir_entry(
resolved_file: Result<ResolvedFile, ignore::Error>,
stability_check: bool,
write: bool,
preview: bool,
options: &BlackOptions,
resolver: &Resolver,
) -> anyhow::Result<(Result<Statistics, CheckFileError>, PathBuf), Error> {
@@ -563,10 +544,6 @@ fn format_dir_entry(
let path = resolved_file.into_path();
let mut options = options.to_py_format_options(&path);
if preview {
options = options.with_preview(PreviewMode::Enabled);
}
let settings = resolver.resolve(&path);
// That's a bad way of doing this but it's not worth doing something better for format_dev
if settings.formatter.line_width != LineWidth::default() {
@@ -574,8 +551,9 @@ fn format_dir_entry(
}
// Handle panics (mostly in `debug_assert!`)
let result = catch_unwind(|| format_dev_file(&path, stability_check, write, options))
.unwrap_or_else(|panic| {
let result = match catch_unwind(|| format_dev_file(&path, stability_check, write, options)) {
Ok(result) => result,
Err(panic) => {
if let Some(message) = panic.downcast_ref::<String>() {
Err(CheckFileError::Panic {
message: message.clone(),
@@ -590,7 +568,8 @@ fn format_dir_entry(
message: "(Panic didn't set a string message)".to_string(),
})
}
});
}
};
Ok((result, path))
}

View File

@@ -1,31 +0,0 @@
[package]
name = "ruff_graph"
version = "0.1.0"
edition.workspace = true
rust-version.workspace = true
homepage.workspace = true
documentation.workspace = true
repository.workspace = true
authors.workspace = true
license.workspace = true
[dependencies]
red_knot_python_semantic = { workspace = true }
ruff_cache = { workspace = true }
ruff_db = { workspace = true, features = ["os", "serde"] }
ruff_linter = { workspace = true }
ruff_macros = { workspace = true }
ruff_python_ast = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true, optional = true }
salsa = { workspace = true }
schemars = { workspace = true, optional = true }
serde = { workspace = true, optional = true }
[lints]
workspace = true
[package.metadata.cargo-shear]
# Used via `CacheKey` macro expansion.
ignored = ["ruff_cache"]

View File

@@ -1,111 +0,0 @@
use red_knot_python_semantic::ModuleName;
use ruff_python_ast::visitor::source_order::{walk_body, walk_expr, walk_stmt, SourceOrderVisitor};
use ruff_python_ast::{self as ast, Expr, ModModule, Stmt};
/// Collect all imports for a given Python file.
#[derive(Default, Debug)]
pub(crate) struct Collector<'a> {
/// The path to the current module.
module_path: Option<&'a [String]>,
/// Whether to detect imports from string literals.
string_imports: bool,
/// The collected imports from the Python AST.
imports: Vec<CollectedImport>,
}
impl<'a> Collector<'a> {
pub(crate) fn new(module_path: Option<&'a [String]>, string_imports: bool) -> Self {
Self {
module_path,
string_imports,
imports: Vec::new(),
}
}
#[must_use]
pub(crate) fn collect(mut self, module: &ModModule) -> Vec<CollectedImport> {
walk_body(&mut self, &module.body);
self.imports
}
}
impl<'ast> SourceOrderVisitor<'ast> for Collector<'_> {
fn visit_stmt(&mut self, stmt: &'ast Stmt) {
match stmt {
Stmt::ImportFrom(ast::StmtImportFrom {
names,
module,
level,
range: _,
}) => {
let module = module.as_deref();
let level = *level;
for alias in names {
let mut components = vec![];
if level > 0 {
// If we're resolving a relative import, we must have a module path.
let Some(module_path) = self.module_path else {
return;
};
// Start with the containing module.
components.extend(module_path.iter().map(String::as_str));
// Remove segments based on the number of dots.
for _ in 0..level {
if components.is_empty() {
return;
}
components.pop();
}
}
// Add the module path.
if let Some(module) = module {
components.extend(module.split('.'));
}
// Add the alias name.
components.push(alias.name.as_str());
if let Some(module_name) = ModuleName::from_components(components) {
self.imports.push(CollectedImport::ImportFrom(module_name));
}
}
}
Stmt::Import(ast::StmtImport { names, range: _ }) => {
for alias in names {
if let Some(module_name) = ModuleName::new(alias.name.as_str()) {
self.imports.push(CollectedImport::Import(module_name));
}
}
}
_ => {
walk_stmt(self, stmt);
}
}
}
fn visit_expr(&mut self, expr: &'ast Expr) {
if self.string_imports {
if let Expr::StringLiteral(ast::ExprStringLiteral { value, range: _ }) = expr {
// Determine whether the string literal "looks like" an import statement: contains
// a dot, and consists solely of valid Python identifiers.
let value = value.to_str();
if let Some(module_name) = ModuleName::new(value) {
self.imports.push(CollectedImport::Import(module_name));
}
}
walk_expr(self, expr);
}
}
}
#[derive(Debug)]
pub(crate) enum CollectedImport {
/// The import was part of an `import` statement.
Import(ModuleName),
/// The import was part of an `import from` statement.
ImportFrom(ModuleName),
}

View File

@@ -1,97 +0,0 @@
use anyhow::Result;
use red_knot_python_semantic::{Db, Program, ProgramSettings, PythonVersion, SearchPathSettings};
use ruff_db::files::{File, Files};
use ruff_db::system::{OsSystem, System, SystemPathBuf};
use ruff_db::vendored::VendoredFileSystem;
use ruff_db::{Db as SourceDb, Upcast};
#[salsa::db]
#[derive(Default)]
pub struct ModuleDb {
storage: salsa::Storage<Self>,
files: Files,
system: OsSystem,
vendored: VendoredFileSystem,
}
impl ModuleDb {
/// Initialize a [`ModuleDb`] from the given source root.
pub fn from_src_roots(
mut src_roots: impl Iterator<Item = SystemPathBuf>,
target_version: PythonVersion,
) -> Result<Self> {
let search_paths = {
// Use the first source root.
let src_root = src_roots
.next()
.ok_or_else(|| anyhow::anyhow!("No source roots provided"))?;
let mut search_paths = SearchPathSettings::new(src_root.to_path_buf());
// Add the remaining source roots as extra paths.
for src_root in src_roots {
search_paths.extra_paths.push(src_root.to_path_buf());
}
search_paths
};
let db = Self::default();
Program::from_settings(
&db,
&ProgramSettings {
target_version,
search_paths,
},
)?;
Ok(db)
}
/// Create a snapshot of the current database.
#[must_use]
pub fn snapshot(&self) -> Self {
Self {
storage: self.storage.clone(),
system: self.system.clone(),
vendored: self.vendored.clone(),
files: self.files.snapshot(),
}
}
}
impl Upcast<dyn SourceDb> for ModuleDb {
fn upcast(&self) -> &(dyn SourceDb + 'static) {
self
}
fn upcast_mut(&mut self) -> &mut (dyn SourceDb + 'static) {
self
}
}
#[salsa::db]
impl SourceDb for ModuleDb {
fn vendored(&self) -> &VendoredFileSystem {
&self.vendored
}
fn system(&self) -> &dyn System {
&self.system
}
fn files(&self) -> &Files {
&self.files
}
}
#[salsa::db]
impl Db for ModuleDb {
fn is_file_open(&self, file: File) -> bool {
!file.path(self).is_vendored_path()
}
}
#[salsa::db]
impl salsa::Database for ModuleDb {
fn salsa_event(&self, _event: &dyn Fn() -> salsa::Event) {}
}

View File

@@ -1,126 +0,0 @@
use crate::collector::Collector;
pub use crate::db::ModuleDb;
use crate::resolver::Resolver;
pub use crate::settings::{AnalyzeSettings, Direction};
use anyhow::Result;
use red_knot_python_semantic::SemanticModel;
use ruff_db::files::system_path_to_file;
use ruff_db::parsed::parsed_module;
use ruff_db::system::{SystemPath, SystemPathBuf};
use ruff_python_ast::helpers::to_module_path;
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, BTreeSet};
mod collector;
mod db;
mod resolver;
mod settings;
#[derive(Debug, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ModuleImports(BTreeSet<SystemPathBuf>);
impl ModuleImports {
/// Detect the [`ModuleImports`] for a given Python file.
pub fn detect(
db: &ModuleDb,
path: &SystemPath,
package: Option<&SystemPath>,
string_imports: bool,
) -> Result<Self> {
// Read and parse the source code.
let file = system_path_to_file(db, path)?;
let parsed = parsed_module(db, file);
let module_path =
package.and_then(|package| to_module_path(package.as_std_path(), path.as_std_path()));
let model = SemanticModel::new(db, file);
// Collect the imports.
let imports =
Collector::new(module_path.as_deref(), string_imports).collect(parsed.syntax());
// Resolve the imports.
let mut resolved_imports = ModuleImports::default();
for import in imports {
let Some(resolved) = Resolver::new(&model).resolve(import) else {
continue;
};
let Some(path) = resolved.as_system_path() else {
continue;
};
resolved_imports.insert(path.to_path_buf());
}
Ok(resolved_imports)
}
/// Insert a file path into the module imports.
pub fn insert(&mut self, path: SystemPathBuf) {
self.0.insert(path);
}
/// Extend the module imports with additional file paths.
pub fn extend(&mut self, paths: impl IntoIterator<Item = SystemPathBuf>) {
self.0.extend(paths);
}
/// Returns `true` if the module imports are empty.
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Returns the number of module imports.
pub fn len(&self) -> usize {
self.0.len()
}
/// Convert the file paths to be relative to a given path.
#[must_use]
pub fn relative_to(self, path: &SystemPath) -> Self {
Self(
self.0
.into_iter()
.map(|import| {
import
.strip_prefix(path)
.map(SystemPath::to_path_buf)
.unwrap_or(import)
})
.collect(),
)
}
}
#[derive(Debug, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ImportMap(BTreeMap<SystemPathBuf, ModuleImports>);
impl ImportMap {
/// Insert a module's imports into the map.
pub fn insert(&mut self, path: SystemPathBuf, imports: ModuleImports) {
self.0.insert(path, imports);
}
/// Reverse the [`ImportMap`], e.g., to convert from dependencies to dependents.
#[must_use]
pub fn reverse(imports: impl IntoIterator<Item = (SystemPathBuf, ModuleImports)>) -> Self {
let mut reverse = ImportMap::default();
for (path, imports) in imports {
for import in imports.0 {
reverse.0.entry(import).or_default().insert(path.clone());
}
reverse.0.entry(path).or_default();
}
reverse
}
}
impl FromIterator<(SystemPathBuf, ModuleImports)> for ImportMap {
fn from_iter<I: IntoIterator<Item = (SystemPathBuf, ModuleImports)>>(iter: I) -> Self {
let mut map = ImportMap::default();
for (path, imports) in iter {
map.0.entry(path).or_default().0.extend(imports.0);
}
map
}
}

View File

@@ -1,39 +0,0 @@
use red_knot_python_semantic::SemanticModel;
use ruff_db::files::FilePath;
use crate::collector::CollectedImport;
/// Collect all imports for a given Python file.
pub(crate) struct Resolver<'a> {
semantic: &'a SemanticModel<'a>,
}
impl<'a> Resolver<'a> {
/// Initialize a [`Resolver`] with a given [`SemanticModel`].
pub(crate) fn new(semantic: &'a SemanticModel<'a>) -> Self {
Self { semantic }
}
/// Resolve the [`CollectedImport`] into a [`FilePath`].
pub(crate) fn resolve(&self, import: CollectedImport) -> Option<&'a FilePath> {
match import {
CollectedImport::Import(import) => self
.semantic
.resolve_module(import)
.map(|module| module.file().path(self.semantic.db())),
CollectedImport::ImportFrom(import) => {
// Attempt to resolve the member (e.g., given `from foo import bar`, look for `foo.bar`).
let parent = import.parent();
self.semantic
.resolve_module(import)
.map(|module| module.file().path(self.semantic.db()))
.or_else(|| {
// Attempt to resolve the module (e.g., given `from foo import bar`, look for `foo`).
self.semantic
.resolve_module(parent?)
.map(|module| module.file().path(self.semantic.db()))
})
}
}
}
}

View File

@@ -1,56 +0,0 @@
use ruff_linter::display_settings;
use ruff_linter::settings::types::{ExtensionMapping, FilePatternSet, PreviewMode, PythonVersion};
use ruff_macros::CacheKey;
use std::collections::BTreeMap;
use std::fmt;
use std::path::PathBuf;
#[derive(Debug, Default, Clone, CacheKey)]
pub struct AnalyzeSettings {
pub exclude: FilePatternSet,
pub preview: PreviewMode,
pub target_version: PythonVersion,
pub detect_string_imports: bool,
pub include_dependencies: BTreeMap<PathBuf, (PathBuf, Vec<String>)>,
pub extension: ExtensionMapping,
}
impl fmt::Display for AnalyzeSettings {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "\n# Analyze Settings")?;
display_settings! {
formatter = f,
namespace = "analyze",
fields = [
self.exclude,
self.preview,
self.target_version | debug,
self.detect_string_imports,
self.extension | debug,
self.include_dependencies | debug,
]
}
Ok(())
}
}
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, CacheKey)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
pub enum Direction {
/// Construct a map from module to its dependencies (i.e., the modules that it imports).
#[default]
Dependencies,
/// Construct a map from module to its dependents (i.e., the modules that import it).
Dependents,
}
impl fmt::Display for Direction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Dependencies => write!(f, "\"dependencies\""),
Self::Dependents => write!(f, "\"dependents\""),
}
}
}

View File

@@ -1,6 +1,6 @@
[package]
name = "ruff_linter"
version = "0.6.6"
version = "0.6.5"
publish = false
authors = { workspace = true }
edition = { workspace = true }

View File

@@ -1,6 +1,4 @@
from typing import Annotated
from fastapi import FastAPI, Path
from fastapi import FastAPI
app = FastAPI()
@@ -84,11 +82,6 @@ async def read_thing(
return {"query": query}
@app.get("/books/{name}/{title}")
async def read_thing(*, author: Annotated[str, Path(alias="author_name")], title: str):
return {"author": author, "title": title}
# OK
@app.get("/things/{thing_id}")
async def read_thing(thing_id: int, query: str):
@@ -125,11 +118,6 @@ async def read_thing(*, author: str, title: str):
return {"author": author, "title": title}
@app.get("/books/{name}/{title}")
async def read_thing(*, author: Annotated[str, Path(alias="name")], title: str):
return {"author": author, "title": title}
# Ignored
@app.get("/things/{thing-id}")
async def read_thing(query: str):
@@ -143,4 +131,4 @@ async def read_thing(query: str):
@app.get("/things/{thing_id=}")
async def read_thing(query: str):
return {"query": query}
return {"query": query}

View File

@@ -1,20 +0,0 @@
def f():
"Here's a line ending in a question mark?"
...
def f():
"""Here's a line ending in an exclamation mark!"""
...
def f():
"""Here's a line ending in a colon:"""
...
def f():
"""Here's a line ending in a semi colon;"""
...
def f():
"""Here's a line ending with a whitespace """
...

View File

@@ -1,8 +0,0 @@
def foo():
"""Returns foo()."""
def foo():
""""Use prefix_foo()."""
def foo():
""""Use this function; foo()."""

View File

@@ -151,22 +151,4 @@ def remove_prefix_comparable_literal_expr() -> None:
def shadow_builtins(filename: str, extension: str) -> None:
from builtins import len as builtins_len
return filename[:-builtins_len(extension)] if filename.endswith(extension) else filename
def okay_steps():
text = "!x!y!z"
if text.startswith("!"):
text = text[1::1]
if text.startswith("!"):
text = text[1::True]
if text.startswith("!"):
text = text[1::None]
print(text)
# this should be skipped
def ignore_step():
text = "!x!y!z"
if text.startswith("!"):
text = text[1::2]
print(text)
return filename[:-builtins_len(extension)] if filename.endswith(extension) else filename

View File

@@ -152,8 +152,6 @@ pub fn set_up_logging(level: LogLevel) -> Result<()> {
})
.level(level.level_filter())
.level_for("globset", log::LevelFilter::Warn)
.level_for("red_knot_python_semantic", log::LevelFilter::Warn)
.level_for("salsa", log::LevelFilter::Warn)
.chain(std::io::stderr())
.apply()?;
Ok(())

View File

@@ -6,8 +6,7 @@ use ruff_diagnostics::Fix;
use ruff_diagnostics::{Diagnostic, FixAvailability, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast as ast;
use ruff_python_ast::{Expr, Parameter, ParameterWithDefault};
use ruff_python_semantic::{Modules, SemanticModel};
use ruff_python_semantic::Modules;
use ruff_python_stdlib::identifiers::is_identifier;
use ruff_text_size::{Ranged, TextSize};
@@ -142,10 +141,7 @@ pub(crate) fn fastapi_unused_path_parameter(
.args
.iter()
.chain(function_def.parameters.kwonlyargs.iter())
.map(|ParameterWithDefault { parameter, .. }| {
parameter_alias(parameter, checker.semantic())
.unwrap_or_else(|| parameter.name.as_str())
})
.map(|arg| arg.parameter.name.as_str())
.collect();
// Check if any of the path parameters are not in the function signature.
@@ -194,52 +190,6 @@ pub(crate) fn fastapi_unused_path_parameter(
checker.diagnostics.extend(diagnostics);
}
/// Extract the expected in-route name for a given parameter, if it has an alias.
/// For example, given `document_id: Annotated[str, Path(alias="documentId")]`, returns `"documentId"`.
fn parameter_alias<'a>(parameter: &'a Parameter, semantic: &SemanticModel) -> Option<&'a str> {
let Some(annotation) = &parameter.annotation else {
return None;
};
let Expr::Subscript(subscript) = annotation.as_ref() else {
return None;
};
let Expr::Tuple(tuple) = subscript.slice.as_ref() else {
return None;
};
let Some(Expr::Call(path)) = tuple.elts.get(1) else {
return None;
};
// Find the `alias` keyword argument.
let alias = path
.arguments
.find_keyword("alias")
.map(|alias| &alias.value)?;
// Ensure that it's a literal string.
let Expr::StringLiteral(alias) = alias else {
return None;
};
// Verify that the subscript was a `typing.Annotated`.
if !semantic.match_typing_expr(&subscript.value, "Annotated") {
return None;
}
// Verify that the call was a `fastapi.Path`.
if !semantic
.resolve_qualified_name(&path.func)
.is_some_and(|qualified_name| matches!(qualified_name.segments(), ["fastapi", "Path"]))
{
return None;
}
Some(alias.value.to_str())
}
/// An iterator to extract parameters from FastAPI route paths.
///
/// The iterator yields tuples of the parameter name and the range of the parameter in the input,

View File

@@ -1,342 +1,323 @@
---
source: crates/ruff_linter/src/rules/fastapi/mod.rs
---
FAST003.py:9:19: FAST003 [*] Parameter `thing_id` appears in route path, but not in `read_thing` signature
|
8 | # Errors
9 | @app.get("/things/{thing_id}")
| ^^^^^^^^^^ FAST003
10 | async def read_thing(query: str):
11 | return {"query": query}
|
= help: Add `thing_id` to function signature
FAST003.py:7:19: FAST003 [*] Parameter `thing_id` appears in route path, but not in `read_thing` signature
|
6 | # Errors
7 | @app.get("/things/{thing_id}")
| ^^^^^^^^^^ FAST003
8 | async def read_thing(query: str):
9 | return {"query": query}
|
= help: Add `thing_id` to function signature
Unsafe fix
7 7 |
8 8 | # Errors
9 9 | @app.get("/things/{thing_id}")
10 |-async def read_thing(query: str):
10 |+async def read_thing(query: str, thing_id):
11 11 | return {"query": query}
12 12 |
13 13 |
5 5 |
6 6 | # Errors
7 7 | @app.get("/things/{thing_id}")
8 |-async def read_thing(query: str):
8 |+async def read_thing(query: str, thing_id):
9 9 | return {"query": query}
10 10 |
11 11 |
FAST003.py:14:23: FAST003 [*] Parameter `isbn` appears in route path, but not in `read_thing` signature
FAST003.py:12:23: FAST003 [*] Parameter `isbn` appears in route path, but not in `read_thing` signature
|
14 | @app.get("/books/isbn-{isbn}")
12 | @app.get("/books/isbn-{isbn}")
| ^^^^^^ FAST003
15 | async def read_thing():
16 | ...
13 | async def read_thing():
14 | ...
|
= help: Add `isbn` to function signature
Unsafe fix
12 12 |
13 13 |
14 14 | @app.get("/books/isbn-{isbn}")
15 |-async def read_thing():
15 |+async def read_thing(isbn):
16 16 | ...
17 17 |
18 18 |
10 10 |
11 11 |
12 12 | @app.get("/books/isbn-{isbn}")
13 |-async def read_thing():
13 |+async def read_thing(isbn):
14 14 | ...
15 15 |
16 16 |
FAST003.py:19:19: FAST003 [*] Parameter `thing_id` appears in route path, but not in `read_thing` signature
FAST003.py:17:19: FAST003 [*] Parameter `thing_id` appears in route path, but not in `read_thing` signature
|
19 | @app.get("/things/{thing_id:path}")
17 | @app.get("/things/{thing_id:path}")
| ^^^^^^^^^^^^^^^ FAST003
20 | async def read_thing(query: str):
21 | return {"query": query}
18 | async def read_thing(query: str):
19 | return {"query": query}
|
= help: Add `thing_id` to function signature
Unsafe fix
17 17 |
18 18 |
19 19 | @app.get("/things/{thing_id:path}")
20 |-async def read_thing(query: str):
20 |+async def read_thing(query: str, thing_id):
21 21 | return {"query": query}
22 22 |
23 23 |
15 15 |
16 16 |
17 17 | @app.get("/things/{thing_id:path}")
18 |-async def read_thing(query: str):
18 |+async def read_thing(query: str, thing_id):
19 19 | return {"query": query}
20 20 |
21 21 |
FAST003.py:24:19: FAST003 [*] Parameter `thing_id` appears in route path, but not in `read_thing` signature
FAST003.py:22:19: FAST003 [*] Parameter `thing_id` appears in route path, but not in `read_thing` signature
|
24 | @app.get("/things/{thing_id : path}")
22 | @app.get("/things/{thing_id : path}")
| ^^^^^^^^^^^^^^^^^ FAST003
25 | async def read_thing(query: str):
26 | return {"query": query}
23 | async def read_thing(query: str):
24 | return {"query": query}
|
= help: Add `thing_id` to function signature
Unsafe fix
22 22 |
23 23 |
24 24 | @app.get("/things/{thing_id : path}")
25 |-async def read_thing(query: str):
25 |+async def read_thing(query: str, thing_id):
26 26 | return {"query": query}
27 27 |
28 28 |
20 20 |
21 21 |
22 22 | @app.get("/things/{thing_id : path}")
23 |-async def read_thing(query: str):
23 |+async def read_thing(query: str, thing_id):
24 24 | return {"query": query}
25 25 |
26 26 |
FAST003.py:29:27: FAST003 [*] Parameter `title` appears in route path, but not in `read_thing` signature
FAST003.py:27:27: FAST003 [*] Parameter `title` appears in route path, but not in `read_thing` signature
|
29 | @app.get("/books/{author}/{title}")
27 | @app.get("/books/{author}/{title}")
| ^^^^^^^ FAST003
30 | async def read_thing(author: str):
31 | return {"author": author}
28 | async def read_thing(author: str):
29 | return {"author": author}
|
= help: Add `title` to function signature
Unsafe fix
27 27 |
28 28 |
29 29 | @app.get("/books/{author}/{title}")
30 |-async def read_thing(author: str):
30 |+async def read_thing(author: str, title):
31 31 | return {"author": author}
32 32 |
33 33 |
25 25 |
26 26 |
27 27 | @app.get("/books/{author}/{title}")
28 |-async def read_thing(author: str):
28 |+async def read_thing(author: str, title):
29 29 | return {"author": author}
30 30 |
31 31 |
FAST003.py:34:18: FAST003 [*] Parameter `author_name` appears in route path, but not in `read_thing` signature
FAST003.py:32:18: FAST003 [*] Parameter `author_name` appears in route path, but not in `read_thing` signature
|
34 | @app.get("/books/{author_name}/{title}")
32 | @app.get("/books/{author_name}/{title}")
| ^^^^^^^^^^^^^ FAST003
35 | async def read_thing():
36 | ...
33 | async def read_thing():
34 | ...
|
= help: Add `author_name` to function signature
Unsafe fix
32 32 |
33 33 |
34 34 | @app.get("/books/{author_name}/{title}")
35 |-async def read_thing():
35 |+async def read_thing(author_name):
36 36 | ...
37 37 |
38 38 |
30 30 |
31 31 |
32 32 | @app.get("/books/{author_name}/{title}")
33 |-async def read_thing():
33 |+async def read_thing(author_name):
34 34 | ...
35 35 |
36 36 |
FAST003.py:34:32: FAST003 [*] Parameter `title` appears in route path, but not in `read_thing` signature
FAST003.py:32:32: FAST003 [*] Parameter `title` appears in route path, but not in `read_thing` signature
|
34 | @app.get("/books/{author_name}/{title}")
32 | @app.get("/books/{author_name}/{title}")
| ^^^^^^^ FAST003
35 | async def read_thing():
36 | ...
33 | async def read_thing():
34 | ...
|
= help: Add `title` to function signature
Unsafe fix
32 32 |
33 33 |
34 34 | @app.get("/books/{author_name}/{title}")
35 |-async def read_thing():
35 |+async def read_thing(title):
36 36 | ...
37 37 |
38 38 |
30 30 |
31 31 |
32 32 | @app.get("/books/{author_name}/{title}")
33 |-async def read_thing():
33 |+async def read_thing(title):
34 34 | ...
35 35 |
36 36 |
FAST003.py:39:18: FAST003 Parameter `author` appears in route path, but only as a positional-only argument in `read_thing` signature
FAST003.py:37:18: FAST003 Parameter `author` appears in route path, but only as a positional-only argument in `read_thing` signature
|
39 | @app.get("/books/{author}/{title}")
37 | @app.get("/books/{author}/{title}")
| ^^^^^^^^ FAST003
40 | async def read_thing(author: str, title: str, /):
41 | return {"author": author, "title": title}
38 | async def read_thing(author: str, title: str, /):
39 | return {"author": author, "title": title}
|
FAST003.py:39:27: FAST003 Parameter `title` appears in route path, but only as a positional-only argument in `read_thing` signature
FAST003.py:37:27: FAST003 Parameter `title` appears in route path, but only as a positional-only argument in `read_thing` signature
|
39 | @app.get("/books/{author}/{title}")
37 | @app.get("/books/{author}/{title}")
| ^^^^^^^ FAST003
40 | async def read_thing(author: str, title: str, /):
41 | return {"author": author, "title": title}
38 | async def read_thing(author: str, title: str, /):
39 | return {"author": author, "title": title}
|
FAST003.py:44:27: FAST003 [*] Parameter `title` appears in route path, but not in `read_thing` signature
FAST003.py:42:27: FAST003 [*] Parameter `title` appears in route path, but not in `read_thing` signature
|
44 | @app.get("/books/{author}/{title}/{page}")
42 | @app.get("/books/{author}/{title}/{page}")
| ^^^^^^^ FAST003
45 | async def read_thing(
46 | author: str,
43 | async def read_thing(
44 | author: str,
|
= help: Add `title` to function signature
Unsafe fix
44 44 | @app.get("/books/{author}/{title}/{page}")
45 45 | async def read_thing(
46 46 | author: str,
47 |- query: str,
47 |+ query: str, title,
48 48 | ): ...
49 49 |
50 50 |
42 42 | @app.get("/books/{author}/{title}/{page}")
43 43 | async def read_thing(
44 44 | author: str,
45 |- query: str,
45 |+ query: str, title,
46 46 | ): ...
47 47 |
48 48 |
FAST003.py:44:35: FAST003 [*] Parameter `page` appears in route path, but not in `read_thing` signature
FAST003.py:42:35: FAST003 [*] Parameter `page` appears in route path, but not in `read_thing` signature
|
44 | @app.get("/books/{author}/{title}/{page}")
42 | @app.get("/books/{author}/{title}/{page}")
| ^^^^^^ FAST003
45 | async def read_thing(
46 | author: str,
43 | async def read_thing(
44 | author: str,
|
= help: Add `page` to function signature
Unsafe fix
44 44 | @app.get("/books/{author}/{title}/{page}")
45 45 | async def read_thing(
46 46 | author: str,
47 |- query: str,
47 |+ query: str, page,
48 48 | ): ...
49 49 |
50 50 |
42 42 | @app.get("/books/{author}/{title}/{page}")
43 43 | async def read_thing(
44 44 | author: str,
45 |- query: str,
45 |+ query: str, page,
46 46 | ): ...
47 47 |
48 48 |
FAST003.py:51:18: FAST003 [*] Parameter `author` appears in route path, but not in `read_thing` signature
FAST003.py:49:18: FAST003 [*] Parameter `author` appears in route path, but not in `read_thing` signature
|
51 | @app.get("/books/{author}/{title}")
49 | @app.get("/books/{author}/{title}")
| ^^^^^^^^ FAST003
52 | async def read_thing():
53 | ...
50 | async def read_thing():
51 | ...
|
= help: Add `author` to function signature
Unsafe fix
49 49 |
50 50 |
51 51 | @app.get("/books/{author}/{title}")
52 |-async def read_thing():
52 |+async def read_thing(author):
53 53 | ...
54 54 |
55 55 |
47 47 |
48 48 |
49 49 | @app.get("/books/{author}/{title}")
50 |-async def read_thing():
50 |+async def read_thing(author):
51 51 | ...
52 52 |
53 53 |
FAST003.py:51:27: FAST003 [*] Parameter `title` appears in route path, but not in `read_thing` signature
FAST003.py:49:27: FAST003 [*] Parameter `title` appears in route path, but not in `read_thing` signature
|
51 | @app.get("/books/{author}/{title}")
49 | @app.get("/books/{author}/{title}")
| ^^^^^^^ FAST003
52 | async def read_thing():
53 | ...
50 | async def read_thing():
51 | ...
|
= help: Add `title` to function signature
Unsafe fix
49 49 |
50 50 |
51 51 | @app.get("/books/{author}/{title}")
52 |-async def read_thing():
52 |+async def read_thing(title):
53 53 | ...
54 54 |
55 55 |
47 47 |
48 48 |
49 49 | @app.get("/books/{author}/{title}")
50 |-async def read_thing():
50 |+async def read_thing(title):
51 51 | ...
52 52 |
53 53 |
FAST003.py:56:27: FAST003 [*] Parameter `title` appears in route path, but not in `read_thing` signature
FAST003.py:54:27: FAST003 [*] Parameter `title` appears in route path, but not in `read_thing` signature
|
56 | @app.get("/books/{author}/{title}")
54 | @app.get("/books/{author}/{title}")
| ^^^^^^^ FAST003
57 | async def read_thing(*, author: str):
58 | ...
55 | async def read_thing(*, author: str):
56 | ...
|
= help: Add `title` to function signature
Unsafe fix
54 54 |
55 55 |
56 56 | @app.get("/books/{author}/{title}")
57 |-async def read_thing(*, author: str):
57 |+async def read_thing(title, *, author: str):
58 58 | ...
59 59 |
60 60 |
52 52 |
53 53 |
54 54 | @app.get("/books/{author}/{title}")
55 |-async def read_thing(*, author: str):
55 |+async def read_thing(title, *, author: str):
56 56 | ...
57 57 |
58 58 |
FAST003.py:61:27: FAST003 [*] Parameter `title` appears in route path, but not in `read_thing` signature
FAST003.py:59:27: FAST003 [*] Parameter `title` appears in route path, but not in `read_thing` signature
|
61 | @app.get("/books/{author}/{title}")
59 | @app.get("/books/{author}/{title}")
| ^^^^^^^ FAST003
62 | async def read_thing(hello, /, *, author: str):
63 | ...
60 | async def read_thing(hello, /, *, author: str):
61 | ...
|
= help: Add `title` to function signature
Unsafe fix
59 59 |
60 60 |
61 61 | @app.get("/books/{author}/{title}")
62 |-async def read_thing(hello, /, *, author: str):
62 |+async def read_thing(hello, /, title, *, author: str):
63 63 | ...
64 64 |
65 65 |
57 57 |
58 58 |
59 59 | @app.get("/books/{author}/{title}")
60 |-async def read_thing(hello, /, *, author: str):
60 |+async def read_thing(hello, /, title, *, author: str):
61 61 | ...
62 62 |
63 63 |
FAST003.py:66:19: FAST003 [*] Parameter `thing_id` appears in route path, but not in `read_thing` signature
FAST003.py:64:19: FAST003 [*] Parameter `thing_id` appears in route path, but not in `read_thing` signature
|
66 | @app.get("/things/{thing_id}")
64 | @app.get("/things/{thing_id}")
| ^^^^^^^^^^ FAST003
67 | async def read_thing(
68 | query: str,
65 | async def read_thing(
66 | query: str,
|
= help: Add `thing_id` to function signature
Unsafe fix
65 65 |
66 66 | @app.get("/things/{thing_id}")
67 67 | async def read_thing(
68 |- query: str,
68 |+ query: str, thing_id,
69 69 | ):
70 70 | return {"query": query}
71 71 |
63 63 |
64 64 | @app.get("/things/{thing_id}")
65 65 | async def read_thing(
66 |- query: str,
66 |+ query: str, thing_id,
67 67 | ):
68 68 | return {"query": query}
69 69 |
FAST003.py:73:19: FAST003 [*] Parameter `thing_id` appears in route path, but not in `read_thing` signature
FAST003.py:71:19: FAST003 [*] Parameter `thing_id` appears in route path, but not in `read_thing` signature
|
73 | @app.get("/things/{thing_id}")
71 | @app.get("/things/{thing_id}")
| ^^^^^^^^^^ FAST003
74 | async def read_thing(
75 | query: str = "default",
72 | async def read_thing(
73 | query: str = "default",
|
= help: Add `thing_id` to function signature
Unsafe fix
72 72 |
73 73 | @app.get("/things/{thing_id}")
74 74 | async def read_thing(
75 |- query: str = "default",
75 |+ thing_id, query: str = "default",
76 76 | ):
77 77 | return {"query": query}
78 78 |
70 70 |
71 71 | @app.get("/things/{thing_id}")
72 72 | async def read_thing(
73 |- query: str = "default",
73 |+ thing_id, query: str = "default",
74 74 | ):
75 75 | return {"query": query}
76 76 |
FAST003.py:80:19: FAST003 [*] Parameter `thing_id` appears in route path, but not in `read_thing` signature
FAST003.py:78:19: FAST003 [*] Parameter `thing_id` appears in route path, but not in `read_thing` signature
|
80 | @app.get("/things/{thing_id}")
78 | @app.get("/things/{thing_id}")
| ^^^^^^^^^^ FAST003
81 | async def read_thing(
82 | *, query: str = "default",
79 | async def read_thing(
80 | *, query: str = "default",
|
= help: Add `thing_id` to function signature
Unsafe fix
79 79 |
80 80 | @app.get("/things/{thing_id}")
81 81 | async def read_thing(
82 |- *, query: str = "default",
82 |+ thing_id, *, query: str = "default",
83 83 | ):
84 84 | return {"query": query}
85 85 |
FAST003.py:87:18: FAST003 [*] Parameter `name` appears in route path, but not in `read_thing` signature
|
87 | @app.get("/books/{name}/{title}")
| ^^^^^^ FAST003
88 | async def read_thing(*, author: Annotated[str, Path(alias="author_name")], title: str):
89 | return {"author": author, "title": title}
|
= help: Add `name` to function signature
Unsafe fix
85 85 |
86 86 |
87 87 | @app.get("/books/{name}/{title}")
88 |-async def read_thing(*, author: Annotated[str, Path(alias="author_name")], title: str):
88 |+async def read_thing(name, *, author: Annotated[str, Path(alias="author_name")], title: str):
89 89 | return {"author": author, "title": title}
90 90 |
91 91 |
77 77 |
78 78 | @app.get("/things/{thing_id}")
79 79 | async def read_thing(
80 |- *, query: str = "default",
80 |+ thing_id, *, query: str = "default",
81 81 | ):
82 82 | return {"query": query}
83 83 |

View File

@@ -30,7 +30,7 @@ use crate::registry::Rule;
/// ```console
/// Traceback (most recent call last):
/// File "tmp.py", line 2, in <module>
/// raise RuntimeError("'Some value' is incorrect")
/// raise RuntimeError("Some value is incorrect")
/// RuntimeError: 'Some value' is incorrect
/// ```
///

View File

@@ -29,9 +29,7 @@ mod tests {
#[test_case(Rule::UndocumentedParam, Path::new("sections.py"))]
#[test_case(Rule::EndsInPeriod, Path::new("D.py"))]
#[test_case(Rule::EndsInPeriod, Path::new("D400.py"))]
#[test_case(Rule::EndsInPeriod, Path::new("D400_415.py"))]
#[test_case(Rule::EndsInPunctuation, Path::new("D.py"))]
#[test_case(Rule::EndsInPunctuation, Path::new("D400_415.py"))]
#[test_case(Rule::FirstLineCapitalized, Path::new("D.py"))]
#[test_case(Rule::FirstLineCapitalized, Path::new("D403.py"))]
#[test_case(Rule::FitsOnOneLine, Path::new("D.py"))]
@@ -51,7 +49,6 @@ mod tests {
#[test_case(Rule::OverIndentation, Path::new("D.py"))]
#[test_case(Rule::OverIndentation, Path::new("D208.py"))]
#[test_case(Rule::NoSignature, Path::new("D.py"))]
#[test_case(Rule::NoSignature, Path::new("D402.py"))]
#[test_case(Rule::SurroundingWhitespace, Path::new("D.py"))]
#[test_case(Rule::DocstringStartsWithThis, Path::new("D.py"))]
#[test_case(Rule::UnderIndentation, Path::new("D.py"))]

View File

@@ -1,7 +1,7 @@
use ruff_text_size::TextLen;
use strum::IntoEnumIterator;
use ruff_diagnostics::{Diagnostic, Edit, Fix, FixAvailability, Violation};
use ruff_diagnostics::{AlwaysFixableViolation, Diagnostic, Edit, Fix};
use ruff_macros::{derive_message_formats, violation};
use ruff_source_file::{UniversalNewlineIterator, UniversalNewlines};
use ruff_text_size::Ranged;
@@ -47,18 +47,14 @@ use crate::rules::pydocstyle::helpers::logical_line;
#[violation]
pub struct EndsInPeriod;
impl Violation for EndsInPeriod {
/// `None` in the case a fix is never available or otherwise Some
/// [`FixAvailability`] describing the available fix.
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
impl AlwaysFixableViolation for EndsInPeriod {
#[derive_message_formats]
fn message(&self) -> String {
format!("First line should end with a period")
}
fn fix_title(&self) -> Option<String> {
Some("Add period".to_string())
fn fix_title(&self) -> String {
"Add period".to_string()
}
}
@@ -108,7 +104,7 @@ pub(crate) fn ends_with_period(checker: &mut Checker, docstring: &Docstring) {
if !trimmed.ends_with('.') {
let mut diagnostic = Diagnostic::new(EndsInPeriod, docstring.range());
// Best-effort fix: avoid adding a period after other punctuation marks.
if !trimmed.ends_with([':', ';', '?', '!']) {
if !trimmed.ends_with([':', ';']) {
diagnostic.set_fix(Fix::unsafe_edit(Edit::insertion(
".".to_string(),
line.start() + trimmed.text_len(),

View File

@@ -1,7 +1,7 @@
use ruff_text_size::TextLen;
use strum::IntoEnumIterator;
use ruff_diagnostics::{Diagnostic, Edit, Fix, FixAvailability, Violation};
use ruff_diagnostics::{AlwaysFixableViolation, Diagnostic, Edit, Fix};
use ruff_macros::{derive_message_formats, violation};
use ruff_source_file::{UniversalNewlineIterator, UniversalNewlines};
use ruff_text_size::Ranged;
@@ -46,18 +46,14 @@ use crate::rules::pydocstyle::helpers::logical_line;
#[violation]
pub struct EndsInPunctuation;
impl Violation for EndsInPunctuation {
/// `None` in the case a fix is never available or otherwise Some
/// [`FixAvailability`] describing the available fix.
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
impl AlwaysFixableViolation for EndsInPunctuation {
#[derive_message_formats]
fn message(&self) -> String {
format!("First line should end with a period, question mark, or exclamation point")
}
fn fix_title(&self) -> Option<String> {
Some("Add closing punctuation".to_string())
fn fix_title(&self) -> String {
"Add closing punctuation".to_string()
}
}

View File

@@ -66,25 +66,7 @@ pub(crate) fn no_signature(checker: &mut Checker, docstring: &Docstring) {
// a function named `foo`).
if first_line
.match_indices(function.name.as_str())
.any(|(index, _)| {
// The function name must be preceded by a word boundary.
let preceded_by_word_boundary = first_line[..index]
.chars()
.next_back()
.map_or(true, |c| matches!(c, ' ' | '\t' | ';' | ','));
if !preceded_by_word_boundary {
return false;
}
// The function name must be followed by an open parenthesis.
let followed_by_open_parenthesis =
first_line[index + function.name.len()..].starts_with('(');
if !followed_by_open_parenthesis {
return false;
}
true
})
.any(|(index, _)| first_line[index + function.name.len()..].starts_with('('))
{
checker
.diagnostics

View File

@@ -194,7 +194,7 @@ D.py:487:5: D400 [*] First line should end with a period
489 489 |
490 490 |
D.py:514:5: D400 First line should end with a period
D.py:514:5: D400 [*] First line should end with a period
|
513 | def valid_google_string(): # noqa: D400
514 | """Test a valid something!"""
@@ -202,6 +202,16 @@ D.py:514:5: D400 First line should end with a period
|
= help: Add period
Unsafe fix
511 511 |
512 512 |
513 513 | def valid_google_string(): # noqa: D400
514 |- """Test a valid something!"""
514 |+ """Test a valid something!."""
515 515 |
516 516 |
517 517 | @expect("D415: First line should end with a period, question mark, "
D.py:520:5: D400 [*] First line should end with a period
|
518 | "or exclamation point (not 'g')")
@@ -318,4 +328,6 @@ D.py:664:5: D400 [*] First line should end with a period
665 |+ but continuations shouldn't be considered multi-line."
666 666 |
667 667 |
668 668 |
668 668 |

View File

@@ -1,55 +0,0 @@
---
source: crates/ruff_linter/src/rules/pydocstyle/mod.rs
---
D400_415.py:2:5: D400 First line should end with a period
|
1 | def f():
2 | "Here's a line ending in a question mark?"
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ D400
3 | ...
|
= help: Add period
D400_415.py:7:5: D400 First line should end with a period
|
6 | def f():
7 | """Here's a line ending in an exclamation mark!"""
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ D400
8 | ...
|
= help: Add period
D400_415.py:11:5: D400 First line should end with a period
|
10 | def f():
11 | """Here's a line ending in a colon:"""
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ D400
12 | ...
|
= help: Add period
D400_415.py:15:5: D400 First line should end with a period
|
14 | def f():
15 | """Here's a line ending in a semi colon;"""
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ D400
16 | ...
|
= help: Add period
D400_415.py:19:5: D400 [*] First line should end with a period
|
18 | def f():
19 | """Here's a line ending with a whitespace """
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ D400
20 | ...
|
= help: Add period
Unsafe fix
16 16 | ...
17 17 |
18 18 | def f():
19 |- """Here's a line ending with a whitespace """
19 |+ """Here's a line ending with a whitespace. """
20 20 | ...

View File

@@ -1,18 +0,0 @@
---
source: crates/ruff_linter/src/rules/pydocstyle/mod.rs
---
D402.py:2:5: D402 First line should not be the function's signature
|
1 | def foo():
2 | """Returns foo()."""
| ^^^^^^^^^^^^^^^^^^^^ D402
3 |
4 | def foo():
|
D402.py:8:5: D402 First line should not be the function's signature
|
7 | def foo():
8 | """"Use this function; foo()."""
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ D402
|

View File

@@ -1,37 +0,0 @@
---
source: crates/ruff_linter/src/rules/pydocstyle/mod.rs
---
D400_415.py:11:5: D415 First line should end with a period, question mark, or exclamation point
|
10 | def f():
11 | """Here's a line ending in a colon:"""
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ D415
12 | ...
|
= help: Add closing punctuation
D400_415.py:15:5: D415 First line should end with a period, question mark, or exclamation point
|
14 | def f():
15 | """Here's a line ending in a semi colon;"""
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ D415
16 | ...
|
= help: Add closing punctuation
D400_415.py:19:5: D415 [*] First line should end with a period, question mark, or exclamation point
|
18 | def f():
19 | """Here's a line ending with a whitespace """
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ D415
20 | ...
|
= help: Add closing punctuation
Unsafe fix
16 16 | ...
17 17 |
18 18 | def f():
19 |- """Here's a line ending with a whitespace """
19 |+ """Here's a line ending with a whitespace. """
20 20 | ...

View File

@@ -1,37 +0,0 @@
---
source: crates/ruff_linter/src/rules/pydocstyle/mod.rs
---
D415.py:11:5: D415 First line should end with a period, question mark, or exclamation point
|
10 | def f():
11 | """Here's a line ending in a colon:"""
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ D415
12 | ...
|
= help: Add closing punctuation
D415.py:15:5: D415 First line should end with a period, question mark, or exclamation point
|
14 | def f():
15 | """Here's a line ending in a semi colon;"""
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ D415
16 | ...
|
= help: Add closing punctuation
D415.py:19:5: D415 [*] First line should end with a period, question mark, or exclamation point
|
18 | def f():
19 | """Here's a line ending with a whitespace """
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ D415
20 | ...
|
= help: Add closing punctuation
Unsafe fix
16 16 | ...
17 17 |
18 18 | def f():
19 |- """Here's a line ending with a whitespace """
19 |+ """Here's a line ending with a whitespace. """
20 20 | ...

View File

@@ -11,8 +11,7 @@ use ruff_text_size::{Ranged, TextLen};
/// the string to a slice after checking `.startswith()` or `.endswith()`, respectively.
///
/// ## Why is this bad?
/// The methods [`str.removeprefix`](https://docs.python.org/3/library/stdtypes.html#str.removeprefix)
/// and [`str.removesuffix`](https://docs.python.org/3/library/stdtypes.html#str.removesuffix),
/// The methods [`str.removeprefix`] and [`str.removesuffix`],
/// introduced in Python 3.9, have the same behavior
/// and are more readable and efficient.
///
@@ -34,6 +33,9 @@ use ruff_text_size::{Ranged, TextLen};
/// ```python
/// text = text.removeprefix("pre")
/// ```
///
/// [`str.removeprefix`]: https://docs.python.org/3/library/stdtypes.html#str.removeprefix
/// [`str.removesuffix`]: https://docs.python.org/3/library/stdtypes.html#str.removesuffix
#[violation]
pub struct SliceToRemovePrefixOrSuffix {
string: String,
@@ -246,27 +248,6 @@ fn affix_removal_data<'a>(
return None;
}
let slice = slice.as_slice_expr()?;
// Exit early if slice step is...
if slice
.step
.as_deref()
// present and
.is_some_and(|step| match step {
// not equal to 1
ast::Expr::NumberLiteral(ast::ExprNumberLiteral {
value: ast::Number::Int(x),
..
}) => x.as_u8() != Some(1),
// and not equal to `None` or `True`
ast::Expr::NoneLiteral(_)
| ast::Expr::BooleanLiteral(ast::ExprBooleanLiteral { value: true, .. }) => false,
_ => true,
})
{
return None;
};
let compr_test_expr = ast::comparable::ComparableExpr::from(
&test.as_call_expr()?.func.as_attribute_expr()?.value,
);

View File

@@ -166,8 +166,6 @@ FURB188.py:154:12: FURB188 [*] Prefer `removesuffix` over conditionally replacin
153 |
154 | return filename[:-builtins_len(extension)] if filename.endswith(extension) else filename
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ FURB188
155 |
156 | def okay_steps():
|
= help: Use removesuffix instead of ternary expression conditional upon endswith.
@@ -177,77 +175,3 @@ FURB188.py:154:12: FURB188 [*] Prefer `removesuffix` over conditionally replacin
153 153 |
154 |- return filename[:-builtins_len(extension)] if filename.endswith(extension) else filename
154 |+ return filename.removesuffix(extension)
155 155 |
156 156 | def okay_steps():
157 157 | text = "!x!y!z"
FURB188.py:158:5: FURB188 [*] Prefer `removeprefix` over conditionally replacing with slice.
|
156 | def okay_steps():
157 | text = "!x!y!z"
158 | if text.startswith("!"):
| _____^
159 | | text = text[1::1]
| |_________________________^ FURB188
160 | if text.startswith("!"):
161 | text = text[1::True]
|
= help: Use removeprefix instead of assignment conditional upon startswith.
Safe fix
155 155 |
156 156 | def okay_steps():
157 157 | text = "!x!y!z"
158 |- if text.startswith("!"):
159 |- text = text[1::1]
158 |+ text = text.removeprefix("!")
160 159 | if text.startswith("!"):
161 160 | text = text[1::True]
162 161 | if text.startswith("!"):
FURB188.py:160:5: FURB188 [*] Prefer `removeprefix` over conditionally replacing with slice.
|
158 | if text.startswith("!"):
159 | text = text[1::1]
160 | if text.startswith("!"):
| _____^
161 | | text = text[1::True]
| |____________________________^ FURB188
162 | if text.startswith("!"):
163 | text = text[1::None]
|
= help: Use removeprefix instead of assignment conditional upon startswith.
Safe fix
157 157 | text = "!x!y!z"
158 158 | if text.startswith("!"):
159 159 | text = text[1::1]
160 |- if text.startswith("!"):
161 |- text = text[1::True]
160 |+ text = text.removeprefix("!")
162 161 | if text.startswith("!"):
163 162 | text = text[1::None]
164 163 | print(text)
FURB188.py:162:5: FURB188 [*] Prefer `removeprefix` over conditionally replacing with slice.
|
160 | if text.startswith("!"):
161 | text = text[1::True]
162 | if text.startswith("!"):
| _____^
163 | | text = text[1::None]
| |____________________________^ FURB188
164 | print(text)
|
= help: Use removeprefix instead of assignment conditional upon startswith.
Safe fix
159 159 | text = text[1::1]
160 160 | if text.startswith("!"):
161 161 | text = text[1::True]
162 |- if text.startswith("!"):
163 |- text = text[1::None]
162 |+ text = text.removeprefix("!")
164 163 | print(text)
165 164 |
166 165 |

View File

@@ -61,7 +61,7 @@ pub struct MissingFStringSyntax;
impl AlwaysFixableViolation for MissingFStringSyntax {
#[derive_message_formats]
fn message(&self) -> String {
format!(r"Possible f-string without an `f` prefix")
format!(r#"Possible f-string without an `f` prefix"#)
}
fn fix_title(&self) -> String {

View File

@@ -285,7 +285,7 @@ impl Display for LinterSettings {
self.target_version | debug,
self.preview,
self.explicit_preview_rules,
self.extension | debug,
self.extension | nested,
self.allowed_confusables | array,
self.builtins | array,

View File

@@ -478,31 +478,46 @@ impl From<ExtensionPair> for (String, Language) {
(value.extension, value.language)
}
}
#[derive(Debug, Clone, Default, CacheKey)]
pub struct ExtensionMapping(FxHashMap<String, Language>);
pub struct ExtensionMapping {
mapping: FxHashMap<String, Language>,
}
impl ExtensionMapping {
/// Return the [`Language`] for the given file.
pub fn get(&self, path: &Path) -> Option<Language> {
let ext = path.extension()?.to_str()?;
self.0.get(ext).copied()
self.mapping.get(ext).copied()
}
}
impl Display for ExtensionMapping {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
display_settings! {
formatter = f,
namespace = "linter.extension",
fields = [
self.mapping | debug
]
}
Ok(())
}
}
impl From<FxHashMap<String, Language>> for ExtensionMapping {
fn from(value: FxHashMap<String, Language>) -> Self {
Self(value)
Self { mapping: value }
}
}
impl FromIterator<ExtensionPair> for ExtensionMapping {
fn from_iter<T: IntoIterator<Item = ExtensionPair>>(iter: T) -> Self {
Self(
iter.into_iter()
Self {
mapping: iter
.into_iter()
.map(|pair| (pair.extension, pair.language))
.collect(),
)
}
}
}

View File

@@ -82,6 +82,30 @@ func([1, 2, 3,], bar)
func([(x, y,) for (x, y) in z], bar)
# Ensure that return type annotations (which use `parenthesize_if_expands`) are also hugged.
def func() -> [1, 2, 3,]:
pass
def func() -> ([1, 2, 3,]):
pass
def func() -> ([1, 2, 3,]):
pass
def func() -> ( # comment
[1, 2, 3,]):
pass
def func() -> (
[1, 2, 3,] # comment
):
pass
def func() -> (
[1, 2, 3,]
# comment
):
pass
# Ensure that nested lists are hugged.
func([

View File

@@ -458,108 +458,3 @@ def foo(x: S) -> S: ...
@decorator # comment
def foo(x: S) -> S: ...
# Regression tests for https://github.com/astral-sh/ruff/issues/13369
def foo(
arg: ( # comment with non-return annotation
int
# comment with non-return annotation
),
):
pass
def foo(
arg: ( # comment with non-return annotation
int
| range
| memoryview
# comment with non-return annotation
),
):
pass
def foo(arg: (
int
# only after
)):
pass
# Asserts that "incorrectly" placed comments don't *move* by fixing https://github.com/astral-sh/ruff/issues/13369
def foo(
# comment with non-return annotation
# comment with non-return annotation
arg: (int),
):
pass
# Comments between *args and **kwargs
def args_no_type_annotation(*
# comment
args): pass
def args_type_annotation(*
# comment
args: int): pass
def args_trailing_end_of_line_comment(* # comment
args): pass
def args_blank_line_comment(*
# comment
args): pass
def args_with_leading_parameter_comment(
# What comes next are arguments
*
# with an inline comment
args): pass
def kargs_no_type_annotation(**
# comment
kwargs): pass
def kwargs_type_annotation(**
# comment
kwargs: int): pass
def args_many_comments(
# before
*
# between * and name
args # trailing args
# after name
): pass
def args_many_comments_with_type_annotation(
# before
*
# between * and name
args # trailing args
# before colon
: # after colon
# before type
int # trailing type
# after type
): pass
def args_with_type_annotations_no_after_colon_comment(
# before
*
# between * and name
args # trailing args
# before colon
:
# before type
int # trailing type
# after type
): pass

View File

@@ -1,176 +0,0 @@
# Tests for functions without parameters or a dangling comment
# Black's overall behavior is to:
# 1. Print the return type on the same line as the function header if it fits
# 2. Parenthesize the return type if it doesn't fit.
# The exception to this are subscripts, see below
#########################################################################################
# Return types that use NeedsParantheses::BestFit layout with the exception of subscript
#########################################################################################
# String return type that fits on the same line
def no_parameters_string_return_type() -> "ALongIdentifierButDoesntGetParenthesized":
pass
# String return type that exceeds the line length
def no_parameters_overlong_string_return_type() -> (
"ALongIdentifierButDoesntGetParenthesized"
):
pass
# Name return type that fits on the same line as the function header
def no_parameters_name_return_type() -> ALongIdentifierButDoesntGetParenthesized:
pass
# Name return type that exceeds the configured line width
def no_parameters_overlong_name_return_type() -> (
ALongIdentifierButDoesntGetParenthesized
):
pass
#########################################################################################
# Unions
#########################################################################################
def test_return_overlong_union() -> (
A | B | C | DDDDDDDDDDDDDDDDDDDDDDDD | EEEEEEEEEEEEEEEEEEEEEE
):
pass
def test_return_union_with_elements_exceeding_length() -> (
A
| B
| Ccccccccccccccccccccccccccccccccc
| DDDDDDDDDDDDDDDDDDDDDDDD
| EEEEEEEEEEEEEEEEEEEEEE
):
pass
#########################################################################################
# Multiline strings (NeedsParentheses::Never)
#########################################################################################
def test_return_multiline_string_type_annotation() -> """str
| list[str]
""":
pass
def test_return_multiline_string_binary_expression_return_type_annotation() -> """str
| list[str]
""" + "b":
pass
#########################################################################################
# Implicit concatenated strings (NeedsParentheses::Multiline)
#########################################################################################
def test_implicit_concatenated_string_return_type() -> "str" "bbbbbbbbbbbbbbbb":
pass
def test_overlong_implicit_concatenated_string_return_type() -> (
"liiiiiiiiiiiisssssst[str]" "bbbbbbbbbbbbbbbb"
):
pass
def test_extralong_implicit_concatenated_string_return_type() -> (
"liiiiiiiiiiiisssssst[str]"
"bbbbbbbbbbbbbbbbbbbb"
"cccccccccccccccccccccccccccccccccccccc"
):
pass
#########################################################################################
# Subscript
#########################################################################################
def no_parameters_subscript_return_type() -> list[str]:
pass
# 1. Black tries to keep the list flat by parenthesizing the list as shown below even when the `list` identifier
# fits on the header line. IMO, this adds unnecessary parentheses that can be avoided
# and supporting it requires extra complexity (best_fitting! layout)
def no_parameters_overlong_subscript_return_type_with_single_element() -> (
list[xxxxxxxxxxxxxxxxxxxxx]
):
pass
# 2. Black: Removes the parentheses when the subscript fits after breaking individual elements.
# This is somewhat wasteful because the below list actually fits on a single line when splitting after
# `list[`. It is also inconsistent with how subscripts are normally formatted where it first tries to fit the entire subscript,
# then splits after `list[` but keeps all elements on a single line, and finally, splits after each element.
# IMO: Splitting after the `list[` and trying to keep the elements together when possible seems more consistent.
def no_parameters_subscript_return_type_multiple_elements() -> list[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]:
pass
# Black removes the parentheses even the elements exceed the configured line width.
# So does Ruff.
def no_parameters_subscript_return_type_multiple_overlong_elements() -> list[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]:
pass
# Black parenthesizes the subscript if its name doesn't fit on the header line.
# So does Ruff
def no_parameters_subscriptreturn_type_with_overlong_value_() -> (
liiiiiiiiiiiiiiiiiiiiist[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]
):
pass
# Black: It removes the parentheses when the subscript contains multiple elements as
# `no_parameters_subscript_return_type_multiple_overlong_elements` shows. However, it doesn't
# when the subscript contains a single element. Black then keeps the parentheses.
# Ruff removes the parentheses in this case for consistency.
def no_parameters_overlong_subscript_return_type_with_overlong_single_element() -> (
list[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]
):
pass
#########################################################################################
# can_omit_optional_parentheses_layout
#########################################################################################
def test_binary_expression_return_type_annotation() -> aaaaaaaaaaaaaaaaaaaaaaaaaa > [
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbb,
]:
pass
#########################################################################################
# Other
#########################################################################################
# Don't paranthesize lists
def f() -> [
a,
b,
]: pass

View File

@@ -1,195 +0,0 @@
# Tests for functions with parameters.
# The main difference to functions without parameters is that the return type never gets
# parenthesized for values that can't be split (NeedsParentheses::BestFit).
#########################################################################################
# Return types that use NeedsParantheses::BestFit layout with the exception of subscript
#########################################################################################
# String return type that fits on the same line
def parameters_string_return_type(a) -> "ALongIdentifierButDoesntGetParenthesized":
pass
# String return type that exceeds the line length
def parameters_overlong_string_return_type(
a,
) -> "ALongIdentifierButDoesntGetParenthesized":
pass
# Name return type that fits on the same line as the function header
def parameters_name_return_type(a) -> ALongIdentifierButDoesntGetParenthesized:
pass
# Name return type that exceeds the configured line width
def parameters_overlong_name_return_type(
a,
) -> ALongIdentifierButDoesntGetParenthesized:
pass
#########################################################################################
# Unions
#########################################################################################
def test_return_overlong_union(
a,
) -> A | B | C | DDDDDDDDDDDDDDDDDDDDDDDD | EEEEEEEEEEEEEEEEEEEEEE:
pass
def test_return_union_with_elements_exceeding_length(
a,
) -> (
A
| B
| Ccccccccccccccccccccccccccccccccc
| DDDDDDDDDDDDDDDDDDDDDDDD
| EEEEEEEEEEEEEEEEEEEEEE
):
pass
#########################################################################################
# Multiline stirngs (NeedsParentheses::Never)
#########################################################################################
def test_return_multiline_string_type_annotation(a) -> """str
| list[str]
""":
pass
def test_return_multiline_string_binary_expression_return_type_annotation(a) -> """str
| list[str]
""" + "b":
pass
#########################################################################################
# Implicit concatenated strings (NeedsParentheses::Multiline)
#########################################################################################
def test_implicit_concatenated_string_return_type(a) -> "str" "bbbbbbbbbbbbbbbb":
pass
def test_overlong_implicit_concatenated_string_return_type(
a,
) -> "liiiiiiiiiiiisssssst[str]" "bbbbbbbbbbbbbbbb":
pass
def test_extralong_implicit_concatenated_string_return_type(
a,
) -> (
"liiiiiiiiiiiisssssst[str]"
"bbbbbbbbbbbbbbbbbbbb"
"cccccccccccccccccccccccccccccccccccccc"
):
pass
#########################################################################################
# Subscript
#########################################################################################
def parameters_subscript_return_type(a) -> list[str]:
pass
# Unlike with no-parameters, the return type gets never parenthesized.
def parameters_overlong_subscript_return_type_with_single_element(
a
) -> list[xxxxxxxxxxxxxxxxxxxxx]:
pass
def parameters_subscript_return_type_multiple_elements(a) -> list[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]:
pass
def parameters_subscript_return_type_multiple_overlong_elements(a) -> list[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]:
pass
def parameters_subscriptreturn_type_with_overlong_value_(
a
) -> liiiiiiiiiiiiiiiiiiiiist[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]:
pass
def parameters_overlong_subscript_return_type_with_overlong_single_element(
a
) -> list[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]:
pass
# Not even in this very ridiculous case
def a():
def b():
def c():
def d():
def e():
def f():
def g():
def h():
def i():
def j():
def k():
def l():
def m():
def n():
def o():
def p():
def q():
def r():
def s():
def t():
def u():
def thiiiiiiiiiiiiiiiiiis_iiiiiiiiiiiiiiiiiiiiiiiiiiiiiis_veeeeeeeeeeedooooong(
a,
) -> list[
int,
float
]: ...
#########################################################################################
# Magic comma in return type
#########################################################################################
# Black only splits the return type. Ruff also breaks the parameters. This is probably a bug.
def parameters_subscriptreturn_type_with_overlong_value_(a) -> liiiiiiiiiiiiiiiiiiiiist[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
]:
pass
#########################################################################################
# can_omit_optional_parentheses_layout
#########################################################################################
def test_return_multiline_string_binary_expression_return_type_annotation(
a,
) -> aaaaaaaaaaaaaaaaaaaaaaaaaa > [
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbb,
]:
pass

View File

@@ -2,12 +2,10 @@ use std::cmp::Ordering;
use ast::helpers::comment_indentation_after;
use ruff_python_ast::whitespace::indentation;
use ruff_python_ast::{
self as ast, AnyNodeRef, Comprehension, Expr, ModModule, Parameter, Parameters,
};
use ruff_python_ast::{self as ast, AnyNodeRef, Comprehension, Expr, ModModule, Parameters};
use ruff_python_trivia::{
find_only_token_in_range, first_non_trivia_token, indentation_at_offset, BackwardsTokenizer,
CommentRanges, SimpleToken, SimpleTokenKind, SimpleTokenizer,
find_only_token_in_range, indentation_at_offset, BackwardsTokenizer, CommentRanges,
SimpleToken, SimpleTokenKind, SimpleTokenizer,
};
use ruff_source_file::Locator;
use ruff_text_size::{Ranged, TextLen, TextRange};
@@ -204,7 +202,14 @@ fn handle_enclosed_comment<'a>(
}
})
}
AnyNodeRef::Parameter(parameter) => handle_parameter_comment(comment, parameter, locator),
AnyNodeRef::Parameter(parameter) => {
// E.g. a comment between the `*` or `**` and the parameter name.
if comment.preceding_node().is_none() || comment.following_node().is_none() {
CommentPlacement::leading(parameter, comment)
} else {
CommentPlacement::Default(comment)
}
}
AnyNodeRef::Arguments(_) | AnyNodeRef::TypeParams(_) | AnyNodeRef::PatternArguments(_) => {
handle_bracketed_end_of_line_comment(comment, locator)
}
@@ -755,41 +760,6 @@ fn handle_parameters_separator_comment<'a>(
CommentPlacement::Default(comment)
}
/// Associate comments that come before the `:` starting the type annotation or before the
/// parameter's name for unannotated parameters as leading parameter-comments.
///
/// The parameter's name isn't a node to which comments can be associated.
/// That's why we pull out all comments that come before the expression name or the type annotation
/// and make them leading parameter comments. For example:
/// * `* # comment\nargs`
/// * `arg # comment\n : int`
///
/// Associate comments with the type annotation when possible.
fn handle_parameter_comment<'a>(
comment: DecoratedComment<'a>,
parameter: &'a Parameter,
locator: &Locator,
) -> CommentPlacement<'a> {
if parameter.annotation.as_deref().is_some() {
let colon = first_non_trivia_token(parameter.name.end(), locator.contents()).expect(
"A annotated parameter should have a colon following its name when it is valid syntax.",
);
assert_eq!(colon.kind(), SimpleTokenKind::Colon);
if comment.start() < colon.start() {
// The comment is before the colon, pull it out and make it a leading comment of the parameter.
CommentPlacement::leading(parameter, comment)
} else {
CommentPlacement::Default(comment)
}
} else if comment.start() < parameter.name.start() {
CommentPlacement::leading(parameter, comment)
} else {
CommentPlacement::Default(comment)
}
}
/// Handles comments between the left side and the operator of a binary expression (trailing comments of the left),
/// and trailing end-of-line comments that are on the same line as the operator.
///

View File

@@ -8,7 +8,6 @@ use crate::expression::parentheses::{
};
use crate::expression::CallChainLayout;
use crate::prelude::*;
use crate::preview::is_empty_parameters_no_unnecessary_parentheses_around_return_value_enabled;
#[derive(Default)]
pub struct FormatExprSubscript {
@@ -104,25 +103,19 @@ impl NeedsParentheses for ExprSubscript {
} else {
match self.value.needs_parentheses(self.into(), context) {
OptionalParentheses::BestFit => {
if let Some(function) = parent.as_stmt_function_def() {
if function.returns.as_deref().is_some_and(|returns| {
AnyNodeRef::ptr_eq(returns.into(), self.into())
}) {
if is_empty_parameters_no_unnecessary_parentheses_around_return_value_enabled(context) &&
function.parameters.is_empty() && !context.comments().has(&*function.parameters) {
// Apply the `optional_parentheses` layout when the subscript
// is in a return type position of a function without parameters.
// This ensures the subscript is parenthesized if it has a very
// long name that goes over the line length limit.
return OptionalParentheses::Multiline
}
// Don't use the best fitting layout for return type annotation because it results in the
// return type expanding before the parameters.
return OptionalParentheses::Never;
}
if parent.as_stmt_function_def().is_some_and(|function_def| {
function_def
.returns
.as_deref()
.and_then(Expr::as_subscript_expr)
== Some(self)
}) {
// Don't use the best fitting layout for return type annotation because it results in the
// return type expanding before the parameters.
OptionalParentheses::Never
} else {
OptionalParentheses::BestFit
}
OptionalParentheses::BestFit
}
parentheses => parentheses,
}

View File

@@ -19,10 +19,7 @@ use crate::expression::parentheses::{
OptionalParentheses, Parentheses, Parenthesize,
};
use crate::prelude::*;
use crate::preview::{
is_empty_parameters_no_unnecessary_parentheses_around_return_value_enabled,
is_hug_parens_with_braces_and_square_brackets_enabled,
};
use crate::preview::is_hug_parens_with_braces_and_square_brackets_enabled;
mod binary_like;
pub(crate) mod expr_attribute;
@@ -327,7 +324,7 @@ fn format_with_parentheses_comments(
)
}
/// Wraps an expression in optional parentheses except if its [`NeedsParentheses::needs_parentheses`] implementation
/// Wraps an expression in an optional parentheses except if its [`NeedsParentheses::needs_parentheses`] implementation
/// indicates that it is okay to omit the parentheses. For example, parentheses can always be omitted for lists,
/// because they already bring their own parentheses.
pub(crate) fn maybe_parenthesize_expression<'a, T>(
@@ -385,38 +382,23 @@ impl Format<PyFormatContext<'_>> for MaybeParenthesizeExpression<'_> {
OptionalParentheses::Always => OptionalParentheses::Always,
// The reason to add parentheses is to avoid a syntax error when breaking an expression over multiple lines.
// Therefore, it is unnecessary to add an additional pair of parentheses if an outer expression
// is parenthesized. Unless, it's the `Parenthesize::IfBreaksParenthesizedNested` layout
// where parenthesizing nested `maybe_parenthesized_expression` is explicitly desired.
_ if f.context().node_level().is_parenthesized() => {
if !is_empty_parameters_no_unnecessary_parentheses_around_return_value_enabled(
f.context(),
) {
OptionalParentheses::Never
} else if matches!(parenthesize, Parenthesize::IfBreaksParenthesizedNested) {
return parenthesize_if_expands(
&expression.format().with_options(Parentheses::Never),
)
.with_indent(!is_expression_huggable(expression, f.context()))
.fmt(f);
} else {
return expression.format().with_options(Parentheses::Never).fmt(f);
}
}
// is parenthesized.
_ if f.context().node_level().is_parenthesized() => OptionalParentheses::Never,
needs_parentheses => needs_parentheses,
};
match needs_parentheses {
OptionalParentheses::Multiline => match parenthesize {
Parenthesize::IfBreaksParenthesized | Parenthesize::IfBreaksParenthesizedNested if !is_empty_parameters_no_unnecessary_parentheses_around_return_value_enabled(f.context()) => {
Parenthesize::IfBreaksOrIfRequired => {
parenthesize_if_expands(&expression.format().with_options(Parentheses::Never))
.fmt(f)
}
Parenthesize::IfRequired => {
expression.format().with_options(Parentheses::Never).fmt(f)
}
Parenthesize::Optional | Parenthesize::IfBreaks | Parenthesize::IfBreaksParenthesized | Parenthesize::IfBreaksParenthesizedNested => {
Parenthesize::Optional | Parenthesize::IfBreaks => {
if can_omit_optional_parentheses(expression, f.context()) {
optional_parentheses(&expression.format().with_options(Parentheses::Never))
.fmt(f)
@@ -429,7 +411,7 @@ impl Format<PyFormatContext<'_>> for MaybeParenthesizeExpression<'_> {
}
},
OptionalParentheses::BestFit => match parenthesize {
Parenthesize::IfBreaksParenthesized | Parenthesize::IfBreaksParenthesizedNested => {
Parenthesize::IfBreaksOrIfRequired => {
parenthesize_if_expands(&expression.format().with_options(Parentheses::Never))
.fmt(f)
}
@@ -453,13 +435,13 @@ impl Format<PyFormatContext<'_>> for MaybeParenthesizeExpression<'_> {
}
},
OptionalParentheses::Never => match parenthesize {
Parenthesize::IfBreaksParenthesized | Parenthesize::IfBreaksParenthesizedNested if !is_empty_parameters_no_unnecessary_parentheses_around_return_value_enabled(f.context()) => {
Parenthesize::IfBreaksOrIfRequired => {
parenthesize_if_expands(&expression.format().with_options(Parentheses::Never))
.with_indent(!is_expression_huggable(expression, f.context()))
.fmt(f)
}
Parenthesize::Optional | Parenthesize::IfBreaks | Parenthesize::IfRequired | Parenthesize::IfBreaksParenthesized | Parenthesize::IfBreaksParenthesizedNested => {
Parenthesize::Optional | Parenthesize::IfBreaks | Parenthesize::IfRequired => {
expression.format().with_options(Parentheses::Never).fmt(f)
}
},

View File

@@ -56,15 +56,10 @@ pub(crate) enum Parenthesize {
/// Adding parentheses is desired to prevent the comments from wandering.
IfRequired,
/// Same as [`Self::IfBreaks`] except that it uses [`parenthesize_if_expands`] for expressions
/// with the layout [`NeedsParentheses::BestFit`] which is used by non-splittable
/// expressions like literals, name, and strings.
IfBreaksParenthesized,
/// Same as [`Self::IfBreaksParenthesized`] but uses [`parenthesize_if_expands`] for nested
/// [`maybe_parenthesized_expression`] calls unlike other layouts that always omit parentheses
/// when outer parentheses are present.
IfBreaksParenthesizedNested,
/// Parenthesizes the expression if the group doesn't fit on a line (e.g., even name expressions are parenthesized), or if
/// the expression doesn't break, but _does_ reports that it always requires parentheses in this position (e.g., walrus
/// operators in function return annotations).
IfBreaksOrIfRequired,
}
impl Parenthesize {
@@ -421,25 +416,27 @@ impl Format<PyFormatContext<'_>> for FormatEmptyParenthesized<'_> {
debug_assert!(self.comments[end_of_line_split..]
.iter()
.all(|comment| comment.line_position().is_own_line()));
group(&format_args![
token(self.left),
// end-of-line comments
trailing_comments(&self.comments[..end_of_line_split]),
// Avoid unstable formatting with
// ```python
// x = () - (#
// )
// ```
// Without this the comment would go after the empty tuple first, but still expand
// the bin op. In the second formatting pass they are trailing bin op comments
// so the bin op collapse. Suboptimally we keep parentheses around the bin op in
// either case.
(!self.comments[..end_of_line_split].is_empty()).then_some(hard_line_break()),
// own line comments, which need to be indented
soft_block_indent(&dangling_comments(&self.comments[end_of_line_split..])),
token(self.right)
])
.fmt(f)
write!(
f,
[group(&format_args![
token(self.left),
// end-of-line comments
trailing_comments(&self.comments[..end_of_line_split]),
// Avoid unstable formatting with
// ```python
// x = () - (#
// )
// ```
// Without this the comment would go after the empty tuple first, but still expand
// the bin op. In the second formatting pass they are trailing bin op comments
// so the bin op collapse. Suboptimally we keep parentheses around the bin op in
// either case.
(!self.comments[..end_of_line_split].is_empty()).then_some(hard_line_break()),
// own line comments, which need to be indented
soft_block_indent(&dangling_comments(&self.comments[end_of_line_split..])),
token(self.right)
])]
)
}
}

View File

@@ -1,7 +1,8 @@
use crate::expression::parentheses::is_expression_parenthesized;
use crate::prelude::*;
use ruff_formatter::write;
use ruff_python_ast::Parameter;
use crate::prelude::*;
#[derive(Default)]
pub struct FormatParameter;
@@ -15,22 +16,8 @@ impl FormatNodeRule<Parameter> for FormatParameter {
name.format().fmt(f)?;
if let Some(annotation) = annotation.as_deref() {
token(":").fmt(f)?;
if f.context().comments().has_leading(annotation)
&& !is_expression_parenthesized(
annotation.into(),
f.context().comments().ranges(),
f.context().source(),
)
{
hard_line_break().fmt(f)?;
} else {
space().fmt(f)?;
}
annotation.format().fmt(f)?;
if let Some(annotation) = annotation {
write!(f, [token(":"), space(), annotation.format()])?;
}
Ok(())

View File

@@ -112,7 +112,7 @@ impl FormatNodeRule<WithItem> for FormatWithItem {
maybe_parenthesize_expression(
context_expr,
item,
Parenthesize::IfBreaksParenthesizedNested,
Parenthesize::IfBreaksOrIfRequired,
)
.fmt(f)?;
} else {

View File

@@ -29,10 +29,3 @@ pub(crate) fn is_comprehension_leading_expression_comments_same_line_enabled(
) -> bool {
context.is_preview()
}
/// See [#9447](https://github.com/astral-sh/ruff/issues/9447)
pub(crate) fn is_empty_parameters_no_unnecessary_parentheses_around_return_value_enabled(
context: &PyFormatContext,
) -> bool {
context.is_preview()
}

View File

@@ -1,3 +1,6 @@
use ruff_formatter::write;
use ruff_python_ast::{NodeKind, StmtFunctionDef};
use crate::comments::format::{
empty_lines_after_leading_comments, empty_lines_before_trailing_comments,
};
@@ -7,8 +10,6 @@ use crate::prelude::*;
use crate::statement::clause::{clause_body, clause_header, ClauseHeader};
use crate::statement::stmt_class_def::FormatDecorators;
use crate::statement::suite::SuiteKind;
use ruff_formatter::write;
use ruff_python_ast::{NodeKind, StmtFunctionDef};
#[derive(Default)]
pub struct FormatStmtFunctionDef;
@@ -111,23 +112,23 @@ fn format_function_header(f: &mut PyFormatter, item: &StmtFunctionDef) -> Format
write!(f, [token("def"), space(), name.format()])?;
if let Some(type_params) = type_params.as_ref() {
type_params.format().fmt(f)?;
write!(f, [type_params.format()])?;
}
let format_inner = format_with(|f: &mut PyFormatter| {
parameters.format().fmt(f)?;
write!(f, [parameters.format()])?;
if let Some(return_annotation) = returns.as_deref() {
if let Some(return_annotation) = returns.as_ref() {
write!(f, [space(), token("->"), space()])?;
if return_annotation.is_tuple_expr() {
let parentheses = if comments.has_leading(return_annotation) {
let parentheses = if comments.has_leading(return_annotation.as_ref()) {
Parentheses::Always
} else {
Parentheses::Never
};
return_annotation.format().with_options(parentheses).fmt(f)
} else if comments.has_trailing(return_annotation) {
write!(f, [return_annotation.format().with_options(parentheses)])?;
} else if comments.has_trailing(return_annotation.as_ref()) {
// Intentionally parenthesize any return annotations with trailing comments.
// This avoids an instability in cases like:
// ```python
@@ -155,17 +156,15 @@ fn format_function_header(f: &mut PyFormatter, item: &StmtFunctionDef) -> Format
// requires that the parent be aware of how the child is formatted, which
// is challenging. As a compromise, we break those expressions to avoid an
// instability.
return_annotation
.format()
.with_options(Parentheses::Always)
.fmt(f)
write!(
f,
[return_annotation.format().with_options(Parentheses::Always)]
)?;
} else {
let parenthesize = if parameters.is_empty() && !comments.has(parameters.as_ref()) {
// If the parameters are empty, add parentheses around literal expressions
// (any non splitable expression) but avoid parenthesizing subscripts and
// other parenthesized expressions unless necessary.
Parenthesize::IfBreaksParenthesized
// If the parameters are empty, add parentheses if the return annotation
// breaks at all.
Parenthesize::IfBreaksOrIfRequired
} else {
// Otherwise, use our normal rules for parentheses, which allows us to break
// like:
@@ -180,11 +179,17 @@ fn format_function_header(f: &mut PyFormatter, item: &StmtFunctionDef) -> Format
// ```
Parenthesize::IfBreaks
};
maybe_parenthesize_expression(return_annotation, item, parenthesize).fmt(f)
write!(
f,
[maybe_parenthesize_expression(
return_annotation,
item,
parenthesize
)]
)?;
}
} else {
Ok(())
}
Ok(())
});
group(&format_inner).fmt(f)

View File

@@ -62,7 +62,7 @@ impl Transformer for Normalizer {
fn visit_string_literal(&self, string_literal: &mut ast::StringLiteral) {
static STRIP_DOC_TESTS: Lazy<Regex> = Lazy::new(|| {
Regex::new(
r"(?mx)
r#"(?mx)
(
# strip doctest PS1 prompt lines
^\s*>>>\s.*(\n|$)
@@ -71,7 +71,7 @@ impl Transformer for Normalizer {
# Also handles the case of an empty ... line.
^\s*\.\.\.((\n|$)|\s.*(\n|$))
)+
",
"#,
)
.unwrap()
});
@@ -80,11 +80,11 @@ impl Transformer for Normalizer {
// impossible) to detect a reStructuredText block with a simple
// regex. So we just look for the start of a block and remove
// everything after it. Talk about a hammer.
Regex::new(r"::(?s:.*)").unwrap()
Regex::new(r#"::(?s:.*)"#).unwrap()
});
static STRIP_MARKDOWN_BLOCKS: Lazy<Regex> = Lazy::new(|| {
// This covers more than valid Markdown blocks, but that's OK.
Regex::new(r"(```|~~~)\p{any}*(```|~~~|$)").unwrap()
Regex::new(r#"(```|~~~)\p{any}*(```|~~~|$)"#).unwrap()
});
// Start by (1) stripping everything that looks like a code

View File

@@ -155,7 +155,20 @@ def SimplePyFn(
```diff
--- Black
+++ Ruff
@@ -36,7 +36,9 @@
@@ -29,14 +29,18 @@
# magic trailing comma in return type, no params
-def a() -> tuple[
- a,
- b,
-]: ...
+def a() -> (
+ tuple[
+ a,
+ b,
+ ]
+): ...
# magic trailing comma in return type, params
@@ -166,7 +179,26 @@ def SimplePyFn(
p,
q,
]:
@@ -93,7 +95,11 @@
@@ -68,11 +72,13 @@
# long return type, no param list
-def foo() -> list[
- Loooooooooooooooooooooooooooooooooooong,
- Loooooooooooooooooooong,
- Looooooooooooong,
-]: ...
+def foo() -> (
+ list[
+ Loooooooooooooooooooooooooooooooooooong,
+ Loooooooooooooooooooong,
+ Looooooooooooong,
+ ]
+): ...
# long function name, no param list, no return value
@@ -93,7 +99,11 @@
# unskippable type hint (??)
@@ -179,7 +211,7 @@ def SimplePyFn(
pass
@@ -112,7 +118,13 @@
@@ -112,7 +122,13 @@
# don't lose any comments (no magic)
@@ -194,7 +226,7 @@ def SimplePyFn(
... # 6
@@ -120,12 +132,18 @@
@@ -120,12 +136,18 @@
def foo( # 1
a, # 2
b,
@@ -251,10 +283,12 @@ def foo(
# magic trailing comma in return type, no params
def a() -> tuple[
a,
b,
]: ...
def a() -> (
tuple[
a,
b,
]
): ...
# magic trailing comma in return type, params
@@ -292,11 +326,13 @@ def aaaaaaaaaaaaaaaaa(
# long return type, no param list
def foo() -> list[
Loooooooooooooooooooooooooooooooooooong,
Loooooooooooooooooooong,
Looooooooooooong,
]: ...
def foo() -> (
list[
Loooooooooooooooooooooooooooooooooooong,
Loooooooooooooooooooong,
Looooooooooooong,
]
): ...
# long function name, no param list, no return value
@@ -556,3 +592,5 @@ def SimplePyFn(
Buffer[UInt8, 2],
]: ...
```

View File

@@ -142,29 +142,53 @@ variable: (
): ...
@@ -153,16 +151,18 @@
@@ -143,34 +141,31 @@
def foo(
arg: ( # comment with non-return annotation
- arg: ( # comment with non-return annotation
- int
- # comment with non-return annotation
- ),
+ # comment with non-return annotation
+ # comment with non-return annotation
+ arg: (int),
):
pass
def foo(
- arg: ( # comment with non-return annotation
- int
- | range
- | memoryview
+ int | range | memoryview
# comment with non-return annotation
),
- # comment with non-return annotation
- ),
+ # comment with non-return annotation
+ # comment with non-return annotation
+ arg: (int | range | memoryview),
):
pass
-def foo(arg: int): # only before
+def foo(
+ arg: ( # only before
+ int
+ ),
+ # only before
+ arg: (int),
+):
pass
def foo(
- arg: (
- int
- # only after
- ),
+ # only after
+ arg: (int),
):
pass
```
## Ruff Output
@@ -313,36 +337,31 @@ def foo() -> (
def foo(
arg: ( # comment with non-return annotation
int
# comment with non-return annotation
),
# comment with non-return annotation
# comment with non-return annotation
arg: (int),
):
pass
def foo(
arg: ( # comment with non-return annotation
int | range | memoryview
# comment with non-return annotation
),
# comment with non-return annotation
# comment with non-return annotation
arg: (int | range | memoryview),
):
pass
def foo(
arg: ( # only before
int
),
# only before
arg: (int),
):
pass
def foo(
arg: (
int
# only after
),
# only after
arg: (int),
):
pass

View File

@@ -88,6 +88,30 @@ func([1, 2, 3,], bar)
func([(x, y,) for (x, y) in z], bar)
# Ensure that return type annotations (which use `parenthesize_if_expands`) are also hugged.
def func() -> [1, 2, 3,]:
pass
def func() -> ([1, 2, 3,]):
pass
def func() -> ([1, 2, 3,]):
pass
def func() -> ( # comment
[1, 2, 3,]):
pass
def func() -> (
[1, 2, 3,] # comment
):
pass
def func() -> (
[1, 2, 3,]
# comment
):
pass
# Ensure that nested lists are hugged.
func([
@@ -305,6 +329,68 @@ func(
)
# Ensure that return type annotations (which use `parenthesize_if_expands`) are also hugged.
def func() -> (
[
1,
2,
3,
]
):
pass
def func() -> (
[
1,
2,
3,
]
):
pass
def func() -> (
[
1,
2,
3,
]
):
pass
def func() -> ( # comment
[
1,
2,
3,
]
):
pass
def func() -> (
[
1,
2,
3,
] # comment
):
pass
def func() -> (
[
1,
2,
3,
]
# comment
):
pass
# Ensure that nested lists are hugged.
func(
[
@@ -525,7 +611,56 @@ func(
foo(
# comment
@@ -167,56 +145,46 @@
@@ -167,33 +145,27 @@
# Ensure that return type annotations (which use `parenthesize_if_expands`) are also hugged.
-def func() -> (
- [
- 1,
- 2,
- 3,
- ]
-):
+def func() -> ([
+ 1,
+ 2,
+ 3,
+]):
pass
-def func() -> (
- [
- 1,
- 2,
- 3,
- ]
-):
+def func() -> ([
+ 1,
+ 2,
+ 3,
+]):
pass
-def func() -> (
- [
- 1,
- 2,
- 3,
- ]
-):
+def func() -> ([
+ 1,
+ 2,
+ 3,
+]):
pass
@@ -229,56 +201,46 @@
# Ensure that nested lists are hugged.
@@ -612,3 +747,6 @@ func(
-)
+])
```

View File

@@ -464,111 +464,6 @@ def foo(x: S) -> S: ...
@decorator # comment
def foo(x: S) -> S: ...
# Regression tests for https://github.com/astral-sh/ruff/issues/13369
def foo(
arg: ( # comment with non-return annotation
int
# comment with non-return annotation
),
):
pass
def foo(
arg: ( # comment with non-return annotation
int
| range
| memoryview
# comment with non-return annotation
),
):
pass
def foo(arg: (
int
# only after
)):
pass
# Asserts that "incorrectly" placed comments don't *move* by fixing https://github.com/astral-sh/ruff/issues/13369
def foo(
# comment with non-return annotation
# comment with non-return annotation
arg: (int),
):
pass
# Comments between *args and **kwargs
def args_no_type_annotation(*
# comment
args): pass
def args_type_annotation(*
# comment
args: int): pass
def args_trailing_end_of_line_comment(* # comment
args): pass
def args_blank_line_comment(*
# comment
args): pass
def args_with_leading_parameter_comment(
# What comes next are arguments
*
# with an inline comment
args): pass
def kargs_no_type_annotation(**
# comment
kwargs): pass
def kwargs_type_annotation(**
# comment
kwargs: int): pass
def args_many_comments(
# before
*
# between * and name
args # trailing args
# after name
): pass
def args_many_comments_with_type_annotation(
# before
*
# between * and name
args # trailing args
# before colon
: # after colon
# before type
int # trailing type
# after type
): pass
def args_with_type_annotations_no_after_colon_comment(
# before
*
# between * and name
args # trailing args
# before colon
:
# before type
int # trailing type
# after type
): pass
```
## Output
@@ -1194,130 +1089,6 @@ def foo(x: S) -> S: ...
@decorator # comment
def foo(x: S) -> S: ...
# Regression tests for https://github.com/astral-sh/ruff/issues/13369
def foo(
arg: ( # comment with non-return annotation
int
# comment with non-return annotation
),
):
pass
def foo(
arg: ( # comment with non-return annotation
int | range | memoryview
# comment with non-return annotation
),
):
pass
def foo(
arg: (
int
# only after
),
):
pass
# Asserts that "incorrectly" placed comments don't *move* by fixing https://github.com/astral-sh/ruff/issues/13369
def foo(
# comment with non-return annotation
# comment with non-return annotation
arg: (int),
):
pass
# Comments between *args and **kwargs
def args_no_type_annotation(
# comment
*args,
):
pass
def args_type_annotation(
# comment
*args: int,
):
pass
def args_trailing_end_of_line_comment(
# comment
*args,
):
pass
def args_blank_line_comment(
# comment
*args,
):
pass
def args_with_leading_parameter_comment(
# What comes next are arguments
# with an inline comment
*args,
):
pass
def kargs_no_type_annotation(
# comment
**kwargs,
):
pass
def kwargs_type_annotation(
# comment
**kwargs: int,
):
pass
def args_many_comments(
# before
# between * and name
*args, # trailing args
# after name
):
pass
def args_many_comments_with_type_annotation(
# before
# between * and name
# trailing args
# before colon
*args:
# after colon
# before type
int, # trailing type
# after type
):
pass
def args_with_type_annotations_no_after_colon_comment(
# before
# between * and name
# trailing args
# before colon
*args:
# before type
int, # trailing type
# after type
):
pass
```

View File

@@ -521,67 +521,4 @@ def process_board_action(
```
## Preview changes
```diff
--- Stable
+++ Preview
@@ -131,32 +131,24 @@
# Breaking return type annotations. Black adds parentheses if the parameters are
# empty; otherwise, it leverages the expressions own parentheses if possible.
-def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> (
- Set[
- "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
- ]
-): ...
+def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> Set[
+ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+]: ...
-def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> (
- Set[
- "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
- ]
-): ...
+def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> Set[
+ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+]: ...
-def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> (
- Set[
- "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
- ]
-): ...
+def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> Set[
+ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+]: ...
-def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> (
- Set[
- "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
- ]
-): ...
+def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> Set[
+ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+]: ...
def xxxxxxxxxxxxxxxxxxxxxxxxxxxx(
@@ -257,11 +249,8 @@
): ...
-def double() -> (
- first_item
- and foo.bar.baz().bop(
- 1,
- )
+def double() -> first_item and foo.bar.baz().bop(
+ 1,
):
return 2 * a
```

View File

@@ -1,492 +0,0 @@
---
source: crates/ruff_python_formatter/tests/fixtures.rs
input_file: crates/ruff_python_formatter/resources/test/fixtures/ruff/statement/return_type_no_parameters.py
---
## Input
```python
# Tests for functions without parameters or a dangling comment
# Black's overall behavior is to:
# 1. Print the return type on the same line as the function header if it fits
# 2. Parenthesize the return type if it doesn't fit.
# The exception to this are subscripts, see below
#########################################################################################
# Return types that use NeedsParantheses::BestFit layout with the exception of subscript
#########################################################################################
# String return type that fits on the same line
def no_parameters_string_return_type() -> "ALongIdentifierButDoesntGetParenthesized":
pass
# String return type that exceeds the line length
def no_parameters_overlong_string_return_type() -> (
"ALongIdentifierButDoesntGetParenthesized"
):
pass
# Name return type that fits on the same line as the function header
def no_parameters_name_return_type() -> ALongIdentifierButDoesntGetParenthesized:
pass
# Name return type that exceeds the configured line width
def no_parameters_overlong_name_return_type() -> (
ALongIdentifierButDoesntGetParenthesized
):
pass
#########################################################################################
# Unions
#########################################################################################
def test_return_overlong_union() -> (
A | B | C | DDDDDDDDDDDDDDDDDDDDDDDD | EEEEEEEEEEEEEEEEEEEEEE
):
pass
def test_return_union_with_elements_exceeding_length() -> (
A
| B
| Ccccccccccccccccccccccccccccccccc
| DDDDDDDDDDDDDDDDDDDDDDDD
| EEEEEEEEEEEEEEEEEEEEEE
):
pass
#########################################################################################
# Multiline strings (NeedsParentheses::Never)
#########################################################################################
def test_return_multiline_string_type_annotation() -> """str
| list[str]
""":
pass
def test_return_multiline_string_binary_expression_return_type_annotation() -> """str
| list[str]
""" + "b":
pass
#########################################################################################
# Implicit concatenated strings (NeedsParentheses::Multiline)
#########################################################################################
def test_implicit_concatenated_string_return_type() -> "str" "bbbbbbbbbbbbbbbb":
pass
def test_overlong_implicit_concatenated_string_return_type() -> (
"liiiiiiiiiiiisssssst[str]" "bbbbbbbbbbbbbbbb"
):
pass
def test_extralong_implicit_concatenated_string_return_type() -> (
"liiiiiiiiiiiisssssst[str]"
"bbbbbbbbbbbbbbbbbbbb"
"cccccccccccccccccccccccccccccccccccccc"
):
pass
#########################################################################################
# Subscript
#########################################################################################
def no_parameters_subscript_return_type() -> list[str]:
pass
# 1. Black tries to keep the list flat by parenthesizing the list as shown below even when the `list` identifier
# fits on the header line. IMO, this adds unnecessary parentheses that can be avoided
# and supporting it requires extra complexity (best_fitting! layout)
def no_parameters_overlong_subscript_return_type_with_single_element() -> (
list[xxxxxxxxxxxxxxxxxxxxx]
):
pass
# 2. Black: Removes the parentheses when the subscript fits after breaking individual elements.
# This is somewhat wasteful because the below list actually fits on a single line when splitting after
# `list[`. It is also inconsistent with how subscripts are normally formatted where it first tries to fit the entire subscript,
# then splits after `list[` but keeps all elements on a single line, and finally, splits after each element.
# IMO: Splitting after the `list[` and trying to keep the elements together when possible seems more consistent.
def no_parameters_subscript_return_type_multiple_elements() -> list[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]:
pass
# Black removes the parentheses even the elements exceed the configured line width.
# So does Ruff.
def no_parameters_subscript_return_type_multiple_overlong_elements() -> list[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]:
pass
# Black parenthesizes the subscript if its name doesn't fit on the header line.
# So does Ruff
def no_parameters_subscriptreturn_type_with_overlong_value_() -> (
liiiiiiiiiiiiiiiiiiiiist[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]
):
pass
# Black: It removes the parentheses when the subscript contains multiple elements as
# `no_parameters_subscript_return_type_multiple_overlong_elements` shows. However, it doesn't
# when the subscript contains a single element. Black then keeps the parentheses.
# Ruff removes the parentheses in this case for consistency.
def no_parameters_overlong_subscript_return_type_with_overlong_single_element() -> (
list[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]
):
pass
#########################################################################################
# can_omit_optional_parentheses_layout
#########################################################################################
def test_binary_expression_return_type_annotation() -> aaaaaaaaaaaaaaaaaaaaaaaaaa > [
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbb,
]:
pass
#########################################################################################
# Other
#########################################################################################
# Don't paranthesize lists
def f() -> [
a,
b,
]: pass
```
## Output
```python
# Tests for functions without parameters or a dangling comment
# Black's overall behavior is to:
# 1. Print the return type on the same line as the function header if it fits
# 2. Parenthesize the return type if it doesn't fit.
# The exception to this are subscripts, see below
#########################################################################################
# Return types that use NeedsParantheses::BestFit layout with the exception of subscript
#########################################################################################
# String return type that fits on the same line
def no_parameters_string_return_type() -> "ALongIdentifierButDoesntGetParenthesized":
pass
# String return type that exceeds the line length
def no_parameters_overlong_string_return_type() -> (
"ALongIdentifierButDoesntGetParenthesized"
):
pass
# Name return type that fits on the same line as the function header
def no_parameters_name_return_type() -> ALongIdentifierButDoesntGetParenthesized:
pass
# Name return type that exceeds the configured line width
def no_parameters_overlong_name_return_type() -> (
ALongIdentifierButDoesntGetParenthesized
):
pass
#########################################################################################
# Unions
#########################################################################################
def test_return_overlong_union() -> (
A | B | C | DDDDDDDDDDDDDDDDDDDDDDDD | EEEEEEEEEEEEEEEEEEEEEE
):
pass
def test_return_union_with_elements_exceeding_length() -> (
A
| B
| Ccccccccccccccccccccccccccccccccc
| DDDDDDDDDDDDDDDDDDDDDDDD
| EEEEEEEEEEEEEEEEEEEEEE
):
pass
#########################################################################################
# Multiline strings (NeedsParentheses::Never)
#########################################################################################
def test_return_multiline_string_type_annotation() -> (
"""str
| list[str]
"""
):
pass
def test_return_multiline_string_binary_expression_return_type_annotation() -> (
"""str
| list[str]
"""
+ "b"
):
pass
#########################################################################################
# Implicit concatenated strings (NeedsParentheses::Multiline)
#########################################################################################
def test_implicit_concatenated_string_return_type() -> "str" "bbbbbbbbbbbbbbbb":
pass
def test_overlong_implicit_concatenated_string_return_type() -> (
"liiiiiiiiiiiisssssst[str]" "bbbbbbbbbbbbbbbb"
):
pass
def test_extralong_implicit_concatenated_string_return_type() -> (
"liiiiiiiiiiiisssssst[str]"
"bbbbbbbbbbbbbbbbbbbb"
"cccccccccccccccccccccccccccccccccccccc"
):
pass
#########################################################################################
# Subscript
#########################################################################################
def no_parameters_subscript_return_type() -> list[str]:
pass
# 1. Black tries to keep the list flat by parenthesizing the list as shown below even when the `list` identifier
# fits on the header line. IMO, this adds unnecessary parentheses that can be avoided
# and supporting it requires extra complexity (best_fitting! layout)
def no_parameters_overlong_subscript_return_type_with_single_element() -> (
list[xxxxxxxxxxxxxxxxxxxxx]
):
pass
# 2. Black: Removes the parentheses when the subscript fits after breaking individual elements.
# This is somewhat wasteful because the below list actually fits on a single line when splitting after
# `list[`. It is also inconsistent with how subscripts are normally formatted where it first tries to fit the entire subscript,
# then splits after `list[` but keeps all elements on a single line, and finally, splits after each element.
# IMO: Splitting after the `list[` and trying to keep the elements together when possible seems more consistent.
def no_parameters_subscript_return_type_multiple_elements() -> (
list[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
]
):
pass
# Black removes the parentheses even the elements exceed the configured line width.
# So does Ruff.
def no_parameters_subscript_return_type_multiple_overlong_elements() -> (
list[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
]
):
pass
# Black parenthesizes the subscript if its name doesn't fit on the header line.
# So does Ruff
def no_parameters_subscriptreturn_type_with_overlong_value_() -> (
liiiiiiiiiiiiiiiiiiiiist[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
]
):
pass
# Black: It removes the parentheses when the subscript contains multiple elements as
# `no_parameters_subscript_return_type_multiple_overlong_elements` shows. However, it doesn't
# when the subscript contains a single element. Black then keeps the parentheses.
# Ruff removes the parentheses in this case for consistency.
def no_parameters_overlong_subscript_return_type_with_overlong_single_element() -> (
list[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]
):
pass
#########################################################################################
# can_omit_optional_parentheses_layout
#########################################################################################
def test_binary_expression_return_type_annotation() -> (
aaaaaaaaaaaaaaaaaaaaaaaaaa
> [
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbb,
]
):
pass
#########################################################################################
# Other
#########################################################################################
# Don't paranthesize lists
def f() -> (
[
a,
b,
]
):
pass
```
## Preview changes
```diff
--- Stable
+++ Preview
@@ -58,11 +58,9 @@
#########################################################################################
-def test_return_multiline_string_type_annotation() -> (
- """str
+def test_return_multiline_string_type_annotation() -> """str
| list[str]
-"""
-):
+""":
pass
@@ -108,9 +106,9 @@
# 1. Black tries to keep the list flat by parenthesizing the list as shown below even when the `list` identifier
# fits on the header line. IMO, this adds unnecessary parentheses that can be avoided
# and supporting it requires extra complexity (best_fitting! layout)
-def no_parameters_overlong_subscript_return_type_with_single_element() -> (
- list[xxxxxxxxxxxxxxxxxxxxx]
-):
+def no_parameters_overlong_subscript_return_type_with_single_element() -> list[
+ xxxxxxxxxxxxxxxxxxxxx
+]:
pass
@@ -119,23 +117,18 @@
# `list[`. It is also inconsistent with how subscripts are normally formatted where it first tries to fit the entire subscript,
# then splits after `list[` but keeps all elements on a single line, and finally, splits after each element.
# IMO: Splitting after the `list[` and trying to keep the elements together when possible seems more consistent.
-def no_parameters_subscript_return_type_multiple_elements() -> (
- list[
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
- ]
-):
+def no_parameters_subscript_return_type_multiple_elements() -> list[
+ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx, xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+]:
pass
# Black removes the parentheses even the elements exceed the configured line width.
# So does Ruff.
-def no_parameters_subscript_return_type_multiple_overlong_elements() -> (
- list[
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
- ]
-):
+def no_parameters_subscript_return_type_multiple_overlong_elements() -> list[
+ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
+ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
+]:
pass
@@ -154,11 +147,9 @@
# `no_parameters_subscript_return_type_multiple_overlong_elements` shows. However, it doesn't
# when the subscript contains a single element. Black then keeps the parentheses.
# Ruff removes the parentheses in this case for consistency.
-def no_parameters_overlong_subscript_return_type_with_overlong_single_element() -> (
- list[
- xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- ]
-):
+def no_parameters_overlong_subscript_return_type_with_overlong_single_element() -> list[
+ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+]:
pass
@@ -167,13 +158,10 @@
#########################################################################################
-def test_binary_expression_return_type_annotation() -> (
- aaaaaaaaaaaaaaaaaaaaaaaaaa
- > [
- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
- bbbbbbbbbbbbbbbbbbbbbbbbb,
- ]
-):
+def test_binary_expression_return_type_annotation() -> aaaaaaaaaaaaaaaaaaaaaaaaaa > [
+ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
+ bbbbbbbbbbbbbbbbbbbbbbbbb,
+]:
pass
@@ -183,10 +171,8 @@
# Don't paranthesize lists
-def f() -> (
- [
- a,
- b,
- ]
-):
+def f() -> [
+ a,
+ b,
+]:
pass
```

View File

@@ -1,414 +0,0 @@
---
source: crates/ruff_python_formatter/tests/fixtures.rs
input_file: crates/ruff_python_formatter/resources/test/fixtures/ruff/statement/return_type_parameters.py
---
## Input
```python
# Tests for functions with parameters.
# The main difference to functions without parameters is that the return type never gets
# parenthesized for values that can't be split (NeedsParentheses::BestFit).
#########################################################################################
# Return types that use NeedsParantheses::BestFit layout with the exception of subscript
#########################################################################################
# String return type that fits on the same line
def parameters_string_return_type(a) -> "ALongIdentifierButDoesntGetParenthesized":
pass
# String return type that exceeds the line length
def parameters_overlong_string_return_type(
a,
) -> "ALongIdentifierButDoesntGetParenthesized":
pass
# Name return type that fits on the same line as the function header
def parameters_name_return_type(a) -> ALongIdentifierButDoesntGetParenthesized:
pass
# Name return type that exceeds the configured line width
def parameters_overlong_name_return_type(
a,
) -> ALongIdentifierButDoesntGetParenthesized:
pass
#########################################################################################
# Unions
#########################################################################################
def test_return_overlong_union(
a,
) -> A | B | C | DDDDDDDDDDDDDDDDDDDDDDDD | EEEEEEEEEEEEEEEEEEEEEE:
pass
def test_return_union_with_elements_exceeding_length(
a,
) -> (
A
| B
| Ccccccccccccccccccccccccccccccccc
| DDDDDDDDDDDDDDDDDDDDDDDD
| EEEEEEEEEEEEEEEEEEEEEE
):
pass
#########################################################################################
# Multiline stirngs (NeedsParentheses::Never)
#########################################################################################
def test_return_multiline_string_type_annotation(a) -> """str
| list[str]
""":
pass
def test_return_multiline_string_binary_expression_return_type_annotation(a) -> """str
| list[str]
""" + "b":
pass
#########################################################################################
# Implicit concatenated strings (NeedsParentheses::Multiline)
#########################################################################################
def test_implicit_concatenated_string_return_type(a) -> "str" "bbbbbbbbbbbbbbbb":
pass
def test_overlong_implicit_concatenated_string_return_type(
a,
) -> "liiiiiiiiiiiisssssst[str]" "bbbbbbbbbbbbbbbb":
pass
def test_extralong_implicit_concatenated_string_return_type(
a,
) -> (
"liiiiiiiiiiiisssssst[str]"
"bbbbbbbbbbbbbbbbbbbb"
"cccccccccccccccccccccccccccccccccccccc"
):
pass
#########################################################################################
# Subscript
#########################################################################################
def parameters_subscript_return_type(a) -> list[str]:
pass
# Unlike with no-parameters, the return type gets never parenthesized.
def parameters_overlong_subscript_return_type_with_single_element(
a
) -> list[xxxxxxxxxxxxxxxxxxxxx]:
pass
def parameters_subscript_return_type_multiple_elements(a) -> list[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]:
pass
def parameters_subscript_return_type_multiple_overlong_elements(a) -> list[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]:
pass
def parameters_subscriptreturn_type_with_overlong_value_(
a
) -> liiiiiiiiiiiiiiiiiiiiist[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]:
pass
def parameters_overlong_subscript_return_type_with_overlong_single_element(
a
) -> list[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]:
pass
# Not even in this very ridiculous case
def a():
def b():
def c():
def d():
def e():
def f():
def g():
def h():
def i():
def j():
def k():
def l():
def m():
def n():
def o():
def p():
def q():
def r():
def s():
def t():
def u():
def thiiiiiiiiiiiiiiiiiis_iiiiiiiiiiiiiiiiiiiiiiiiiiiiiis_veeeeeeeeeeedooooong(
a,
) -> list[
int,
float
]: ...
#########################################################################################
# Magic comma in return type
#########################################################################################
# Black only splits the return type. Ruff also breaks the parameters. This is probably a bug.
def parameters_subscriptreturn_type_with_overlong_value_(a) -> liiiiiiiiiiiiiiiiiiiiist[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
]:
pass
#########################################################################################
# can_omit_optional_parentheses_layout
#########################################################################################
def test_return_multiline_string_binary_expression_return_type_annotation(
a,
) -> aaaaaaaaaaaaaaaaaaaaaaaaaa > [
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbb,
]:
pass
```
## Output
```python
# Tests for functions with parameters.
# The main difference to functions without parameters is that the return type never gets
# parenthesized for values that can't be split (NeedsParentheses::BestFit).
#########################################################################################
# Return types that use NeedsParantheses::BestFit layout with the exception of subscript
#########################################################################################
# String return type that fits on the same line
def parameters_string_return_type(a) -> "ALongIdentifierButDoesntGetParenthesized":
pass
# String return type that exceeds the line length
def parameters_overlong_string_return_type(
a,
) -> "ALongIdentifierButDoesntGetParenthesized":
pass
# Name return type that fits on the same line as the function header
def parameters_name_return_type(a) -> ALongIdentifierButDoesntGetParenthesized:
pass
# Name return type that exceeds the configured line width
def parameters_overlong_name_return_type(
a,
) -> ALongIdentifierButDoesntGetParenthesized:
pass
#########################################################################################
# Unions
#########################################################################################
def test_return_overlong_union(
a,
) -> A | B | C | DDDDDDDDDDDDDDDDDDDDDDDD | EEEEEEEEEEEEEEEEEEEEEE:
pass
def test_return_union_with_elements_exceeding_length(
a,
) -> (
A
| B
| Ccccccccccccccccccccccccccccccccc
| DDDDDDDDDDDDDDDDDDDDDDDD
| EEEEEEEEEEEEEEEEEEEEEE
):
pass
#########################################################################################
# Multiline stirngs (NeedsParentheses::Never)
#########################################################################################
def test_return_multiline_string_type_annotation(
a,
) -> """str
| list[str]
""":
pass
def test_return_multiline_string_binary_expression_return_type_annotation(
a,
) -> (
"""str
| list[str]
"""
+ "b"
):
pass
#########################################################################################
# Implicit concatenated strings (NeedsParentheses::Multiline)
#########################################################################################
def test_implicit_concatenated_string_return_type(a) -> "str" "bbbbbbbbbbbbbbbb":
pass
def test_overlong_implicit_concatenated_string_return_type(
a,
) -> "liiiiiiiiiiiisssssst[str]" "bbbbbbbbbbbbbbbb":
pass
def test_extralong_implicit_concatenated_string_return_type(
a,
) -> (
"liiiiiiiiiiiisssssst[str]"
"bbbbbbbbbbbbbbbbbbbb"
"cccccccccccccccccccccccccccccccccccccc"
):
pass
#########################################################################################
# Subscript
#########################################################################################
def parameters_subscript_return_type(a) -> list[str]:
pass
# Unlike with no-parameters, the return type gets never parenthesized.
def parameters_overlong_subscript_return_type_with_single_element(
a,
) -> list[xxxxxxxxxxxxxxxxxxxxx]:
pass
def parameters_subscript_return_type_multiple_elements(
a,
) -> list[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx, xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]:
pass
def parameters_subscript_return_type_multiple_overlong_elements(
a,
) -> list[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
]:
pass
def parameters_subscriptreturn_type_with_overlong_value_(
a,
) -> liiiiiiiiiiiiiiiiiiiiist[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx, xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]:
pass
def parameters_overlong_subscript_return_type_with_overlong_single_element(
a,
) -> list[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]:
pass
# Not even in this very ridiculous case
def a():
def b():
def c():
def d():
def e():
def f():
def g():
def h():
def i():
def j():
def k():
def l():
def m():
def n():
def o():
def p():
def q():
def r():
def s():
def t():
def u():
def thiiiiiiiiiiiiiiiiiis_iiiiiiiiiiiiiiiiiiiiiiiiiiiiiis_veeeeeeeeeeedooooong(
a,
) -> list[
int,
float,
]: ...
#########################################################################################
# Magic comma in return type
#########################################################################################
# Black only splits the return type. Ruff also breaks the parameters. This is probably a bug.
def parameters_subscriptreturn_type_with_overlong_value_(
a,
) -> liiiiiiiiiiiiiiiiiiiiist[
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
]:
pass
#########################################################################################
# can_omit_optional_parentheses_layout
#########################################################################################
def test_return_multiline_string_binary_expression_return_type_annotation(
a,
) -> aaaaaaaaaaaaaaaaaaaaaaaaaa > [
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,
bbbbbbbbbbbbbbbbbbbbbbbbb,
]:
pass
```

View File

@@ -4,12 +4,14 @@ use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use crate::{is_python_whitespace, Cursor};
/// Searches for the first non-trivia character after `offset`.
/// Searches for the first non-trivia character in `range`.
///
/// The search skips over any whitespace and comments.
///
/// Returns `Some` if the source code after `offset` contains any non-trivia character.///
/// Returns `None` if the text after `offset` is empty or only contains trivia (whitespace or comments).
/// Returns `Some` if the range contains any non-trivia character. The first item is the absolute offset
/// of the character, the second item the non-trivia character.
///
/// Returns `None` if the range is empty or only contains trivia (whitespace or comments).
pub fn first_non_trivia_token(offset: TextSize, code: &str) -> Option<SimpleToken> {
SimpleTokenizer::starts_at(offset, code)
.skip_trivia()

View File

@@ -222,57 +222,6 @@ impl LineIndex {
}
/// Returns the [byte offset](TextSize) at `line` and `column`.
///
/// ## Examples
///
/// ### ASCII
///
/// ```
/// use ruff_source_file::{LineIndex, OneIndexed};
/// use ruff_text_size::TextSize;
/// let source = r#"a = 4
/// c = "some string"
/// x = b"#;
///
/// let index = LineIndex::from_source_text(source);
///
/// // First line, first column
/// assert_eq!(index.offset(OneIndexed::from_zero_indexed(0), OneIndexed::from_zero_indexed(0), source), TextSize::new(0));
///
/// // Second line, 4th column
/// assert_eq!(index.offset(OneIndexed::from_zero_indexed(1), OneIndexed::from_zero_indexed(4), source), TextSize::new(10));
///
/// // Offset past the end of the first line
/// assert_eq!(index.offset(OneIndexed::from_zero_indexed(0), OneIndexed::from_zero_indexed(10), source), TextSize::new(6));
///
/// // Offset past the end of the file
/// assert_eq!(index.offset(OneIndexed::from_zero_indexed(3), OneIndexed::from_zero_indexed(0), source), TextSize::new(29));
/// ```
///
/// ### UTF8
///
/// ```
/// use ruff_source_file::{LineIndex, OneIndexed};
/// use ruff_text_size::TextSize;
/// let source = r#"a = 4
/// c = "❤️"
/// x = b"#;
///
/// let index = LineIndex::from_source_text(source);
///
/// // First line, first column
/// assert_eq!(index.offset(OneIndexed::from_zero_indexed(0), OneIndexed::from_zero_indexed(0), source), TextSize::new(0));
///
/// // Third line, 2nd column, after emoji
/// assert_eq!(index.offset(OneIndexed::from_zero_indexed(2), OneIndexed::from_zero_indexed(1), source), TextSize::new(20));
///
/// // Offset past the end of the second line
/// assert_eq!(index.offset(OneIndexed::from_zero_indexed(1), OneIndexed::from_zero_indexed(10), source), TextSize::new(19));
///
/// // Offset past the end of the file
/// assert_eq!(index.offset(OneIndexed::from_zero_indexed(3), OneIndexed::from_zero_indexed(0), source), TextSize::new(24));
/// ```
///
pub fn offset(&self, line: OneIndexed, column: OneIndexed, contents: &str) -> TextSize {
// If start-of-line position after last line
if line.to_zero_indexed() > self.line_starts().len() {
@@ -284,7 +233,7 @@ impl LineIndex {
match self.kind() {
IndexKind::Ascii => {
line_range.start()
+ TextSize::try_from(column.to_zero_indexed())
+ TextSize::try_from(column.get())
.unwrap_or(line_range.len())
.clamp(TextSize::new(0), line_range.len())
}
@@ -292,7 +241,7 @@ impl LineIndex {
let rest = &contents[line_range];
let column_offset: TextSize = rest
.chars()
.take(column.to_zero_indexed())
.take(column.get())
.map(ruff_text_size::TextLen::text_len)
.sum();
line_range.start() + column_offset

View File

@@ -1,6 +1,6 @@
[package]
name = "ruff_wasm"
version = "0.6.6"
version = "0.6.5"
publish = false
authors = { workspace = true }
edition = { workspace = true }

View File

@@ -13,15 +13,14 @@ license = { workspace = true }
[lib]
[dependencies]
ruff_cache = { workspace = true }
ruff_formatter = { workspace = true }
ruff_graph = { workspace = true, features = ["serde", "schemars"] }
ruff_linter = { workspace = true }
ruff_macros = { workspace = true }
ruff_python_ast = { workspace = true }
ruff_formatter = { workspace = true }
ruff_python_formatter = { workspace = true, features = ["serde"] }
ruff_python_ast = { workspace = true }
ruff_python_semantic = { workspace = true, features = ["serde"] }
ruff_source_file = { workspace = true }
ruff_cache = { workspace = true }
ruff_macros = { workspace = true }
anyhow = { workspace = true }
colored = { workspace = true }

View File

@@ -3,7 +3,6 @@
//! the various parameters.
use std::borrow::Cow;
use std::collections::BTreeMap;
use std::env::VarError;
use std::num::{NonZeroU16, NonZeroU8};
use std::path::{Path, PathBuf};
@@ -20,7 +19,6 @@ use strum::IntoEnumIterator;
use ruff_cache::cache_dir;
use ruff_formatter::IndentStyle;
use ruff_graph::{AnalyzeSettings, Direction};
use ruff_linter::line_width::{IndentWidth, LineLength};
use ruff_linter::registry::RuleNamespace;
use ruff_linter::registry::{Rule, RuleSet, INCOMPATIBLE_CODES};
@@ -42,11 +40,11 @@ use ruff_python_formatter::{
};
use crate::options::{
AnalyzeOptions, Flake8AnnotationsOptions, Flake8BanditOptions, Flake8BooleanTrapOptions,
Flake8BugbearOptions, Flake8BuiltinsOptions, Flake8ComprehensionsOptions,
Flake8CopyrightOptions, Flake8ErrMsgOptions, Flake8GetTextOptions,
Flake8ImplicitStrConcatOptions, Flake8ImportConventionsOptions, Flake8PytestStyleOptions,
Flake8QuotesOptions, Flake8SelfOptions, Flake8TidyImportsOptions, Flake8TypeCheckingOptions,
Flake8AnnotationsOptions, Flake8BanditOptions, Flake8BooleanTrapOptions, Flake8BugbearOptions,
Flake8BuiltinsOptions, Flake8ComprehensionsOptions, Flake8CopyrightOptions,
Flake8ErrMsgOptions, Flake8GetTextOptions, Flake8ImplicitStrConcatOptions,
Flake8ImportConventionsOptions, Flake8PytestStyleOptions, Flake8QuotesOptions,
Flake8SelfOptions, Flake8TidyImportsOptions, Flake8TypeCheckingOptions,
Flake8UnusedArgumentsOptions, FormatOptions, IsortOptions, LintCommonOptions, LintOptions,
McCabeOptions, Options, Pep8NamingOptions, PyUpgradeOptions, PycodestyleOptions,
PydocstyleOptions, PyflakesOptions, PylintOptions, RuffOptions,
@@ -144,7 +142,6 @@ pub struct Configuration {
pub lint: LintConfiguration,
pub format: FormatConfiguration,
pub analyze: AnalyzeConfiguration,
}
impl Configuration {
@@ -210,23 +207,6 @@ impl Configuration {
.unwrap_or(format_defaults.docstring_code_line_width),
};
let analyze = self.analyze;
let analyze_preview = analyze.preview.unwrap_or(global_preview);
let analyze_defaults = AnalyzeSettings::default();
let analyze = AnalyzeSettings {
exclude: FilePatternSet::try_from_iter(analyze.exclude.unwrap_or_default())?,
preview: analyze_preview,
target_version,
extension: self.extension.clone().unwrap_or_default(),
detect_string_imports: analyze
.detect_string_imports
.unwrap_or(analyze_defaults.detect_string_imports),
include_dependencies: analyze
.include_dependencies
.unwrap_or(analyze_defaults.include_dependencies),
};
let lint = self.lint;
let lint_preview = lint.preview.unwrap_or(global_preview);
@@ -421,7 +401,6 @@ impl Configuration {
},
formatter,
analyze,
})
}
@@ -555,10 +534,6 @@ impl Configuration {
options.format.unwrap_or_default(),
project_root,
)?,
analyze: AnalyzeConfiguration::from_options(
options.analyze.unwrap_or_default(),
project_root,
)?,
})
}
@@ -598,7 +573,6 @@ impl Configuration {
lint: self.lint.combine(config.lint),
format: self.format.combine(config.format),
analyze: self.analyze.combine(config.analyze),
}
}
}
@@ -1217,57 +1191,6 @@ impl FormatConfiguration {
}
}
}
#[derive(Clone, Debug, Default)]
pub struct AnalyzeConfiguration {
pub exclude: Option<Vec<FilePattern>>,
pub preview: Option<PreviewMode>,
pub direction: Option<Direction>,
pub detect_string_imports: Option<bool>,
pub include_dependencies: Option<BTreeMap<PathBuf, (PathBuf, Vec<String>)>>,
}
impl AnalyzeConfiguration {
#[allow(clippy::needless_pass_by_value)]
pub fn from_options(options: AnalyzeOptions, project_root: &Path) -> Result<Self> {
Ok(Self {
exclude: options.exclude.map(|paths| {
paths
.into_iter()
.map(|pattern| {
let absolute = fs::normalize_path_to(&pattern, project_root);
FilePattern::User(pattern, absolute)
})
.collect()
}),
preview: options.preview.map(PreviewMode::from),
direction: options.direction,
detect_string_imports: options.detect_string_imports,
include_dependencies: options.include_dependencies.map(|dependencies| {
dependencies
.into_iter()
.map(|(key, value)| {
(project_root.join(key), (project_root.to_path_buf(), value))
})
.collect::<BTreeMap<_, _>>()
}),
})
}
#[must_use]
#[allow(clippy::needless_pass_by_value)]
pub fn combine(self, config: Self) -> Self {
Self {
exclude: self.exclude.or(config.exclude),
preview: self.preview.or(config.preview),
direction: self.direction.or(config.direction),
detect_string_imports: self.detect_string_imports.or(config.detect_string_imports),
include_dependencies: self.include_dependencies.or(config.include_dependencies),
}
}
}
pub(crate) trait CombinePluginOptions {
#[must_use]
fn combine(self, other: Self) -> Self;

View File

@@ -1,14 +1,11 @@
use regex::Regex;
use rustc_hash::{FxBuildHasher, FxHashMap, FxHashSet};
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use std::path::PathBuf;
use strum::IntoEnumIterator;
use crate::options_base::{OptionsMetadata, Visit};
use crate::settings::LineEnding;
use ruff_formatter::IndentStyle;
use ruff_graph::Direction;
use ruff_linter::line_width::{IndentWidth, LineLength};
use ruff_linter::rules::flake8_import_conventions::settings::BannedAliases;
use ruff_linter::rules::flake8_pytest_style::settings::SettingsError;
@@ -436,10 +433,6 @@ pub struct Options {
/// Options to configure code formatting.
#[option_group]
pub format: Option<FormatOptions>,
/// Options to configure import map generation.
#[option_group]
pub analyze: Option<AnalyzeOptions>,
}
/// Configures how Ruff checks your code.
@@ -3313,80 +3306,6 @@ pub struct FormatOptions {
pub docstring_code_line_length: Option<DocstringCodeLineWidth>,
}
/// Configures Ruff's `analyze` command.
#[derive(
Clone, Debug, PartialEq, Eq, Default, Deserialize, Serialize, OptionsMetadata, CombineOptions,
)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct AnalyzeOptions {
/// A list of file patterns to exclude from analysis in addition to the files excluded globally (see [`exclude`](#exclude), and [`extend-exclude`](#extend-exclude)).
///
/// Exclusions are based on globs, and can be either:
///
/// - Single-path patterns, like `.mypy_cache` (to exclude any directory
/// named `.mypy_cache` in the tree), `foo.py` (to exclude any file named
/// `foo.py`), or `foo_*.py` (to exclude any file matching `foo_*.py` ).
/// - Relative patterns, like `directory/foo.py` (to exclude that specific
/// file) or `directory/*.py` (to exclude any Python files in
/// `directory`). Note that these paths are relative to the project root
/// (e.g., the directory containing your `pyproject.toml`).
///
/// For more information on the glob syntax, refer to the [`globset` documentation](https://docs.rs/globset/latest/globset/#syntax).
#[option(
default = r#"[]"#,
value_type = "list[str]",
example = r#"
exclude = ["generated"]
"#
)]
pub exclude: Option<Vec<String>>,
/// Whether to enable preview mode. When preview mode is enabled, Ruff will expose unstable
/// commands.
#[option(
default = "false",
value_type = "bool",
example = r#"
# Enable preview features.
preview = true
"#
)]
pub preview: Option<bool>,
/// Whether to generate a map from file to files that it depends on (dependencies) or files that
/// depend on it (dependents).
#[option(
default = r#"\"dependencies\""#,
value_type = "\"dependents\" | \"dependencies\"",
example = r#"
direction = "dependencies"
"#
)]
pub direction: Option<Direction>,
/// Whether to detect imports from string literals. When enabled, Ruff will search for string
/// literals that "look like" import paths, and include them in the import map, if they resolve
/// to valid Python modules.
#[option(
default = "false",
value_type = "bool",
example = r#"
detect-string-imports = true
"#
)]
pub detect_string_imports: Option<bool>,
/// A map from file path to the list of file paths or globs that should be considered
/// dependencies of that file, regardless of whether relevant imports are detected.
#[option(
default = "{}",
value_type = "dict[str, list[str]]",
example = r#"
include-dependencies = {
"foo/bar.py": ["foo/baz/*.py"],
}
"#
)]
pub include_dependencies: Option<BTreeMap<PathBuf, Vec<String>>>,
}
#[cfg(test)]
mod tests {
use crate::options::Flake8SelfOptions;

View File

@@ -395,6 +395,7 @@ pub fn python_files_in_path<'a>(
let walker = builder.build_parallel();
// Run the `WalkParallel` to collect all Python files.
let state = WalkPythonFilesState::new(resolver);
let mut visitor = PythonFilesVisitorBuilder::new(transformer, &state);
walker.visit(&mut visitor);

View File

@@ -1,7 +1,6 @@
use path_absolutize::path_dedot;
use ruff_cache::cache_dir;
use ruff_formatter::{FormatOptions, IndentStyle, IndentWidth, LineWidth};
use ruff_graph::AnalyzeSettings;
use ruff_linter::display_settings;
use ruff_linter::settings::types::{
ExtensionMapping, FilePattern, FilePatternSet, OutputFormat, UnsafeFixes,
@@ -36,7 +35,6 @@ pub struct Settings {
pub file_resolver: FileResolverSettings,
pub linter: LinterSettings,
pub formatter: FormatterSettings,
pub analyze: AnalyzeSettings,
}
impl Default for Settings {
@@ -52,7 +50,6 @@ impl Default for Settings {
linter: LinterSettings::new(project_root),
file_resolver: FileResolverSettings::new(project_root),
formatter: FormatterSettings::default(),
analyze: AnalyzeSettings::default(),
}
}
}
@@ -71,8 +68,7 @@ impl fmt::Display for Settings {
self.unsafe_fixes,
self.file_resolver | nested,
self.linter | nested,
self.formatter | nested,
self.analyze | nested,
self.formatter | nested
]
}
Ok(())

View File

@@ -522,7 +522,6 @@ Commands:
clean Clear any caches in the current directory and any subdirectories
format Run the Ruff formatter on the given files or directories
server Run the language server
analyze Run analysis over Python source code
version Display Ruff's version
help Print this message or the help of the given subcommand(s)

View File

@@ -78,7 +78,7 @@ Ruff can be used as a [pre-commit](https://pre-commit.com) hook via [`ruff-pre-c
```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.6.6
rev: v0.6.5
hooks:
# Run the linter.
- id: ruff
@@ -91,7 +91,7 @@ To enable lint fixes, add the `--fix` argument to the lint hook:
```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.6.6
rev: v0.6.5
hooks:
# Run the linter.
- id: ruff
@@ -105,7 +105,7 @@ To run the hooks over Jupyter Notebooks too, add `jupyter` to the list of allowe
```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.6.6
rev: v0.6.5
hooks:
# Run the linter.
- id: ruff

View File

@@ -5053,9 +5053,9 @@
"dev": true
},
"node_modules/vite": {
"version": "5.4.6",
"resolved": "https://registry.npmjs.org/vite/-/vite-5.4.6.tgz",
"integrity": "sha512-IeL5f8OO5nylsgzd9tq4qD2QqI0k2CQLGrWD0rCN0EQJZpBK5vJAx0I+GDkMOXxQX/OfFHMuLIx6ddAxGX/k+Q==",
"version": "5.4.5",
"resolved": "https://registry.npmjs.org/vite/-/vite-5.4.5.tgz",
"integrity": "sha512-pXqR0qtb2bTwLkev4SE3r4abCNioP3GkjvIDLlzziPpXtHgiJIjuKl+1GN6ESOT3wMjG3JTeARopj2SwYaHTOA==",
"dev": true,
"license": "MIT",
"dependencies": {

View File

@@ -4,7 +4,7 @@ build-backend = "maturin"
[project]
name = "ruff"
version = "0.6.6"
version = "0.6.5"
description = "An extremely fast Python linter and code formatter, written in Rust."
authors = [{ name = "Astral Software Inc.", email = "hey@astral.sh" }]
readme = "README.md"

84
ruff.schema.json generated
View File

@@ -16,17 +16,6 @@
"minLength": 1
}
},
"analyze": {
"description": "Options to configure import map generation.",
"anyOf": [
{
"$ref": "#/definitions/AnalyzeOptions"
},
{
"type": "null"
}
]
},
"builtins": {
"description": "A list of builtins to treat as defined references, in addition to the system builtins.",
"type": [
@@ -757,61 +746,6 @@
},
"additionalProperties": false,
"definitions": {
"AnalyzeOptions": {
"description": "Configures Ruff's `analyze` command.",
"type": "object",
"properties": {
"detect-string-imports": {
"description": "Whether to detect imports from string literals. When enabled, Ruff will search for string literals that \"look like\" import paths, and include them in the import map, if they resolve to valid Python modules.",
"type": [
"boolean",
"null"
]
},
"direction": {
"description": "Whether to generate a map from file to files that it depends on (dependencies) or files that depend on it (dependents).",
"anyOf": [
{
"$ref": "#/definitions/Direction"
},
{
"type": "null"
}
]
},
"exclude": {
"description": "A list of file patterns to exclude from analysis in addition to the files excluded globally (see [`exclude`](#exclude), and [`extend-exclude`](#extend-exclude)).\n\nExclusions are based on globs, and can be either:\n\n- Single-path patterns, like `.mypy_cache` (to exclude any directory named `.mypy_cache` in the tree), `foo.py` (to exclude any file named `foo.py`), or `foo_*.py` (to exclude any file matching `foo_*.py` ). - Relative patterns, like `directory/foo.py` (to exclude that specific file) or `directory/*.py` (to exclude any Python files in `directory`). Note that these paths are relative to the project root (e.g., the directory containing your `pyproject.toml`).\n\nFor more information on the glob syntax, refer to the [`globset` documentation](https://docs.rs/globset/latest/globset/#syntax).",
"type": [
"array",
"null"
],
"items": {
"type": "string"
}
},
"include-dependencies": {
"description": "A map from file path to the list of file paths or globs that should be considered dependencies of that file, regardless of whether relevant imports are detected.",
"type": [
"object",
"null"
],
"additionalProperties": {
"type": "array",
"items": {
"type": "string"
}
}
},
"preview": {
"description": "Whether to enable preview mode. When preview mode is enabled, Ruff will expose unstable commands.",
"type": [
"boolean",
"null"
]
}
},
"additionalProperties": false
},
"ApiBan": {
"type": "object",
"required": [
@@ -866,24 +800,6 @@
}
]
},
"Direction": {
"oneOf": [
{
"description": "Construct a map from module to its dependencies (i.e., the modules that it imports).",
"type": "string",
"enum": [
"Dependencies"
]
},
{
"description": "Construct a map from module to its dependents (i.e., the modules that import it).",
"type": "string",
"enum": [
"Dependents"
]
}
]
},
"DocstringCodeLineWidth": {
"anyOf": [
{

View File

@@ -1,6 +1,6 @@
[tool.poetry]
name = "scripts"
version = "0.6.6"
version = "0.6.5"
description = ""
authors = ["Charles Marsh <charlie.r.marsh@gmail.com>"]

View File

@@ -25,55 +25,55 @@ mkdir -p "$dir"
if [ ! -d "$dir/twine/.git" ]; then
git clone --filter=tree:0 https://github.com/pypa/twine "$dir/twine"
fi
git -C "$dir/twine" checkout -q ae71822a3cb0478d0f6a0cccb65d6f8e6275ece5
git -C "$dir/twine" checkout -q afc37f8b26ed06ccd104f6724f293f657b9b7f15
# web framework that implements a lot of magic
if [ ! -d "$dir/django/.git" ]; then
git clone --filter=tree:0 https://github.com/django/django "$dir/django"
fi
git -C "$dir/django" checkout -q ee5147cfd7de2add74a285537a8968ec074e70cd
git -C "$dir/django" checkout -q 20b7aac7ca60b0352d926340622e618bcbee54a8
# an ML project
if [ ! -d "$dir/transformers/.git" ]; then
git clone --filter=tree:0 https://github.com/huggingface/transformers "$dir/transformers"
fi
git -C "$dir/transformers" checkout -q ac5a0556f14dec503b064d5802da1092e0b558ea
git -C "$dir/transformers" checkout -q 5c081e29930466ecf9a478727039d980131076d9
# type annotations
if [ ! -d "$dir/typeshed/.git" ]; then
git clone --filter=tree:0 https://github.com/python/typeshed "$dir/typeshed"
fi
git -C "$dir/typeshed" checkout -q d34ef50754de993d01630883dbcd1d27ba507143
git -C "$dir/typeshed" checkout -q cb688d2577520d98c09853acc20de099300b4e48
# python 3.11, typing and 100% test coverage
if [ ! -d "$dir/warehouse/.git" ]; then
git clone --filter=tree:0 https://github.com/pypi/warehouse "$dir/warehouse"
fi
git -C "$dir/warehouse" checkout -q 5a4d2cadec641b5d6a6847d0127940e0f532f184
git -C "$dir/warehouse" checkout -q c6d9dd32b7c85d3a5f4240c95267874417e5b965
# zulip, a django user
if [ ! -d "$dir/zulip/.git" ]; then
git clone --filter=tree:0 https://github.com/zulip/zulip "$dir/zulip"
fi
git -C "$dir/zulip" checkout -q ccddbba7a3074283ccaac3bde35fd32b19faf042
git -C "$dir/zulip" checkout -q b605042312c763c9a1e458f0ca6a003799682546
# home-assistant, home automation with 1ok files
if [ ! -d "$dir/home-assistant/.git" ]; then
git clone --filter=tree:0 https://github.com/home-assistant/core "$dir/home-assistant"
fi
git -C "$dir/home-assistant" checkout -q 3601c531f400255d10b82529549e564fbe483a54
git -C "$dir/home-assistant" checkout -q 88296c1998fd1943576e0167ab190d25af175257
# poetry, a package manager that uses black preview style
if [ ! -d "$dir/poetry/.git" ]; then
git clone --filter=tree:0 https://github.com/python-poetry/poetry "$dir/poetry"
fi
git -C "$dir/poetry" checkout -q 36fedb59b8e655252168055b536ead591068e1e4
git -C "$dir/poetry" checkout -q f310a592ad3ab41bb8d635af6bacaf044a1fefef
# cpython itself
if [ ! -d "$dir/cpython/.git" ]; then
git clone --filter=tree:0 https://github.com/python/cpython "$dir/cpython"
fi
git -C "$dir/cpython" checkout -q 28aea5d07d163105b42acd81c1651397ef95ea57
git -C "$dir/cpython" checkout -q b75186f69edcf54615910a5cd707996144163ef7
# Uncomment if you want to update the hashes
#for i in "$dir"/*/; do git -C "$i" switch main && git -C "$i" pull; done
@@ -81,7 +81,7 @@ git -C "$dir/cpython" checkout -q 28aea5d07d163105b42acd81c1651397ef95ea57
time cargo run --bin ruff_dev -- format-dev --stability-check \
--error-file "$target/progress_projects_errors.txt" --log-file "$target/progress_projects_log.txt" --stats-file "$target/progress_projects_stats.txt" \
--files-with-errors 3 --multi-project "$dir" || (
--files-with-errors 15 --multi-project "$dir" || (
echo "Ecosystem check failed"
cat "$target/progress_projects_log.txt"
exit 1