Compare commits

...

4 Commits

Author SHA1 Message Date
Micha Reiser
19881f4cc3 [ty] Cache legacy generic context 2025-12-16 12:48:57 +01:00
Charlie Marsh
682d29c256 [ty] Avoid enforcing standalone expression for tests in f-strings (#21967)
## Summary

Based on what we do elsewhere and my understanding of "standalone"
here...

Closes https://github.com/astral-sh/ty/issues/1865.
2025-12-15 22:31:04 -05:00
Zanie Blue
8e13765b57 [ty] Use title for configuration code fences in ty reference documentation (#21992)
Part of https://github.com/astral-sh/ty/pull/1904
2025-12-15 16:36:08 -05:00
Douglas Creager
7d3b7c5754 [ty] Consistent ordering of constraint set specializations, take 2 (#21983)
In https://github.com/astral-sh/ruff/pull/21957, we tried to use
`union_or_intersection_elements_ordering` to provide a stable ordering
of the union and intersection elements that are created when determining
which type a typevar should specialize to. @AlexWaygood [pointed
out](https://github.com/astral-sh/ruff/pull/21551#discussion_r2616543762)
that this won't work, since that provides a consistent ordering within a
single process run, but does not provide a stable ordering across runs.

This is an attempt to produce a proper stable ordering for constraint
sets, so that we end up with consistent diagnostic and test output.

We do this by maintaining a new `source_order` field on each interior
BDD node, which records when that node's constraint was added to the
set. Several of the BDD operators (`and`, `or`, etc) now have
`_with_offset` variants, which update each `source_order` in the rhs to
be larger than any of the `source_order`s in the lhs. This is what
causes that field to reflect (a) the order in which each constraint is
added to the set, and (b) the order of the parameters you provide to `and`, `or`,
etc. Then we sort by that new field before constructing the
union/intersection types when creating a specialization.
2025-12-15 14:24:08 -05:00
8 changed files with 652 additions and 297 deletions

View File

@@ -166,8 +166,9 @@ fn emit_field(output: &mut String, name: &str, field: &OptionField, parents: &[S
output.push('\n');
let _ = writeln!(output, "**Type**: `{}`", field.value_type);
output.push('\n');
output.push_str("**Example usage** (`pyproject.toml`):\n\n");
output.push_str("**Example usage**:\n\n");
output.push_str(&format_example(
"pyproject.toml",
&format_header(
field.scope,
field.example,
@@ -179,11 +180,11 @@ fn emit_field(output: &mut String, name: &str, field: &OptionField, parents: &[S
output.push('\n');
}
fn format_example(header: &str, content: &str) -> String {
fn format_example(title: &str, header: &str, content: &str) -> String {
if header.is_empty() {
format!("```toml\n{content}\n```\n",)
format!("```toml title=\"{title}\"\n{content}\n```\n",)
} else {
format!("```toml\n{header}\n{content}\n```\n",)
format!("```toml title=\"{title}\"\n{header}\n{content}\n```\n",)
}
}

View File

@@ -18,9 +18,9 @@ Valid severities are:
**Type**: `dict[RuleName, "ignore" | "warn" | "error"]`
**Example usage** (`pyproject.toml`):
**Example usage**:
```toml
```toml title="pyproject.toml"
[tool.ty.rules]
possibly-unresolved-reference = "warn"
division-by-zero = "ignore"
@@ -45,9 +45,9 @@ configuration setting.
**Type**: `list[str]`
**Example usage** (`pyproject.toml`):
**Example usage**:
```toml
```toml title="pyproject.toml"
[tool.ty.environment]
extra-paths = ["./shared/my-search-path"]
```
@@ -76,9 +76,9 @@ This option can be used to point to virtual or system Python environments.
**Type**: `str`
**Example usage** (`pyproject.toml`):
**Example usage**:
```toml
```toml title="pyproject.toml"
[tool.ty.environment]
python = "./custom-venv-location/.venv"
```
@@ -103,9 +103,9 @@ If no platform is specified, ty will use the current platform:
**Type**: `"win32" | "darwin" | "android" | "ios" | "linux" | "all" | str`
**Example usage** (`pyproject.toml`):
**Example usage**:
```toml
```toml title="pyproject.toml"
[tool.ty.environment]
# Tailor type stubs and conditionalized type definitions to windows.
python-platform = "win32"
@@ -137,9 +137,9 @@ to reflect the differing contents of the standard library across Python versions
**Type**: `"3.7" | "3.8" | "3.9" | "3.10" | "3.11" | "3.12" | "3.13" | "3.14" | <major>.<minor>`
**Example usage** (`pyproject.toml`):
**Example usage**:
```toml
```toml title="pyproject.toml"
[tool.ty.environment]
python-version = "3.12"
```
@@ -165,9 +165,9 @@ it will also be included in the first party search path.
**Type**: `list[str]`
**Example usage** (`pyproject.toml`):
**Example usage**:
```toml
```toml title="pyproject.toml"
[tool.ty.environment]
# Multiple directories (priority order)
root = ["./src", "./lib", "./vendor"]
@@ -185,9 +185,9 @@ bundled as a zip file in the binary
**Type**: `str`
**Example usage** (`pyproject.toml`):
**Example usage**:
```toml
```toml title="pyproject.toml"
[tool.ty.environment]
typeshed = "/path/to/custom/typeshed"
```
@@ -240,9 +240,9 @@ If not specified, defaults to `[]` (excludes no files).
**Type**: `list[str]`
**Example usage** (`pyproject.toml`):
**Example usage**:
```toml
```toml title="pyproject.toml"
[[tool.ty.overrides]]
exclude = [
"generated",
@@ -268,9 +268,9 @@ If not specified, defaults to `["**"]` (matches all files).
**Type**: `list[str]`
**Example usage** (`pyproject.toml`):
**Example usage**:
```toml
```toml title="pyproject.toml"
[[tool.ty.overrides]]
include = [
"src",
@@ -292,9 +292,9 @@ severity levels or disable them entirely.
**Type**: `dict[RuleName, "ignore" | "warn" | "error"]`
**Example usage** (`pyproject.toml`):
**Example usage**:
```toml
```toml title="pyproject.toml"
[[tool.ty.overrides]]
include = ["src"]
@@ -358,9 +358,9 @@ to re-include `dist` use `exclude = ["!dist"]`
**Type**: `list[str]`
**Example usage** (`pyproject.toml`):
**Example usage**:
```toml
```toml title="pyproject.toml"
[tool.ty.src]
exclude = [
"generated",
@@ -399,9 +399,9 @@ matches `<project_root>/src` and not `<project_root>/test/src`).
**Type**: `list[str]`
**Example usage** (`pyproject.toml`):
**Example usage**:
```toml
```toml title="pyproject.toml"
[tool.ty.src]
include = [
"src",
@@ -421,9 +421,9 @@ Enabled by default.
**Type**: `bool`
**Example usage** (`pyproject.toml`):
**Example usage**:
```toml
```toml title="pyproject.toml"
[tool.ty.src]
respect-ignore-files = false
```
@@ -450,9 +450,9 @@ it will also be included in the first party search path.
**Type**: `str`
**Example usage** (`pyproject.toml`):
**Example usage**:
```toml
```toml title="pyproject.toml"
[tool.ty.src]
root = "./app"
```
@@ -471,9 +471,9 @@ Defaults to `false`.
**Type**: `bool`
**Example usage** (`pyproject.toml`):
**Example usage**:
```toml
```toml title="pyproject.toml"
[tool.ty.terminal]
# Error if ty emits any warning-level diagnostics.
error-on-warning = true
@@ -491,9 +491,9 @@ Defaults to `full`.
**Type**: `full | concise`
**Example usage** (`pyproject.toml`):
**Example usage**:
```toml
```toml title="pyproject.toml"
[tool.ty.terminal]
output-format = "concise"
```

View File

@@ -152,6 +152,20 @@ The expressions in these string annotations aren't valid expressions in this con
shouldn't panic.
```py
# Regression test for https://github.com/astral-sh/ty/issues/1865
# error: [fstring-type-annotation]
stringified_fstring_with_conditional: "f'{1 if 1 else 1}'"
# error: [fstring-type-annotation]
stringified_fstring_with_boolean_expression: "f'{1 or 2}'"
# error: [fstring-type-annotation]
stringified_fstring_with_generator_expression: "f'{(i for i in range(5))}'"
# error: [fstring-type-annotation]
stringified_fstring_with_list_comprehension: "f'{[i for i in range(5)]}'"
# error: [fstring-type-annotation]
stringified_fstring_with_dict_comprehension: "f'{ {i: i for i in range(5)} }'"
# error: [fstring-type-annotation]
stringified_fstring_with_set_comprehension: "f'{ {i for i in range(5)} }'"
a: "1 or 2"
b: "(x := 1)"
# error: [invalid-type-form]

View File

@@ -522,6 +522,11 @@ impl<'db> SemanticIndex<'db> {
self.scopes_by_node[&node.node_key()]
}
/// Returns the id of the scope that `node` creates, if it exists.
pub(crate) fn try_node_scope(&self, node: NodeWithScopeRef) -> Option<FileScopeId> {
self.scopes_by_node.get(&node.node_key()).copied()
}
/// Checks if there is an import of `__future__.annotations` in the global scope, which affects
/// the logic for type inference.
pub(super) fn has_future_annotations(&self) -> bool {

View File

@@ -689,20 +689,10 @@ impl<'db> IntersectionBuilder<'db> {
}
}
pub(crate) fn order_elements(mut self, val: bool) -> Self {
self.order_elements = val;
self
}
pub(crate) fn add_positive(self, ty: Type<'db>) -> Self {
self.add_positive_impl(ty, &mut vec![])
}
pub(crate) fn add_positive_in_place(&mut self, ty: Type<'db>) {
let updated = std::mem::replace(self, Self::empty(self.db)).add_positive(ty);
*self = updated;
}
pub(crate) fn add_positive_impl(
mut self,
ty: Type<'db>,

View File

@@ -1566,6 +1566,9 @@ impl<'db> ClassLiteral<'db> {
})
}
#[salsa::tracked(cycle_initial=generic_context_cycle_initial,
heap_size=ruff_memory_usage::heap_size,
)]
pub(crate) fn legacy_generic_context(self, db: &'db dyn Db) -> Option<GenericContext<'db>> {
self.explicit_bases(db).iter().find_map(|base| match base {
Type::KnownInstance(

File diff suppressed because it is too large Load Diff

View File

@@ -7926,7 +7926,7 @@ impl<'db, 'ast> TypeInferenceBuilder<'db, 'ast> {
let Some(first_comprehension) = comprehensions_iter.next() else {
unreachable!("Comprehension must contain at least one generator");
};
self.infer_standalone_expression(&first_comprehension.iter, TypeContext::default());
self.infer_maybe_standalone_expression(&first_comprehension.iter, TypeContext::default());
if first_comprehension.is_async {
EvaluationMode::Async
@@ -7946,9 +7946,12 @@ impl<'db, 'ast> TypeInferenceBuilder<'db, 'ast> {
let evaluation_mode = self.infer_first_comprehension_iter(generators);
let scope_id = self
let Some(scope_id) = self
.index
.node_scope(NodeWithScopeRef::GeneratorExpression(generator));
.try_node_scope(NodeWithScopeRef::GeneratorExpression(generator))
else {
return Type::unknown();
};
let scope = scope_id.to_scope_id(self.db(), self.file());
let inference = infer_scope_types(self.db(), scope);
let yield_type = inference.expression_type(elt.as_ref());
@@ -8021,9 +8024,12 @@ impl<'db, 'ast> TypeInferenceBuilder<'db, 'ast> {
self.infer_first_comprehension_iter(generators);
let scope_id = self
let Some(scope_id) = self
.index
.node_scope(NodeWithScopeRef::ListComprehension(listcomp));
.try_node_scope(NodeWithScopeRef::ListComprehension(listcomp))
else {
return Type::unknown();
};
let scope = scope_id.to_scope_id(self.db(), self.file());
let inference = infer_scope_types(self.db(), scope);
let element_type = inference.expression_type(elt.as_ref());
@@ -8046,9 +8052,12 @@ impl<'db, 'ast> TypeInferenceBuilder<'db, 'ast> {
self.infer_first_comprehension_iter(generators);
let scope_id = self
let Some(scope_id) = self
.index
.node_scope(NodeWithScopeRef::DictComprehension(dictcomp));
.try_node_scope(NodeWithScopeRef::DictComprehension(dictcomp))
else {
return Type::unknown();
};
let scope = scope_id.to_scope_id(self.db(), self.file());
let inference = infer_scope_types(self.db(), scope);
let key_type = inference.expression_type(key.as_ref());
@@ -8071,9 +8080,12 @@ impl<'db, 'ast> TypeInferenceBuilder<'db, 'ast> {
self.infer_first_comprehension_iter(generators);
let scope_id = self
let Some(scope_id) = self
.index
.node_scope(NodeWithScopeRef::SetComprehension(setcomp));
.try_node_scope(NodeWithScopeRef::SetComprehension(setcomp))
else {
return Type::unknown();
};
let scope = scope_id.to_scope_id(self.db(), self.file());
let inference = infer_scope_types(self.db(), scope);
let element_type = inference.expression_type(elt.as_ref());
@@ -8165,14 +8177,14 @@ impl<'db, 'ast> TypeInferenceBuilder<'db, 'ast> {
builder.module(),
)
} else {
builder.infer_standalone_expression(iter, tcx)
builder.infer_maybe_standalone_expression(iter, tcx)
}
.iterate(builder.db())
.homogeneous_element_type(builder.db())
});
for expr in ifs {
self.infer_standalone_expression(expr, TypeContext::default());
self.infer_maybe_standalone_expression(expr, TypeContext::default());
}
}
@@ -8278,7 +8290,7 @@ impl<'db, 'ast> TypeInferenceBuilder<'db, 'ast> {
orelse,
} = if_expression;
let test_ty = self.infer_standalone_expression(test, TypeContext::default());
let test_ty = self.infer_maybe_standalone_expression(test, TypeContext::default());
let body_ty = self.infer_expression(body, tcx);
let orelse_ty = self.infer_expression(orelse, tcx);
@@ -10341,7 +10353,7 @@ impl<'db, 'ast> TypeInferenceBuilder<'db, 'ast> {
let ty = if index == values.len() - 1 {
builder.infer_expression(value, TypeContext::default())
} else {
builder.infer_standalone_expression(value, TypeContext::default())
builder.infer_maybe_standalone_expression(value, TypeContext::default())
};
(ty, value.range())