Compare commits

..

10 Commits

353 changed files with 5682 additions and 17908 deletions

View File

@@ -8,7 +8,3 @@ benchmark = "bench -p ruff_benchmark --bench linter --bench formatter --"
# See: https://github.com/astral-sh/ruff/issues/11503
[target.'cfg(all(target_env="msvc", target_os = "windows"))']
rustflags = ["-C", "target-feature=+crt-static"]
[target.'wasm32-unknown-unknown']
# See https://docs.rs/getrandom/latest/getrandom/#webassembly-support
rustflags = ["--cfg", 'getrandom_backend="wasm_js"']

View File

@@ -430,7 +430,7 @@ jobs:
name: ruff
path: target/debug
- uses: dawidd6/action-download-artifact@v8
- uses: dawidd6/action-download-artifact@v7
name: Download baseline Ruff binary
with:
name: ruff

View File

@@ -16,7 +16,7 @@ jobs:
permissions:
pull-requests: write
steps:
- uses: dawidd6/action-download-artifact@v8
- uses: dawidd6/action-download-artifact@v7
name: Download pull request number
with:
name: pr-number
@@ -32,7 +32,7 @@ jobs:
echo "pr-number=$(<pr-number)" >> "$GITHUB_OUTPUT"
fi
- uses: dawidd6/action-download-artifact@v8
- uses: dawidd6/action-download-artifact@v7
name: "Download ecosystem results"
id: download-ecosystem-result
if: steps.pr-number.outputs.pr-number

2
.gitignore vendored
View File

@@ -30,7 +30,7 @@ tracing-flamechart.svg
tracing-flamegraph.svg
# insta
*.rs.pending-snap
.rs.pending-snap
###

View File

@@ -5,7 +5,6 @@ exclude: |
.github/workflows/release.yml|
crates/red_knot_vendored/vendor/.*|
crates/red_knot_project/resources/.*|
crates/ruff_benchmark/resources/.*|
crates/ruff_linter/resources/.*|
crates/ruff_linter/src/rules/.*/snapshots/.*|
crates/ruff_notebook/resources/.*|
@@ -24,7 +23,7 @@ repos:
- id: validate-pyproject
- repo: https://github.com/executablebooks/mdformat
rev: 0.7.22
rev: 0.7.21
hooks:
- id: mdformat
additional_dependencies:
@@ -37,7 +36,7 @@ repos:
)$
- repo: https://github.com/igorshubovych/markdownlint-cli
rev: v0.44.0
rev: v0.43.0
hooks:
- id: markdownlint-fix
exclude: |
@@ -57,10 +56,10 @@ repos:
.*?invalid(_.+)*_syntax\.md
)$
additional_dependencies:
- black==25.1.0
- black==24.10.0
- repo: https://github.com/crate-ci/typos
rev: v1.29.5
rev: v1.29.4
hooks:
- id: typos
@@ -74,7 +73,7 @@ repos:
pass_filenames: false # This makes it a lot faster
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.9.4
rev: v0.9.2
hooks:
- id: ruff-format
- id: ruff
@@ -92,12 +91,12 @@ repos:
# zizmor detects security vulnerabilities in GitHub Actions workflows.
# Additional configuration for the tool is found in `.github/zizmor.yml`
- repo: https://github.com/woodruffw/zizmor-pre-commit
rev: v1.3.0
rev: v1.2.2
hooks:
- id: zizmor
- repo: https://github.com/python-jsonschema/check-jsonschema
rev: 0.31.1
rev: 0.31.0
hooks:
- id: check-github-workflows

View File

@@ -1,43 +1,5 @@
# Changelog
## 0.9.4
### Preview features
- \[`airflow`\] Extend airflow context parameter check for `BaseOperator.execute` (`AIR302`) ([#15713](https://github.com/astral-sh/ruff/pull/15713))
- \[`airflow`\] Update `AIR302` to check for deprecated context keys ([#15144](https://github.com/astral-sh/ruff/pull/15144))
- \[`flake8-bandit`\] Permit suspicious imports within stub files (`S4`) ([#15822](https://github.com/astral-sh/ruff/pull/15822))
- \[`pylint`\] Do not trigger `PLR6201` on empty collections ([#15732](https://github.com/astral-sh/ruff/pull/15732))
- \[`refurb`\] Do not emit diagnostic when loop variables are used outside loop body (`FURB122`) ([#15757](https://github.com/astral-sh/ruff/pull/15757))
- \[`ruff`\] Add support for more `re` patterns (`RUF055`) ([#15764](https://github.com/astral-sh/ruff/pull/15764))
- \[`ruff`\] Check for shadowed `map` before suggesting fix (`RUF058`) ([#15790](https://github.com/astral-sh/ruff/pull/15790))
- \[`ruff`\] Do not emit diagnostic when all arguments to `zip()` are variadic (`RUF058`) ([#15744](https://github.com/astral-sh/ruff/pull/15744))
- \[`ruff`\] Parenthesize fix when argument spans multiple lines for `unnecessary-round` (`RUF057`) ([#15703](https://github.com/astral-sh/ruff/pull/15703))
### Rule changes
- Preserve quote style in generated code ([#15726](https://github.com/astral-sh/ruff/pull/15726), [#15778](https://github.com/astral-sh/ruff/pull/15778), [#15794](https://github.com/astral-sh/ruff/pull/15794))
- \[`flake8-bugbear`\] Exempt `NewType` calls where the original type is immutable (`B008`) ([#15765](https://github.com/astral-sh/ruff/pull/15765))
- \[`pylint`\] Honor banned top-level imports by `TID253` in `PLC0415`. ([#15628](https://github.com/astral-sh/ruff/pull/15628))
- \[`pyupgrade`\] Ignore `is_typeddict` and `TypedDict` for `deprecated-import` (`UP035`) ([#15800](https://github.com/astral-sh/ruff/pull/15800))
### CLI
- Fix formatter warning message for `flake8-quotes` option ([#15788](https://github.com/astral-sh/ruff/pull/15788))
- Implement tab autocomplete for `ruff config` ([#15603](https://github.com/astral-sh/ruff/pull/15603))
### Bug fixes
- \[`flake8-comprehensions`\] Do not emit `unnecessary-map` diagnostic when lambda has different arity (`C417`) ([#15802](https://github.com/astral-sh/ruff/pull/15802))
- \[`flake8-comprehensions`\] Parenthesize `sorted` when needed for `unnecessary-call-around-sorted` (`C413`) ([#15825](https://github.com/astral-sh/ruff/pull/15825))
- \[`pyupgrade`\] Handle end-of-line comments for `quoted-annotation` (`UP037`) ([#15824](https://github.com/astral-sh/ruff/pull/15824))
### Documentation
- Add missing config docstrings ([#15803](https://github.com/astral-sh/ruff/pull/15803))
- Add references to `trio.run_process` and `anyio.run_process` ([#15761](https://github.com/astral-sh/ruff/pull/15761))
- Use `uv init --lib` in tutorial ([#15718](https://github.com/astral-sh/ruff/pull/15718))
## 0.9.3
### Preview features

907
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -74,13 +74,11 @@ env_logger = { version = "0.11.0" }
etcetera = { version = "0.8.0" }
fern = { version = "0.7.0" }
filetime = { version = "0.2.23" }
getrandom = { version = "0.3.1" }
glob = { version = "0.3.1" }
globset = { version = "0.4.14" }
globwalk = { version = "0.9.1" }
hashbrown = { version = "0.15.0", default-features = false, features = [
"raw-entry",
"equivalent",
"inline-more",
] }
ignore = { version = "0.4.22" }
@@ -118,7 +116,7 @@ proc-macro2 = { version = "1.0.79" }
pyproject-toml = { version = "0.13.4" }
quick-junit = { version = "0.5.0" }
quote = { version = "1.0.23" }
rand = { version = "0.9.0" }
rand = { version = "0.8.5" }
rayon = { version = "1.10.0" }
regex = { version = "1.10.2" }
rustc-hash = { version = "2.0.0" }
@@ -136,12 +134,7 @@ serde_with = { version = "3.6.0", default-features = false, features = [
shellexpand = { version = "3.0.0" }
similar = { version = "2.4.0", features = ["inline"] }
smallvec = { version = "1.13.2" }
snapbox = { version = "0.6.0", features = [
"diff",
"term-svg",
"cmd",
"examples",
] }
snapbox = { version = "0.6.0", features = ["diff", "term-svg", "cmd", "examples"] }
static_assertions = "1.1.0"
strum = { version = "0.26.0", features = ["strum_macros"] }
strum_macros = { version = "0.26.0" }
@@ -166,6 +159,7 @@ unicode-ident = { version = "1.0.12" }
unicode-width = { version = "0.2.0" }
unicode_names2 = { version = "1.2.2" }
unicode-normalization = { version = "0.1.23" }
ureq = { version = "2.9.6" }
url = { version = "2.5.0" }
uuid = { version = "1.6.1", features = [
"v4",
@@ -179,10 +173,6 @@ wasm-bindgen-test = { version = "0.3.42" }
wild = { version = "2" }
zip = { version = "0.6.6", default-features = false }
[workspace.metadata.cargo-shear]
ignored = ["getrandom"]
[workspace.lints.rust]
unsafe_code = "warn"
unreachable_pub = "warn"
@@ -315,11 +305,7 @@ local-artifacts-jobs = ["./build-binaries", "./build-docker"]
# Publish jobs to run in CI
publish-jobs = ["./publish-pypi", "./publish-wasm"]
# Post-announce jobs to run in CI
post-announce-jobs = [
"./notify-dependents",
"./publish-docs",
"./publish-playground",
]
post-announce-jobs = ["./notify-dependents", "./publish-docs", "./publish-playground"]
# Custom permissions for GitHub Jobs
github-custom-job-permissions = { "build-docker" = { packages = "write", contents = "read" }, "publish-wasm" = { contents = "read", id-token = "write", packages = "write" } }
# Whether to install an updater program

View File

@@ -149,8 +149,8 @@ curl -LsSf https://astral.sh/ruff/install.sh | sh
powershell -c "irm https://astral.sh/ruff/install.ps1 | iex"
# For a specific version.
curl -LsSf https://astral.sh/ruff/0.9.4/install.sh | sh
powershell -c "irm https://astral.sh/ruff/0.9.4/install.ps1 | iex"
curl -LsSf https://astral.sh/ruff/0.9.3/install.sh | sh
powershell -c "irm https://astral.sh/ruff/0.9.3/install.ps1 | iex"
```
You can also install Ruff via [Homebrew](https://formulae.brew.sh/formula/ruff), [Conda](https://anaconda.org/conda-forge/ruff),
@@ -183,7 +183,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com/) hook via [`ruff
```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.9.4
rev: v0.9.3
hooks:
# Run the linter.
- id: ruff

View File

@@ -1,104 +0,0 @@
use std::{
fs,
path::{Path, PathBuf},
process::Command,
};
fn main() {
// The workspace root directory is not available without walking up the tree
// https://github.com/rust-lang/cargo/issues/3946
let workspace_root = Path::new(&std::env::var("CARGO_MANIFEST_DIR").unwrap())
.join("..")
.join("..");
commit_info(&workspace_root);
#[allow(clippy::disallowed_methods)]
let target = std::env::var("TARGET").unwrap();
println!("cargo::rustc-env=RUST_HOST_TARGET={target}");
}
fn commit_info(workspace_root: &Path) {
// If not in a git repository, do not attempt to retrieve commit information
let git_dir = workspace_root.join(".git");
if !git_dir.exists() {
return;
}
if let Some(git_head_path) = git_head(&git_dir) {
println!("cargo:rerun-if-changed={}", git_head_path.display());
let git_head_contents = fs::read_to_string(git_head_path);
if let Ok(git_head_contents) = git_head_contents {
// The contents are either a commit or a reference in the following formats
// - "<commit>" when the head is detached
// - "ref <ref>" when working on a branch
// If a commit, checking if the HEAD file has changed is sufficient
// If a ref, we need to add the head file for that ref to rebuild on commit
let mut git_ref_parts = git_head_contents.split_whitespace();
git_ref_parts.next();
if let Some(git_ref) = git_ref_parts.next() {
let git_ref_path = git_dir.join(git_ref);
println!("cargo:rerun-if-changed={}", git_ref_path.display());
}
}
}
let output = match Command::new("git")
.arg("log")
.arg("-1")
.arg("--date=short")
.arg("--abbrev=9")
.arg("--format=%H %h %cd %(describe)")
.output()
{
Ok(output) if output.status.success() => output,
_ => return,
};
let stdout = String::from_utf8(output.stdout).unwrap();
let mut parts = stdout.split_whitespace();
let mut next = || parts.next().unwrap();
let _commit_hash = next();
println!("cargo::rustc-env=RED_KNOT_COMMIT_SHORT_HASH={}", next());
println!("cargo::rustc-env=RED_KNOT_COMMIT_DATE={}", next());
// Describe can fail for some commits
// https://git-scm.com/docs/pretty-formats#Documentation/pretty-formats.txt-emdescribeoptionsem
if let Some(describe) = parts.next() {
let mut describe_parts = describe.split('-');
let _last_tag = describe_parts.next().unwrap();
// If this is the tagged commit, this component will be missing
println!(
"cargo::rustc-env=RED_KNOT_LAST_TAG_DISTANCE={}",
describe_parts.next().unwrap_or("0")
);
}
}
fn git_head(git_dir: &Path) -> Option<PathBuf> {
// The typical case is a standard git repository.
let git_head_path = git_dir.join("HEAD");
if git_head_path.exists() {
return Some(git_head_path);
}
if !git_dir.is_file() {
return None;
}
// If `.git/HEAD` doesn't exist and `.git` is actually a file,
// then let's try to attempt to read it as a worktree. If it's
// a worktree, then its contents will look like this, e.g.:
//
// gitdir: /home/andrew/astral/uv/main/.git/worktrees/pr2
//
// And the HEAD file we want to watch will be at:
//
// /home/andrew/astral/uv/main/.git/worktrees/pr2/HEAD
let contents = fs::read_to_string(git_dir).ok()?;
let (label, worktree_path) = contents.split_once(':')?;
if label != "gitdir" {
return None;
}
let worktree_path = worktree_path.trim();
Some(PathBuf::from(worktree_path))
}

View File

@@ -1,201 +0,0 @@
use crate::logging::Verbosity;
use crate::python_version::PythonVersion;
use clap::{ArgAction, ArgMatches, Error, Parser};
use red_knot_project::metadata::options::{EnvironmentOptions, Options};
use red_knot_project::metadata::value::{RangedValue, RelativePathBuf};
use red_knot_python_semantic::lint;
use ruff_db::system::SystemPathBuf;
#[derive(Debug, Parser)]
#[command(
author,
name = "red-knot",
about = "An extremely fast Python type checker."
)]
#[command(version)]
pub(crate) struct Args {
#[command(subcommand)]
pub(crate) command: Command,
}
#[derive(Debug, clap::Subcommand)]
pub(crate) enum Command {
/// Check a project for type errors.
Check(CheckCommand),
/// Start the language server
Server,
/// Display Red Knot's version
Version,
}
#[derive(Debug, Parser)]
pub(crate) struct CheckCommand {
/// Run the command within the given project directory.
///
/// All `pyproject.toml` files will be discovered by walking up the directory tree from the given project directory,
/// as will the project's virtual environment (`.venv`) unless the `venv-path` option is set.
///
/// Other command-line arguments (such as relative paths) will be resolved relative to the current working directory.
#[arg(long, value_name = "PROJECT")]
pub(crate) project: Option<SystemPathBuf>,
/// Path to the virtual environment the project uses.
///
/// If provided, red-knot will use the `site-packages` directory of this virtual environment
/// to resolve type information for the project's third-party dependencies.
#[arg(long, value_name = "PATH")]
pub(crate) venv_path: Option<SystemPathBuf>,
/// Custom directory to use for stdlib typeshed stubs.
#[arg(long, value_name = "PATH", alias = "custom-typeshed-dir")]
pub(crate) typeshed: Option<SystemPathBuf>,
/// Additional path to use as a module-resolution source (can be passed multiple times).
#[arg(long, value_name = "PATH")]
pub(crate) extra_search_path: Option<Vec<SystemPathBuf>>,
/// Python version to assume when resolving types.
#[arg(long, value_name = "VERSION", alias = "target-version")]
pub(crate) python_version: Option<PythonVersion>,
#[clap(flatten)]
pub(crate) verbosity: Verbosity,
#[clap(flatten)]
pub(crate) rules: RulesArg,
/// Use exit code 1 if there are any warning-level diagnostics.
#[arg(long, conflicts_with = "exit_zero")]
pub(crate) error_on_warning: bool,
/// Always use exit code 0, even when there are error-level diagnostics.
#[arg(long)]
pub(crate) exit_zero: bool,
/// Run in watch mode by re-running whenever files change.
#[arg(long, short = 'W')]
pub(crate) watch: bool,
}
impl CheckCommand {
pub(crate) fn into_options(self) -> Options {
let rules = if self.rules.is_empty() {
None
} else {
Some(
self.rules
.into_iter()
.map(|(rule, level)| (RangedValue::cli(rule), RangedValue::cli(level)))
.collect(),
)
};
Options {
environment: Some(EnvironmentOptions {
python_version: self
.python_version
.map(|version| RangedValue::cli(version.into())),
venv_path: self.venv_path.map(RelativePathBuf::cli),
typeshed: self.typeshed.map(RelativePathBuf::cli),
extra_paths: self.extra_search_path.map(|extra_search_paths| {
extra_search_paths
.into_iter()
.map(RelativePathBuf::cli)
.collect()
}),
..EnvironmentOptions::default()
}),
rules,
..Default::default()
}
}
}
/// A list of rules to enable or disable with a given severity.
///
/// This type is used to parse the `--error`, `--warn`, and `--ignore` arguments
/// while preserving the order in which they were specified (arguments last override previous severities).
#[derive(Debug)]
pub(crate) struct RulesArg(Vec<(String, lint::Level)>);
impl RulesArg {
fn is_empty(&self) -> bool {
self.0.is_empty()
}
fn into_iter(self) -> impl Iterator<Item = (String, lint::Level)> {
self.0.into_iter()
}
}
impl clap::FromArgMatches for RulesArg {
fn from_arg_matches(matches: &ArgMatches) -> Result<Self, Error> {
let mut rules = Vec::new();
for (level, arg_id) in [
(lint::Level::Ignore, "ignore"),
(lint::Level::Warn, "warn"),
(lint::Level::Error, "error"),
] {
let indices = matches.indices_of(arg_id).into_iter().flatten();
let levels = matches.get_many::<String>(arg_id).into_iter().flatten();
rules.extend(
indices
.zip(levels)
.map(|(index, rule)| (index, rule, level)),
);
}
// Sort by their index so that values specified later override earlier ones.
rules.sort_by_key(|(index, _, _)| *index);
Ok(Self(
rules
.into_iter()
.map(|(_, rule, level)| (rule.to_owned(), level))
.collect(),
))
}
fn update_from_arg_matches(&mut self, matches: &ArgMatches) -> Result<(), Error> {
self.0 = Self::from_arg_matches(matches)?.0;
Ok(())
}
}
impl clap::Args for RulesArg {
fn augment_args(cmd: clap::Command) -> clap::Command {
const HELP_HEADING: &str = "Enabling / disabling rules";
cmd.arg(
clap::Arg::new("error")
.long("error")
.action(ArgAction::Append)
.help("Treat the given rule as having severity 'error'. Can be specified multiple times.")
.value_name("RULE")
.help_heading(HELP_HEADING),
)
.arg(
clap::Arg::new("warn")
.long("warn")
.action(ArgAction::Append)
.help("Treat the given rule as having severity 'warn'. Can be specified multiple times.")
.value_name("RULE")
.help_heading(HELP_HEADING),
)
.arg(
clap::Arg::new("ignore")
.long("ignore")
.action(ArgAction::Append)
.help("Disables the rule. Can be specified multiple times.")
.value_name("RULE")
.help_heading(HELP_HEADING),
)
}
fn augment_args_for_update(cmd: clap::Command) -> clap::Command {
Self::augment_args(cmd)
}
}

View File

@@ -1,29 +1,101 @@
use std::io::{self, BufWriter, Write};
use std::process::{ExitCode, Termination};
use anyhow::Result;
use std::sync::Mutex;
use crate::args::{Args, CheckCommand, Command};
use crate::logging::setup_tracing;
use anyhow::{anyhow, Context};
use clap::Parser;
use colored::Colorize;
use crossbeam::channel as crossbeam_channel;
use red_knot_project::metadata::options::Options;
use python_version::PythonVersion;
use red_knot_project::metadata::options::{EnvironmentOptions, Options};
use red_knot_project::metadata::value::{RangedValue, RelativePathBuf};
use red_knot_project::watch;
use red_knot_project::watch::ProjectWatcher;
use red_knot_project::{ProjectDatabase, ProjectMetadata};
use red_knot_server::run_server;
use ruff_db::diagnostic::{Diagnostic, Severity};
use ruff_db::diagnostic::Diagnostic;
use ruff_db::system::{OsSystem, System, SystemPath, SystemPathBuf};
use salsa::plumbing::ZalsaDatabase;
mod args;
use crate::logging::{setup_tracing, Verbosity};
mod logging;
mod python_version;
mod verbosity;
mod version;
#[derive(Debug, Parser)]
#[command(
author,
name = "red-knot",
about = "An extremely fast Python type checker."
)]
#[command(version)]
struct Args {
#[command(subcommand)]
pub(crate) command: Option<Command>,
/// Run the command within the given project directory.
///
/// All `pyproject.toml` files will be discovered by walking up the directory tree from the given project directory,
/// as will the project's virtual environment (`.venv`) unless the `venv-path` option is set.
///
/// Other command-line arguments (such as relative paths) will be resolved relative to the current working directory.
#[arg(long, value_name = "PROJECT")]
project: Option<SystemPathBuf>,
/// Path to the virtual environment the project uses.
///
/// If provided, red-knot will use the `site-packages` directory of this virtual environment
/// to resolve type information for the project's third-party dependencies.
#[arg(long, value_name = "PATH")]
venv_path: Option<SystemPathBuf>,
/// Custom directory to use for stdlib typeshed stubs.
#[arg(long, value_name = "PATH", alias = "custom-typeshed-dir")]
typeshed: Option<SystemPathBuf>,
/// Additional path to use as a module-resolution source (can be passed multiple times).
#[arg(long, value_name = "PATH")]
extra_search_path: Option<Vec<SystemPathBuf>>,
/// Python version to assume when resolving types.
#[arg(long, value_name = "VERSION", alias = "target-version")]
python_version: Option<PythonVersion>,
#[clap(flatten)]
verbosity: Verbosity,
/// Run in watch mode by re-running whenever files change.
#[arg(long, short = 'W')]
watch: bool,
}
impl Args {
fn to_options(&self) -> Options {
Options {
environment: Some(EnvironmentOptions {
python_version: self
.python_version
.map(|version| RangedValue::cli(version.into())),
venv_path: self.venv_path.as_ref().map(RelativePathBuf::cli),
typeshed: self.typeshed.as_ref().map(RelativePathBuf::cli),
extra_paths: self.extra_search_path.as_ref().map(|extra_search_paths| {
extra_search_paths
.iter()
.map(RelativePathBuf::cli)
.collect()
}),
..EnvironmentOptions::default()
}),
..Default::default()
}
}
}
#[derive(Debug, clap::Subcommand)]
pub enum Command {
/// Start the language server
Server,
}
#[allow(clippy::print_stdout, clippy::unnecessary_wraps, clippy::print_stderr)]
pub fn main() -> ExitStatus {
@@ -50,21 +122,10 @@ pub fn main() -> ExitStatus {
fn run() -> anyhow::Result<ExitStatus> {
let args = Args::parse_from(std::env::args());
match args.command {
Command::Server => run_server().map(|()| ExitStatus::Success),
Command::Check(check_args) => run_check(check_args),
Command::Version => version().map(|()| ExitStatus::Success),
if matches!(args.command, Some(Command::Server)) {
return run_server().map(|()| ExitStatus::Success);
}
}
pub(crate) fn version() -> Result<()> {
let mut stdout = BufWriter::new(io::stdout().lock());
let version_info = crate::version::version();
writeln!(stdout, "red knot {}", &version_info)?;
Ok(())
}
fn run_check(args: CheckCommand) -> anyhow::Result<ExitStatus> {
let verbosity = args.verbosity.level();
countme::enable(verbosity.is_trace());
let _guard = setup_tracing(verbosity)?;
@@ -95,21 +156,13 @@ fn run_check(args: CheckCommand) -> anyhow::Result<ExitStatus> {
.unwrap_or_else(|| cli_base_path.clone());
let system = OsSystem::new(cwd);
let watch = args.watch;
let exit_zero = args.exit_zero;
let min_error_severity = if args.error_on_warning {
Severity::Warning
} else {
Severity::Error
};
let cli_options = args.into_options();
let cli_options = args.to_options();
let mut workspace_metadata = ProjectMetadata::discover(system.current_directory(), &system)?;
workspace_metadata.apply_cli_options(cli_options.clone());
let mut db = ProjectDatabase::new(workspace_metadata, system)?;
let (main_loop, main_loop_cancellation_token) = MainLoop::new(cli_options, min_error_severity);
let (main_loop, main_loop_cancellation_token) = MainLoop::new(cli_options);
// Listen to Ctrl+C and abort the watch mode.
let main_loop_cancellation_token = Mutex::new(Some(main_loop_cancellation_token));
@@ -121,7 +174,7 @@ fn run_check(args: CheckCommand) -> anyhow::Result<ExitStatus> {
}
})?;
let exit_status = if watch {
let exit_status = if args.watch {
main_loop.watch(&mut db)?
} else {
main_loop.run(&mut db)
@@ -131,11 +184,7 @@ fn run_check(args: CheckCommand) -> anyhow::Result<ExitStatus> {
std::mem::forget(db);
if exit_zero {
Ok(ExitStatus::Success)
} else {
Ok(exit_status)
}
Ok(exit_status)
}
#[derive(Copy, Clone)]
@@ -167,18 +216,10 @@ struct MainLoop {
watcher: Option<ProjectWatcher>,
cli_options: Options,
/// The minimum severity to consider an error when deciding the exit status.
///
/// TODO(micha): Get from the terminal settings.
min_error_severity: Severity,
}
impl MainLoop {
fn new(
cli_options: Options,
min_error_severity: Severity,
) -> (Self, MainLoopCancellationToken) {
fn new(cli_options: Options) -> (Self, MainLoopCancellationToken) {
let (sender, receiver) = crossbeam_channel::bounded(10);
(
@@ -187,7 +228,6 @@ impl MainLoop {
receiver,
watcher: None,
cli_options,
min_error_severity,
},
MainLoopCancellationToken { sender },
)
@@ -245,10 +285,7 @@ impl MainLoop {
result,
revision: check_revision,
} => {
let failed = result
.iter()
.any(|diagnostic| diagnostic.severity() >= self.min_error_severity);
let has_diagnostics = !result.is_empty();
if check_revision == revision {
#[allow(clippy::print_stdout)]
for diagnostic in result {
@@ -261,7 +298,7 @@ impl MainLoop {
}
if self.watcher.is_none() {
return if failed {
return if has_diagnostics {
ExitStatus::Failure
} else {
ExitStatus::Success

View File

@@ -1,105 +0,0 @@
//! Code for representing Red Knot's release version number.
use std::fmt;
/// Information about the git repository where Red Knot was built from.
pub(crate) struct CommitInfo {
short_commit_hash: String,
commit_date: String,
commits_since_last_tag: u32,
}
/// Red Knot's version.
pub(crate) struct VersionInfo {
/// Red Knot's version, such as "0.5.1"
version: String,
/// Information about the git commit we may have been built from.
///
/// `None` if not built from a git repo or if retrieval failed.
commit_info: Option<CommitInfo>,
}
impl fmt::Display for VersionInfo {
/// Formatted version information: `<version>[+<commits>] (<commit> <date>)`
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.version)?;
if let Some(ref ci) = self.commit_info {
if ci.commits_since_last_tag > 0 {
write!(f, "+{}", ci.commits_since_last_tag)?;
}
write!(f, " ({} {})", ci.short_commit_hash, ci.commit_date)?;
}
Ok(())
}
}
/// Returns information about Red Knot's version.
pub(crate) fn version() -> VersionInfo {
// Environment variables are only read at compile-time
macro_rules! option_env_str {
($name:expr) => {
option_env!($name).map(|s| s.to_string())
};
}
// This version is pulled from Cargo.toml and set by Cargo
let version = option_env_str!("CARGO_PKG_VERSION").unwrap();
// Commit info is pulled from git and set by `build.rs`
let commit_info =
option_env_str!("RED_KNOT_COMMIT_SHORT_HASH").map(|short_commit_hash| CommitInfo {
short_commit_hash,
commit_date: option_env_str!("RED_KNOT_COMMIT_DATE").unwrap(),
commits_since_last_tag: option_env_str!("RED_KNOT_LAST_TAG_DISTANCE")
.as_deref()
.map_or(0, |value| value.parse::<u32>().unwrap_or(0)),
});
VersionInfo {
version,
commit_info,
}
}
#[cfg(test)]
mod tests {
use insta::assert_snapshot;
use super::{CommitInfo, VersionInfo};
#[test]
fn version_formatting() {
let version = VersionInfo {
version: "0.0.0".to_string(),
commit_info: None,
};
assert_snapshot!(version, @"0.0.0");
}
#[test]
fn version_formatting_with_commit_info() {
let version = VersionInfo {
version: "0.0.0".to_string(),
commit_info: Some(CommitInfo {
short_commit_hash: "53b0f5d92".to_string(),
commit_date: "2023-10-19".to_string(),
commits_since_last_tag: 0,
}),
};
assert_snapshot!(version, @"0.0.0 (53b0f5d92 2023-10-19)");
}
#[test]
fn version_formatting_with_commits_since_last_tag() {
let version = VersionInfo {
version: "0.0.0".to_string(),
commit_info: Some(CommitInfo {
short_commit_hash: "53b0f5d92".to_string(),
commit_date: "2023-10-19".to_string(),
commits_since_last_tag: 24,
}),
};
assert_snapshot!(version, @"0.0.0+24 (53b0f5d92 2023-10-19)");
}
}

View File

@@ -1,5 +1,5 @@
use anyhow::Context;
use insta::internals::SettingsBindDropGuard;
use insta::Settings;
use insta_cmd::{assert_cmd_snapshot, get_cargo_bin};
use std::path::{Path, PathBuf};
use std::process::Command;
@@ -28,29 +28,24 @@ fn config_override() -> anyhow::Result<()> {
),
])?;
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error: lint:unresolved-attribute
--> <temp_dir>/test.py:5:7
|
4 | # Access `sys.last_exc` that was only added in Python 3.12
5 | print(sys.last_exc)
| ^^^^^^^^^^^^ Type `<module 'sys'>` has no attribute `last_exc`
|
case.insta_settings().bind(|| {
assert_cmd_snapshot!(case.command(), @r"
success: false
exit_code: 1
----- stdout -----
error[lint:unresolved-attribute] <temp_dir>/test.py:5:7 Type `<module 'sys'>` has no attribute `last_exc`
----- stderr -----
");
----- stderr -----
"###);
assert_cmd_snapshot!(case.command().arg("--python-version").arg("3.12"), @r"
success: true
exit_code: 0
----- stdout -----
assert_cmd_snapshot!(case.command().arg("--python-version").arg("3.12"), @r"
success: true
exit_code: 0
----- stdout -----
----- stderr -----
");
----- stderr -----
");
});
Ok(())
}
@@ -97,31 +92,25 @@ fn cli_arguments_are_relative_to_the_current_directory() -> anyhow::Result<()> {
),
])?;
// Make sure that the CLI fails when the `libs` directory is not in the search path.
assert_cmd_snapshot!(case.command().current_dir(case.project_dir().join("child")), @r###"
success: false
exit_code: 1
----- stdout -----
error: lint:unresolved-import
--> <temp_dir>/child/test.py:2:1
|
2 | from utils import add
| ^^^^^^^^^^^^^^^^^^^^^ Cannot resolve import `utils`
3 |
4 | stat = add(10, 15)
|
case.insta_settings().bind(|| {
// Make sure that the CLI fails when the `libs` directory is not in the search path.
assert_cmd_snapshot!(case.command().current_dir(case.project_dir().join("child")), @r#"
success: false
exit_code: 1
----- stdout -----
error[lint:unresolved-import] <temp_dir>/child/test.py:2:1 Cannot resolve import `utils`
----- stderr -----
"#);
----- stderr -----
"###);
assert_cmd_snapshot!(case.command().current_dir(case.project_dir().join("child")).arg("--extra-search-path").arg("../libs"), @r"
success: true
exit_code: 0
----- stdout -----
assert_cmd_snapshot!(case.command().current_dir(case.project_dir().join("child")).arg("--extra-search-path").arg("../libs"), @r"
success: true
exit_code: 0
----- stdout -----
----- stderr -----
");
----- stderr -----
");
});
Ok(())
}
@@ -167,20 +156,22 @@ fn paths_in_configuration_files_are_relative_to_the_project_root() -> anyhow::Re
),
])?;
assert_cmd_snapshot!(case.command().current_dir(case.project_dir().join("child")), @r"
success: true
exit_code: 0
----- stdout -----
case.insta_settings().bind(|| {
assert_cmd_snapshot!(case.command().current_dir(case.project_dir().join("child")), @r"
success: true
exit_code: 0
----- stdout -----
----- stderr -----
");
----- stderr -----
");
});
Ok(())
}
/// The rule severity can be changed in the configuration file
#[test]
fn configuration_rule_severity() -> anyhow::Result<()> {
fn rule_severity() -> anyhow::Result<()> {
let case = TestCase::with_file(
"test.py",
r#"
@@ -193,238 +184,41 @@ fn configuration_rule_severity() -> anyhow::Result<()> {
"#,
)?;
// Assert that there's a possibly unresolved reference diagnostic
// and that division-by-zero has a severity of error by default.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error: lint:division-by-zero
--> <temp_dir>/test.py:2:5
|
2 | y = 4 / 0
| ^^^^^ Cannot divide object of type `Literal[4]` by zero
3 |
4 | for a in range(0, y):
|
case.insta_settings().bind(|| {
// Assert that there's a possibly unresolved reference diagnostic
// and that division-by-zero has a severity of error by default.
assert_cmd_snapshot!(case.command(), @r"
success: false
exit_code: 1
----- stdout -----
error[lint:division-by-zero] <temp_dir>/test.py:2:5 Cannot divide object of type `Literal[4]` by zero
warning[lint:possibly-unresolved-reference] <temp_dir>/test.py:7:7 Name `x` used when possibly not defined
warning: lint:possibly-unresolved-reference
--> <temp_dir>/test.py:7:7
|
5 | x = a
6 |
7 | print(x) # possibly-unresolved-reference
| - Name `x` used when possibly not defined
|
----- stderr -----
");
case.write_file("pyproject.toml", r#"
[tool.knot.rules]
division-by-zero = "warn" # demote to warn
possibly-unresolved-reference = "ignore"
"#)?;
----- stderr -----
"###);
assert_cmd_snapshot!(case.command(), @r"
success: false
exit_code: 1
----- stdout -----
warning[lint:division-by-zero] <temp_dir>/test.py:2:5 Cannot divide object of type `Literal[4]` by zero
case.write_file(
"pyproject.toml",
r#"
[tool.knot.rules]
division-by-zero = "warn" # demote to warn
possibly-unresolved-reference = "ignore"
"#,
)?;
----- stderr -----
");
assert_cmd_snapshot!(case.command(), @r###"
success: true
exit_code: 0
----- stdout -----
warning: lint:division-by-zero
--> <temp_dir>/test.py:2:5
|
2 | y = 4 / 0
| ----- Cannot divide object of type `Literal[4]` by zero
3 |
4 | for a in range(0, y):
|
----- stderr -----
"###);
Ok(())
Ok(())
})
}
/// The rule severity can be changed using `--ignore`, `--warn`, and `--error`
/// Red Knot warns about unknown rules
#[test]
fn cli_rule_severity() -> anyhow::Result<()> {
let case = TestCase::with_file(
"test.py",
r#"
import does_not_exit
y = 4 / 0
for a in range(0, y):
x = a
print(x) # possibly-unresolved-reference
"#,
)?;
// Assert that there's a possibly unresolved reference diagnostic
// and that division-by-zero has a severity of error by default.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error: lint:unresolved-import
--> <temp_dir>/test.py:2:8
|
2 | import does_not_exit
| ^^^^^^^^^^^^^ Cannot resolve import `does_not_exit`
3 |
4 | y = 4 / 0
|
error: lint:division-by-zero
--> <temp_dir>/test.py:4:5
|
2 | import does_not_exit
3 |
4 | y = 4 / 0
| ^^^^^ Cannot divide object of type `Literal[4]` by zero
5 |
6 | for a in range(0, y):
|
warning: lint:possibly-unresolved-reference
--> <temp_dir>/test.py:9:7
|
7 | x = a
8 |
9 | print(x) # possibly-unresolved-reference
| - Name `x` used when possibly not defined
|
----- stderr -----
"###);
assert_cmd_snapshot!(
case
.command()
.arg("--ignore")
.arg("possibly-unresolved-reference")
.arg("--warn")
.arg("division-by-zero")
.arg("--warn")
.arg("unresolved-import"),
@r###"
success: true
exit_code: 0
----- stdout -----
warning: lint:unresolved-import
--> <temp_dir>/test.py:2:8
|
2 | import does_not_exit
| ------------- Cannot resolve import `does_not_exit`
3 |
4 | y = 4 / 0
|
warning: lint:division-by-zero
--> <temp_dir>/test.py:4:5
|
2 | import does_not_exit
3 |
4 | y = 4 / 0
| ----- Cannot divide object of type `Literal[4]` by zero
5 |
6 | for a in range(0, y):
|
----- stderr -----
"###
);
Ok(())
}
/// The rule severity can be changed using `--ignore`, `--warn`, and `--error` and
/// values specified last override previous severities.
#[test]
fn cli_rule_severity_precedence() -> anyhow::Result<()> {
let case = TestCase::with_file(
"test.py",
r#"
y = 4 / 0
for a in range(0, y):
x = a
print(x) # possibly-unresolved-reference
"#,
)?;
// Assert that there's a possibly unresolved reference diagnostic
// and that division-by-zero has a severity of error by default.
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error: lint:division-by-zero
--> <temp_dir>/test.py:2:5
|
2 | y = 4 / 0
| ^^^^^ Cannot divide object of type `Literal[4]` by zero
3 |
4 | for a in range(0, y):
|
warning: lint:possibly-unresolved-reference
--> <temp_dir>/test.py:7:7
|
5 | x = a
6 |
7 | print(x) # possibly-unresolved-reference
| - Name `x` used when possibly not defined
|
----- stderr -----
"###);
assert_cmd_snapshot!(
case
.command()
.arg("--error")
.arg("possibly-unresolved-reference")
.arg("--warn")
.arg("division-by-zero")
// Override the error severity with warning
.arg("--ignore")
.arg("possibly-unresolved-reference"),
@r###"
success: true
exit_code: 0
----- stdout -----
warning: lint:division-by-zero
--> <temp_dir>/test.py:2:5
|
2 | y = 4 / 0
| ----- Cannot divide object of type `Literal[4]` by zero
3 |
4 | for a in range(0, y):
|
----- stderr -----
"###
);
Ok(())
}
/// Red Knot warns about unknown rules specified in a configuration file
#[test]
fn configuration_unknown_rules() -> anyhow::Result<()> {
fn unknown_rules() -> anyhow::Result<()> {
let case = TestCase::with_files([
(
"pyproject.toml",
@@ -436,259 +230,22 @@ fn configuration_unknown_rules() -> anyhow::Result<()> {
("test.py", "print(10)"),
])?;
assert_cmd_snapshot!(case.command(), @r###"
success: true
exit_code: 0
----- stdout -----
warning: unknown-rule
--> <temp_dir>/pyproject.toml:3:1
|
2 | [tool.knot.rules]
3 | division-by-zer = "warn" # incorrect rule name
| --------------- Unknown lint rule `division-by-zer`
|
case.insta_settings().bind(|| {
assert_cmd_snapshot!(case.command(), @r"
success: false
exit_code: 1
----- stdout -----
warning[unknown-rule] <temp_dir>/pyproject.toml:3:1 Unknown lint rule `division-by-zer`
----- stderr -----
"###);
Ok(())
}
/// Red Knot warns about unknown rules specified in a CLI argument
#[test]
fn cli_unknown_rules() -> anyhow::Result<()> {
let case = TestCase::with_file("test.py", "print(10)")?;
assert_cmd_snapshot!(case.command().arg("--ignore").arg("division-by-zer"), @r###"
success: true
exit_code: 0
----- stdout -----
warning: unknown-rule: Unknown lint rule `division-by-zer`
----- stderr -----
"###);
Ok(())
}
#[test]
fn exit_code_only_warnings() -> anyhow::Result<()> {
let case = TestCase::with_file("test.py", r"print(x) # [unresolved-reference]")?;
assert_cmd_snapshot!(case.command(), @r###"
success: true
exit_code: 0
----- stdout -----
warning: lint:unresolved-reference
--> <temp_dir>/test.py:1:7
|
1 | print(x) # [unresolved-reference]
| - Name `x` used when not defined
|
----- stderr -----
"###);
Ok(())
}
#[test]
fn exit_code_only_info() -> anyhow::Result<()> {
let case = TestCase::with_file(
"test.py",
r#"
from typing_extensions import reveal_type
reveal_type(1)
"#,
)?;
assert_cmd_snapshot!(case.command(), @r###"
success: true
exit_code: 0
----- stdout -----
info: revealed-type
--> <temp_dir>/test.py:3:1
|
2 | from typing_extensions import reveal_type
3 | reveal_type(1)
| -------------- info: Revealed type is `Literal[1]`
|
----- stderr -----
"###);
Ok(())
}
#[test]
fn exit_code_only_info_and_error_on_warning_is_true() -> anyhow::Result<()> {
let case = TestCase::with_file(
"test.py",
r#"
from typing_extensions import reveal_type
reveal_type(1)
"#,
)?;
assert_cmd_snapshot!(case.command().arg("--error-on-warning"), @r###"
success: true
exit_code: 0
----- stdout -----
info: revealed-type
--> <temp_dir>/test.py:3:1
|
2 | from typing_extensions import reveal_type
3 | reveal_type(1)
| -------------- info: Revealed type is `Literal[1]`
|
----- stderr -----
"###);
Ok(())
}
#[test]
fn exit_code_no_errors_but_error_on_warning_is_true() -> anyhow::Result<()> {
let case = TestCase::with_file("test.py", r"print(x) # [unresolved-reference]")?;
assert_cmd_snapshot!(case.command().arg("--error-on-warning"), @r###"
success: false
exit_code: 1
----- stdout -----
warning: lint:unresolved-reference
--> <temp_dir>/test.py:1:7
|
1 | print(x) # [unresolved-reference]
| - Name `x` used when not defined
|
----- stderr -----
"###);
Ok(())
}
#[test]
fn exit_code_both_warnings_and_errors() -> anyhow::Result<()> {
let case = TestCase::with_file(
"test.py",
r#"
print(x) # [unresolved-reference]
print(4[1]) # [non-subscriptable]
"#,
)?;
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
warning: lint:unresolved-reference
--> <temp_dir>/test.py:2:7
|
2 | print(x) # [unresolved-reference]
| - Name `x` used when not defined
3 | print(4[1]) # [non-subscriptable]
|
error: lint:non-subscriptable
--> <temp_dir>/test.py:3:7
|
2 | print(x) # [unresolved-reference]
3 | print(4[1]) # [non-subscriptable]
| ^ Cannot subscript object of type `Literal[4]` with no `__getitem__` method
|
----- stderr -----
"###);
Ok(())
}
#[test]
fn exit_code_both_warnings_and_errors_and_error_on_warning_is_true() -> anyhow::Result<()> {
let case = TestCase::with_file(
"test.py",
r###"
print(x) # [unresolved-reference]
print(4[1]) # [non-subscriptable]
"###,
)?;
assert_cmd_snapshot!(case.command().arg("--error-on-warning"), @r###"
success: false
exit_code: 1
----- stdout -----
warning: lint:unresolved-reference
--> <temp_dir>/test.py:2:7
|
2 | print(x) # [unresolved-reference]
| - Name `x` used when not defined
3 | print(4[1]) # [non-subscriptable]
|
error: lint:non-subscriptable
--> <temp_dir>/test.py:3:7
|
2 | print(x) # [unresolved-reference]
3 | print(4[1]) # [non-subscriptable]
| ^ Cannot subscript object of type `Literal[4]` with no `__getitem__` method
|
----- stderr -----
"###);
Ok(())
}
#[test]
fn exit_code_exit_zero_is_true() -> anyhow::Result<()> {
let case = TestCase::with_file(
"test.py",
r#"
print(x) # [unresolved-reference]
print(4[1]) # [non-subscriptable]
"#,
)?;
assert_cmd_snapshot!(case.command().arg("--exit-zero"), @r###"
success: true
exit_code: 0
----- stdout -----
warning: lint:unresolved-reference
--> <temp_dir>/test.py:2:7
|
2 | print(x) # [unresolved-reference]
| - Name `x` used when not defined
3 | print(4[1]) # [non-subscriptable]
|
error: lint:non-subscriptable
--> <temp_dir>/test.py:3:7
|
2 | print(x) # [unresolved-reference]
3 | print(4[1]) # [non-subscriptable]
| ^ Cannot subscript object of type `Literal[4]` with no `__getitem__` method
|
----- stderr -----
"###);
----- stderr -----
");
});
Ok(())
}
struct TestCase {
_temp_dir: TempDir,
_settings_scope: SettingsBindDropGuard,
project_dir: PathBuf,
}
@@ -703,16 +260,9 @@ impl TestCase {
.canonicalize()
.context("Failed to canonicalize project path")?;
let mut settings = insta::Settings::clone_current();
settings.add_filter(&tempdir_filter(&project_dir), "<temp_dir>/");
settings.add_filter(r#"\\(\w\w|\s|\.|")"#, "/$1");
let settings_scope = settings.bind_to_scope();
Ok(Self {
project_dir,
_temp_dir: temp_dir,
_settings_scope: settings_scope,
})
}
@@ -757,9 +307,17 @@ impl TestCase {
&self.project_dir
}
// Returns the insta filters to escape paths in snapshots
fn insta_settings(&self) -> Settings {
let mut settings = insta::Settings::clone_current();
settings.add_filter(&tempdir_filter(&self.project_dir), "<temp_dir>/");
settings.add_filter(r#"\\(\w\w|\s|\.|")"#, "/$1");
settings
}
fn command(&self) -> Command {
let mut command = Command::new(get_cargo_bin("red_knot"));
command.current_dir(&self.project_dir).arg("check");
command.current_dir(&self.project_dir);
command
}
}

View File

@@ -47,7 +47,7 @@ impl TestCase {
#[track_caller]
fn panic_with_formatted_events(events: Vec<ChangeEvent>) -> Vec<ChangeEvent> {
panic!(
"Didn't observe the expected event. The following events occurred:\n{}",
"Didn't observe expected change:\n{}",
events
.into_iter()
.map(|event| format!(" - {event:?}"))

View File

@@ -149,16 +149,6 @@ impl Options {
format!("Unknown lint rule `{rule_name}`"),
Severity::Warning,
),
GetLintError::PrefixedWithCategory { suggestion, .. } => {
OptionDiagnostic::new(
DiagnosticId::UnknownRule,
format!(
"Unknown lint rule `{rule_name}`. Did you mean `{suggestion}`?"
),
Severity::Warning,
)
}
GetLintError::Removed(_) => OptionDiagnostic::new(
DiagnosticId::UnknownRule,
format!("Unknown lint rule `{rule_name}`"),
@@ -216,16 +206,6 @@ pub struct Rules {
inner: FxHashMap<RangedValue<String>, RangedValue<Level>>,
}
impl FromIterator<(RangedValue<String>, RangedValue<Level>)> for Rules {
fn from_iter<T: IntoIterator<Item = (RangedValue<String>, RangedValue<Level>)>>(
iter: T,
) -> Self {
Self {
inner: iter.into_iter().collect(),
}
}
}
#[derive(Error, Debug)]
pub enum KnotTomlError {
#[error(transparent)]

View File

@@ -2,9 +2,7 @@
## Deferred annotations in stubs always resolve
`mod.pyi`:
```pyi
```pyi path=mod.pyi
def get_foo() -> Foo: ...
class Foo: ...
```

View File

@@ -106,7 +106,7 @@ def union_example(
Literal["B"],
Literal[True],
None,
],
]
):
reveal_type(x) # revealed: Unknown | Literal[-1, "A", b"A", b"\x00", b"\x07", 0, 1, "B", "foo", "bar", True] | None
```
@@ -116,9 +116,7 @@ def union_example(
Only Literal that is defined in typing and typing_extension modules is detected as the special
Literal.
`other.pyi`:
```pyi
```pyi path=other.pyi
from typing import _SpecialForm
Literal: _SpecialForm

View File

@@ -25,9 +25,7 @@ x = "foo" # error: [invalid-assignment] "Object of type `Literal["foo"]` is not
## Tuple annotations are understood
`module.py`:
```py
```py path=module.py
from typing_extensions import Unpack
a: tuple[()] = ()
@@ -42,9 +40,7 @@ i: tuple[str | int, str | int] = (42, 42)
j: tuple[str | int] = (42,)
```
`script.py`:
```py
```py path=script.py
from module import a, b, c, d, e, f, g, h, i, j
reveal_type(a) # revealed: tuple[()]
@@ -118,7 +114,7 @@ reveal_type(x) # revealed: Foo
## Annotations in stub files are deferred
```pyi
```pyi path=main.pyi
x: Foo
class Foo: ...
@@ -129,7 +125,7 @@ reveal_type(x) # revealed: Foo
## Annotated assignments in stub files are inferred correctly
```pyi
```pyi path=main.pyi
x: int = 1
reveal_type(x) # revealed: Literal[1]
```

View File

@@ -13,90 +13,123 @@ accessed on the class itself.
```py
class C:
def __init__(self, param: int | None, flag: bool = False) -> None:
value = 1 if flag else "a"
self.inferred_from_value = value
self.inferred_from_other_attribute = self.inferred_from_value
self.inferred_from_param = param
self.declared_only: bytes
self.declared_and_bound: bool = True
def __init__(self, value2: int, flag: bool = False) -> None:
# bound but not declared
self.pure_instance_variable1 = "value set in __init__"
# bound but not declared - with type inferred from parameter
self.pure_instance_variable2 = value2
# declared but not bound
self.pure_instance_variable3: bytes
# declared and bound
self.pure_instance_variable4: bool = True
# possibly undeclared/unbound
if flag:
self.possibly_undeclared_unbound: str = "possibly set in __init__"
self.pure_instance_variable5: str = "possibly set in __init__"
c_instance = C(1)
reveal_type(c_instance.inferred_from_value) # revealed: Unknown | Literal[1, "a"]
# TODO: should be `Literal["value set in __init__"]`, or `Unknown | Literal[…]` to allow
# assignments to this unannotated attribute from other scopes.
reveal_type(c_instance.pure_instance_variable1) # revealed: @Todo(implicit instance attribute)
# TODO: Same here. This should be `Unknown | Literal[1, "a"]`
reveal_type(c_instance.inferred_from_other_attribute) # revealed: Unknown
# TODO: should be `int`
reveal_type(c_instance.pure_instance_variable2) # revealed: @Todo(implicit instance attribute)
# TODO: should be `int | None`
reveal_type(c_instance.inferred_from_param) # revealed: Unknown | int | None
# TODO: should be `bytes`
reveal_type(c_instance.pure_instance_variable3) # revealed: @Todo(implicit instance attribute)
reveal_type(c_instance.declared_only) # revealed: bytes
reveal_type(c_instance.declared_and_bound) # revealed: bool
# TODO: should be `bool`
reveal_type(c_instance.pure_instance_variable4) # revealed: @Todo(implicit instance attribute)
# TODO: should be `str`
# We probably don't want to emit a diagnostic for this being possibly undeclared/unbound.
# mypy and pyright do not show an error here.
reveal_type(c_instance.possibly_undeclared_unbound) # revealed: str
reveal_type(c_instance.pure_instance_variable5) # revealed: @Todo(implicit instance attribute)
# This assignment is fine, as we infer `Unknown | Literal[1, "a"]` for `inferred_from_value`.
c_instance.inferred_from_value = "value set on instance"
# This assignment is also fine:
c_instance.inferred_from_param = None
# TODO: If we choose to infer a precise `Literal[…]` type for the instance attribute (see
# above), this should be an error: incompatible types in assignment. If we choose to infer
# a gradual `Unknown | Literal[…]` type, this assignment is fine.
c_instance.pure_instance_variable1 = "value set on instance"
# TODO: this should be an error (incompatible types in assignment)
c_instance.inferred_from_param = "incompatible"
c_instance.pure_instance_variable2 = "incompatible"
# TODO: we already show an error here but the message might be improved?
# mypy shows no error here, but pyright raises "reportAttributeAccessIssue"
# error: [unresolved-attribute] "Type `Literal[C]` has no attribute `inferred_from_value`"
reveal_type(C.inferred_from_value) # revealed: Unknown
# error: [unresolved-attribute] "Type `Literal[C]` has no attribute `pure_instance_variable1`"
reveal_type(C.pure_instance_variable1) # revealed: Unknown
# TODO: this should be an error (pure instance variables cannot be accessed on the class)
# mypy shows no error here, but pyright raises "reportAttributeAccessIssue"
C.inferred_from_value = "overwritten on class"
C.pure_instance_variable1 = "overwritten on class"
# This assignment is fine:
c_instance.declared_and_bound = False
c_instance.pure_instance_variable4 = False
# TODO: After this assignment to the attribute within this scope, we may eventually want to narrow
# the `bool` type (see above) for this instance variable to `Literal[False]` here. This is unsound
# in general (we don't know what else happened to `c_instance` between the assignment and the use
# here), but mypy and pyright support this. In conclusion, this could be `bool` but should probably
# be `Literal[False]`.
reveal_type(c_instance.declared_and_bound) # revealed: bool
reveal_type(c_instance.pure_instance_variable4) # revealed: @Todo(implicit instance attribute)
```
#### Variable declared in class body and possibly bound in `__init__`
#### Variable declared in class body and declared/bound in `__init__`
The same rule applies even if the variable is *declared* (not bound!) in the class body: it is still
a pure instance variable.
```py
class C:
declared_and_bound: str | None
pure_instance_variable: str
def __init__(self) -> None:
self.declared_and_bound = "value set in __init__"
self.pure_instance_variable = "value set in __init__"
c_instance = C()
reveal_type(c_instance.declared_and_bound) # revealed: str | None
reveal_type(c_instance.pure_instance_variable) # revealed: str
# TODO: we currently plan to emit a diagnostic here. Note that both mypy
# and pyright show no error in this case! So we may reconsider this in
# the future, if it turns out to produce too many false positives.
reveal_type(C.declared_and_bound) # revealed: str | None
reveal_type(C.pure_instance_variable) # revealed: str
# TODO: same as above. We plan to emit a diagnostic here, even if both mypy
# and pyright allow this.
C.declared_and_bound = "overwritten on class"
C.pure_instance_variable = "overwritten on class"
# error: [invalid-assignment] "Object of type `Literal[1]` is not assignable to attribute `declared_and_bound` of type `str | None`"
c_instance.declared_and_bound = 1
# error: [invalid-assignment] "Object of type `Literal[1]` is not assignable to attribute `pure_instance_variable` of type `str`"
c_instance.pure_instance_variable = 1
```
#### Variable only defined in unrelated method
We also recognize pure instance variables if they are defined in a method that is not `__init__`.
```py
class C:
def set_instance_variable(self) -> None:
self.pure_instance_variable = "value set in method"
c_instance = C()
# Not that we would use this in static analysis, but for a more realistic example, let's actually
# call the method, so that the attribute is bound if this example is actually run.
c_instance.set_instance_variable()
# TODO: should be `Literal["value set in method"]` or `Unknown | Literal[…]` (see above).
reveal_type(c_instance.pure_instance_variable) # revealed: @Todo(implicit instance attribute)
# TODO: We already show an error here, but the message might be improved?
# error: [unresolved-attribute]
reveal_type(C.pure_instance_variable) # revealed: Unknown
# TODO: this should be an error
C.pure_instance_variable = "overwritten on class"
```
#### Variable declared in class body and not bound anywhere
@@ -106,270 +139,18 @@ instance variable and allow access to it via instances.
```py
class C:
only_declared: str
pure_instance_variable: str
c_instance = C()
reveal_type(c_instance.only_declared) # revealed: str
reveal_type(c_instance.pure_instance_variable) # revealed: str
# TODO: mypy and pyright do not show an error here, but we plan to emit a diagnostic.
# The type could be changed to 'Unknown' if we decide to emit an error?
reveal_type(C.only_declared) # revealed: str
reveal_type(C.pure_instance_variable) # revealed: str
# TODO: mypy and pyright do not show an error here, but we plan to emit one.
C.only_declared = "overwritten on class"
```
#### Mixed declarations/bindings in class body and `__init__`
```py
class C:
only_declared_in_body: str | None
declared_in_body_and_init: str | None
declared_in_body_defined_in_init: str | None
bound_in_body_declared_in_init = "a"
bound_in_body_and_init = None
def __init__(self, flag) -> None:
self.only_declared_in_init: str | None
self.declared_in_body_and_init: str | None = None
self.declared_in_body_defined_in_init = "a"
self.bound_in_body_declared_in_init: str | None
if flag:
self.bound_in_body_and_init = "a"
c_instance = C(True)
reveal_type(c_instance.only_declared_in_body) # revealed: str | None
reveal_type(c_instance.only_declared_in_init) # revealed: str | None
reveal_type(c_instance.declared_in_body_and_init) # revealed: str | None
reveal_type(c_instance.declared_in_body_defined_in_init) # revealed: str | None
reveal_type(c_instance.bound_in_body_declared_in_init) # revealed: str | None
reveal_type(c_instance.bound_in_body_and_init) # revealed: Unknown | None | Literal["a"]
```
#### Variable defined in non-`__init__` method
We also recognize pure instance variables if they are defined in a method that is not `__init__`.
```py
class C:
def __init__(self, param: int | None, flag: bool = False) -> None:
self.initialize(param, flag)
def initialize(self, param: int | None, flag: bool) -> None:
value = 1 if flag else "a"
self.inferred_from_value = value
self.inferred_from_other_attribute = self.inferred_from_value
self.inferred_from_param = param
self.declared_only: bytes
self.declared_and_bound: bool = True
c_instance = C(1)
reveal_type(c_instance.inferred_from_value) # revealed: Unknown | Literal[1, "a"]
# TODO: Should be `Unknown | Literal[1, "a"]`
reveal_type(c_instance.inferred_from_other_attribute) # revealed: Unknown
# TODO: Should be `int | None`
reveal_type(c_instance.inferred_from_param) # revealed: Unknown | int | None
reveal_type(c_instance.declared_only) # revealed: bytes
reveal_type(c_instance.declared_and_bound) # revealed: bool
# TODO: We already show an error here, but the message might be improved?
# error: [unresolved-attribute]
reveal_type(C.inferred_from_value) # revealed: Unknown
# TODO: this should be an error
C.inferred_from_value = "overwritten on class"
```
#### Variable defined in multiple methods
If we see multiple un-annotated assignments to a single attribute (`self.x` below), we build the
union of all inferred types (and `Unknown`). If we see multiple conflicting declarations of the same
attribute, that should be an error.
```py
def get_int() -> int:
return 0
def get_str() -> str:
return "a"
class C:
def __init__(self) -> None:
self.x = get_int()
self.y: int = 1
def other_method(self):
self.x = get_str()
# TODO: this redeclaration should be an error
self.y: str = "a"
c_instance = C()
reveal_type(c_instance.x) # revealed: Unknown | int | str
# TODO: We should probably infer `int | str` here.
reveal_type(c_instance.y) # revealed: int
```
#### Attributes defined in tuple unpackings
```py
def returns_tuple() -> tuple[int, str]:
return (1, "a")
class C:
a1, b1 = (1, "a")
c1, d1 = returns_tuple()
def __init__(self) -> None:
self.a2, self.b2 = (1, "a")
self.c2, self.d2 = returns_tuple()
c_instance = C()
reveal_type(c_instance.a1) # revealed: Unknown | Literal[1]
reveal_type(c_instance.b1) # revealed: Unknown | Literal["a"]
reveal_type(c_instance.c1) # revealed: Unknown | int
reveal_type(c_instance.d1) # revealed: Unknown | str
# TODO: This should be supported (no error; type should be: `Unknown | Literal[1]`)
# error: [unresolved-attribute]
reveal_type(c_instance.a2) # revealed: Unknown
# TODO: This should be supported (no error; type should be: `Unknown | Literal["a"]`)
# error: [unresolved-attribute]
reveal_type(c_instance.b2) # revealed: Unknown
# TODO: Similar for these two (should be `Unknown | int` and `Unknown | str`, respectively)
# error: [unresolved-attribute]
reveal_type(c_instance.c2) # revealed: Unknown
# error: [unresolved-attribute]
reveal_type(c_instance.d2) # revealed: Unknown
```
#### Attributes defined in for-loop (unpacking)
```py
class IntIterator:
def __next__(self) -> int:
return 1
class IntIterable:
def __iter__(self) -> IntIterator:
return IntIterator()
class TupleIterator:
def __next__(self) -> tuple[int, str]:
return (1, "a")
class TupleIterable:
def __iter__(self) -> TupleIterator:
return TupleIterator()
class C:
def __init__(self):
for self.x in IntIterable():
pass
for _, self.y in TupleIterable():
pass
# TODO: Pyright fully supports these, mypy detects the presence of the attributes,
# but infers type `Any` for both of them. We should infer `int` and `str` here:
# error: [unresolved-attribute]
reveal_type(C().x) # revealed: Unknown
# error: [unresolved-attribute]
reveal_type(C().y) # revealed: Unknown
```
#### Conditionally declared / bound attributes
We currently do not raise a diagnostic or change behavior if an attribute is only conditionally
defined. This is consistent with what mypy and pyright do.
```py
def flag() -> bool:
return True
class C:
def f(self) -> None:
if flag():
self.a1: str | None = "a"
self.b1 = 1
if flag():
def f(self) -> None:
self.a2: str | None = "a"
self.b2 = 1
c_instance = C()
reveal_type(c_instance.a1) # revealed: str | None
reveal_type(c_instance.a2) # revealed: str | None
reveal_type(c_instance.b1) # revealed: Unknown | Literal[1]
reveal_type(c_instance.b2) # revealed: Unknown | Literal[1]
```
#### Methods that do not use `self` as a first parameter
```py
class C:
# This might trigger a stylistic lint like `invalid-first-argument-name-for-method`, but
# it should be supported in general:
def __init__(this) -> None:
this.declared_and_bound: str | None = "a"
reveal_type(C().declared_and_bound) # revealed: str | None
```
#### Aliased `self` parameter
```py
class C:
def __init__(self) -> None:
this = self
this.declared_and_bound: str | None = "a"
# This would ideally be `str | None`, but mypy/pyright don't support this either,
# so `Unknown` + a diagnostic is also fine.
# error: [unresolved-attribute]
reveal_type(C().declared_and_bound) # revealed: Unknown
```
#### Attributes defined in statically-known-to-be-false branches
```py
class C:
def __init__(self) -> None:
# We use a "significantly complex" condition here (instead of just `False`)
# for a proper comparison with mypy and pyright, which distinguish between
# conditions that can be resolved from a simple pattern matching and those
# that need proper type inference.
if (2 + 3) < 4:
self.x: str = "a"
# TODO: Ideally, this would result in a `unresolved-attribute` error. But mypy and pyright
# do not support this either (for conditions that can only be resolved to `False` in type
# inference), so it does not seem to be particularly important.
reveal_type(C().x) # revealed: str
C.pure_instance_variable = "overwritten on class"
```
### Pure class variables (`ClassVar`)
@@ -446,7 +227,7 @@ reveal_type(C.pure_class_variable) # revealed: Unknown
c_instance = C()
# TODO: should be `Literal["overwritten on class"]`
reveal_type(c_instance.pure_class_variable) # revealed: Unknown | Literal["value set in class method"]
reveal_type(c_instance.pure_class_variable) # revealed: @Todo(implicit instance attribute)
# TODO: should raise an error.
c_instance.pure_class_variable = "value set on instance"
@@ -496,53 +277,6 @@ reveal_type(C.variable_with_class_default1) # revealed: str
reveal_type(c_instance.variable_with_class_default1) # revealed: str
```
### Inheritance of class/instance attributes
#### Instance variable defined in a base class
```py
class Base:
declared_in_body: int | None = 1
base_class_attribute_1: str | None
base_class_attribute_2: str | None
base_class_attribute_3: str | None
def __init__(self) -> None:
self.defined_in_init: str | None = "value in base"
class Intermediate(Base):
    # Re-declaring base class attributes with the *same* type is fine:
base_class_attribute_1: str | None = None
# Re-declaring them with a *narrower type* is unsound, because modifications
# through a `Base` reference could violate that constraint.
#
# Mypy does not report an error here, but pyright does: "… overrides symbol
# of same name in class "Base". Variable is mutable so its type is invariant"
#
# We should introduce a diagnostic for this. Whether or not that should be
# enabled by default can still be discussed.
#
# TODO: This should be an error
base_class_attribute_2: str
# Re-declaring attributes with a *wider type* directly violates LSP.
#
# In this case, both mypy and pyright report an error.
#
# TODO: This should be an error
base_class_attribute_3: str | int | None
class Derived(Intermediate): ...
reveal_type(Derived.declared_in_body) # revealed: int | None
reveal_type(Derived().declared_in_body) # revealed: int | None
reveal_type(Derived().defined_in_init) # revealed: str | None
```
## Union of attributes
```py
@@ -703,9 +437,7 @@ reveal_type(Foo.__class__) # revealed: Literal[type]
## Module attributes
`mod.py`:
```py
```py path=mod.py
global_symbol: str = "a"
```
@@ -739,19 +471,13 @@ for mod.global_symbol in IntIterable():
## Nested attributes
`outer/__init__.py`:
```py
```py path=outer/__init__.py
```
`outer/nested/__init__.py`:
```py
```py path=outer/nested/__init__.py
```
`outer/nested/inner.py`:
```py
```py path=outer/nested/inner.py
class Outer:
class Nested:
class Inner:
@@ -774,9 +500,7 @@ outer.nested.inner.Outer.Nested.Inner.attr = "a"
Most attribute accesses on function-literal types are delegated to `types.FunctionType`, since all
functions are instances of that class:
`a.py`:
```py
```py path=a.py
def f(): ...
reveal_type(f.__defaults__) # revealed: @Todo(full tuple[...] support) | None
@@ -785,9 +509,7 @@ reveal_type(f.__kwdefaults__) # revealed: @Todo(generics) | None
Some attributes are special-cased, however:
`b.py`:
```py
```py path=b.py
def f(): ...
reveal_type(f.__get__) # revealed: @Todo(`__get__` method on functions)
@@ -799,18 +521,14 @@ reveal_type(f.__call__) # revealed: @Todo(`__call__` method on functions)
Most attribute accesses on int-literal types are delegated to `builtins.int`, since all literal
integers are instances of that class:
`a.py`:
```py
```py path=a.py
reveal_type((2).bit_length) # revealed: @Todo(bound method)
reveal_type((2).denominator) # revealed: @Todo(@property)
```
Some attributes are special-cased, however:
`b.py`:
```py
```py path=b.py
reveal_type((2).numerator) # revealed: Literal[2]
reveal_type((2).real) # revealed: Literal[2]
```
@@ -820,18 +538,14 @@ reveal_type((2).real) # revealed: Literal[2]
Most attribute accesses on bool-literal types are delegated to `builtins.bool`, since all literal
bools are instances of that class:
`a.py`:
```py
```py path=a.py
reveal_type(True.__and__) # revealed: @Todo(bound method)
reveal_type(False.__or__) # revealed: @Todo(bound method)
```
Some attributes are special-cased, however:
`b.py`:
```py
```py path=b.py
reveal_type(True.numerator) # revealed: Literal[1]
reveal_type(False.real) # revealed: Literal[0]
```
@@ -845,90 +559,6 @@ reveal_type(b"foo".join) # revealed: @Todo(bound method)
reveal_type(b"foo".endswith) # revealed: @Todo(bound method)
```
## Instance attribute edge cases
### Assignment to attribute that does not correspond to the instance
```py
class Other:
x: int = 1
class C:
def __init__(self, other: Other) -> None:
other.x = 1
def f(c: C):
# error: [unresolved-attribute]
reveal_type(c.x) # revealed: Unknown
```
### Nested classes
```py
class Outer:
def __init__(self):
self.x: int = 1
class Middle:
# has no 'x' attribute
class Inner:
def __init__(self):
self.x: str = "a"
reveal_type(Outer().x) # revealed: int
# error: [unresolved-attribute]
Outer.Middle().x
reveal_type(Outer.Middle.Inner().x) # revealed: str
```
### Shadowing of `self`
```py
class Other:
x: int = 1
class C:
def __init__(self) -> None:
# Redeclaration of self. `self` does not refer to the instance anymore.
self: Other = Other()
self.x: int = 1
# TODO: this should be an error
C().x
```
### Assignment to `self` after nested function
```py
class Other:
x: str = "a"
class C:
def __init__(self) -> None:
def nested_function(self: Other):
self.x = "b"
self.x: int = 1
reveal_type(C().x) # revealed: int
```
### Assignment to `self` from nested function
```py
class C:
def __init__(self) -> None:
def set_attribute(value: str):
self.x: str = value
set_attribute("a")
# TODO: ideally, this would be `str`. Mypy supports this, pyright does not.
# error: [unresolved-attribute]
reveal_type(C().x) # revealed: Unknown
```
## References
Some of the tests in the *Class and instance variables* section draw inspiration from

View File

@@ -3,8 +3,6 @@
## Class instances
```py
from typing import Literal
class Yes:
def __add__(self, other) -> Literal["+"]:
return "+"
@@ -138,8 +136,6 @@ reveal_type(No() // Yes()) # revealed: Unknown
## Subclass reflections override superclass dunders
```py
from typing import Literal
class Yes:
def __add__(self, other) -> Literal["+"]:
return "+"
@@ -298,8 +294,6 @@ itself. (For these operators to work on the class itself, they would have to be
class's type, i.e. `type`.)
```py
from typing import Literal
class Yes:
def __add__(self, other) -> Literal["+"]:
return "+"
@@ -318,8 +312,6 @@ reveal_type(No + No) # revealed: Unknown
## Subclass
```py
from typing import Literal
class Yes:
def __add__(self, other) -> Literal["+"]:
return "+"

View File

@@ -1,6 +1,6 @@
# Boundness and declaredness: public uses
This document demonstrates how type-inference and diagnostics work for *public* uses of a symbol,
This document demonstrates how type-inference and diagnostics works for *public* uses of a symbol,
that is, a use of a symbol from another scope. If a symbol has a declared type in its local scope
(e.g. `int`), we use that as the symbol's "public type" (the type of the symbol from the perspective
of other scopes) even if there is a more precise local inferred type for the symbol (`Literal[1]`).
@@ -34,28 +34,20 @@ In particular, we should raise errors in the "possibly-undeclared-and-unbound" a
### Declared and bound
If a symbol has a declared type (`int`), we use that even if there is a more precise inferred type
(`Literal[1]`), or a conflicting inferred type (`str` vs. `Literal[2]` below):
(`Literal[1]`), or a conflicting inferred type (`Literal[2]`):
`mod.py`:
```py path=mod.py
x: int = 1
```py
from typing import Any
def any() -> Any: ...
a: int = 1
b: str = 2 # error: [invalid-assignment]
c: Any = 3
d: int = any()
# error: [invalid-assignment]
y: str = 2
```
```py
from mod import a, b, c, d
from mod import x, y
reveal_type(a) # revealed: int
reveal_type(b) # revealed: str
reveal_type(c) # revealed: Any
reveal_type(d) # revealed: int
reveal_type(x) # revealed: int
reveal_type(y) # revealed: str
```
### Declared and possibly unbound
@@ -63,33 +55,22 @@ reveal_type(d) # revealed: int
If a symbol is declared and *possibly* unbound, we trust that other module and use the declared type
without raising an error.
`mod.py`:
```py
from typing import Any
def any() -> Any: ...
```py path=mod.py
def flag() -> bool: ...
a: int
b: str
c: Any
d: int
x: int
y: str
if flag:
a = 1
b = 2 # error: [invalid-assignment]
c = 3
d = any()
x = 1
# error: [invalid-assignment]
y = 2
```
```py
from mod import a, b, c, d
from mod import x, y
reveal_type(a) # revealed: int
reveal_type(b) # revealed: str
reveal_type(c) # revealed: Any
reveal_type(d) # revealed: int
reveal_type(x) # revealed: int
reveal_type(y) # revealed: str
```
### Declared and unbound
@@ -97,20 +78,14 @@ reveal_type(d) # revealed: int
Similarly, if a symbol is declared but unbound, we do not raise an error. We trust that this symbol
is available somehow and simply use the declared type.
`mod.py`:
```py
from typing import Any
a: int
b: Any
```py path=mod.py
x: int
```
```py
from mod import a, b
from mod import x
reveal_type(a) # revealed: int
reveal_type(b) # revealed: Any
reveal_type(x) # revealed: int
```
## Possibly undeclared
@@ -120,70 +95,61 @@ reveal_type(b) # revealed: Any
If a symbol is possibly undeclared but definitely bound, we use the union of the declared and
inferred types:
`mod.py`:
```py
```py path=mod.py
from typing import Any
def any() -> Any: ...
def flag() -> bool: ...
a = 1
b = 2
c = 3
d = any()
x = 1
y = 2
z = 3
if flag():
a: int
b: Any
c: str # error: [invalid-declaration]
d: int
x: int
y: Any
# error: [invalid-declaration]
z: str
```
```py
from mod import a, b, c, d
from mod import x, y, z
reveal_type(a) # revealed: int
reveal_type(b) # revealed: Literal[2] | Any
reveal_type(c) # revealed: Literal[3] | Unknown
reveal_type(d) # revealed: Any | int
reveal_type(x) # revealed: int
reveal_type(y) # revealed: Literal[2] | Any
reveal_type(z) # revealed: Literal[3] | Unknown
# External modifications of `a` that violate the declared type are not allowed:
# External modifications of `x` that violate the declared type are not allowed:
# error: [invalid-assignment]
a = None
x = None
```
### Possibly undeclared and possibly unbound
If a symbol is possibly undeclared and possibly unbound, we also use the union of the declared and
inferred types. This case is interesting because the "possibly declared" definition might not be the
same as the "possibly bound" definition (symbol `b`). Note that we raise a `possibly-unbound-import`
error for both `a` and `b`:
`mod.py`:
```py
from typing import Any
same as the "possibly bound" definition (symbol `y`). Note that we raise a `possibly-unbound-import`
error for both `x` and `y`:
```py path=mod.py
def flag() -> bool: ...
if flag():
a: Any = 1
b = 2
x: Any = 1
y = 2
else:
b: str
y: str
```
```py
# error: [possibly-unbound-import]
# error: [possibly-unbound-import]
from mod import a, b
from mod import x, y
reveal_type(a) # revealed: Literal[1] | Any
reveal_type(b) # revealed: Literal[2] | str
reveal_type(x) # revealed: Literal[1] | Any
reveal_type(y) # revealed: Literal[2] | str
# External modifications of `b` that violate the declared type are not allowed:
# External modifications of `y` that violate the declared type are not allowed:
# error: [invalid-assignment]
b = None
y = None
```
### Possibly undeclared and unbound
@@ -191,53 +157,40 @@ b = None
If a symbol is possibly undeclared and definitely unbound, we currently do not raise an error. This
seems inconsistent when compared to the case just above.
`mod.py`:
```py
```py path=mod.py
def flag() -> bool: ...
if flag():
a: int
x: int
```
```py
# TODO: this should raise an error. Once we fix this, update the section description and the table
# on top of this document.
from mod import a
from mod import x
reveal_type(a) # revealed: int
reveal_type(x) # revealed: int
# External modifications to `a` that violate the declared type are not allowed:
# External modifications to `x` that violate the declared type are not allowed:
# error: [invalid-assignment]
a = None
x = None
```
## Undeclared
### Undeclared but bound
If a symbol is *undeclared*, we use the union of `Unknown` with the inferred type. Note that we
treat this case differently from the case where a symbol is implicitly declared with `Unknown`,
possibly due to the usage of an unknown name in the annotation:
`mod.py`:
```py
# Undeclared:
a = 1
# Implicitly declared with `Unknown`, due to the usage of an unknown name in the annotation:
b: SomeUnknownName = 1 # error: [unresolved-reference]
```py path=mod.py
x = 1
```
```py
from mod import a, b
from mod import x
reveal_type(a) # revealed: Unknown | Literal[1]
reveal_type(b) # revealed: Unknown
reveal_type(x) # revealed: Unknown | Literal[1]
# All external modifications of `a` are allowed:
a = None
# All external modifications of `x` are allowed:
x = None
```
### Undeclared and possibly unbound
@@ -245,45 +198,39 @@ a = None
If a symbol is undeclared and *possibly* unbound, we currently do not raise an error. This seems
inconsistent when compared to the "possibly-undeclared-and-possibly-unbound" case.
`mod.py`:
```py
```py path=mod.py
def flag() -> bool: ...
if flag:
a = 1
b: SomeUnknownName = 1 # error: [unresolved-reference]
x = 1
```
```py
# TODO: this should raise an error. Once we fix this, update the section description and the table
# on top of this document.
from mod import a, b
from mod import x
reveal_type(a) # revealed: Unknown | Literal[1]
reveal_type(b) # revealed: Unknown
reveal_type(x) # revealed: Unknown | Literal[1]
# All external modifications of `a` are allowed:
a = None
# All external modifications of `x` are allowed:
x = None
```
### Undeclared and unbound
If a symbol is undeclared *and* unbound, we infer `Unknown` and raise an error.
`mod.py`:
```py
```py path=mod.py
if False:
a: int = 1
x: int = 1
```
```py
# error: [unresolved-import]
from mod import a
from mod import x
reveal_type(a) # revealed: Unknown
reveal_type(x) # revealed: Unknown
# Modifications allowed in this case:
a = None
x = None
```

View File

@@ -6,8 +6,6 @@ If we have an intersection type `A & B` and we get a definitive true/false answe
types, we can infer that the result for the intersection type is also true/false:
```py
from typing import Literal
class Base: ...
class Child1(Base):

View File

@@ -33,9 +33,7 @@ reveal_type(a >= b) # revealed: Literal[False]
Even when tuples have different lengths, comparisons should be handled appropriately.
`different_length.py`:
```py
```py path=different_length.py
a = (1, 2, 3)
b = (1, 2, 3, 4)
@@ -104,9 +102,7 @@ reveal_type(a >= b) # revealed: bool
However, if the lexicographic comparison completes without reaching a point where str and int are
compared, Python will still produce a result based on the prior elements.
`short_circuit.py`:
```py
```py path=short_circuit.py
a = (1, 2)
b = (999999, "hello")

View File

@@ -78,7 +78,7 @@ def _(a: type[Unknown], b: type[Any]):
Tuple types with the same elements are the same.
```py
from typing_extensions import Any, assert_type
from typing_extensions import assert_type
from knot_extensions import Unknown

View File

@@ -29,9 +29,7 @@ completing. The type of `x` at the beginning of the `except` suite in this examp
`x = could_raise_returns_str()` redefinition, but we *also* could have jumped to the `except` suite
*after* that redefinition.
`union_type_inferred.py`:
```py
```py path=union_type_inferred.py
def could_raise_returns_str() -> str:
return "foo"
@@ -52,9 +50,7 @@ reveal_type(x) # revealed: str | Literal[2]
If `x` has the same type at the end of both branches, however, the branches unify and `x` is not
inferred as having a union type following the `try`/`except` block:
`branches_unify_to_non_union_type.py`:
```py
```py path=branches_unify_to_non_union_type.py
def could_raise_returns_str() -> str:
return "foo"
@@ -137,9 +133,7 @@ the `except` suite:
- At the end of `else`, `x == 3`
- At the end of `except`, `x == 2`
`single_except.py`:
```py
```py path=single_except.py
def could_raise_returns_str() -> str:
return "foo"
@@ -198,9 +192,7 @@ A `finally` suite is *always* executed. As such, if we reach the `reveal_type` c
this example, we know that `x` *must* have been reassigned to `2` during the `finally` suite. The
type of `x` at the end of the example is therefore `Literal[2]`:
`redef_in_finally.py`:
```py
```py path=redef_in_finally.py
def could_raise_returns_str() -> str:
return "foo"
@@ -225,9 +217,7 @@ at this point than there were when we were inside the `finally` block.
(Our current model does *not* correctly infer the types *inside* `finally` suites, however; this is
still a TODO item for us.)
`no_redef_in_finally.py`:
```py
```py path=no_redef_in_finally.py
def could_raise_returns_str() -> str:
return "foo"
@@ -259,9 +249,7 @@ suites:
exception raised in the `except` suite to cause us to jump to the `finally` suite before the
`except` suite ran to completion
`redef_in_finally.py`:
```py
```py path=redef_in_finally.py
def could_raise_returns_str() -> str:
return "foo"
@@ -298,9 +286,7 @@ itself. (In some control-flow possibilities, some exceptions were merely *suspen
`finally` suite; these lead to the scope's termination following the conclusion of the `finally`
suite.)
`no_redef_in_finally.py`:
```py
```py path=no_redef_in_finally.py
def could_raise_returns_str() -> str:
return "foo"
@@ -331,9 +317,7 @@ reveal_type(x) # revealed: str | bool
An example with multiple `except` branches and a `finally` branch:
`multiple_except_branches.py`:
```py
```py path=multiple_except_branches.py
def could_raise_returns_str() -> str:
return "foo"
@@ -380,9 +364,7 @@ If the exception handler has an `else` branch, we must also take into account th
control flow could have jumped to the `finally` suite from partway through the `else` suite due to
an exception raised *there*.
`single_except_branch.py`:
```py
```py path=single_except_branch.py
def could_raise_returns_str() -> str:
return "foo"
@@ -425,9 +407,7 @@ reveal_type(x) # revealed: bool | float
The same again, this time with multiple `except` branches:
`multiple_except_branches.py`:
```py
```py path=multiple_except_branches.py
def could_raise_returns_str() -> str:
return "foo"

View File

@@ -54,9 +54,7 @@ reveal_type("x" or "y" and "") # revealed: Literal["x"]
## Evaluates to builtin
`a.py`:
```py
```py path=a.py
redefined_builtin_bool: type[bool] = bool
def my_bool(x) -> bool:

View File

@@ -28,8 +28,6 @@ reveal_type(1 if 0 else 2) # revealed: Literal[2]
The test inside an if expression should not affect code outside of the expression.
```py
from typing import Literal
def _(flag: bool):
x: Literal[42, "hello"] = 42 if flag else "hello"

View File

@@ -51,7 +51,7 @@ In type stubs, classes can reference themselves in their base class definitions.
This should hold true even with generics at play.
```pyi
```py path=a.pyi
class Seq[T]: ...
# TODO not error on the subscripting

View File

@@ -9,9 +9,7 @@ E = D
reveal_type(E) # revealed: Literal[C]
```
`b.py`:
```py
```py path=b.py
class C: ...
```
@@ -24,9 +22,7 @@ D = b.C
reveal_type(D) # revealed: Literal[C]
```
`b.py`:
```py
```py path=b.py
class C: ...
```
@@ -38,14 +34,10 @@ import a.b
reveal_type(a.b.C) # revealed: Literal[C]
```
`a/__init__.py`:
```py
```py path=a/__init__.py
```
`a/b.py`:
```py
```py path=a/b.py
class C: ...
```
@@ -57,19 +49,13 @@ import a.b.c
reveal_type(a.b.c.C) # revealed: Literal[C]
```
`a/__init__.py`:
```py
```py path=a/__init__.py
```
`a/b/__init__.py`:
```py
```py path=a/b/__init__.py
```
`a/b/c.py`:
```py
```py path=a/b/c.py
class C: ...
```
@@ -81,14 +67,10 @@ import a.b as b
reveal_type(b.C) # revealed: Literal[C]
```
`a/__init__.py`:
```py
```py path=a/__init__.py
```
`a/b.py`:
```py
```py path=a/b.py
class C: ...
```
@@ -100,19 +82,13 @@ import a.b.c as c
reveal_type(c.C) # revealed: Literal[C]
```
`a/__init__.py`:
```py
```py path=a/__init__.py
```
`a/b/__init__.py`:
```py
```py path=a/b/__init__.py
```
`a/b/c.py`:
```py
```py path=a/b/c.py
class C: ...
```
@@ -126,7 +102,5 @@ import a.foo # error: [unresolved-import] "Cannot resolve import `a.foo`"
import b.foo # error: [unresolved-import] "Cannot resolve import `b.foo`"
```
`a/__init__.py`:
```py
```py path=a/__init__.py
```

View File

@@ -29,17 +29,13 @@ builtins from the "actual" vendored typeshed:
typeshed = "/typeshed"
```
`/typeshed/stdlib/builtins.pyi`:
```pyi
```pyi path=/typeshed/stdlib/builtins.pyi
class Custom: ...
custom_builtin: Custom
```
`/typeshed/stdlib/typing_extensions.pyi`:
```pyi
```pyi path=/typeshed/stdlib/typing_extensions.pyi
def reveal_type(obj, /): ...
```
@@ -60,16 +56,12 @@ that point:
typeshed = "/typeshed"
```
`/typeshed/stdlib/builtins.pyi`:
```pyi
```pyi path=/typeshed/stdlib/builtins.pyi
foo = bar
bar = 1
```
`/typeshed/stdlib/typing_extensions.pyi`:
```pyi
```pyi path=/typeshed/stdlib/typing_extensions.pyi
def reveal_type(obj, /): ...
```

View File

@@ -2,9 +2,7 @@
## Maybe unbound
`maybe_unbound.py`:
```py
```py path=maybe_unbound.py
def coinflip() -> bool:
return True
@@ -31,9 +29,7 @@ reveal_type(y) # revealed: Unknown | Literal[3]
## Maybe unbound annotated
`maybe_unbound_annotated.py`:
```py
```py path=maybe_unbound_annotated.py
def coinflip() -> bool:
return True
@@ -64,9 +60,7 @@ reveal_type(y) # revealed: int
Importing a possibly undeclared name still gives us its declared type:
`maybe_undeclared.py`:
```py
```py path=maybe_undeclared.py
def coinflip() -> bool:
return True
@@ -82,15 +76,11 @@ reveal_type(x) # revealed: int
## Reimport
`c.py`:
```py
```py path=c.py
def f(): ...
```
`b.py`:
```py
```py path=b.py
def coinflip() -> bool:
return True
@@ -112,15 +102,11 @@ reveal_type(f) # revealed: Literal[f, f]
When we have a declared type in one path and only an inferred-from-definition type in the other, we
should still be able to unify those:
`c.pyi`:
```pyi
```py path=c.pyi
x: int
```
`b.py`:
```py
```py path=b.py
def coinflip() -> bool:
return True

View File

@@ -8,15 +8,11 @@ import a.b
reveal_type(a.b) # revealed: <module 'a.b'>
```
`a/__init__.py`:
```py
```py path=a/__init__.py
b: int = 42
```
`a/b.py`:
```py
```py path=a/b.py
```
## Via from/import
@@ -27,15 +23,11 @@ from a import b
reveal_type(b) # revealed: int
```
`a/__init__.py`:
```py
```py path=a/__init__.py
b: int = 42
```
`a/b.py`:
```py
```py path=a/b.py
```
## Via both
@@ -48,15 +40,11 @@ reveal_type(b) # revealed: <module 'a.b'>
reveal_type(a.b) # revealed: <module 'a.b'>
```
`a/__init__.py`:
```py
```py path=a/__init__.py
b: int = 42
```
`a/b.py`:
```py
```py path=a/b.py
```
## Via both (backwards)
@@ -77,15 +65,11 @@ reveal_type(b) # revealed: <module 'a.b'>
reveal_type(a.b) # revealed: <module 'a.b'>
```
`a/__init__.py`:
```py
```py path=a/__init__.py
b: int = 42
```
`a/b.py`:
```py
```py path=a/b.py
```
[from-import]: https://docs.python.org/3/reference/simple_stmts.html#the-import-statement

View File

@@ -18,9 +18,7 @@ reveal_type(baz) # revealed: Unknown
## Unresolved import from resolved module
`a.py`:
```py
```py path=a.py
```
```py
@@ -31,9 +29,7 @@ reveal_type(thing) # revealed: Unknown
## Resolved import of symbol from unresolved import
`a.py`:
```py
```py path=a.py
import foo as foo # error: "Cannot resolve import `foo`"
reveal_type(foo) # revealed: Unknown
@@ -50,9 +46,7 @@ reveal_type(foo) # revealed: Unknown
## No implicit shadowing
`b.py`:
```py
```py path=b.py
x: int
```
@@ -64,9 +58,7 @@ x = "foo" # error: [invalid-assignment] "Object of type `Literal["foo"]"
## Import cycle
`a.py`:
```py
```py path=a.py
class A: ...
reveal_type(A.__mro__) # revealed: tuple[Literal[A], Literal[object]]
@@ -77,9 +69,7 @@ class C(b.B): ...
reveal_type(C.__mro__) # revealed: tuple[Literal[C], Literal[B], Literal[A], Literal[object]]
```
`b.py`:
```py
```py path=b.py
from a import A
class B(A): ...

View File

@@ -23,13 +23,9 @@ reveal_type(b) # revealed: <module 'a.b'>
reveal_type(b.c) # revealed: int
```
`a/__init__.py`:
```py
```py path=a/__init__.py
```
`a/b.py`:
```py
```py path=a/b.py
c: int = 1
```

View File

@@ -2,14 +2,10 @@
## Non-existent
`package/__init__.py`:
```py
```py path=package/__init__.py
```
`package/bar.py`:
```py
```py path=package/bar.py
from .foo import X # error: [unresolved-import]
reveal_type(X) # revealed: Unknown
@@ -17,20 +13,14 @@ reveal_type(X) # revealed: Unknown
## Simple
`package/__init__.py`:
```py
```py path=package/__init__.py
```
`package/foo.py`:
```py
```py path=package/foo.py
X: int = 42
```
`package/bar.py`:
```py
```py path=package/bar.py
from .foo import X
reveal_type(X) # revealed: int
@@ -38,20 +28,14 @@ reveal_type(X) # revealed: int
## Dotted
`package/__init__.py`:
```py
```py path=package/__init__.py
```
`package/foo/bar/baz.py`:
```py
```py path=package/foo/bar/baz.py
X: int = 42
```
`package/bar.py`:
```py
```py path=package/bar.py
from .foo.bar.baz import X
reveal_type(X) # revealed: int
@@ -59,15 +43,11 @@ reveal_type(X) # revealed: int
## Bare to package
`package/__init__.py`:
```py
```py path=package/__init__.py
X: int = 42
```
`package/bar.py`:
```py
```py path=package/bar.py
from . import X
reveal_type(X) # revealed: int
@@ -75,9 +55,7 @@ reveal_type(X) # revealed: int
## Non-existent + bare to package
`package/bar.py`:
```py
```py path=package/bar.py
from . import X # error: [unresolved-import]
reveal_type(X) # revealed: Unknown
@@ -85,25 +63,19 @@ reveal_type(X) # revealed: Unknown
## Dunder init
`package/__init__.py`:
```py
```py path=package/__init__.py
from .foo import X
reveal_type(X) # revealed: int
```
`package/foo.py`:
```py
```py path=package/foo.py
X: int = 42
```
## Non-existent + dunder init
`package/__init__.py`:
```py
```py path=package/__init__.py
from .foo import X # error: [unresolved-import]
reveal_type(X) # revealed: Unknown
@@ -111,20 +83,14 @@ reveal_type(X) # revealed: Unknown
## Long relative import
`package/__init__.py`:
```py
```py path=package/__init__.py
```
`package/foo.py`:
```py
```py path=package/foo.py
X: int = 42
```
`package/subpackage/subsubpackage/bar.py`:
```py
```py path=package/subpackage/subsubpackage/bar.py
from ...foo import X
reveal_type(X) # revealed: int
@@ -132,20 +98,14 @@ reveal_type(X) # revealed: int
## Unbound symbol
`package/__init__.py`:
```py
```py path=package/__init__.py
```
`package/foo.py`:
```py
```py path=package/foo.py
x # error: [unresolved-reference]
```
`package/bar.py`:
```py
```py path=package/bar.py
from .foo import x # error: [unresolved-import]
reveal_type(x) # revealed: Unknown
@@ -153,20 +113,14 @@ reveal_type(x) # revealed: Unknown
## Bare to module
`package/__init__.py`:
```py
```py path=package/__init__.py
```
`package/foo.py`:
```py
```py path=package/foo.py
X: int = 42
```
`package/bar.py`:
```py
```py path=package/bar.py
from . import foo
reveal_type(foo.X) # revealed: int
@@ -177,14 +131,10 @@ reveal_type(foo.X) # revealed: int
This test verifies that we emit an error when we try to import a symbol that is neither a submodule
nor an attribute of `package`.
`package/__init__.py`:
```py
```py path=package/__init__.py
```
`package/bar.py`:
```py
```py path=package/bar.py
from . import foo # error: [unresolved-import]
reveal_type(foo) # revealed: Unknown
@@ -198,20 +148,14 @@ submodule when that submodule name appears in the `imported_modules` set. That m
that are imported via `from...import` are not visible to our type inference if you also access that
submodule via the attribute on its parent package.
`package/__init__.py`:
```py
```py path=package/__init__.py
```
`package/foo.py`:
```py
```py path=package/foo.py
X: int = 42
```
`package/bar.py`:
```py
```py path=package/bar.py
from . import foo
import package

View File

@@ -9,9 +9,7 @@ y = x
reveal_type(y) # revealed: int
```
`b.pyi`:
```pyi
```py path=b.pyi
x: int
```
@@ -24,8 +22,6 @@ y = x
reveal_type(y) # revealed: int
```
`b.py`:
```py
```py path=b.py
x: int = 1
```

View File

@@ -32,14 +32,10 @@ reveal_type(a.b.C) # revealed: Literal[C]
import a.b
```
`a/__init__.py`:
```py
```py path=a/__init__.py
```
`a/b.py`:
```py
```py path=a/b.py
class C: ...
```
@@ -59,20 +55,14 @@ reveal_type(a.b) # revealed: <module 'a.b'>
reveal_type(a.b.C) # revealed: Literal[C]
```
`a/__init__.py`:
```py
```py path=a/__init__.py
```
`a/b.py`:
```py
```py path=a/b.py
class C: ...
```
`q.py`:
```py
```py path=q.py
import a as a
import a.b as b
```
@@ -93,26 +83,18 @@ reveal_type(sub.b) # revealed: <module 'sub.b'>
reveal_type(attr.b) # revealed: <module 'attr.b'>
```
`sub/__init__.py`:
```py
```py path=sub/__init__.py
b = 1
```
`sub/b.py`:
```py
```py path=sub/b.py
```
`attr/__init__.py`:
```py
```py path=attr/__init__.py
from . import b as _
b = 1
```
`attr/b.py`:
```py
```py path=attr/b.py
```

View File

@@ -808,7 +808,6 @@ Dynamic types do not cancel each other out. Intersecting an unknown set of value
of another unknown set of values is not necessarily empty, so we keep the positive contribution:
```py
from typing import Any
from knot_extensions import Intersection, Not, Unknown
def any(
@@ -831,7 +830,6 @@ def unknown(
We currently do not simplify mixed dynamic types, but might consider doing so in the future:
```py
from typing import Any
from knot_extensions import Intersection, Not, Unknown
def mixed(

View File

@@ -31,9 +31,7 @@ reveal_type(TC) # revealed: Literal[True]
Make sure we only use our special handling for `typing.TYPE_CHECKING` and not for other constants
with the same name:
`constants.py`:
```py
```py path=constants.py
TYPE_CHECKING: bool = False
```

View File

@@ -13,8 +13,6 @@ python-version = "3.10"
Here, we simply make sure that we pick up the global configuration from the root section:
```py
import sys
reveal_type(sys.version_info[:2] == (3, 10)) # revealed: Literal[True]
```
@@ -27,8 +25,6 @@ reveal_type(sys.version_info[:2] == (3, 10)) # revealed: Literal[True]
The same should work for arbitrarily nested sections:
```py
import sys
reveal_type(sys.version_info[:2] == (3, 10)) # revealed: Literal[True]
```
@@ -42,8 +38,6 @@ python-version = "3.11"
```
```py
import sys
reveal_type(sys.version_info[:2] == (3, 11)) # revealed: Literal[True]
```
@@ -52,8 +46,6 @@ reveal_type(sys.version_info[:2] == (3, 11)) # revealed: Literal[True]
There is no global state. This section should again use the root configuration:
```py
import sys
reveal_type(sys.version_info[:2] == (3, 10)) # revealed: Literal[True]
```
@@ -71,7 +63,5 @@ python-version = "3.12"
### Grandchild
```py
import sys
reveal_type(sys.version_info[:2] == (3, 12)) # revealed: Literal[True]
```

View File

@@ -19,17 +19,13 @@ typeshed = "/typeshed"
We can then place custom stub files in `/typeshed/stdlib`, for example:
`/typeshed/stdlib/builtins.pyi`:
```pyi
```pyi path=/typeshed/stdlib/builtins.pyi
class BuiltinClass: ...
builtin_symbol: BuiltinClass
```
`/typeshed/stdlib/sys/__init__.pyi`:
```pyi
```pyi path=/typeshed/stdlib/sys/__init__.pyi
version = "my custom Python"
```
@@ -58,21 +54,15 @@ python-version = "3.10"
typeshed = "/typeshed"
```
`/typeshed/stdlib/old_module.pyi`:
```pyi
```pyi path=/typeshed/stdlib/old_module.pyi
class OldClass: ...
```
`/typeshed/stdlib/new_module.pyi`:
```pyi
```pyi path=/typeshed/stdlib/new_module.pyi
class NewClass: ...
```
`/typeshed/stdlib/VERSIONS`:
```text
```text path=/typeshed/stdlib/VERSIONS
old_module: 3.0-
new_module: 3.11-
```
@@ -96,9 +86,7 @@ simple untyped definition is enough to make `reveal_type` work in tests:
typeshed = "/typeshed"
```
`/typeshed/stdlib/typing_extensions.pyi`:
```pyi
```pyi path=/typeshed/stdlib/typing_extensions.pyi
def reveal_type(obj, /): ...
```

View File

@@ -205,7 +205,7 @@ reveal_type(D.__class__) # revealed: Literal[SignatureMismatch]
Retrieving the metaclass of a cyclically defined class should not cause an infinite loop.
```pyi
```py path=a.pyi
class A(B): ... # error: [cyclic-class-definition]
class B(C): ... # error: [cyclic-class-definition]
class C(A): ... # error: [cyclic-class-definition]

View File

@@ -347,7 +347,7 @@ reveal_type(unknown_object.__mro__) # revealed: Unknown
These are invalid, but we need to be able to handle them gracefully without panicking.
```pyi
```py path=a.pyi
class Foo(Foo): ... # error: [cyclic-class-definition]
reveal_type(Foo) # revealed: Literal[Foo]
@@ -365,7 +365,7 @@ reveal_type(Boz.__mro__) # revealed: tuple[Literal[Boz], Unknown, Literal[objec
These are similarly unlikely, but we still shouldn't crash:
```pyi
```py path=a.pyi
class Foo(Bar): ... # error: [cyclic-class-definition]
class Bar(Baz): ... # error: [cyclic-class-definition]
class Baz(Foo): ... # error: [cyclic-class-definition]
@@ -377,7 +377,7 @@ reveal_type(Baz.__mro__) # revealed: tuple[Literal[Baz], Unknown, Literal[objec
## Classes with cycles in their MROs, and multiple inheritance
```pyi
```py path=a.pyi
class Spam: ...
class Foo(Bar): ... # error: [cyclic-class-definition]
class Bar(Baz): ... # error: [cyclic-class-definition]
@@ -390,7 +390,7 @@ reveal_type(Baz.__mro__) # revealed: tuple[Literal[Baz], Unknown, Literal[objec
## Classes with cycles in their MRO, and a sub-graph
```pyi
```py path=a.pyi
class FooCycle(BarCycle): ... # error: [cyclic-class-definition]
class Foo: ...
class BarCycle(FooCycle): ... # error: [cyclic-class-definition]

View File

@@ -57,8 +57,6 @@ def _(flag1: bool, flag2: bool, flag3: bool, flag4: bool):
## Multiple predicates
```py
from typing import Literal
def _(flag1: bool, flag2: bool):
class A: ...
x: A | None | Literal[1] = A() if flag1 else None if flag2 else 1
@@ -69,8 +67,6 @@ def _(flag1: bool, flag2: bool):
## Mix of `and` and `or`
```py
from typing import Literal
def _(flag1: bool, flag2: bool):
class A: ...
x: A | None | Literal[1] = A() if flag1 else None if flag2 else 1

View File

@@ -3,8 +3,6 @@
## Value Literals
```py
from typing import Literal
def foo() -> Literal[0, -1, True, False, "", "foo", b"", b"bar", None] | tuple[()]:
return 0
@@ -125,8 +123,6 @@ always returns a fixed value.
These types can always be fully narrowed in boolean contexts, as shown below:
```py
from typing import Literal
class T:
def __bool__(self) -> Literal[True]:
return True
@@ -153,8 +149,6 @@ else:
## Narrowing Complex Intersection and Union
```py
from typing import Literal
class A: ...
class B: ...
@@ -187,8 +181,6 @@ if isinstance(x, str) and not isinstance(x, B):
## Narrowing Multiple Variables
```py
from typing import Literal
def f(x: Literal[0, 1], y: Literal["", "hello"]):
if x and y and not x and not y:
reveal_type(x) # revealed: Never
@@ -230,8 +222,6 @@ reveal_type(y) # revealed: A
## Truthiness of classes
```py
from typing import Literal
class MetaAmbiguous(type):
def __bool__(self) -> bool: ...

View File

@@ -2,16 +2,12 @@
Regression test for [this issue](https://github.com/astral-sh/ruff/issues/14334).
`base.py`:
```py
```py path=base.py
# error: [invalid-base]
class Base(2): ...
```
`a.py`:
```py
```py path=a.py
# No error here
from base import Base
```

View File

@@ -29,9 +29,7 @@ def foo():
However, three attributes on `types.ModuleType` are not present as implicit module globals; these
are excluded:
`unbound_dunders.py`:
```py
```py path=unbound_dunders.py
# error: [unresolved-reference]
# revealed: Unknown
reveal_type(__getattr__)
@@ -72,9 +70,7 @@ Typeshed includes a fake `__getattr__` method in the stub for `types.ModuleType`
dynamic imports; but we ignore that for module-literal types where we know exactly which module
we're dealing with:
`__getattr__.py`:
```py
```py path=__getattr__.py
import typing
# error: [unresolved-attribute]
@@ -87,17 +83,13 @@ It's impossible to override the `__dict__` attribute of `types.ModuleType` insta
module; we should prioritise the attribute in the `types.ModuleType` stub over a variable named
`__dict__` in the module's global namespace:
`foo.py`:
```py
```py path=foo.py
__dict__ = "foo"
reveal_type(__dict__) # revealed: Literal["foo"]
```
`bar.py`:
```py
```py path=bar.py
import foo
from foo import __dict__ as foo_dict

View File

@@ -5,18 +5,14 @@
Parameter `x` of type `str` is shadowed and reassigned with a new `int` value inside the function.
No diagnostics should be generated.
`a.py`:
```py
```py path=a.py
def f(x: str):
x: int = int(x)
```
## Implicit error
`a.py`:
```py
```py path=a.py
def f(): ...
f = 1 # error: "Implicit shadowing of function `f`; annotate to make it explicit if this is intentional"
@@ -24,9 +20,7 @@ f = 1 # error: "Implicit shadowing of function `f`; annotate to make it explici
## Explicit shadowing
`a.py`:
```py
```py path=a.py
def f(): ...
f: int = 1

View File

@@ -7,9 +7,7 @@ branches whose conditions we can statically determine to be always true or alway
useful for `sys.version_info` branches, which can make new features available based on the Python
version:
`module1.py`:
```py
```py path=module1.py
import sys
if sys.version_info >= (3, 9):
@@ -19,9 +17,7 @@ if sys.version_info >= (3, 9):
If we can statically determine that the condition is always true, then we can also understand that
`SomeFeature` is always bound, without raising any errors:
`test1.py`:
```py
```py path=test1.py
from module1 import SomeFeature
# SomeFeature is unconditionally available here, because we are on Python 3.9 or newer:
@@ -31,15 +27,11 @@ reveal_type(SomeFeature) # revealed: str
Another scenario where this is useful is for `typing.TYPE_CHECKING` branches, which are often used
for conditional imports:
`module2.py`:
```py
```py path=module2.py
class SomeType: ...
```
`test2.py`:
```py
```py path=test2.py
import typing
if typing.TYPE_CHECKING:
@@ -175,11 +167,7 @@ statically known conditions, but here, we show that the results are truly based
not some special handling of specific conditions in semantic index building. We use two modules to
demonstrate this, since semantic index building is inherently single-module:
`module.py`:
```py
from typing import Literal
```py path=module.py
class AlwaysTrue:
def __bool__(self) -> Literal[True]:
return True
@@ -1436,9 +1424,7 @@ def f():
#### Always false, unbound
`module.py`:
```py
```py path=module.py
if False:
symbol = 1
```
@@ -1450,9 +1436,7 @@ from module import symbol
#### Always true, bound
`module.py`:
```py
```py path=module.py
if True:
symbol = 1
```
@@ -1464,9 +1448,7 @@ from module import symbol
#### Ambiguous, possibly unbound
`module.py`:
```py
```py path=module.py
def flag() -> bool:
return True
@@ -1481,9 +1463,7 @@ from module import symbol
#### Always false, undeclared
`module.py`:
```py
```py path=module.py
if False:
symbol: int
```
@@ -1497,9 +1477,7 @@ reveal_type(symbol) # revealed: Unknown
#### Always true, declared
`module.py`:
```py
```py path=module.py
if True:
symbol: int
```

View File

@@ -5,7 +5,7 @@
In type stubs, classes can reference themselves in their base class definitions. For example, in
`typeshed`, we have `class str(Sequence[str]): ...`.
```pyi
```py path=a.pyi
class Foo[T]: ...
# TODO: actually is subscriptable

View File

@@ -5,7 +5,7 @@
The ellipsis literal `...` can be used as a placeholder default value for a function parameter, in a
stub file only, regardless of the type of the parameter.
```pyi
```py path=test.pyi
def f(x: int = ...) -> None:
reveal_type(x) # revealed: int
@@ -18,7 +18,7 @@ def f2(x: str = ...) -> None:
The ellipsis literal can be assigned to a class or module symbol, regardless of its declared type,
in a stub file only.
```pyi
```py path=test.pyi
y: bytes = ...
reveal_type(y) # revealed: bytes
x = ...
@@ -35,7 +35,7 @@ reveal_type(Foo.y) # revealed: int
No diagnostic is emitted if an ellipsis literal is "unpacked" in a stub file as part of an
assignment statement:
```pyi
```py path=test.pyi
x, y = ...
reveal_type(x) # revealed: Unknown
reveal_type(y) # revealed: Unknown
@@ -46,7 +46,7 @@ reveal_type(y) # revealed: Unknown
Iterating over an ellipsis literal as part of a `for` loop in a stub is invalid, however, and
results in a diagnostic:
```pyi
```py path=test.pyi
# error: [not-iterable] "Object of type `ellipsis` is not iterable"
for a, b in ...:
reveal_type(a) # revealed: Unknown
@@ -72,7 +72,7 @@ reveal_type(b) # revealed: ellipsis
There is no special treatment of the builtin name `Ellipsis` in stubs, only of `...` literals.
```pyi
```py path=test.pyi
# error: 7 [invalid-parameter-default] "Default value of type `ellipsis` is not assignable to annotated parameter type `int`"
def f(x: int = Ellipsis) -> None: ...
```

View File

@@ -97,7 +97,7 @@ reveal_type(A.__mro__) # revealed: tuple[Literal[A], Unknown, Literal[object]]
`typing.Tuple` can be used interchangeably with `tuple`:
```py
from typing import Any, Tuple
from typing import Tuple
class A: ...

View File

@@ -77,8 +77,7 @@ def test(a: f"f-string type annotation", b: b"byte-string-type-annotation"): ...
```py
# error: [invalid-syntax]
# error: [unused-ignore-comment]
def test($): # knot: ignore
pass
def test( # knot: ignore
```
<!-- blacken-docs:on -->
@@ -181,11 +180,3 @@ a = 4 / 0 # error: [division-by-zero]
# error: [unknown-rule] "Unknown rule `is-equal-14`"
a = 10 + 4 # knot: ignore[is-equal-14]
```
## Code with `lint:` prefix
```py
# error:[unknown-rule] "Unknown rule `lint:division-by-zero`. Did you mean `division-by-zero`?"
# error: [division-by-zero]
a = 10 / 0 # knot: ignore[lint:division-by-zero]
```

View File

@@ -37,9 +37,7 @@ child expression now suppresses errors in the outer expression.
For example, the `type: ignore` comment in this example suppresses the error of adding `2` to
`"test"` and adding `"other"` to the result of the cast.
`nested.py`:
```py
```py path=nested.py
# fmt: off
from typing import cast
@@ -153,7 +151,7 @@ b = a / 0
```py
"""
File level suppressions must come before any non-trivia token,
including module docstrings.
including module docstrings.
"""
# error: [unused-ignore-comment] "Unused blanket `type: ignore` directive"

View File

@@ -86,20 +86,14 @@ reveal_type(bar >= (3, 9)) # revealed: Literal[True]
Only comparisons with the symbol `version_info` from the `sys` module produce literal types:
`package/__init__.py`:
```py
```py path=package/__init__.py
```
`package/sys.py`:
```py
```py path=package/sys.py
version_info: tuple[int, int] = (4, 2)
```
`package/script.py`:
```py
```py path=package/script.py
from .sys import version_info
reveal_type(version_info >= (3, 9)) # revealed: bool
@@ -109,9 +103,7 @@ reveal_type(version_info >= (3, 9)) # revealed: bool
The fields of `sys.version_info` can be accessed by name:
`a.py`:
```py
```py path=a.py
import sys
reveal_type(sys.version_info.major >= 3) # revealed: Literal[True]
@@ -122,9 +114,7 @@ reveal_type(sys.version_info.minor >= 10) # revealed: Literal[False]
But the `micro`, `releaselevel` and `serial` fields are inferred as `@Todo` until we support
properties on instance types:
`b.py`:
```py
```py path=b.py
import sys
reveal_type(sys.version_info.micro) # revealed: @Todo(@property)

View File

@@ -1,640 +0,0 @@
# Terminal statements
## Introduction
Terminal statements complicate a naive control-flow analysis.
As a simple example:
```py
def f(cond: bool) -> str:
if cond:
x = "test"
else:
raise ValueError
return x
def g(cond: bool):
if cond:
x = "test"
reveal_type(x) # revealed: Literal["test"]
else:
x = "terminal"
reveal_type(x) # revealed: Literal["terminal"]
raise ValueError
reveal_type(x) # revealed: Literal["test"]
```
In `f`, we should be able to determine that the `else` branch ends in a terminal statement, and that
the `return` statement can only be executed when the condition is true. We should therefore consider
the reference always bound, even though `x` is only bound in the true branch.
Similarly, in `g`, we should see that the assignment of the value `"terminal"` can never be seen by
the final `reveal_type`.
## `return`
A `return` statement is terminal; bindings that occur before it are not visible after it.
```py
def resolved_reference(cond: bool) -> str:
if cond:
x = "test"
else:
return "early"
return x # no possibly-unresolved-reference diagnostic!
def return_in_then_branch(cond: bool):
if cond:
x = "terminal"
reveal_type(x) # revealed: Literal["terminal"]
return
else:
x = "test"
reveal_type(x) # revealed: Literal["test"]
reveal_type(x) # revealed: Literal["test"]
def return_in_else_branch(cond: bool):
if cond:
x = "test"
reveal_type(x) # revealed: Literal["test"]
else:
x = "terminal"
reveal_type(x) # revealed: Literal["terminal"]
return
reveal_type(x) # revealed: Literal["test"]
def return_in_both_branches(cond: bool):
if cond:
x = "terminal1"
reveal_type(x) # revealed: Literal["terminal1"]
return
else:
x = "terminal2"
reveal_type(x) # revealed: Literal["terminal2"]
return
def return_in_try(cond: bool):
x = "before"
try:
if cond:
x = "test"
return
except:
# TODO: Literal["before"]
reveal_type(x) # revealed: Literal["before", "test"]
else:
reveal_type(x) # revealed: Literal["before"]
finally:
reveal_type(x) # revealed: Literal["before", "test"]
reveal_type(x) # revealed: Literal["before", "test"]
def return_in_nested_then_branch(cond1: bool, cond2: bool):
if cond1:
x = "test1"
reveal_type(x) # revealed: Literal["test1"]
else:
if cond2:
x = "terminal"
reveal_type(x) # revealed: Literal["terminal"]
return
else:
x = "test2"
reveal_type(x) # revealed: Literal["test2"]
reveal_type(x) # revealed: Literal["test2"]
reveal_type(x) # revealed: Literal["test1", "test2"]
def return_in_nested_else_branch(cond1: bool, cond2: bool):
if cond1:
x = "test1"
reveal_type(x) # revealed: Literal["test1"]
else:
if cond2:
x = "test2"
reveal_type(x) # revealed: Literal["test2"]
else:
x = "terminal"
reveal_type(x) # revealed: Literal["terminal"]
return
reveal_type(x) # revealed: Literal["test2"]
reveal_type(x) # revealed: Literal["test1", "test2"]
def return_in_both_nested_branches(cond1: bool, cond2: bool):
if cond1:
x = "test"
reveal_type(x) # revealed: Literal["test"]
else:
x = "terminal0"
if cond2:
x = "terminal1"
reveal_type(x) # revealed: Literal["terminal1"]
return
else:
x = "terminal2"
reveal_type(x) # revealed: Literal["terminal2"]
return
reveal_type(x) # revealed: Literal["test"]
```
## `continue`
A `continue` statement jumps back to the top of the innermost loop. This makes it terminal within
the loop body: definitions before it are not visible after it within the rest of the loop body. They
are likely visible after the loop body, since loops do not introduce new scopes. (Statically known
infinite loops are one exception — if control never leaves the loop body, bindings inside of the
loop are not visible outside of it.)
TODO: We are not currently modeling the cyclic control flow for loops, pending fixpoint support in
Salsa. The false positives in this section are because of that, and not our terminal statement
support. See [ruff#14160](https://github.com/astral-sh/ruff/issues/14160) for more details.
```py
def resolved_reference(cond: bool) -> str:
while True:
if cond:
x = "test"
else:
continue
return x
def continue_in_then_branch(cond: bool, i: int):
x = "before"
for _ in range(i):
if cond:
x = "continue"
reveal_type(x) # revealed: Literal["continue"]
continue
else:
x = "loop"
reveal_type(x) # revealed: Literal["loop"]
reveal_type(x) # revealed: Literal["loop"]
# TODO: Should be Literal["before", "loop", "continue"]
reveal_type(x) # revealed: Literal["before", "loop"]
def continue_in_else_branch(cond: bool, i: int):
x = "before"
for _ in range(i):
if cond:
x = "loop"
reveal_type(x) # revealed: Literal["loop"]
else:
x = "continue"
reveal_type(x) # revealed: Literal["continue"]
continue
reveal_type(x) # revealed: Literal["loop"]
# TODO: Should be Literal["before", "loop", "continue"]
reveal_type(x) # revealed: Literal["before", "loop"]
def continue_in_both_branches(cond: bool, i: int):
x = "before"
for _ in range(i):
if cond:
x = "continue1"
reveal_type(x) # revealed: Literal["continue1"]
continue
else:
x = "continue2"
reveal_type(x) # revealed: Literal["continue2"]
continue
# TODO: Should be Literal["before", "continue1", "continue2"]
reveal_type(x) # revealed: Literal["before"]
def continue_in_nested_then_branch(cond1: bool, cond2: bool, i: int):
x = "before"
for _ in range(i):
if cond1:
x = "loop1"
reveal_type(x) # revealed: Literal["loop1"]
else:
if cond2:
x = "continue"
reveal_type(x) # revealed: Literal["continue"]
continue
else:
x = "loop2"
reveal_type(x) # revealed: Literal["loop2"]
reveal_type(x) # revealed: Literal["loop2"]
reveal_type(x) # revealed: Literal["loop1", "loop2"]
# TODO: Should be Literal["before", "loop1", "loop2", "continue"]
reveal_type(x) # revealed: Literal["before", "loop1", "loop2"]
def continue_in_nested_else_branch(cond1: bool, cond2: bool, i: int):
x = "before"
for _ in range(i):
if cond1:
x = "loop1"
reveal_type(x) # revealed: Literal["loop1"]
else:
if cond2:
x = "loop2"
reveal_type(x) # revealed: Literal["loop2"]
else:
x = "continue"
reveal_type(x) # revealed: Literal["continue"]
continue
reveal_type(x) # revealed: Literal["loop2"]
reveal_type(x) # revealed: Literal["loop1", "loop2"]
# TODO: Should be Literal["before", "loop1", "loop2", "continue"]
reveal_type(x) # revealed: Literal["before", "loop1", "loop2"]
def continue_in_both_nested_branches(cond1: bool, cond2: bool, i: int):
x = "before"
for _ in range(i):
if cond1:
x = "loop"
reveal_type(x) # revealed: Literal["loop"]
else:
if cond2:
x = "continue1"
reveal_type(x) # revealed: Literal["continue1"]
continue
else:
x = "continue2"
reveal_type(x) # revealed: Literal["continue2"]
continue
reveal_type(x) # revealed: Literal["loop"]
# TODO: Should be Literal["before", "loop", "continue1", "continue2"]
reveal_type(x) # revealed: Literal["before", "loop"]
```
## `break`
A `break` statement jumps to the end of the innermost loop. This makes it terminal within the loop
body: definitions before it are not visible after it within the rest of the loop body. They are
likely visible after the loop body, since loops do not introduce new scopes. (Statically known
infinite loops are one exception — if control never leaves the loop body, bindings inside of the
loop are not visible outside of it.)
```py
def resolved_reference(cond: bool) -> str:
while True:
if cond:
x = "test"
else:
break
return x
return x # error: [unresolved-reference]
def break_in_then_branch(cond: bool, i: int):
x = "before"
for _ in range(i):
if cond:
x = "break"
reveal_type(x) # revealed: Literal["break"]
break
else:
x = "loop"
reveal_type(x) # revealed: Literal["loop"]
reveal_type(x) # revealed: Literal["loop"]
reveal_type(x) # revealed: Literal["before", "break", "loop"]
def break_in_else_branch(cond: bool, i: int):
x = "before"
for _ in range(i):
if cond:
x = "loop"
reveal_type(x) # revealed: Literal["loop"]
else:
x = "break"
reveal_type(x) # revealed: Literal["break"]
break
reveal_type(x) # revealed: Literal["loop"]
reveal_type(x) # revealed: Literal["before", "loop", "break"]
def break_in_both_branches(cond: bool, i: int):
x = "before"
for _ in range(i):
if cond:
x = "break1"
reveal_type(x) # revealed: Literal["break1"]
break
else:
x = "break2"
reveal_type(x) # revealed: Literal["break2"]
break
reveal_type(x) # revealed: Literal["before", "break1", "break2"]
def break_in_nested_then_branch(cond1: bool, cond2: bool, i: int):
x = "before"
for _ in range(i):
if cond1:
x = "loop1"
reveal_type(x) # revealed: Literal["loop1"]
else:
if cond2:
x = "break"
reveal_type(x) # revealed: Literal["break"]
break
else:
x = "loop2"
reveal_type(x) # revealed: Literal["loop2"]
reveal_type(x) # revealed: Literal["loop2"]
reveal_type(x) # revealed: Literal["loop1", "loop2"]
reveal_type(x) # revealed: Literal["before", "loop1", "break", "loop2"]
def break_in_nested_else_branch(cond1: bool, cond2: bool, i: int):
x = "before"
for _ in range(i):
if cond1:
x = "loop1"
reveal_type(x) # revealed: Literal["loop1"]
else:
if cond2:
x = "loop2"
reveal_type(x) # revealed: Literal["loop2"]
else:
x = "break"
reveal_type(x) # revealed: Literal["break"]
break
reveal_type(x) # revealed: Literal["loop2"]
reveal_type(x) # revealed: Literal["loop1", "loop2"]
reveal_type(x) # revealed: Literal["before", "loop1", "loop2", "break"]
def break_in_both_nested_branches(cond1: bool, cond2: bool, i: int):
x = "before"
for _ in range(i):
if cond1:
x = "loop"
reveal_type(x) # revealed: Literal["loop"]
else:
if cond2:
x = "break1"
reveal_type(x) # revealed: Literal["break1"]
break
else:
x = "break2"
reveal_type(x) # revealed: Literal["break2"]
break
reveal_type(x) # revealed: Literal["loop"]
reveal_type(x) # revealed: Literal["before", "loop", "break1", "break2"]
```
## `raise`
A `raise` statement is terminal. If it occurs in a lexically containing `try` statement, it will
jump to one of the `except` clauses (if it matches the value being raised), or to the `else` clause
(if none match). Currently, we assume definitions from before the `raise` are visible in all
`except` and `else` clauses. (In the future, we might analyze the `except` clauses to see which ones
match the value being raised, and limit visibility to those clauses.) Definitions from before the
`raise` are not visible in any `else` clause, but are visible in `except` clauses or after the
containing `try` statement (since control flow may have passed through an `except`).
Currently we assume that an exception could be raised anywhere within a `try` block. We may want to
implement a more precise understanding of where exceptions (barring `KeyboardInterrupt` and
`MemoryError`) can and cannot actually be raised.
```py
def raise_in_then_branch(cond: bool):
x = "before"
try:
if cond:
x = "raise"
reveal_type(x) # revealed: Literal["raise"]
raise ValueError
else:
x = "else"
reveal_type(x) # revealed: Literal["else"]
reveal_type(x) # revealed: Literal["else"]
except ValueError:
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "raise", "else"]
except:
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "raise", "else"]
else:
reveal_type(x) # revealed: Literal["else"]
finally:
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "raise", "else"]
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "raise", "else"]
def raise_in_else_branch(cond: bool):
x = "before"
try:
if cond:
x = "else"
reveal_type(x) # revealed: Literal["else"]
else:
x = "raise"
reveal_type(x) # revealed: Literal["raise"]
raise ValueError
reveal_type(x) # revealed: Literal["else"]
except ValueError:
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "else", "raise"]
except:
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "else", "raise"]
else:
reveal_type(x) # revealed: Literal["else"]
finally:
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "else", "raise"]
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "else", "raise"]
def raise_in_both_branches(cond: bool):
x = "before"
try:
if cond:
x = "raise1"
reveal_type(x) # revealed: Literal["raise1"]
raise ValueError
else:
x = "raise2"
reveal_type(x) # revealed: Literal["raise2"]
raise ValueError
except ValueError:
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "raise1", "raise2"]
except:
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "raise1", "raise2"]
else:
x = "unreachable"
finally:
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "raise1", "raise2"]
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "raise1", "raise2"]
def raise_in_nested_then_branch(cond1: bool, cond2: bool):
x = "before"
try:
if cond1:
x = "else1"
reveal_type(x) # revealed: Literal["else1"]
else:
if cond2:
x = "raise"
reveal_type(x) # revealed: Literal["raise"]
raise ValueError
else:
x = "else2"
reveal_type(x) # revealed: Literal["else2"]
reveal_type(x) # revealed: Literal["else2"]
reveal_type(x) # revealed: Literal["else1", "else2"]
except ValueError:
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "else1", "raise", "else2"]
except:
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "else1", "raise", "else2"]
else:
reveal_type(x) # revealed: Literal["else1", "else2"]
finally:
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "else1", "raise", "else2"]
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "else1", "raise", "else2"]
def raise_in_nested_else_branch(cond1: bool, cond2: bool):
x = "before"
try:
if cond1:
x = "else1"
reveal_type(x) # revealed: Literal["else1"]
else:
if cond2:
x = "else2"
reveal_type(x) # revealed: Literal["else2"]
else:
x = "raise"
reveal_type(x) # revealed: Literal["raise"]
raise ValueError
reveal_type(x) # revealed: Literal["else2"]
reveal_type(x) # revealed: Literal["else1", "else2"]
except ValueError:
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "else1", "else2", "raise"]
except:
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "else1", "else2", "raise"]
else:
reveal_type(x) # revealed: Literal["else1", "else2"]
finally:
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "else1", "else2", "raise"]
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "else1", "else2", "raise"]
def raise_in_both_nested_branches(cond1: bool, cond2: bool):
x = "before"
try:
if cond1:
x = "else"
reveal_type(x) # revealed: Literal["else"]
else:
if cond2:
x = "raise1"
reveal_type(x) # revealed: Literal["raise1"]
raise ValueError
else:
x = "raise2"
reveal_type(x) # revealed: Literal["raise2"]
raise ValueError
reveal_type(x) # revealed: Literal["else"]
except ValueError:
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "else", "raise1", "raise2"]
except:
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "else", "raise1", "raise2"]
else:
reveal_type(x) # revealed: Literal["else"]
finally:
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "else", "raise1", "raise2"]
# Exceptions can occur anywhere, so "before" and "raise" are valid possibilities
reveal_type(x) # revealed: Literal["before", "else", "raise1", "raise2"]
```
## Terminal in `try` with `finally` clause
TODO: we don't yet model that a `break` or `continue` in a `try` block will jump to a `finally`
clause before it jumps to end/start of the loop.
```py
def f():
x = 1
while True:
try:
break
finally:
x = 2
# TODO: should be Literal[2]
reveal_type(x) # revealed: Literal[1]
```
## Nested functions
Free references inside of a function body refer to variables defined in the containing scope.
Function bodies are _lazy scopes_: at runtime, these references are not resolved immediately at the
point of the function definition. Instead, they are resolved _at the time of the call_, which means
that their values (and types) can be different for different invocations. For simplicity, we instead
resolve free references _at the end of the containing scope_. That means that in the examples below,
all of the `x` bindings should be visible to the `reveal_type`, regardless of where we place the
`return` statements.
TODO: These currently produce the wrong results, but not because of our terminal statement support.
See [ruff#15777](https://github.com/astral-sh/ruff/issues/15777) for more details.
```py
def top_level_return(cond1: bool, cond2: bool):
x = 1
def g():
# TODO eliminate Unknown
reveal_type(x) # revealed: Unknown | Literal[1, 2, 3]
if cond1:
if cond2:
x = 2
else:
x = 3
return
def return_from_if(cond1: bool, cond2: bool):
x = 1
def g():
# TODO: Literal[1, 2, 3]
reveal_type(x) # revealed: Unknown | Literal[1]
if cond1:
if cond2:
x = 2
else:
x = 3
return
def return_from_nested_if(cond1: bool, cond2: bool):
x = 1
def g():
# TODO: Literal[1, 2, 3]
reveal_type(x) # revealed: Unknown | Literal[1, 3]
if cond1:
if cond2:
x = 2
return
else:
x = 3
```
## Statically known terminal statements
Terminal statements do not yet interact correctly with statically known bounds. In this example, we
should see that the `return` statement is always executed, and therefore that the `"b"` assignment
is not visible to the `reveal_type`.
```py
def _(cond: bool):
x = "a"
if cond:
x = "b"
if True:
return
# TODO: Literal["a"]
reveal_type(x) # revealed: Literal["a", "b"]
```

View File

@@ -15,7 +15,6 @@ directly.
### Negation
```py
from typing import Literal
from knot_extensions import Not, static_assert
def negate(n1: Not[int], n2: Not[Not[int]], n3: Not[Not[Not[int]]]) -> None:
@@ -35,7 +34,7 @@ n: Not[int, str]
```py
from knot_extensions import Intersection, Not, is_subtype_of, static_assert
from typing_extensions import Literal, Never
from typing_extensions import Never
class S: ...
class T: ...
@@ -305,7 +304,6 @@ static_assert(not is_assignable_to(int, str))
```py
from knot_extensions import is_disjoint_from, static_assert
from typing import Literal
static_assert(is_disjoint_from(None, int))
static_assert(not is_disjoint_from(Literal[2] | str, int))
@@ -328,7 +326,6 @@ static_assert(not is_fully_static(type[Any]))
```py
from knot_extensions import is_singleton, static_assert
from typing import Literal
static_assert(is_singleton(None))
static_assert(is_singleton(Literal[True]))
@@ -341,7 +338,6 @@ static_assert(not is_singleton(Literal["a"]))
```py
from knot_extensions import is_single_valued, static_assert
from typing import Literal
static_assert(is_single_valued(None))
static_assert(is_single_valued(Literal[True]))

View File

@@ -39,9 +39,7 @@ def f(c: type[A]):
reveal_type(c) # revealed: type[A]
```
`a.py`:
```py
```py path=a.py
class A: ...
```
@@ -54,31 +52,23 @@ def f(c: type[a.B]):
reveal_type(c) # revealed: type[B]
```
`a.py`:
```py
```py path=a.py
class B: ...
```
## Deeply qualified class literal from another module
`a/test.py`:
```py
```py path=a/test.py
import a.b
def f(c: type[a.b.C]):
reveal_type(c) # revealed: type[C]
```
`a/__init__.py`:
```py
```py path=a/__init__.py
```
`a/b.py`:
```py
```py path=a/b.py
class C: ...
```

View File

@@ -6,8 +6,6 @@ This file contains tests for non-fully-static `type[]` types, such as `type[Any]
## Simple
```py
from typing import Any
def f(x: type[Any], y: type[str]):
reveal_type(x) # revealed: type[Any]
# TODO: could be `<object.__repr__ type> & Any`

View File

@@ -41,7 +41,7 @@ static types can be assignable to gradual types):
```py
from knot_extensions import static_assert, is_assignable_to, Unknown
from typing import Any, Literal
from typing import Any
static_assert(is_assignable_to(Unknown, Literal[1]))
static_assert(is_assignable_to(Any, Literal[1]))
@@ -333,7 +333,7 @@ assignable to any arbitrary type.
```py
from knot_extensions import static_assert, is_assignable_to, Unknown
from typing_extensions import Never, Any, Literal
from typing_extensions import Never, Any
static_assert(is_assignable_to(Never, str))
static_assert(is_assignable_to(Never, Literal[1]))

View File

@@ -151,7 +151,7 @@ static_assert(is_disjoint_from(Never, object))
### `None`
```py
from typing_extensions import Literal, LiteralString
from typing_extensions import Literal
from knot_extensions import is_disjoint_from, static_assert
static_assert(is_disjoint_from(None, Literal[True]))
@@ -245,7 +245,6 @@ static_assert(not is_disjoint_from(TypeOf[f], object))
```py
from knot_extensions import AlwaysFalsy, AlwaysTruthy, is_disjoint_from, static_assert
from typing import Literal
static_assert(is_disjoint_from(None, AlwaysTruthy))
static_assert(not is_disjoint_from(None, AlwaysFalsy))

View File

@@ -84,38 +84,4 @@ static_assert(
)
```
## Unions containing tuples containing tuples containing unions (etc.)
```py
from knot_extensions import is_equivalent_to, static_assert, Intersection
class P: ...
class Q: ...
static_assert(
is_equivalent_to(
tuple[tuple[tuple[P | Q]]] | P,
tuple[tuple[tuple[Q | P]]] | P,
)
)
static_assert(
is_equivalent_to(
tuple[tuple[tuple[tuple[tuple[Intersection[P, Q]]]]]],
tuple[tuple[tuple[tuple[tuple[Intersection[Q, P]]]]]],
)
)
```
## Intersections containing tuples containing unions
```py
from knot_extensions import is_equivalent_to, static_assert, Intersection
class P: ...
class Q: ...
class R: ...
static_assert(is_equivalent_to(Intersection[tuple[P | Q], R], Intersection[tuple[Q | P], R]))
```
[the equivalence relation]: https://typing.readthedocs.io/en/latest/spec/glossary.html#term-equivalent

View File

@@ -54,7 +54,6 @@ static_assert(not is_gradual_equivalent_to(str | int | bytes, int | str | dict))
```py
from knot_extensions import Unknown, is_gradual_equivalent_to, static_assert
from typing import Any
static_assert(is_gradual_equivalent_to(tuple[str, Any], tuple[str, Unknown]))

View File

@@ -148,7 +148,6 @@ static_assert(is_subtype_of(tuple[int], tuple))
```py
from knot_extensions import is_subtype_of, static_assert
from typing import Literal
class A: ...
class B1(A): ...
@@ -272,7 +271,6 @@ static_assert(is_subtype_of(Never, AlwaysFalsy))
```py
from knot_extensions import AlwaysTruthy, AlwaysFalsy, is_subtype_of, static_assert
from typing import Literal
static_assert(is_subtype_of(Literal[1], AlwaysTruthy))
static_assert(is_subtype_of(Literal[0], AlwaysFalsy))
@@ -311,7 +309,7 @@ static_assert(is_subtype_of(TypeOf[1:2:3], slice))
### Special forms
```py
from typing import _SpecialForm, Literal
from typing import _SpecialForm
from knot_extensions import TypeOf, is_subtype_of, static_assert
static_assert(is_subtype_of(TypeOf[Literal], _SpecialForm))

View File

@@ -67,8 +67,6 @@ c.a = 2
## Too many arguments
```py
from typing import ClassVar
class C:
# error: [invalid-type-form] "Type qualifier `typing.ClassVar` expects exactly one type parameter"
x: ClassVar[int, str] = 1
@@ -77,8 +75,6 @@ class C:
## Illegal `ClassVar` in type expression
```py
from typing import ClassVar
class C:
# error: [invalid-type-form] "Type qualifier `typing.ClassVar` is not allowed in type expressions (only in annotation expressions)"
x: ClassVar | int
@@ -90,8 +86,6 @@ class C:
## Used outside of a class
```py
from typing import ClassVar
# TODO: this should be an error
x: ClassVar[int] = 1
```

View File

@@ -28,9 +28,7 @@ reveal_type(not b) # revealed: Literal[False]
reveal_type(not warnings) # revealed: Literal[False]
```
`b.py`:
```py
```py path=b.py
y = 1
```
@@ -125,8 +123,6 @@ classes without a `__bool__` method, with or without `__len__`, must be inferred
truthiness.
```py
from typing import Literal
class AlwaysTrue:
def __bool__(self) -> Literal[True]:
return True

View File

@@ -282,7 +282,7 @@ reveal_type(b) # revealed: Unknown
```py
# error: [invalid-assignment] "Not enough values to unpack (expected 2, got 1)"
(a, b) = "\u9e6c"
(a, b) = "\u9E6C"
reveal_type(a) # revealed: LiteralString
reveal_type(b) # revealed: Unknown
@@ -292,7 +292,7 @@ reveal_type(b) # revealed: Unknown
```py
# error: [invalid-assignment] "Not enough values to unpack (expected 2, got 1)"
(a, b) = "\U0010ffff"
(a, b) = "\U0010FFFF"
reveal_type(a) # revealed: LiteralString
reveal_type(b) # revealed: Unknown
@@ -301,7 +301,7 @@ reveal_type(b) # revealed: Unknown
### Surrogates
```py
(a, b) = "\ud800\udfff"
(a, b) = "\uD800\uDFFF"
reveal_type(a) # revealed: LiteralString
reveal_type(b) # revealed: LiteralString
@@ -361,8 +361,6 @@ def _(arg: tuple[int, int, int] | tuple[int, str, bytes] | tuple[int, int, str])
### Nested
```py
from typing import Literal
def _(arg: tuple[int, tuple[str, bytes]] | tuple[tuple[int, bytes], Literal["ab"]]):
a, (b, c) = arg
reveal_type(a) # revealed: int | tuple[int, bytes]

View File

@@ -88,8 +88,6 @@ with Manager():
## Context manager with non-callable `__exit__` attribute
```py
from typing_extensions import Self
class Manager:
def __enter__(self) -> Self: ...

View File

@@ -1,5 +1,5 @@
use itertools::Itertools;
use ruff_db::diagnostic::{DiagnosticId, LintName, Severity};
use ruff_db::diagnostic::{LintName, Severity};
use rustc_hash::FxHashMap;
use std::hash::Hasher;
use thiserror::Error;
@@ -345,18 +345,7 @@ impl LintRegistry {
}
}
Some(LintEntry::Removed(lint)) => Err(GetLintError::Removed(lint.name())),
None => {
if let Some(without_prefix) = DiagnosticId::strip_category(code) {
if let Some(entry) = self.by_name.get(without_prefix) {
return Err(GetLintError::PrefixedWithCategory {
prefixed: code.to_string(),
suggestion: entry.id().name.to_string(),
});
}
}
Err(GetLintError::Unknown(code.to_string()))
}
None => Err(GetLintError::Unknown(code.to_string())),
}
}
@@ -393,20 +382,12 @@ impl LintRegistry {
#[derive(Error, Debug, Clone, PartialEq, Eq)]
pub enum GetLintError {
/// The name maps to this removed lint.
#[error("lint `{0}` has been removed")]
#[error("lint {0} has been removed")]
Removed(LintName),
/// No lint with the given name is known.
#[error("unknown lint `{0}`")]
#[error("unknown lint {0}")]
Unknown(String),
/// The name uses the full qualified diagnostic id `lint:<rule>` instead of just `rule`.
/// The String is the name without the `lint:` category prefix.
#[error("unknown lint `{prefixed}`. Did you mean `{suggestion}`?")]
PrefixedWithCategory {
prefixed: String,
suggestion: String,
},
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
@@ -418,16 +399,6 @@ pub enum LintEntry {
Alias(LintId),
}
impl LintEntry {
fn id(self) -> LintId {
match self {
LintEntry::Lint(id) => id,
LintEntry::Removed(id) => id,
LintEntry::Alias(id) => id,
}
}
}
impl From<&'static LintMetadata> for LintEntry {
fn from(metadata: &'static LintMetadata) -> Self {
if metadata.status.is_removed() {

View File

@@ -11,7 +11,6 @@ use ruff_index::{IndexSlice, IndexVec};
use crate::module_name::ModuleName;
use crate::semantic_index::ast_ids::node_key::ExpressionNodeKey;
use crate::semantic_index::ast_ids::AstIds;
use crate::semantic_index::attribute_assignment::AttributeAssignments;
use crate::semantic_index::builder::SemanticIndexBuilder;
use crate::semantic_index::definition::{Definition, DefinitionNodeKey};
use crate::semantic_index::expression::Expression;
@@ -22,7 +21,6 @@ use crate::semantic_index::use_def::UseDefMap;
use crate::Db;
pub mod ast_ids;
pub mod attribute_assignment;
mod builder;
pub(crate) mod constraint;
pub mod definition;
@@ -32,7 +30,7 @@ mod use_def;
pub(crate) use self::use_def::{
BindingWithConstraints, BindingWithConstraintsIterator, DeclarationWithConstraint,
DeclarationsIterator,
DeclarationsIterator, ScopedVisibilityConstraintId,
};
type SymbolMap = hashbrown::HashMap<ScopedSymbolId, (), FxBuildHasher>;
@@ -95,25 +93,6 @@ pub(crate) fn use_def_map<'db>(db: &'db dyn Db, scope: ScopeId<'db>) -> Arc<UseD
index.use_def_map(scope.file_scope_id(db))
}
/// Returns all attribute assignments for a specific class body scope.
///
/// Using [`attribute_assignments`] over [`semantic_index`] has the advantage that
/// Salsa can avoid invalidating dependent queries if this scope's instance attributes
/// are unchanged.
#[salsa::tracked]
pub(crate) fn attribute_assignments<'db>(
db: &'db dyn Db,
class_body_scope: ScopeId<'db>,
) -> Option<Arc<AttributeAssignments<'db>>> {
let file = class_body_scope.file(db);
let index = semantic_index(db, file);
index
.attribute_assignments
.get(&class_body_scope.file_scope_id(db))
.cloned()
}
/// Returns the module global scope of `file`.
#[salsa::tracked]
pub(crate) fn global_scope(db: &dyn Db, file: File) -> ScopeId<'_> {
@@ -160,10 +139,6 @@ pub(crate) struct SemanticIndex<'db> {
/// Flags about the global scope (code usage impacting inference)
has_future_annotations: bool,
/// Maps from class body scopes to attribute assignments that were found
/// in methods of that class.
attribute_assignments: FxHashMap<FileScopeId, Arc<AttributeAssignments<'db>>>,
}
impl<'db> SemanticIndex<'db> {

View File

@@ -1,19 +0,0 @@
use crate::semantic_index::expression::Expression;
use ruff_python_ast::name::Name;
use rustc_hash::FxHashMap;
/// Describes an (annotated) attribute assignment that we discovered in a method
/// body, typically of the form `self.x: int`, `self.x: int = …` or `self.x = …`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) enum AttributeAssignment<'db> {
/// An attribute assignment with an explicit type annotation, either
/// `self.x: <annotation>` or `self.x: <annotation> = …`.
Annotated { annotation: Expression<'db> },
/// An attribute assignment without a type annotation, e.g. `self.x = <value>`.
Unannotated { value: Expression<'db> },
}
pub(crate) type AttributeAssignments<'db> = FxHashMap<Name, Vec<AttributeAssignment<'db>>>;

View File

@@ -14,21 +14,22 @@ use crate::ast_node_ref::AstNodeRef;
use crate::module_name::ModuleName;
use crate::semantic_index::ast_ids::node_key::ExpressionNodeKey;
use crate::semantic_index::ast_ids::AstIdsBuilder;
use crate::semantic_index::attribute_assignment::{AttributeAssignment, AttributeAssignments};
use crate::semantic_index::constraint::PatternConstraintKind;
use crate::semantic_index::definition::{
AssignmentDefinitionNodeRef, ComprehensionDefinitionNodeRef, Definition, DefinitionNodeKey,
DefinitionNodeRef, ForStmtDefinitionNodeRef, ImportFromDefinitionNodeRef,
};
use crate::semantic_index::expression::{Expression, ExpressionKind};
use crate::semantic_index::expression::Expression;
use crate::semantic_index::symbol::{
FileScopeId, NodeWithScopeKey, NodeWithScopeRef, Scope, ScopeId, ScopeKind, ScopedSymbolId,
FileScopeId, NodeWithScopeKey, NodeWithScopeRef, Scope, ScopeId, ScopedSymbolId,
SymbolTableBuilder,
};
use crate::semantic_index::use_def::{FlowSnapshot, ScopedConstraintId, UseDefMapBuilder};
use crate::semantic_index::use_def::{
FlowSnapshot, ScopedConstraintId, ScopedVisibilityConstraintId, UseDefMapBuilder,
};
use crate::semantic_index::SemanticIndex;
use crate::unpack::{Unpack, UnpackValue};
use crate::visibility_constraints::{ScopedVisibilityConstraintId, VisibilityConstraintsBuilder};
use crate::visibility_constraints::VisibilityConstraint;
use crate::Db;
use super::constraint::{Constraint, ConstraintNode, PatternConstraint};
@@ -52,24 +53,17 @@ impl LoopState {
}
}
struct ScopeInfo {
file_scope_id: FileScopeId,
loop_state: LoopState,
}
pub(super) struct SemanticIndexBuilder<'db> {
// Builder state
db: &'db dyn Db,
file: File,
module: &'db ParsedModule,
scope_stack: Vec<ScopeInfo>,
scope_stack: Vec<(FileScopeId, LoopState)>,
/// The assignments we're currently visiting, with
/// the most recent visit at the end of the Vec
current_assignments: Vec<CurrentAssignment<'db>>,
/// The match case we're currently visiting.
current_match_case: Option<CurrentMatchCase<'db>>,
/// The name of the first function parameter of the innermost function that we're currently visiting.
current_first_parameter_name: Option<&'db str>,
/// Flow states at each `break` in the current loop.
loop_break_states: Vec<FlowSnapshot>,
@@ -90,7 +84,6 @@ pub(super) struct SemanticIndexBuilder<'db> {
definitions_by_node: FxHashMap<DefinitionNodeKey, Definition<'db>>,
expressions_by_node: FxHashMap<ExpressionNodeKey, Expression<'db>>,
imported_modules: FxHashSet<ModuleName>,
attribute_assignments: FxHashMap<FileScopeId, AttributeAssignments<'db>>,
}
impl<'db> SemanticIndexBuilder<'db> {
@@ -102,7 +95,6 @@ impl<'db> SemanticIndexBuilder<'db> {
scope_stack: Vec::new(),
current_assignments: vec![],
current_match_case: None,
current_first_parameter_name: None,
loop_break_states: vec![],
try_node_context_stack_manager: TryNodeContextStackManager::default(),
@@ -120,8 +112,6 @@ impl<'db> SemanticIndexBuilder<'db> {
expressions_by_node: FxHashMap::default(),
imported_modules: FxHashSet::default(),
attribute_assignments: FxHashMap::default(),
};
builder.push_scope_with_parent(NodeWithScopeRef::Module, None);
@@ -133,7 +123,7 @@ impl<'db> SemanticIndexBuilder<'db> {
*self
.scope_stack
.last()
.map(|ScopeInfo { file_scope_id, .. }| file_scope_id)
.map(|(scope, _)| scope)
.expect("Always to have a root scope")
}
@@ -141,32 +131,14 @@ impl<'db> SemanticIndexBuilder<'db> {
self.scope_stack
.last()
.expect("Always to have a root scope")
.loop_state
}
/// Returns the scope ID of the surrounding class body scope if the current scope
/// is a method inside a class body. Returns `None` otherwise, e.g. if the current
/// scope is a function body outside of a class, or if the current scope is not a
/// function body.
fn is_method_of_class(&self) -> Option<FileScopeId> {
let mut scopes_rev = self.scope_stack.iter().rev();
let current = scopes_rev.next()?;
let parent = scopes_rev.next()?;
match (
self.scopes[current.file_scope_id].kind(),
self.scopes[parent.file_scope_id].kind(),
) {
(ScopeKind::Function, ScopeKind::Class) => Some(parent.file_scope_id),
_ => None,
}
.1
}
fn set_inside_loop(&mut self, state: LoopState) {
self.scope_stack
.last_mut()
.expect("Always to have a root scope")
.loop_state = state;
.1 = state;
}
fn push_scope(&mut self, node: NodeWithScopeRef) {
@@ -199,20 +171,16 @@ impl<'db> SemanticIndexBuilder<'db> {
debug_assert_eq!(ast_id_scope, file_scope_id);
self.scope_stack.push(ScopeInfo {
file_scope_id,
loop_state: LoopState::NotInLoop,
});
self.scope_stack.push((file_scope_id, LoopState::NotInLoop));
}
fn pop_scope(&mut self) -> FileScopeId {
let ScopeInfo { file_scope_id, .. } =
self.scope_stack.pop().expect("Root scope to be present");
let (id, _) = self.scope_stack.pop().expect("Root scope to be present");
let children_end = self.scopes.next_index();
let scope = &mut self.scopes[file_scope_id];
let scope = &mut self.scopes[id];
scope.descendents = scope.descendents.start..children_end;
self.try_node_context_stack_manager.exit_scope();
file_scope_id
id
}
fn current_symbol_table(&mut self) -> &mut SymbolTableBuilder {
@@ -230,11 +198,6 @@ impl<'db> SemanticIndexBuilder<'db> {
&self.use_def_maps[scope_id]
}
fn current_visibility_constraints_mut(&mut self) -> &mut VisibilityConstraintsBuilder<'db> {
let scope_id = self.current_scope();
&mut self.use_def_maps[scope_id].visibility_constraints
}
fn current_ast_ids(&mut self) -> &mut AstIdsBuilder {
let scope_id = self.current_scope();
&mut self.ast_ids[scope_id]
@@ -370,11 +333,21 @@ impl<'db> SemanticIndexBuilder<'db> {
id
}
/// Adds a new visibility constraint, but does not record it. Returns the constraint ID
/// for later recording using [`SemanticIndexBuilder::record_visibility_constraint_id`].
fn add_visibility_constraint(
&mut self,
constraint: VisibilityConstraint<'db>,
) -> ScopedVisibilityConstraintId {
self.current_use_def_map_mut()
.add_visibility_constraint(constraint)
}
/// Records a previously added visibility constraint by applying it to all live bindings
/// and declarations.
fn record_visibility_constraint_id(&mut self, constraint: ScopedVisibilityConstraintId) {
self.current_use_def_map_mut()
.record_visibility_constraint(constraint);
.record_visibility_constraint_id(constraint);
}
/// Negates the given visibility constraint and then adds it to all live bindings and declarations.
@@ -382,11 +355,8 @@ impl<'db> SemanticIndexBuilder<'db> {
&mut self,
constraint: ScopedVisibilityConstraintId,
) -> ScopedVisibilityConstraintId {
let id = self
.current_visibility_constraints_mut()
.add_not_constraint(constraint);
self.record_visibility_constraint_id(id);
id
self.current_use_def_map_mut()
.record_visibility_constraint(VisibilityConstraint::VisibleIfNot(constraint))
}
/// Records a visibility constraint by applying it to all live bindings and declarations.
@@ -394,23 +364,14 @@ impl<'db> SemanticIndexBuilder<'db> {
&mut self,
constraint: Constraint<'db>,
) -> ScopedVisibilityConstraintId {
let id = self
.current_visibility_constraints_mut()
.add_atom(constraint, 0);
self.record_visibility_constraint_id(id);
id
}
/// Records that all remaining statements in the current block are unreachable, and therefore
/// not visible.
fn mark_unreachable(&mut self) {
self.current_use_def_map_mut().mark_unreachable();
}
/// Records a visibility constraint that always evaluates to "ambiguous".
fn record_ambiguous_visibility(&mut self) {
self.current_use_def_map_mut()
.record_visibility_constraint(ScopedVisibilityConstraintId::AMBIGUOUS);
.record_visibility_constraint(VisibilityConstraint::VisibleIf(constraint))
}
/// Records a [`VisibilityConstraint::Ambiguous`] constraint.
fn record_ambiguous_visibility(&mut self) -> ScopedVisibilityConstraintId {
self.current_use_def_map_mut()
.record_visibility_constraint(VisibilityConstraint::Ambiguous)
}
/// Simplifies (resets) visibility constraints on all live bindings and declarations that did
@@ -437,32 +398,6 @@ impl<'db> SemanticIndexBuilder<'db> {
self.current_assignments.last_mut()
}
/// Records the fact that we saw an attribute assignment of the form
/// `object.attr: <annotation>( = …)` or `object.attr = <value>`.
fn register_attribute_assignment(
&mut self,
object: &ast::Expr,
attr: &'db ast::Identifier,
attribute_assignment: AttributeAssignment<'db>,
) {
if let Some(class_body_scope) = self.is_method_of_class() {
// We only care about attribute assignments to the first parameter of a method,
// i.e. typically `self` or `cls`.
let accessed_object_refers_to_first_parameter =
object.as_name_expr().map(|name| name.id.as_str())
== self.current_first_parameter_name;
if accessed_object_refers_to_first_parameter {
self.attribute_assignments
.entry(class_body_scope)
.or_default()
.entry(attr.id().clone())
.or_default()
.push(attribute_assignment);
}
}
}
fn add_pattern_constraint(
&mut self,
subject: Expression<'db>,
@@ -516,20 +451,6 @@ impl<'db> SemanticIndexBuilder<'db> {
/// Record an expression that needs to be a Salsa ingredient, because we need to infer its type
/// standalone (type narrowing tests, RHS of an assignment.)
fn add_standalone_expression(&mut self, expression_node: &ast::Expr) -> Expression<'db> {
self.add_standalone_expression_impl(expression_node, ExpressionKind::Normal)
}
/// Same as [`SemanticIndexBuilder::add_standalone_expression`], but marks the expression as a
/// *type* expression, which makes sure that it will later be inferred as such.
fn add_standalone_type_expression(&mut self, expression_node: &ast::Expr) -> Expression<'db> {
self.add_standalone_expression_impl(expression_node, ExpressionKind::TypeExpression)
}
fn add_standalone_expression_impl(
&mut self,
expression_node: &ast::Expr,
expression_kind: ExpressionKind,
) -> Expression<'db> {
let expression = Expression::new(
self.db,
self.file,
@@ -538,7 +459,6 @@ impl<'db> SemanticIndexBuilder<'db> {
unsafe {
AstNodeRef::new(self.module.clone(), expression_node)
},
expression_kind,
countme::Count::default(),
);
self.expressions_by_node
@@ -679,7 +599,7 @@ impl<'db> SemanticIndexBuilder<'db> {
}
fn declare_parameter(&mut self, parameter: &'db ast::ParameterWithDefault) {
let symbol = self.add_symbol(parameter.name().id().clone());
let symbol = self.add_symbol(parameter.parameter.name.id().clone());
let definition = self.add_definition(symbol, parameter);
@@ -742,11 +662,6 @@ impl<'db> SemanticIndexBuilder<'db> {
use_def_maps,
imported_modules: Arc::new(self.imported_modules),
has_future_annotations: self.has_future_annotations,
attribute_assignments: self
.attribute_assignments
.into_iter()
.map(|(k, v)| (k, Arc::new(v)))
.collect(),
}
}
}
@@ -785,17 +700,7 @@ where
builder.declare_parameters(parameters);
let mut first_parameter_name = parameters
.iter_non_variadic_params()
.next()
.map(|first_param| first_param.parameter.name.id().as_str());
std::mem::swap(
&mut builder.current_first_parameter_name,
&mut first_parameter_name,
);
builder.visit_body(body);
builder.current_first_parameter_name = first_parameter_name;
builder.pop_scope()
},
);
@@ -929,19 +834,6 @@ where
unpack: None,
first: false,
}),
ast::Expr::Attribute(ast::ExprAttribute {
value: object,
attr,
..
}) => {
self.register_attribute_assignment(
object,
attr,
AttributeAssignment::Unannotated { value },
);
None
}
_ => None,
};
@@ -960,7 +852,6 @@ where
ast::Stmt::AnnAssign(node) => {
debug_assert_eq!(&self.current_assignments, &[]);
self.visit_expr(&node.annotation);
let annotation = self.add_standalone_type_expression(&node.annotation);
if let Some(value) = &node.value {
self.visit_expr(value);
}
@@ -972,20 +863,6 @@ where
) {
self.push_assignment(node.into());
self.visit_expr(&node.target);
if let ast::Expr::Attribute(ast::ExprAttribute {
value: object,
attr,
..
}) = &*node.target
{
self.register_attribute_assignment(
object,
attr,
AttributeAssignment::Annotated { annotation },
);
}
self.pop_assignment();
} else {
self.visit_expr(&node.target);
@@ -1087,16 +964,6 @@ where
let pre_loop = self.flow_snapshot();
let constraint = self.record_expression_constraint(test);
// We need multiple copies of the visibility constraint for the while condition,
// since we need to model situations where the first evaluation of the condition
// returns True, but a later evaluation returns False.
let first_vis_constraint_id = self
.current_visibility_constraints_mut()
.add_atom(constraint, 0);
let later_vis_constraint_id = self
.current_visibility_constraints_mut()
.add_atom(constraint, 1);
// Save aside any break states from an outer loop
let saved_break_states = std::mem::take(&mut self.loop_break_states);
@@ -1107,42 +974,26 @@ where
self.visit_body(body);
self.set_inside_loop(outer_loop_state);
// If the body is executed, we know that we've evaluated the condition at least
// once, and that the first evaluation was True. We might not have evaluated the
// condition more than once, so we can't assume that later evaluations were True.
// So the body's full visibility constraint is `first`.
let body_vis_constraint_id = first_vis_constraint_id;
self.record_visibility_constraint_id(body_vis_constraint_id);
let vis_constraint_id = self.record_visibility_constraint(constraint);
// Get the break states from the body of this loop, and restore the saved outer
// ones.
let break_states =
std::mem::replace(&mut self.loop_break_states, saved_break_states);
// We execute the `else` once the condition evaluates to false. This could happen
// without ever executing the body, if the condition is false the first time it's
// tested. So the starting flow state of the `else` clause is the union of:
// - the pre-loop state with a visibility constraint that the first evaluation of
// the while condition was false,
// - the post-body state (which already has a visibility constraint that the
// first evaluation was true) with a visibility constraint that a _later_
// evaluation of the while condition was false.
// To model this correctly, we need two copies of the while condition constraint,
// since the first and later evaluations might produce different results.
let post_body = self.flow_snapshot();
self.flow_restore(pre_loop.clone());
self.record_negated_visibility_constraint(first_vis_constraint_id);
self.flow_merge(post_body);
// We may execute the `else` clause without ever executing the body, so merge in
// the pre-loop state before visiting `else`.
self.flow_merge(pre_loop.clone());
self.record_negated_constraint(constraint);
self.visit_body(orelse);
self.record_negated_visibility_constraint(later_vis_constraint_id);
self.record_negated_visibility_constraint(vis_constraint_id);
// Breaking out of a while loop bypasses the `else` clause, so merge in the break
// states after visiting `else`.
for break_state in break_states {
let snapshot = self.flow_snapshot();
self.flow_restore(break_state);
self.record_visibility_constraint_id(body_vis_constraint_id);
self.record_visibility_constraint(constraint);
self.flow_merge(snapshot);
}
@@ -1168,6 +1019,11 @@ where
}
self.visit_body(body);
}
ast::Stmt::Break(_) => {
if self.loop_state().is_inside() {
self.loop_break_states.push(self.flow_snapshot());
}
}
ast::Stmt::For(
for_stmt @ ast::StmtFor {
@@ -1414,21 +1270,6 @@ where
// - https://github.com/astral-sh/ruff/pull/13633#discussion_r1788626702
self.visit_body(finalbody);
}
ast::Stmt::Raise(_) | ast::Stmt::Return(_) | ast::Stmt::Continue(_) => {
walk_stmt(self, stmt);
// Everything in the current block after a terminal statement is unreachable.
self.mark_unreachable();
}
ast::Stmt::Break(_) => {
if self.loop_state().is_inside() {
self.loop_break_states.push(self.flow_snapshot());
}
// Everything in the current block after a terminal statement is unreachable.
self.mark_unreachable();
}
_ => {
walk_stmt(self, stmt);
}
@@ -1667,8 +1508,7 @@ where
ast::BoolOp::Or => self.add_negated_constraint(constraint),
};
let visibility_constraint = self
.current_visibility_constraints_mut()
.add_atom(constraint, 0);
.add_visibility_constraint(VisibilityConstraint::VisibleIf(constraint));
let after_expr = self.flow_snapshot();

View File

@@ -5,16 +5,6 @@ use ruff_db::files::File;
use ruff_python_ast as ast;
use salsa;
/// Whether or not this expression should be inferred as a normal expression or
/// a type expression. For example, in `self.x: <annotation> = <value>`, the
/// `<annotation>` is inferred as a type expression, while `<value>` is inferred
/// as a normal expression.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub(crate) enum ExpressionKind {
Normal,
TypeExpression,
}
/// An independently type-inferable expression.
///
/// Includes constraint expressions (e.g. if tests) and the RHS of an unpacking assignment.
@@ -45,10 +35,6 @@ pub(crate) struct Expression<'db> {
#[return_ref]
pub(crate) node_ref: AstNodeRef<ast::Expr>,
/// Should this expression be inferred as a normal expression or a type expression?
#[id]
pub(crate) kind: ExpressionKind,
#[no_eq]
count: countme::Count<Expression<'static>>,
}

View File

@@ -119,7 +119,6 @@ impl<'db> ScopeId<'db> {
self.node(db).scope_kind(),
ScopeKind::Annotation
| ScopeKind::Function
| ScopeKind::Lambda
| ScopeKind::TypeAlias
| ScopeKind::Comprehension
)
@@ -204,7 +203,6 @@ pub enum ScopeKind {
Annotation,
Class,
Function,
Lambda,
Comprehension,
TypeAlias,
}
@@ -445,8 +443,7 @@ impl NodeWithScopeKind {
match self {
Self::Module => ScopeKind::Module,
Self::Class(_) => ScopeKind::Class,
Self::Function(_) => ScopeKind::Function,
Self::Lambda(_) => ScopeKind::Lambda,
Self::Function(_) | Self::Lambda(_) => ScopeKind::Function,
Self::FunctionTypeParameters(_)
| Self::ClassTypeParameters(_)
| Self::TypeAliasTypeParameters(_) => ScopeKind::Annotation,

View File

@@ -255,18 +255,16 @@
//! snapshot, and merging a snapshot into the current state. The logic using these methods lives in
//! [`SemanticIndexBuilder`](crate::semantic_index::builder::SemanticIndexBuilder), e.g. where it
//! visits a `StmtIf` node.
pub(crate) use self::symbol_state::ScopedConstraintId;
use self::symbol_state::{
BindingIdWithConstraintsIterator, ConstraintIdIterator, DeclarationIdIterator,
ScopedDefinitionId, SymbolBindings, SymbolDeclarations, SymbolState,
};
pub(crate) use self::symbol_state::{ScopedConstraintId, ScopedVisibilityConstraintId};
use crate::semantic_index::ast_ids::ScopedUseId;
use crate::semantic_index::definition::Definition;
use crate::semantic_index::symbol::ScopedSymbolId;
use crate::semantic_index::use_def::symbol_state::DeclarationIdWithConstraint;
use crate::visibility_constraints::{
ScopedVisibilityConstraintId, VisibilityConstraints, VisibilityConstraintsBuilder,
};
use crate::visibility_constraints::{VisibilityConstraint, VisibilityConstraints};
use ruff_index::IndexVec;
use rustc_hash::FxHashMap;
@@ -287,7 +285,7 @@ pub(crate) struct UseDefMap<'db> {
/// Array of [`Constraint`] in this scope.
all_constraints: AllConstraints<'db>,
/// Array of visibility constraints in this scope.
/// Array of [`VisibilityConstraint`]s in this scope.
visibility_constraints: VisibilityConstraints<'db>,
/// [`SymbolBindings`] reaching a [`ScopedUseId`].
@@ -478,7 +476,6 @@ impl std::iter::FusedIterator for DeclarationsIterator<'_, '_> {}
pub(super) struct FlowSnapshot {
symbol_states: IndexVec<ScopedSymbolId, SymbolState>,
scope_start_visibility: ScopedVisibilityConstraintId,
reachable: bool,
}
#[derive(Debug)]
@@ -489,8 +486,8 @@ pub(super) struct UseDefMapBuilder<'db> {
/// Append-only array of [`Constraint`].
all_constraints: AllConstraints<'db>,
/// Builder of visibility constraints.
pub(super) visibility_constraints: VisibilityConstraintsBuilder<'db>,
/// Append-only array of [`VisibilityConstraint`].
visibility_constraints: VisibilityConstraints<'db>,
/// A constraint which describes the visibility of the unbound/undeclared state, i.e.
/// whether or not the start of the scope is visible. This is important for cases like
@@ -506,8 +503,6 @@ pub(super) struct UseDefMapBuilder<'db> {
/// Currently live bindings and declarations for each symbol.
symbol_states: IndexVec<ScopedSymbolId, SymbolState>,
reachable: bool,
}
impl Default for UseDefMapBuilder<'_> {
@@ -515,21 +510,16 @@ impl Default for UseDefMapBuilder<'_> {
Self {
all_definitions: IndexVec::from_iter([None]),
all_constraints: IndexVec::new(),
visibility_constraints: VisibilityConstraintsBuilder::default(),
visibility_constraints: VisibilityConstraints::default(),
scope_start_visibility: ScopedVisibilityConstraintId::ALWAYS_TRUE,
bindings_by_use: IndexVec::new(),
definitions_by_definition: FxHashMap::default(),
symbol_states: IndexVec::new(),
reachable: true,
}
}
}
impl<'db> UseDefMapBuilder<'db> {
pub(super) fn mark_unreachable(&mut self) {
self.reachable = false;
}
pub(super) fn add_symbol(&mut self, symbol: ScopedSymbolId) {
let new_symbol = self
.symbol_states
@@ -563,18 +553,35 @@ impl<'db> UseDefMapBuilder<'db> {
new_constraint_id
}
pub(super) fn record_visibility_constraint(
pub(super) fn add_visibility_constraint(
&mut self,
constraint: VisibilityConstraint<'db>,
) -> ScopedVisibilityConstraintId {
self.visibility_constraints.add(constraint)
}
pub(super) fn record_visibility_constraint_id(
&mut self,
constraint: ScopedVisibilityConstraintId,
) {
for state in &mut self.symbol_states {
state.record_visibility_constraint(&mut self.visibility_constraints, constraint);
}
self.scope_start_visibility = self
.visibility_constraints
.add_and_constraint(self.scope_start_visibility, constraint);
}
pub(super) fn record_visibility_constraint(
&mut self,
constraint: VisibilityConstraint<'db>,
) -> ScopedVisibilityConstraintId {
let new_constraint_id = self.add_visibility_constraint(constraint);
self.record_visibility_constraint_id(new_constraint_id);
new_constraint_id
}
/// This method resets the visibility constraints for all symbols to a previous state
/// *if* there have been no new declarations or bindings since then. Consider the
/// following example:
@@ -649,7 +656,6 @@ impl<'db> UseDefMapBuilder<'db> {
FlowSnapshot {
symbol_states: self.symbol_states.clone(),
scope_start_visibility: self.scope_start_visibility,
reachable: self.reachable,
}
}
@@ -672,25 +678,12 @@ impl<'db> UseDefMapBuilder<'db> {
num_symbols,
SymbolState::undefined(self.scope_start_visibility),
);
self.reachable = snapshot.reachable;
}
/// Merge the given snapshot into the current state, reflecting that we might have taken either
/// path to get here. The new state for each symbol should include definitions from both the
/// prior state and the snapshot.
pub(super) fn merge(&mut self, snapshot: FlowSnapshot) {
// Unreachable snapshots should not be merged: If the current snapshot is unreachable, it
// should be completely overwritten by the snapshot we're merging in. If the other snapshot
// is unreachable, we should return without merging.
if !snapshot.reachable {
return;
}
if !self.reachable {
self.restore(snapshot);
return;
}
// We never remove symbols from `symbol_states` (it's an IndexVec, and the symbol
// IDs must line up), so the current number of known symbols must always be equal to or
// greater than the number of known symbols in a previously-taken snapshot.
@@ -712,9 +705,6 @@ impl<'db> UseDefMapBuilder<'db> {
self.scope_start_visibility = self
.visibility_constraints
.add_or_constraint(self.scope_start_visibility, snapshot.scope_start_visibility);
// Both of the snapshots are reachable, so the merged result is too.
self.reachable = true;
}
pub(super) fn finish(mut self) -> UseDefMap<'db> {
@@ -727,7 +717,7 @@ impl<'db> UseDefMapBuilder<'db> {
UseDefMap {
all_definitions: self.all_definitions,
all_constraints: self.all_constraints,
visibility_constraints: self.visibility_constraints.build(),
visibility_constraints: self.visibility_constraints,
bindings_by_use: self.bindings_by_use,
public_symbols: self.symbol_states,
definitions_by_definition: self.definitions_by_definition,

View File

@@ -43,15 +43,12 @@
//!
//! Tracking live declarations is simpler, since constraints are not involved, but otherwise very
//! similar to tracking live bindings.
use crate::semantic_index::use_def::VisibilityConstraints;
use itertools::{EitherOrBoth, Itertools};
use super::bitset::{BitSet, BitSetIterator};
use ruff_index::newtype_index;
use smallvec::SmallVec;
use crate::semantic_index::use_def::bitset::{BitSet, BitSetIterator};
use crate::semantic_index::use_def::VisibilityConstraintsBuilder;
use crate::visibility_constraints::ScopedVisibilityConstraintId;
/// A newtype-index for a definition in a particular scope.
#[newtype_index]
pub(super) struct ScopedDefinitionId;
@@ -99,6 +96,19 @@ type ConstraintsPerBinding = SmallVec<InlineConstraintArray>;
/// Iterate over all constraints for a single binding.
type ConstraintsIterator<'a> = std::slice::Iter<'a, Constraints>;
type ConstraintsIntoIterator = smallvec::IntoIter<InlineConstraintArray>;
/// A newtype-index for a visibility constraint in a particular scope.
#[newtype_index]
pub(crate) struct ScopedVisibilityConstraintId;
impl ScopedVisibilityConstraintId {
/// A special ID that is used for an "always true" / "always visible" constraint.
/// When we create a new [`VisibilityConstraints`] object, this constraint is always
/// present at index 0.
pub(crate) const ALWAYS_TRUE: ScopedVisibilityConstraintId =
ScopedVisibilityConstraintId::from_u32(0);
}
const INLINE_VISIBILITY_CONSTRAINTS: usize = 4;
type InlineVisibilityConstraintsArray =
@@ -113,18 +123,13 @@ type VisibilityConstraintPerBinding = SmallVec<InlineVisibilityConstraintsArray>
/// Iterator over the visibility constraints for all live bindings/declarations.
type VisibilityConstraintsIterator<'a> = std::slice::Iter<'a, ScopedVisibilityConstraintId>;
type VisibilityConstraintsIntoIterator = smallvec::IntoIter<InlineVisibilityConstraintsArray>;
/// Live declarations for a single symbol at some point in control flow, with their
/// corresponding visibility constraints.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
#[derive(Clone, Debug, PartialEq, Eq)]
pub(super) struct SymbolDeclarations {
/// [`BitSet`]: which declarations (as [`ScopedDefinitionId`]) can reach the current location?
///
/// Invariant: Because this is a `BitSet`, it can be viewed as a _sorted_ set of definition
/// IDs. The `visibility_constraints` field stores constraints for each definition. Therefore
/// those fields must always have the same `len()` as `live_declarations`, and the elements
/// must appear in the same order. Effectively, this means that elements must always be added
/// in sorted order, or via a binary search that determines the correct place to insert new
/// constraints.
pub(crate) live_declarations: Declarations,
/// For each live declaration, which visibility constraint applies to it?
@@ -153,7 +158,7 @@ impl SymbolDeclarations {
/// Add given visibility constraint to all live declarations.
pub(super) fn record_visibility_constraint(
&mut self,
visibility_constraints: &mut VisibilityConstraintsBuilder,
visibility_constraints: &mut VisibilityConstraints,
constraint: ScopedVisibilityConstraintId,
) {
for existing in &mut self.visibility_constraints {
@@ -168,51 +173,13 @@ impl SymbolDeclarations {
visibility_constraints: self.visibility_constraints.iter(),
}
}
/// Merge the declarations state from another control-flow path (`b`) into `self`.
///
/// Live declarations are unioned; for a declaration live on both paths, its two
/// visibility constraints are combined with a ternary OR. Declarations live on
/// only one path keep their constraint unchanged.
fn merge(&mut self, b: Self, visibility_constraints: &mut VisibilityConstraintsBuilder) {
// Take ownership of `self`'s previous state so its parts can be consumed below;
// `self` is rebuilt from the merged result.
let a = std::mem::take(self);
self.live_declarations = a.live_declarations.clone();
self.live_declarations.union(&b.live_declarations);
// Invariant: These zips are well-formed since we maintain an invariant that all of our
// fields are sets/vecs with the same length.
let a = (a.live_declarations.iter()).zip(a.visibility_constraints);
let b = (b.live_declarations.iter()).zip(b.visibility_constraints);
// Invariant: merge_join_by consumes the two iterators in sorted order, which ensures that
// the definition IDs and constraints line up correctly in the merged result. If a
// definition is found in both `a` and `b`, we compose the constraints from the two paths
// in an appropriate way (intersection for narrowing constraints; ternary OR for visibility
// constraints). If a definition is found in only one path, it is used as-is.
for zipped in a.merge_join_by(b, |(a_decl, _), (b_decl, _)| a_decl.cmp(b_decl)) {
match zipped {
EitherOrBoth::Both((_, a_vis_constraint), (_, b_vis_constraint)) => {
let vis_constraint = visibility_constraints
.add_or_constraint(a_vis_constraint, b_vis_constraint);
self.visibility_constraints.push(vis_constraint);
}
EitherOrBoth::Left((_, vis_constraint))
| EitherOrBoth::Right((_, vis_constraint)) => {
self.visibility_constraints.push(vis_constraint);
}
}
}
}
}
/// Live bindings for a single symbol at some point in control flow. Each live binding comes
/// with a set of narrowing constraints and a visibility constraint.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
#[derive(Clone, Debug, PartialEq, Eq)]
pub(super) struct SymbolBindings {
/// [`BitSet`]: which bindings (as [`ScopedDefinitionId`]) can reach the current location?
///
/// Invariant: Because this is a `BitSet`, it can be viewed as a _sorted_ set of definition
/// IDs. The `constraints` and `visibility_constraints` field stores constraints for each
/// definition. Therefore those fields must always have the same `len()` as
/// `live_bindings`, and the elements must appear in the same order. Effectively, this means
/// that elements must always be added in sorted order, or via a binary search that determines
/// the correct place to insert new constraints.
live_bindings: Bindings,
/// For each live binding, which [`ScopedConstraintId`] apply?
@@ -259,7 +226,7 @@ impl SymbolBindings {
/// Add given visibility constraint to all live bindings.
pub(super) fn record_visibility_constraint(
&mut self,
visibility_constraints: &mut VisibilityConstraintsBuilder,
visibility_constraints: &mut VisibilityConstraints,
constraint: ScopedVisibilityConstraintId,
) {
for existing in &mut self.visibility_constraints {
@@ -275,62 +242,6 @@ impl SymbolBindings {
visibility_constraints: self.visibility_constraints.iter(),
}
}
/// Merge the bindings state from another control-flow path (`b`) into `self`.
///
/// Live bindings are unioned. For a binding live on both paths, the narrowing
/// constraints are intersected (a constraint holding on only one path cannot
/// narrow the union of the two paths) and the visibility constraints are
/// combined with a ternary OR. Bindings live on only one path are kept as-is.
fn merge(&mut self, mut b: Self, visibility_constraints: &mut VisibilityConstraintsBuilder) {
// Take ownership of `self`'s previous state so its parts can be consumed below;
// `self` is rebuilt from the merged result.
let mut a = std::mem::take(self);
self.live_bindings = a.live_bindings.clone();
self.live_bindings.union(&b.live_bindings);
// Invariant: These zips are well-formed since we maintain an invariant that all of our
// fields are sets/vecs with the same length.
//
// Performance: We iterate over the `constraints` smallvecs via mut reference, because the
// individual elements are `BitSet`s (currently 24 bytes in size), and we don't want to
// move them by value multiple times during iteration. By iterating by reference, we only
// have to copy single pointers around. In the loop below, the `std::mem::take` calls
// specify precisely where we want to move them into the merged `constraints` smallvec.
//
// We don't need a similar optimization for `visibility_constraints`, since those elements
// are 32-bit IndexVec IDs, and so are already cheap to move/copy.
let a = (a.live_bindings.iter())
.zip(a.constraints.iter_mut())
.zip(a.visibility_constraints);
let b = (b.live_bindings.iter())
.zip(b.constraints.iter_mut())
.zip(b.visibility_constraints);
// Invariant: merge_join_by consumes the two iterators in sorted order, which ensures that
// the definition IDs and constraints line up correctly in the merged result. If a
// definition is found in both `a` and `b`, we compose the constraints from the two paths
// in an appropriate way (intersection for narrowing constraints; ternary OR for visibility
// constraints). If a definition is found in only one path, it is used as-is.
for zipped in a.merge_join_by(b, |((a_def, _), _), ((b_def, _), _)| a_def.cmp(b_def)) {
match zipped {
EitherOrBoth::Both(
((_, a_constraints), a_vis_constraint),
((_, b_constraints), b_vis_constraint),
) => {
// If the same definition is visible through both paths, any constraint
// that applies on only one path is irrelevant to the resulting type from
// unioning the two paths, so we intersect the constraints.
let constraints = a_constraints;
constraints.intersect(b_constraints);
self.constraints.push(std::mem::take(constraints));
// For visibility constraints, we merge them using a ternary OR operation:
let vis_constraint = visibility_constraints
.add_or_constraint(a_vis_constraint, b_vis_constraint);
self.visibility_constraints.push(vis_constraint);
}
EitherOrBoth::Left(((_, constraints), vis_constraint))
| EitherOrBoth::Right(((_, constraints), vis_constraint)) => {
self.constraints.push(std::mem::take(constraints));
self.visibility_constraints.push(vis_constraint);
}
}
}
}
}
#[derive(Clone, Debug, PartialEq, Eq)]
@@ -362,7 +273,7 @@ impl SymbolState {
/// Add given visibility constraint to all live bindings.
pub(super) fn record_visibility_constraint(
&mut self,
visibility_constraints: &mut VisibilityConstraintsBuilder,
visibility_constraints: &mut VisibilityConstraints,
constraint: ScopedVisibilityConstraintId,
) {
self.bindings
@@ -390,11 +301,204 @@ impl SymbolState {
pub(super) fn merge(
&mut self,
b: SymbolState,
visibility_constraints: &mut VisibilityConstraintsBuilder,
visibility_constraints: &mut VisibilityConstraints,
) {
self.bindings.merge(b.bindings, visibility_constraints);
let mut a = Self {
bindings: SymbolBindings {
live_bindings: Bindings::default(),
constraints: ConstraintsPerBinding::default(),
visibility_constraints: VisibilityConstraintPerBinding::default(),
},
declarations: SymbolDeclarations {
live_declarations: self.declarations.live_declarations.clone(),
visibility_constraints: VisibilityConstraintPerDeclaration::default(),
},
};
std::mem::swap(&mut a, self);
self.declarations
.merge(b.declarations, visibility_constraints);
.live_declarations
.union(&b.declarations.live_declarations);
let mut a_defs_iter = a.bindings.live_bindings.iter();
let mut b_defs_iter = b.bindings.live_bindings.iter();
let mut a_constraints_iter = a.bindings.constraints.into_iter();
let mut b_constraints_iter = b.bindings.constraints.into_iter();
let mut a_vis_constraints_iter = a.bindings.visibility_constraints.into_iter();
let mut b_vis_constraints_iter = b.bindings.visibility_constraints.into_iter();
let mut opt_a_def: Option<u32> = a_defs_iter.next();
let mut opt_b_def: Option<u32> = b_defs_iter.next();
// Iterate through the definitions from `a` and `b`, always processing the lower definition
// ID first, and pushing each definition onto the merged `SymbolState` with its
// constraints. If a definition is found in both `a` and `b`, push it with the intersection
// of the constraints from the two paths; a constraint that applies from only one possible
// path is irrelevant.
// Helper to push `def`, with constraints in `constraints_iter`, onto `self`.
let push = |def,
constraints_iter: &mut ConstraintsIntoIterator,
visibility_constraints_iter: &mut VisibilityConstraintsIntoIterator,
merged: &mut Self| {
merged.bindings.live_bindings.insert(def);
// SAFETY: we only ever create SymbolState using [`SymbolState::undefined`], which adds
// one "unbound" definition with corresponding narrowing and visibility constraints, or
// using [`SymbolState::record_binding`] or [`SymbolState::record_declaration`], which
// similarly add one definition with corresponding constraints. [`SymbolState::merge`]
// always pushes one definition and one constraint bitset and one visibility constraint
// together (just below), so the number of definitions and the number of constraints can
// never get out of sync.
// get out of sync.
let constraints = constraints_iter
.next()
.expect("definitions and constraints length mismatch");
let visibility_constraints = visibility_constraints_iter
.next()
.expect("definitions and visibility_constraints length mismatch");
merged.bindings.constraints.push(constraints);
merged
.bindings
.visibility_constraints
.push(visibility_constraints);
};
loop {
match (opt_a_def, opt_b_def) {
(Some(a_def), Some(b_def)) => match a_def.cmp(&b_def) {
std::cmp::Ordering::Less => {
// Next definition ID is only in `a`, push it to `self` and advance `a`.
push(
a_def,
&mut a_constraints_iter,
&mut a_vis_constraints_iter,
self,
);
opt_a_def = a_defs_iter.next();
}
std::cmp::Ordering::Greater => {
// Next definition ID is only in `b`, push it to `self` and advance `b`.
push(
b_def,
&mut b_constraints_iter,
&mut b_vis_constraints_iter,
self,
);
opt_b_def = b_defs_iter.next();
}
std::cmp::Ordering::Equal => {
// Next definition is in both; push to `self` and intersect constraints.
push(
a_def,
&mut b_constraints_iter,
&mut b_vis_constraints_iter,
self,
);
// SAFETY: see comment in `push` above.
let a_constraints = a_constraints_iter
.next()
.expect("definitions and constraints length mismatch");
let current_constraints = self.bindings.constraints.last_mut().unwrap();
// If the same definition is visible through both paths, any constraint
// that applies on only one path is irrelevant to the resulting type from
// unioning the two paths, so we intersect the constraints.
current_constraints.intersect(&a_constraints);
// For visibility constraints, we merge them using a ternary OR operation:
let a_vis_constraint = a_vis_constraints_iter
.next()
.expect("visibility_constraints length mismatch");
let current_vis_constraint =
self.bindings.visibility_constraints.last_mut().unwrap();
*current_vis_constraint = visibility_constraints
.add_or_constraint(*current_vis_constraint, a_vis_constraint);
opt_a_def = a_defs_iter.next();
opt_b_def = b_defs_iter.next();
}
},
(Some(a_def), None) => {
// We've exhausted `b`, just push the def from `a` and move on to the next.
push(
a_def,
&mut a_constraints_iter,
&mut a_vis_constraints_iter,
self,
);
opt_a_def = a_defs_iter.next();
}
(None, Some(b_def)) => {
// We've exhausted `a`, just push the def from `b` and move on to the next.
push(
b_def,
&mut b_constraints_iter,
&mut b_vis_constraints_iter,
self,
);
opt_b_def = b_defs_iter.next();
}
(None, None) => break,
}
}
// Same as above, but for declarations.
let mut a_decls_iter = a.declarations.live_declarations.iter();
let mut b_decls_iter = b.declarations.live_declarations.iter();
let mut a_vis_constraints_iter = a.declarations.visibility_constraints.into_iter();
let mut b_vis_constraints_iter = b.declarations.visibility_constraints.into_iter();
let mut opt_a_decl: Option<u32> = a_decls_iter.next();
let mut opt_b_decl: Option<u32> = b_decls_iter.next();
let push = |vis_constraints_iter: &mut VisibilityConstraintsIntoIterator,
merged: &mut Self| {
let vis_constraints = vis_constraints_iter
.next()
.expect("declarations and visibility_constraints length mismatch");
merged
.declarations
.visibility_constraints
.push(vis_constraints);
};
loop {
match (opt_a_decl, opt_b_decl) {
(Some(a_decl), Some(b_decl)) => match a_decl.cmp(&b_decl) {
std::cmp::Ordering::Less => {
push(&mut a_vis_constraints_iter, self);
opt_a_decl = a_decls_iter.next();
}
std::cmp::Ordering::Greater => {
push(&mut b_vis_constraints_iter, self);
opt_b_decl = b_decls_iter.next();
}
std::cmp::Ordering::Equal => {
push(&mut b_vis_constraints_iter, self);
let a_vis_constraint = a_vis_constraints_iter
.next()
.expect("declarations and visibility_constraints length mismatch");
let current = self.declarations.visibility_constraints.last_mut().unwrap();
*current =
visibility_constraints.add_or_constraint(*current, a_vis_constraint);
opt_a_decl = a_decls_iter.next();
opt_b_decl = b_decls_iter.next();
}
},
(Some(_), None) => {
push(&mut a_vis_constraints_iter, self);
opt_a_decl = a_decls_iter.next();
}
(None, Some(_)) => {
push(&mut b_vis_constraints_iter, self);
opt_b_decl = b_decls_iter.next();
}
(None, None) => break,
}
}
}
pub(super) fn bindings(&self) -> &SymbolBindings {
@@ -573,7 +677,7 @@ mod tests {
#[test]
fn merge() {
let mut visibility_constraints = VisibilityConstraintsBuilder::default();
let mut visibility_constraints = VisibilityConstraints::default();
// merging the same definition with the same constraint keeps the constraint
let mut sym1a = SymbolState::undefined(ScopedVisibilityConstraintId::ALWAYS_TRUE);
@@ -644,7 +748,7 @@ mod tests {
#[test]
fn record_declaration_merge() {
let mut visibility_constraints = VisibilityConstraintsBuilder::default();
let mut visibility_constraints = VisibilityConstraints::default();
let mut sym = SymbolState::undefined(ScopedVisibilityConstraintId::ALWAYS_TRUE);
sym.record_declaration(ScopedDefinitionId::from_u32(1));
@@ -658,7 +762,7 @@ mod tests {
#[test]
fn record_declaration_merge_partial_undeclared() {
let mut visibility_constraints = VisibilityConstraintsBuilder::default();
let mut visibility_constraints = VisibilityConstraints::default();
let mut sym = SymbolState::undefined(ScopedVisibilityConstraintId::ALWAYS_TRUE);
sym.record_declaration(ScopedDefinitionId::from_u32(1));

View File

@@ -163,17 +163,6 @@ fn check_unknown_rule(context: &mut CheckSuppressionsContext) {
format_args!("Unknown rule `{rule}`"),
);
}
GetLintError::PrefixedWithCategory {
prefixed,
suggestion,
} => {
context.report_lint(
&UNKNOWN_RULE,
unknown.range,
format_args!("Unknown rule `{prefixed}`. Did you mean `{suggestion}`?"),
);
}
};
}
}
@@ -776,9 +765,8 @@ impl<'src> SuppressionParser<'src> {
fn eat_word(&mut self) -> bool {
if self.cursor.eat_if(char::is_alphabetic) {
// Allow `:` for better error recovery when someone uses `lint:code` instead of just `code`.
self.cursor
.eat_while(|c| c.is_alphanumeric() || matches!(c, '_' | '-' | ':'));
.eat_while(|c| c.is_alphanumeric() || matches!(c, '_' | '-'));
true
} else {
false

View File

@@ -15,8 +15,7 @@ pub(crate) use self::diagnostic::register_lints;
pub use self::diagnostic::{TypeCheckDiagnostic, TypeCheckDiagnostics};
pub(crate) use self::display::TypeArrayDisplay;
pub(crate) use self::infer::{
infer_deferred_types, infer_definition_types, infer_expression_type, infer_expression_types,
infer_scope_types,
infer_deferred_types, infer_definition_types, infer_expression_types, infer_scope_types,
};
pub use self::narrow::KnownConstraintFunction;
pub(crate) use self::signatures::Signature;
@@ -24,12 +23,11 @@ pub use self::subclass_of::SubclassOfType;
use crate::module_name::ModuleName;
use crate::module_resolver::{file_to_module, resolve_module, KnownModule};
use crate::semantic_index::ast_ids::HasScopedExpressionId;
use crate::semantic_index::attribute_assignment::AttributeAssignment;
use crate::semantic_index::definition::Definition;
use crate::semantic_index::symbol::{self as symbol, ScopeId, ScopedSymbolId};
use crate::semantic_index::{
attribute_assignments, global_scope, imported_modules, semantic_index, symbol_table,
use_def_map, BindingWithConstraints, BindingWithConstraintsIterator, DeclarationWithConstraint,
global_scope, imported_modules, semantic_index, symbol_table, use_def_map,
BindingWithConstraints, BindingWithConstraintsIterator, DeclarationWithConstraint,
DeclarationsIterator,
};
use crate::stdlib::{builtins_symbol, known_module_symbol, typing_extensions_symbol};
@@ -55,7 +53,6 @@ mod mro;
mod narrow;
mod signatures;
mod slots;
mod statistics;
mod string_annotation;
mod subclass_of;
mod type_ordering;
@@ -814,35 +811,6 @@ impl<'db> Type<'db> {
}
}
/// Return a normalized version of `self` in which all unions and intersections are sorted
/// according to a canonical order, no matter how "deeply" a union/intersection may be nested.
///
/// Unions, intersections, and tuples recurse into their element types; all other
/// variants are leaves with no nested unions/intersections and are returned unchanged.
#[must_use]
pub fn with_sorted_unions(self, db: &'db dyn Db) -> Self {
match self {
Type::Union(union) => Type::Union(union.to_sorted_union(db)),
Type::Intersection(intersection) => {
Type::Intersection(intersection.to_sorted_intersection(db))
}
// Tuples are not themselves sorted, but their element types may contain
// unions/intersections that need normalizing.
Type::Tuple(tuple) => Type::Tuple(tuple.with_sorted_unions(db)),
// Leaf types: already in canonical form.
Type::LiteralString
| Type::Instance(_)
| Type::AlwaysFalsy
| Type::AlwaysTruthy
| Type::BooleanLiteral(_)
| Type::SliceLiteral(_)
| Type::BytesLiteral(_)
| Type::StringLiteral(_)
| Type::Dynamic(_)
| Type::Never
| Type::FunctionLiteral(_)
| Type::ModuleLiteral(_)
| Type::ClassLiteral(_)
| Type::KnownInstance(_)
| Type::IntLiteral(_)
| Type::SubclassOf(_) => self,
}
}
/// Return true if this type is a [subtype of] type `target`.
///
/// This method returns `false` if either `self` or `other` is not fully static.
@@ -1186,7 +1154,7 @@ impl<'db> Type<'db> {
left.is_equivalent_to(db, right)
}
(Type::Tuple(left), Type::Tuple(right)) => left.is_equivalent_to(db, right),
_ => self == other && self.is_fully_static(db) && other.is_fully_static(db),
_ => self.is_fully_static(db) && other.is_fully_static(db) && self == other,
}
}
@@ -4136,66 +4104,9 @@ impl<'db> Class<'db> {
}
}
SymbolAndQualifiers(Symbol::Unbound, TypeQualifiers::empty())
}
/// Tries to find declarations/bindings of an instance attribute named `name` that are only
/// "implicitly" defined in a method of the class that corresponds to `class_body_scope`.
fn implicit_instance_attribute(
db: &'db dyn Db,
class_body_scope: ScopeId<'db>,
name: &str,
inferred_type_from_class_body: Option<Type<'db>>,
) -> Symbol<'db> {
// If we do not see any declarations of an attribute, neither in the class body nor in
// any method, we build a union of `Unknown` with the inferred types of all bindings of
// that attribute. We include `Unknown` in that union to account for the fact that the
// attribute might be externally modified.
let mut union_of_inferred_types = UnionBuilder::new(db).add(Type::unknown());
if let Some(ty) = inferred_type_from_class_body {
union_of_inferred_types = union_of_inferred_types.add(ty);
}
let attribute_assignments = attribute_assignments(db, class_body_scope);
let Some(attribute_assignments) = attribute_assignments
.as_deref()
.and_then(|assignments| assignments.get(name))
else {
if inferred_type_from_class_body.is_some() {
return union_of_inferred_types.build().into();
}
return Symbol::Unbound;
};
for attribute_assignment in attribute_assignments {
match attribute_assignment {
AttributeAssignment::Annotated { annotation } => {
// We found an annotated assignment of one of the following forms (using 'self' in these
// examples, but we support arbitrary names for the first parameters of methods):
//
// self.name: <annotation>
// self.name: <annotation> = …
let annotation_ty = infer_expression_type(db, *annotation);
// TODO: check if there are conflicting declarations
return annotation_ty.into();
}
AttributeAssignment::Unannotated { value } => {
// We found an un-annotated attribute assignment of the form:
//
// self.name = <value>
let inferred_ty = infer_expression_type(db, *value);
union_of_inferred_types = union_of_inferred_types.add(inferred_ty);
}
}
}
union_of_inferred_types.build().into()
// TODO: The symbol is not present in any class body, but it could be implicitly
// defined in `__init__` or other methods anywhere in the MRO.
todo_type!("implicit instance attribute").into()
}
/// A helper function for `instance_member` that looks up the `name` attribute only on
@@ -4217,8 +4128,6 @@ impl<'db> Class<'db> {
match symbol_from_declarations(db, declarations) {
Ok(SymbolAndQualifiers(Symbol::Type(declared_ty, _), qualifiers)) => {
// The attribute is declared in the class body.
if let Some(function) = declared_ty.into_function_literal() {
// TODO: Eventually, we are going to process all decorators correctly. This is
// just a temporary heuristic to provide a broad categorization into properties
@@ -4232,26 +4141,22 @@ impl<'db> Class<'db> {
SymbolAndQualifiers(Symbol::Type(declared_ty, Boundness::Bound), qualifiers)
}
}
Ok(SymbolAndQualifiers(Symbol::Unbound, _)) => {
// The attribute is not *declared* in the class body. It could still be declared
// in a method, and it could also be *bound* in the class body (and/or in a method).
Ok(symbol @ SymbolAndQualifiers(Symbol::Unbound, qualifiers)) => {
let bindings = use_def.public_bindings(symbol_id);
let inferred = symbol_from_bindings(db, bindings);
let inferred_ty = inferred.ignore_possibly_unbound();
Self::implicit_instance_attribute(db, body_scope, name, inferred_ty).into()
SymbolAndQualifiers(
widen_type_for_undeclared_public_symbol(db, inferred, symbol.is_final()),
qualifiers,
)
}
Err((declared_ty, _conflicting_declarations)) => {
// There are conflicting declarations for this attribute in the class body.
// Ignore conflicting declarations
SymbolAndQualifiers(declared_ty.inner_type().into(), declared_ty.qualifiers())
}
}
} else {
// This attribute is neither declared nor bound in the class body.
// It could still be implicitly defined in a method.
Self::implicit_instance_attribute(db, body_scope, name, None).into()
Symbol::Unbound.into()
}
}
@@ -4447,11 +4352,12 @@ impl<'db> UnionType<'db> {
/// Create a new union type with the elements sorted according to a canonical ordering.
#[must_use]
pub fn to_sorted_union(self, db: &'db dyn Db) -> Self {
let mut new_elements: Vec<Type<'db>> = self
.elements(db)
.iter()
.map(|element| element.with_sorted_unions(db))
.collect();
let mut new_elements = self.elements(db).to_vec();
for element in &mut new_elements {
if let Type::Intersection(intersection) = element {
intersection.sort(db);
}
}
new_elements.sort_unstable_by(union_elements_ordering);
UnionType::new(db, new_elements.into_boxed_slice())
}
@@ -4547,24 +4453,19 @@ impl<'db> IntersectionType<'db> {
/// according to a canonical ordering.
#[must_use]
pub fn to_sorted_intersection(self, db: &'db dyn Db) -> Self {
fn normalized_set<'db>(
db: &'db dyn Db,
elements: &FxOrderSet<Type<'db>>,
) -> FxOrderSet<Type<'db>> {
let mut elements: FxOrderSet<Type<'db>> = elements
.iter()
.map(|ty| ty.with_sorted_unions(db))
.collect();
let mut positive = self.positive(db).clone();
positive.sort_unstable_by(union_elements_ordering);
elements.sort_unstable_by(union_elements_ordering);
elements
}
let mut negative = self.negative(db).clone();
negative.sort_unstable_by(union_elements_ordering);
IntersectionType::new(
db,
normalized_set(db, self.positive(db)),
normalized_set(db, self.negative(db)),
)
IntersectionType::new(db, positive, negative)
}
/// Perform an in-place sort of this [`IntersectionType`] instance
/// according to a canonical ordering.
fn sort(&mut self, db: &'db dyn Db) {
*self = self.to_sorted_intersection(db);
}
pub fn is_fully_static(self, db: &'db dyn Db) -> bool {
@@ -4582,33 +4483,21 @@ impl<'db> IntersectionType<'db> {
}
let self_positive = self.positive(db);
if !all_fully_static(db, self_positive) {
return false;
}
let other_positive = other.positive(db);
if self_positive.len() != other_positive.len() {
return false;
}
if !all_fully_static(db, other_positive) {
return false;
}
let self_negative = self.negative(db);
if !all_fully_static(db, self_negative) {
return false;
}
let other_negative = other.negative(db);
if self_negative.len() != other_negative.len() {
let other_positive = other.positive(db);
if !all_fully_static(db, other_positive) {
return false;
}
let other_negative = other.negative(db);
if !all_fully_static(db, other_negative) {
return false;
}
@@ -4617,13 +4506,7 @@ impl<'db> IntersectionType<'db> {
return true;
}
let sorted_self = self.to_sorted_intersection(db);
if sorted_self == other {
return true;
}
sorted_self == other.to_sorted_intersection(db)
self_positive.set_eq(other_positive) && self_negative.set_eq(other_negative)
}
/// Return `true` if `self` has exactly the same set of possible static materializations as `other`
@@ -4725,18 +4608,6 @@ impl<'db> TupleType<'db> {
Type::Tuple(Self::new(db, elements.into_boxed_slice()))
}
/// Return a normalized version of `self` in which all unions and intersections are sorted
/// according to a canonical order, no matter how "deeply" a union/intersection may be nested.
#[must_use]
pub fn with_sorted_unions(self, db: &'db dyn Db) -> Self {
let elements: Box<[Type<'db>]> = self
.elements(db)
.iter()
.map(|ty| ty.with_sorted_unions(db))
.collect();
TupleType::new(db, elements)
}
pub fn is_equivalent_to(self, db: &'db dyn Db, other: Self) -> bool {
let self_elements = self.elements(db);
let other_elements = other.elements(db);

View File

@@ -3,7 +3,7 @@
use std::fmt::{self, Display, Formatter, Write};
use ruff_db::display::FormatterJoinExtension;
use ruff_python_ast::str::{Quote, TripleQuotes};
use ruff_python_ast::str::Quote;
use ruff_python_literal::escape::AsciiEscape;
use crate::types::class_base::ClassBase;
@@ -98,7 +98,7 @@ impl Display for DisplayRepresentation<'_> {
let escape =
AsciiEscape::with_preferred_quote(bytes.value(self.db).as_ref(), Quote::Double);
escape.bytes_repr(TripleQuotes::No).write(f)
escape.bytes_repr().write(f)
}
Type::SliceLiteral(slice) => {
f.write_str("slice[")?;

View File

@@ -44,7 +44,7 @@ use crate::semantic_index::definition::{
AssignmentDefinitionKind, Definition, DefinitionKind, DefinitionNodeKey,
ExceptHandlerDefinitionKind, ForStmtDefinitionKind, TargetKind,
};
use crate::semantic_index::expression::{Expression, ExpressionKind};
use crate::semantic_index::expression::Expression;
use crate::semantic_index::semantic_index;
use crate::semantic_index::symbol::{NodeWithScopeKind, NodeWithScopeRef, ScopeId};
use crate::semantic_index::SemanticIndex;
@@ -61,7 +61,6 @@ use crate::types::diagnostic::{
UNDEFINED_REVEAL, UNRESOLVED_ATTRIBUTE, UNRESOLVED_IMPORT, UNSUPPORTED_OPERATOR,
};
use crate::types::mro::MroErrorKind;
use crate::types::statistics::TypeStatistics;
use crate::types::unpacker::{UnpackResult, Unpacker};
use crate::types::{
builtins_symbol, global_symbol, symbol, symbol_from_bindings, symbol_from_declarations,
@@ -193,20 +192,6 @@ pub(crate) fn infer_expression_types<'db>(
TypeInferenceBuilder::new(db, InferenceRegion::Expression(expression), index).finish()
}
/// Infer the type of a single [`Expression`].
///
/// Similar to `infer_expression_types` (with the same restrictions). Directly returns the
/// type of the overall expression. This is a salsa query because it accesses `node_ref`,
/// which is sensitive to changes in the AST. Making it a query allows downstream queries
/// to short-circuit if the result type has not changed.
#[salsa::tracked]
pub(crate) fn infer_expression_type<'db>(
db: &'db dyn Db,
expression: Expression<'db>,
) -> Type<'db> {
// Run full inference for the expression's region, then project out the type
// recorded for the expression's own node within its scope.
let inference = infer_expression_types(db, expression);
let expr_scope = expression.scope(db);
inference.expression_type(expression.node_ref(db).scoped_expression_id(db, expr_scope))
}
/// Infer the types for an [`Unpack`] operation.
///
/// This infers the expression type and performs structural match against the target expression
@@ -314,14 +299,6 @@ impl<'db> TypeInference<'db> {
self.diagnostics.shrink_to_fit();
self.deferred.shrink_to_fit();
}
pub(super) fn statistics(&self) -> TypeStatistics {
let mut statistics = TypeStatistics::default();
for ty in self.expressions.values() {
statistics.increment(*ty);
}
statistics
}
}
impl WithDiagnostics for TypeInference<'_> {
@@ -846,14 +823,7 @@ impl<'db> TypeInferenceBuilder<'db> {
}
fn infer_region_expression(&mut self, expression: Expression<'db>) {
match expression.kind(self.db()) {
ExpressionKind::Normal => {
self.infer_expression_impl(expression.node_ref(self.db()));
}
ExpressionKind::TypeExpression => {
self.infer_type_expression(expression.node_ref(self.db()));
}
}
self.infer_expression_impl(expression.node_ref(self.db()));
}
/// Raise a diagnostic if the given type cannot be divided by zero.
@@ -1315,7 +1285,7 @@ impl<'db> TypeInferenceBuilder<'db> {
parameter: &ast::Parameter,
definition: Definition<'db>,
) {
if let Some(annotation) = parameter.annotation() {
if let Some(annotation) = parameter.annotation.as_ref() {
let _annotated_ty = self.file_expression_type(annotation);
// TODO `tuple[annotated_ty, ...]`
let ty = KnownClass::Tuple.to_instance(self.db());
@@ -1344,7 +1314,7 @@ impl<'db> TypeInferenceBuilder<'db> {
parameter: &ast::Parameter,
definition: Definition<'db>,
) {
if let Some(annotation) = parameter.annotation() {
if let Some(annotation) = parameter.annotation.as_ref() {
let _annotated_ty = self.file_expression_type(annotation);
// TODO `dict[str, annotated_ty]`
let ty = KnownClass::Dict.to_instance(self.db());
@@ -6040,7 +6010,7 @@ mod tests {
use crate::types::check_types;
use ruff_db::files::{system_path_to_file, File};
use ruff_db::system::DbWithTestSystem;
use ruff_db::testing::{assert_function_query_was_not_run, assert_function_query_was_run};
use ruff_db::testing::assert_function_query_was_not_run;
use super::*;
@@ -6367,84 +6337,4 @@ mod tests {
);
Ok(())
}
// Incrementality test: changing the *annotation* of an implicit instance attribute in
// `mod.py` must re-run inference for the dependent expression in `main.py`, while a
// purely cosmetic change (adding a comment) must not re-run it.
#[test]
fn dependency_implicit_instance_attribute() -> anyhow::Result<()> {
// Locate the `Expression` for the right-hand side of `x = C().attr` in `main.py`,
// so salsa event logs can be checked against that specific query input.
fn x_rhs_expression(db: &TestDb) -> Expression<'_> {
let file_main = system_path_to_file(db, "/src/main.py").unwrap();
let ast = parsed_module(db, file_main);
// Get the second statement in `main.py` (x = …) and extract the expression
// node on the right-hand side:
let x_rhs_node = &ast.syntax().body[1].as_assign_stmt().unwrap().value;
let index = semantic_index(db, file_main);
index.expression(x_rhs_node.as_ref())
}
let mut db = setup_db();
db.write_dedented(
"/src/mod.py",
r#"
class C:
def f(self):
self.attr: int | None = None
"#,
)?;
db.write_dedented(
"/src/main.py",
r#"
from mod import C
x = C().attr
"#,
)?;
let file_main = system_path_to_file(&db, "/src/main.py").unwrap();
let attr_ty = global_symbol(&db, file_main, "x").expect_type();
assert_eq!(attr_ty.display(&db).to_string(), "Unknown | int | None");
// Change the type of `attr` to `str | None`; this should trigger the type of `x` to be re-inferred
db.write_dedented(
"/src/mod.py",
r#"
class C:
def f(self):
self.attr: str | None = None
"#,
)?;
let events = {
db.clear_salsa_events();
let attr_ty = global_symbol(&db, file_main, "x").expect_type();
assert_eq!(attr_ty.display(&db).to_string(), "Unknown | str | None");
db.take_salsa_events()
};
assert_function_query_was_run(&db, infer_expression_types, x_rhs_expression(&db), &events);
// Add a comment; this should not trigger the type of `x` to be re-inferred
db.write_dedented(
"/src/mod.py",
r#"
class C:
def f(self):
# a comment!
self.attr: str | None = None
"#,
)?;
let events = {
db.clear_salsa_events();
let attr_ty = global_symbol(&db, file_main, "x").expect_type();
assert_eq!(attr_ty.display(&db).to_string(), "Unknown | str | None");
db.take_salsa_events()
};
assert_function_query_was_not_run(
&db,
infer_expression_types,
x_rhs_expression(&db),
&events,
);
Ok(())
}
}

View File

@@ -467,13 +467,6 @@ mod stable {
assignable_to_is_reflexive, db,
forall types t. t.is_assignable_to(db, t)
);
// For *any* pair of types, whether fully static or not,
// each of the pair should be assignable to the union of the two.
type_property_test!(
all_type_pairs_are_assignable_to_their_union, db,
forall types s, t. s.is_assignable_to(db, union(db, [s, t])) && t.is_assignable_to(db, union(db, [s, t]))
);
}
/// This module contains property tests that currently lead to many false positives.
@@ -522,6 +515,13 @@ mod flaky {
forall types s, t. intersection(db, [s, t]).is_assignable_to(db, s) && intersection(db, [s, t]).is_assignable_to(db, t)
);
// For *any* pair of types, whether fully static or not,
// each of the pair should be assignable to the union of the two.
type_property_test!(
all_type_pairs_are_assignable_to_their_union, db,
forall types s, t. s.is_assignable_to(db, union(db, [s, t])) && t.is_assignable_to(db, union(db, [s, t]))
);
// Equal element sets of intersections implies equivalence
// flaky at least in part because of https://github.com/astral-sh/ruff/issues/15513
type_property_test!(

View File

@@ -93,9 +93,10 @@ impl<'db> Parameters<'db> {
kwarg,
range: _,
} = parameters;
let default_ty = |param: &ast::ParameterWithDefault| {
param
.default()
let default_ty = |parameter_with_default: &ast::ParameterWithDefault| {
parameter_with_default
.default
.as_deref()
.map(|default| definition_expression_type(db, definition, default))
};
let positional_only = posonlyargs.iter().map(|arg| {
@@ -242,7 +243,8 @@ impl<'db> Parameter<'db> {
Self {
name: Some(parameter.name.id.clone()),
annotated_ty: parameter
.annotation()
.annotation
.as_deref()
.map(|annotation| definition_expression_type(db, definition, annotation)),
kind,
}

View File

@@ -1,121 +0,0 @@
use crate::types::{infer_scope_types, semantic_index, Type};
use crate::Db;
use ruff_db::files::File;
use rustc_hash::FxHashMap;
/// Get type-coverage statistics for a file.
///
/// Runs type inference for every scope in the file's semantic index and
/// aggregates each scope's per-expression type counts into a single
/// [`TypeStatistics`] map. `return_ref` lets callers borrow the cached result
/// instead of cloning the map.
#[salsa::tracked(return_ref)]
pub fn type_statistics<'db>(db: &'db dyn Db, file: File) -> TypeStatistics<'db> {
let _span = tracing::trace_span!("type_statistics", file=?file.path(db)).entered();
tracing::debug!(
"Gathering statistics for file '{path}'",
path = file.path(db)
);
let index = semantic_index(db, file);
let mut statistics = TypeStatistics::default();
for scope_id in index.scope_ids() {
let result = infer_scope_types(db, scope_id);
statistics.extend(&result.statistics());
}
statistics
}
/// Map each type to count of expressions with that type.
#[derive(Debug, Default, Eq, PartialEq)]
pub(super) struct TypeStatistics<'db>(FxHashMap<Type<'db>, u32>);
impl<'db> TypeStatistics<'db> {
fn extend(&mut self, other: &TypeStatistics<'db>) {
for (ty, count) in &other.0 {
self.0
.entry(*ty)
.and_modify(|my_count| *my_count += count)
.or_insert(*count);
}
}
pub(super) fn increment(&mut self, ty: Type<'db>) {
self.0
.entry(ty)
.and_modify(|count| *count += 1)
.or_insert(1);
}
#[allow(unused)]
fn expression_count(&self) -> u32 {
self.0.values().sum()
}
#[allow(unused)]
fn todo_count(&self) -> u32 {
self.0
.iter()
.filter(|(key, _)| key.is_todo())
.map(|(_, count)| count)
.sum()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::db::tests::{setup_db, TestDb};
use ruff_db::files::system_path_to_file;
use ruff_db::system::DbWithTestSystem;
fn get_stats<'db>(
db: &'db mut TestDb,
filename: &str,
source: &str,
) -> &'db TypeStatistics<'db> {
db.write_dedented(filename, source).unwrap();
type_statistics(db, system_path_to_file(db, filename).unwrap())
}
#[test]
fn all_static() {
let mut db = setup_db();
let stats = get_stats(&mut db, "src/foo.py", "1");
assert_eq!(stats.0, FxHashMap::from_iter([(Type::IntLiteral(1), 1)]));
}
#[test]
fn todo_and_expression_count() {
let mut db = setup_db();
let stats = get_stats(
&mut db,
"src/foo.py",
r#"
x = [x for x in [1]]
"#,
);
assert_eq!(stats.todo_count(), 4);
assert_eq!(stats.expression_count(), 6);
}
#[test]
fn sum() {
let mut db = setup_db();
let stats = get_stats(
&mut db,
"src/foo.py",
r#"
1
def f():
1
"#,
);
assert_eq!(stats.0[&Type::IntLiteral(1)], 2);
}
}

View File

@@ -7,7 +7,7 @@ use ruff_python_ast::{self as ast, AnyNodeRef};
use crate::semantic_index::ast_ids::{HasScopedExpressionId, ScopedExpressionId};
use crate::semantic_index::symbol::ScopeId;
use crate::types::{infer_expression_type, todo_type, Type, TypeCheckDiagnostics};
use crate::types::{infer_expression_types, todo_type, Type, TypeCheckDiagnostics};
use crate::unpack::UnpackValue;
use crate::Db;
@@ -42,7 +42,8 @@ impl<'db> Unpacker<'db> {
"Unpacking target must be a list or tuple expression"
);
let mut value_ty = infer_expression_type(self.db(), value.expression());
let mut value_ty = infer_expression_types(self.db(), value.expression())
.expression_type(value.scoped_expression_id(self.db(), self.scope));
if value.is_assign()
&& self.context.in_stub()

View File

@@ -3,6 +3,7 @@ use ruff_python_ast::{self as ast, AnyNodeRef};
use ruff_text_size::{Ranged, TextRange};
use crate::ast_node_ref::AstNodeRef;
use crate::semantic_index::ast_ids::{HasScopedExpressionId, ScopedExpressionId};
use crate::semantic_index::expression::Expression;
use crate::semantic_index::symbol::{FileScopeId, ScopeId};
use crate::Db;
@@ -87,6 +88,17 @@ impl<'db> UnpackValue<'db> {
}
}
/// Returns the [`ScopedExpressionId`] of the underlying expression.
pub(crate) fn scoped_expression_id(
self,
db: &'db dyn Db,
scope: ScopeId<'db>,
) -> ScopedExpressionId {
self.expression()
.node_ref(db)
.scoped_expression_id(db, scope)
}
/// Returns the expression as an [`AnyNodeRef`].
pub(crate) fn as_any_node_ref(self, db: &'db dyn Db) -> AnyNodeRef<'db> {
self.expression().node_ref(db).node().into()

View File

@@ -122,7 +122,7 @@
//!
//! ### Explicit ambiguity
//!
//! In some cases, we explicitly add an “ambiguous constraint to all bindings
//! In some cases, we explicitly add a `VisibilityConstraint::Ambiguous` constraint to all bindings
//! in a certain control flow path. We do this when branching on something that we can not (or
//! intentionally do not want to) analyze statically. `for` loops are one example:
//! ```py
@@ -150,10 +150,14 @@
//!
//! [Kleene]: <https://en.wikipedia.org/wiki/Three-valued_logic#Kleene_and_Priest_logics>
use ruff_index::{newtype_index, IndexVec};
use ruff_index::IndexVec;
use crate::semantic_index::constraint::{Constraint, ConstraintNode, PatternConstraintKind};
use crate::types::{infer_expression_type, Truthiness};
use crate::semantic_index::ScopedVisibilityConstraintId;
use crate::semantic_index::{
ast_ids::HasScopedExpressionId,
constraint::{Constraint, ConstraintNode, PatternConstraintKind},
};
use crate::types::{infer_expression_types, Truthiness};
use crate::Db;
/// The maximum depth of recursion when evaluating visibility constraints.
@@ -164,113 +168,35 @@ use crate::Db;
/// resulting from a few files with a lot of boolean expressions and `if`-statements.
const MAX_RECURSION_DEPTH: usize = 24;
/// A ternary formula that defines under what conditions a binding is visible. (A ternary formula
/// is just like a boolean formula, but with `Ambiguous` as a third potential result. See the
/// module documentation for more details.)
///
/// The primitive atoms of the formula are [`Constraint`]s, which express some property of the
/// runtime state of the code that we are analyzing.
///
/// We assume that each atom has a stable value each time that the formula is evaluated. An atom
/// that resolves to `Ambiguous` might be true or false, and we can't tell which — but within that
/// evaluation, we assume that the atom has the _same_ unknown value each time it appears. That
/// allows us to perform simplifications like `A !A → true` and `A ∧ !A → false`.
///
/// That means that when you are constructing a formula, you might need to create distinct atoms
/// for a particular [`Constraint`], if your formula needs to consider how a particular runtime
/// property might be different at different points in the execution of the program.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct VisibilityConstraint<'db>(VisibilityConstraintInner<'db>);
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum VisibilityConstraintInner<'db> {
pub(crate) enum VisibilityConstraint<'db> {
AlwaysTrue,
AlwaysFalse,
Ambiguous,
VisibleIf(Constraint<'db>, u8),
VisibleIf(Constraint<'db>),
VisibleIfNot(ScopedVisibilityConstraintId),
KleeneAnd(ScopedVisibilityConstraintId, ScopedVisibilityConstraintId),
KleeneOr(ScopedVisibilityConstraintId, ScopedVisibilityConstraintId),
}
/// A newtype-index for a visibility constraint in a particular scope.
#[newtype_index]
pub(crate) struct ScopedVisibilityConstraintId;
impl ScopedVisibilityConstraintId {
/// A special ID that is used for an "always true" / "always visible" constraint.
/// When we create a new [`VisibilityConstraints`] object, this constraint is always
/// present at index 0.
pub(crate) const ALWAYS_TRUE: ScopedVisibilityConstraintId =
ScopedVisibilityConstraintId::from_u32(0);
/// A special ID that is used for an "always false" / "never visible" constraint.
/// When we create a new [`VisibilityConstraints`] object, this constraint is always
/// present at index 1.
pub(crate) const ALWAYS_FALSE: ScopedVisibilityConstraintId =
ScopedVisibilityConstraintId::from_u32(1);
/// A special ID that is used for an ambiguous constraint.
/// When we create a new [`VisibilityConstraints`] object, this constraint is always
/// present at index 2.
pub(crate) const AMBIGUOUS: ScopedVisibilityConstraintId =
ScopedVisibilityConstraintId::from_u32(2);
}
#[derive(Debug, PartialEq, Eq)]
pub(crate) struct VisibilityConstraints<'db> {
constraints: IndexVec<ScopedVisibilityConstraintId, VisibilityConstraint<'db>>,
}
#[derive(Debug, PartialEq, Eq)]
pub(crate) struct VisibilityConstraintsBuilder<'db> {
constraints: IndexVec<ScopedVisibilityConstraintId, VisibilityConstraint<'db>>,
}
impl Default for VisibilityConstraintsBuilder<'_> {
impl Default for VisibilityConstraints<'_> {
fn default() -> Self {
Self {
constraints: IndexVec::from_iter([
VisibilityConstraint(VisibilityConstraintInner::AlwaysTrue),
VisibilityConstraint(VisibilityConstraintInner::AlwaysFalse),
VisibilityConstraint(VisibilityConstraintInner::Ambiguous),
]),
constraints: IndexVec::from_iter([VisibilityConstraint::AlwaysTrue]),
}
}
}
impl<'db> VisibilityConstraintsBuilder<'db> {
pub(crate) fn build(self) -> VisibilityConstraints<'db> {
VisibilityConstraints {
constraints: self.constraints,
}
}
fn add(&mut self, constraint: VisibilityConstraintInner<'db>) -> ScopedVisibilityConstraintId {
self.constraints.push(VisibilityConstraint(constraint))
}
pub(crate) fn add_atom(
impl<'db> VisibilityConstraints<'db> {
pub(crate) fn add(
&mut self,
constraint: Constraint<'db>,
copy: u8,
constraint: VisibilityConstraint<'db>,
) -> ScopedVisibilityConstraintId {
self.add(VisibilityConstraintInner::VisibleIf(constraint, copy))
}
pub(crate) fn add_not_constraint(
&mut self,
a: ScopedVisibilityConstraintId,
) -> ScopedVisibilityConstraintId {
if a == ScopedVisibilityConstraintId::ALWAYS_FALSE {
ScopedVisibilityConstraintId::ALWAYS_TRUE
} else if a == ScopedVisibilityConstraintId::ALWAYS_TRUE {
ScopedVisibilityConstraintId::ALWAYS_FALSE
} else if a == ScopedVisibilityConstraintId::AMBIGUOUS {
ScopedVisibilityConstraintId::AMBIGUOUS
} else {
self.add(VisibilityConstraintInner::VisibleIfNot(a))
}
self.constraints.push(constraint)
}
pub(crate) fn add_or_constraint(
@@ -278,23 +204,14 @@ impl<'db> VisibilityConstraintsBuilder<'db> {
a: ScopedVisibilityConstraintId,
b: ScopedVisibilityConstraintId,
) -> ScopedVisibilityConstraintId {
if a == ScopedVisibilityConstraintId::ALWAYS_TRUE
|| b == ScopedVisibilityConstraintId::ALWAYS_TRUE
{
return ScopedVisibilityConstraintId::ALWAYS_TRUE;
} else if a == ScopedVisibilityConstraintId::ALWAYS_FALSE {
return b;
} else if b == ScopedVisibilityConstraintId::ALWAYS_FALSE {
return a;
}
match (&self.constraints[a], &self.constraints[b]) {
(_, VisibilityConstraint(VisibilityConstraintInner::VisibleIfNot(id))) if a == *id => {
(_, VisibilityConstraint::VisibleIfNot(id)) if a == *id => {
ScopedVisibilityConstraintId::ALWAYS_TRUE
}
(VisibilityConstraint(VisibilityConstraintInner::VisibleIfNot(id)), _) if *id == b => {
(VisibilityConstraint::VisibleIfNot(id), _) if *id == b => {
ScopedVisibilityConstraintId::ALWAYS_TRUE
}
_ => self.add(VisibilityConstraintInner::KleeneOr(a, b)),
_ => self.add(VisibilityConstraint::KleeneOr(a, b)),
}
}
@@ -303,28 +220,15 @@ impl<'db> VisibilityConstraintsBuilder<'db> {
a: ScopedVisibilityConstraintId,
b: ScopedVisibilityConstraintId,
) -> ScopedVisibilityConstraintId {
if a == ScopedVisibilityConstraintId::ALWAYS_FALSE
|| b == ScopedVisibilityConstraintId::ALWAYS_FALSE
{
return ScopedVisibilityConstraintId::ALWAYS_FALSE;
} else if a == ScopedVisibilityConstraintId::ALWAYS_TRUE {
return b;
if a == ScopedVisibilityConstraintId::ALWAYS_TRUE {
b
} else if b == ScopedVisibilityConstraintId::ALWAYS_TRUE {
return a;
}
match (&self.constraints[a], &self.constraints[b]) {
(_, VisibilityConstraint(VisibilityConstraintInner::VisibleIfNot(id))) if a == *id => {
ScopedVisibilityConstraintId::ALWAYS_FALSE
}
(VisibilityConstraint(VisibilityConstraintInner::VisibleIfNot(id)), _) if *id == b => {
ScopedVisibilityConstraintId::ALWAYS_FALSE
}
_ => self.add(VisibilityConstraintInner::KleeneAnd(a, b)),
a
} else {
self.add(VisibilityConstraint::KleeneAnd(a, b))
}
}
}
impl<'db> VisibilityConstraints<'db> {
/// Analyze the statically known visibility for a given visibility constraint.
pub(crate) fn evaluate(&self, db: &'db dyn Db, id: ScopedVisibilityConstraintId) -> Truthiness {
self.evaluate_impl(db, id, MAX_RECURSION_DEPTH)
@@ -340,18 +244,15 @@ impl<'db> VisibilityConstraints<'db> {
return Truthiness::Ambiguous;
}
let VisibilityConstraint(visibility_constraint) = &self.constraints[id];
let visibility_constraint = &self.constraints[id];
match visibility_constraint {
VisibilityConstraintInner::AlwaysTrue => Truthiness::AlwaysTrue,
VisibilityConstraintInner::AlwaysFalse => Truthiness::AlwaysFalse,
VisibilityConstraintInner::Ambiguous => Truthiness::Ambiguous,
VisibilityConstraintInner::VisibleIf(constraint, _) => {
Self::analyze_single(db, constraint)
}
VisibilityConstraintInner::VisibleIfNot(negated) => {
VisibilityConstraint::AlwaysTrue => Truthiness::AlwaysTrue,
VisibilityConstraint::Ambiguous => Truthiness::Ambiguous,
VisibilityConstraint::VisibleIf(constraint) => Self::analyze_single(db, constraint),
VisibilityConstraint::VisibleIfNot(negated) => {
self.evaluate_impl(db, *negated, max_depth - 1).negate()
}
VisibilityConstraintInner::KleeneAnd(lhs, rhs) => {
VisibilityConstraint::KleeneAnd(lhs, rhs) => {
let lhs = self.evaluate_impl(db, *lhs, max_depth - 1);
if lhs == Truthiness::AlwaysFalse {
@@ -368,7 +269,7 @@ impl<'db> VisibilityConstraints<'db> {
Truthiness::Ambiguous
}
}
VisibilityConstraintInner::KleeneOr(lhs_id, rhs_id) => {
VisibilityConstraint::KleeneOr(lhs_id, rhs_id) => {
let lhs = self.evaluate_impl(db, *lhs_id, max_depth - 1);
if lhs == Truthiness::AlwaysTrue {
@@ -391,15 +292,28 @@ impl<'db> VisibilityConstraints<'db> {
fn analyze_single(db: &dyn Db, constraint: &Constraint) -> Truthiness {
match constraint.node {
ConstraintNode::Expression(test_expr) => {
let ty = infer_expression_type(db, test_expr);
let inference = infer_expression_types(db, test_expr);
let scope = test_expr.scope(db);
let ty = inference
.expression_type(test_expr.node_ref(db).scoped_expression_id(db, scope));
ty.bool(db).negate_if(!constraint.is_positive)
}
ConstraintNode::Pattern(inner) => match inner.kind(db) {
PatternConstraintKind::Value(value, guard) => {
let subject_expression = inner.subject(db);
let subject_ty = infer_expression_type(db, *subject_expression);
let value_ty = infer_expression_type(db, *value);
let inference = infer_expression_types(db, *subject_expression);
let scope = subject_expression.scope(db);
let subject_ty = inference.expression_type(
subject_expression
.node_ref(db)
.scoped_expression_id(db, scope),
);
let inference = infer_expression_types(db, *value);
let scope = value.scope(db);
let value_ty = inference
.expression_type(value.node_ref(db).scoped_expression_id(db, scope));
if subject_ty.is_single_valued(db) {
let truthiness =

View File

@@ -20,10 +20,10 @@ reveal_type(1) # revealed: Literal[1]
````
When running this test, the mdtest framework will write a file with these contents to the default
file path (`/src/mdtest_snippet__1.py`) in its in-memory file system, run a type check on that file,
and then match the resulting diagnostics with the assertions in the test. Assertions are in the form
of Python comments. If all diagnostics and all assertions are matched, the test passes; otherwise,
it fails.
file path (`/src/test.py`) in its in-memory file system, run a type check on that file, and then
match the resulting diagnostics with the assertions in the test. Assertions are in the form of
Python comments. If all diagnostics and all assertions are matched, the test passes; otherwise, it
fails.
<!---
(If you are reading this document in raw Markdown source rather than rendered Markdown, note that
@@ -129,12 +129,8 @@ assertion as the line of source code on which the matched diagnostics are emitte
## Multi-file tests
Some tests require multiple files, with imports from one file into another. Multiple fenced code
blocks represent multiple embedded files. If there are multiple unnamed files, mdtest will name them
according to the numbered scheme `/src/mdtest_snippet__1.py`, `/src/mdtest_snippet__2.py`, etc. (If
they are `pyi` files, they will be named with a `pyi` extension instead.)
Tests should not rely on these default names. If a test must import from a file, then it should
explicitly specify the file name:
blocks represent multiple embedded files. Since files must have unique names, at most one file can
use the default name of `/src/test.py`. Other files must explicitly specify their file name:
````markdown
```py
@@ -142,9 +138,7 @@ from b import C
reveal_type(C) # revealed: Literal[C]
```
`b.py`:
```py
```py path=b.py
class C: pass
```
````
@@ -155,8 +149,8 @@ is, the equivalent of a runtime entry on `sys.path`).
The default workspace root is `/src/`. Currently it is not possible to customize this in a test, but
this is a feature we will want to add in the future.
So the above test creates two files, `/src/mdtest_snippet__1.py` and `/src/b.py`, and sets the
workspace root to `/src/`, allowing imports from `b.py` using the module name `b`.
So the above test creates two files, `/src/test.py` and `/src/b.py`, and sets the workspace root to
`/src/`, allowing `test.py` to import from `b.py` using the module name `b`.
## Multi-test suites
@@ -177,9 +171,7 @@ from b import y
x: int = y # error: [invalid-assignment]
```
`b.py`:
```py
```py path=b.py
y = "foo"
```
````
@@ -291,7 +283,7 @@ cargo test -p red_knot_python_semantic -- mdtest__
Alternatively, you can use the `mdtest.py` runner which has a watch mode that will re-run corresponding tests when Markdown files change, and recompile automatically when Rust code changes:
```bash
uv run crates/red_knot_python_semantic/mdtest.py
uv -q run crates/red_knot_python_semantic/mdtest.py
```
## Planned features
@@ -365,17 +357,17 @@ This is just an example, not a proposal that red-knot would ever actually output
precisely this format:
```output
mdtest_snippet__1.py, line 1, col 1: revealed type is 'Literal[1]'
test.py, line 1, col 1: revealed type is 'Literal[1]'
```
````
We will want to build tooling to automatically capture and update these “full diagnostic output”
blocks, when tests are run in an update-output mode (probably specified by an environment variable.)
By default, an `output` block will specify diagnostic output for the file
`<workspace-root>/mdtest_snippet__1.py`. An `output` block can be prefixed by a
<code>`&lt;path>`:</code> label as usual, to explicitly specify the Python file for which it asserts
diagnostic output.
By default, an `output` block will specify diagnostic output for the file `<workspace-root>/test.py`.
An `output` block can have a `path=` option, to explicitly specify the Python file for which it
asserts diagnostic output, and a `stage=` option, to specify which stage of an incremental test it
specifies diagnostic output at. (See “incremental tests” below.)
It is an error for an `output` block to exist, if there is no `py` or `python` block in the same
test for the same file path.
@@ -393,43 +385,39 @@ fenced code blocks in the test:
## modify a file
Initial file contents:
Initial version of `test.py` and `b.py`:
```py
from b import x
reveal_type(x)
```
`b.py`:
```py
```py path=b.py
x = 1
```
Initial expected output for the unnamed file:
Initial expected output for `test.py`:
```output
/src/mdtest_snippet__1.py, line 1, col 1: revealed type is 'Literal[1]'
/src/test.py, line 1, col 1: revealed type is 'Literal[1]'
```
Now in our first incremental stage, modify the contents of `b.py`:
`b.py`:
```py stage=1
```py path=b.py stage=1
# b.py
x = 2
```
And this is our updated expected output for the unnamed file at stage 1:
And this is our updated expected output for `test.py` at stage 1:
```output stage=1
/src/mdtest_snippet__1.py, line 1, col 1: revealed type is 'Literal[2]'
/src/test.py, line 1, col 1: revealed type is 'Literal[2]'
```
(One reason to use full-diagnostic-output blocks in this test is that updating inline-comment
diagnostic assertions for `mdtest_snippet__1.py` would require specifying new contents for
`mdtest_snippet__1.py` in stage 1, which we don't want to do in this test.)
(One reason to use full-diagnostic-output blocks in this test is that updating
inline-comment diagnostic assertions for `test.py` would require specifying new
contents for `test.py` in stage 1, which we don't want to do in this test.)
````
It will be possible to provide any number of stages in an incremental test. If a stage re-specifies

View File

@@ -109,9 +109,9 @@ fn run_test(db: &mut db::Db, test: &parser::MarkdownTest) -> Result<(), Failures
);
let full_path = if embedded.path.starts_with('/') {
SystemPathBuf::from(embedded.path.clone())
SystemPathBuf::from(embedded.path)
} else {
project_root.join(&embedded.path)
project_root.join(embedded.path)
};
if let Some(ref typeshed_path) = custom_typeshed_path {
@@ -135,7 +135,7 @@ fn run_test(db: &mut db::Db, test: &parser::MarkdownTest) -> Result<(), Failures
Some(TestFile {
file,
backtick_offset: embedded.backtick_offset,
backtick_offset: embedded.md_offset,
})
})
.collect();

File diff suppressed because it is too large Load Diff

View File

@@ -1 +1 @@
c193cd2a36839c8e6336f350397f51ce52fedd5e
101287091cbd71a3305a4fc4a1a8eb5df0e3f6f7

View File

@@ -27,14 +27,14 @@ _TrapType: TypeAlias = type[DecimalException]
__version__: Final[str]
__libmpdec_version__: Final[str]
ROUND_DOWN: Final = "ROUND_DOWN"
ROUND_HALF_UP: Final = "ROUND_HALF_UP"
ROUND_HALF_EVEN: Final = "ROUND_HALF_EVEN"
ROUND_CEILING: Final = "ROUND_CEILING"
ROUND_FLOOR: Final = "ROUND_FLOOR"
ROUND_UP: Final = "ROUND_UP"
ROUND_HALF_DOWN: Final = "ROUND_HALF_DOWN"
ROUND_05UP: Final = "ROUND_05UP"
ROUND_DOWN: Final[str]
ROUND_HALF_UP: Final[str]
ROUND_HALF_EVEN: Final[str]
ROUND_CEILING: Final[str]
ROUND_FLOOR: Final[str]
ROUND_UP: Final[str]
ROUND_HALF_DOWN: Final[str]
ROUND_05UP: Final[str]
HAVE_CONTEXTVAR: Final[bool]
HAVE_THREADS: Final[bool]
MAX_EMAX: Final[int]

View File

@@ -1,13 +1,13 @@
from typing import Final, SupportsComplex, SupportsFloat, SupportsIndex
from typing import SupportsComplex, SupportsFloat, SupportsIndex
from typing_extensions import TypeAlias
e: Final[float]
pi: Final[float]
inf: Final[float]
infj: Final[complex]
nan: Final[float]
nanj: Final[complex]
tau: Final[float]
e: float
pi: float
inf: float
infj: complex
nan: float
nanj: complex
tau: float
_C: TypeAlias = SupportsFloat | SupportsComplex | SupportsIndex | complex

View File

@@ -32,9 +32,9 @@ _T = TypeVar("_T")
_T_co = TypeVar("_T_co", covariant=True)
_T_io = TypeVar("_T_io", bound=IO[str] | None)
_ExitT_co = TypeVar("_ExitT_co", covariant=True, bound=bool | None, default=bool | None)
_F = TypeVar("_F", bound=Callable[..., Any])
_G = TypeVar("_G", bound=Generator[Any, Any, Any] | AsyncGenerator[Any, Any], covariant=True)
_P = ParamSpec("_P")
_R = TypeVar("_R")
_SendT_contra = TypeVar("_SendT_contra", contravariant=True, default=None)
_ReturnT_co = TypeVar("_ReturnT_co", covariant=True, default=None)
@@ -64,13 +64,9 @@ class AbstractAsyncContextManager(ABC, Protocol[_T_co, _ExitT_co]): # type: ign
self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None, /
) -> _ExitT_co: ...
class _WrappedCallable(Generic[_P, _R]):
__wrapped__: Callable[_P, _R]
def __call__(self, *args: _P.args, **kwargs: _P.kwargs) -> _R: ...
class ContextDecorator:
def _recreate_cm(self) -> Self: ...
def __call__(self, func: Callable[_P, _R]) -> _WrappedCallable[_P, _R]: ...
def __call__(self, func: _F) -> _F: ...
class _GeneratorContextManagerBase(Generic[_G]):
# Ideally this would use ParamSpec, but that requires (*args, **kwargs), which this isn't. see #6676
@@ -97,11 +93,11 @@ class _GeneratorContextManager(
def contextmanager(func: Callable[_P, Iterator[_T_co]]) -> Callable[_P, _GeneratorContextManager[_T_co]]: ...
if sys.version_info >= (3, 10):
_AR = TypeVar("_AR", bound=Awaitable[Any])
_AF = TypeVar("_AF", bound=Callable[..., Awaitable[Any]])
class AsyncContextDecorator:
def _recreate_cm(self) -> Self: ...
def __call__(self, func: Callable[_P, _AR]) -> _WrappedCallable[_P, _AR]: ...
def __call__(self, func: _AF) -> _AF: ...
class _AsyncGeneratorContextManager(
_GeneratorContextManagerBase[AsyncGenerator[_T_co, _SendT_contra]],

View File

@@ -65,7 +65,7 @@ class Underflow(Inexact, Rounded, Subnormal): ...
class FloatOperation(DecimalException, TypeError): ...
class Decimal:
def __new__(cls, value: _DecimalNew = "0", context: Context | None = None) -> Self: ...
def __new__(cls, value: _DecimalNew = ..., context: Context | None = ...) -> Self: ...
@classmethod
def from_float(cls, f: float, /) -> Self: ...
def __bool__(self) -> bool: ...
@@ -163,12 +163,12 @@ class Decimal:
def __reduce__(self) -> tuple[type[Self], tuple[str]]: ...
def __copy__(self) -> Self: ...
def __deepcopy__(self, memo: Any, /) -> Self: ...
def __format__(self, specifier: str, context: Context | None = None, /) -> str: ...
def __format__(self, specifier: str, context: Context | None = ..., /) -> str: ...
class Context:
# TODO: Context doesn't allow you to delete *any* attributes from instances of the class at runtime,
# even settable attributes like `prec` and `rounding`,
# but that's inexpressible in the stub.
# but that's inexpressable in the stub.
# Type checkers either ignore it or misinterpret it
# if you add a `def __delattr__(self, name: str, /) -> NoReturn` method to the stub
prec: int
@@ -181,14 +181,14 @@ class Context:
flags: dict[_TrapType, bool]
def __init__(
self,
prec: int | None = None,
rounding: str | None = None,
Emin: int | None = None,
Emax: int | None = None,
capitals: int | None = None,
clamp: int | None = None,
flags: dict[_TrapType, bool] | Container[_TrapType] | None = None,
traps: dict[_TrapType, bool] | Container[_TrapType] | None = None,
prec: int | None = ...,
rounding: str | None = ...,
Emin: int | None = ...,
Emax: int | None = ...,
capitals: int | None = ...,
clamp: int | None = ...,
flags: None | dict[_TrapType, bool] | Container[_TrapType] = ...,
traps: None | dict[_TrapType, bool] | Container[_TrapType] = ...,
) -> None: ...
def __reduce__(self) -> tuple[type[Self], tuple[Any, ...]]: ...
def clear_flags(self) -> None: ...

View File

@@ -16,7 +16,7 @@ if sys.version_info >= (3, 11):
Anchor: TypeAlias = Package
def package_to_anchor(
func: Callable[[Anchor | None], Traversable],
func: Callable[[Anchor | None], Traversable]
) -> Callable[[Anchor | None, Anchor | None], Traversable]: ...
@overload
def files(anchor: Anchor | None = None) -> Traversable: ...

View File

@@ -370,7 +370,7 @@ if sys.version_info >= (3, 12):
AGEN_CLOSED: Final = "AGEN_CLOSED"
def getasyncgenstate(
agen: AsyncGenerator[Any, Any],
agen: AsyncGenerator[Any, Any]
) -> Literal["AGEN_CREATED", "AGEN_RUNNING", "AGEN_SUSPENDED", "AGEN_CLOSED"]: ...
def getasyncgenlocals(agen: AsyncGeneratorType[Any, Any]) -> dict[str, Any]: ...
@@ -590,7 +590,7 @@ GEN_SUSPENDED: Final = "GEN_SUSPENDED"
GEN_CLOSED: Final = "GEN_CLOSED"
def getgeneratorstate(
generator: Generator[Any, Any, Any],
generator: Generator[Any, Any, Any]
) -> Literal["GEN_CREATED", "GEN_RUNNING", "GEN_SUSPENDED", "GEN_CLOSED"]: ...
CORO_CREATED: Final = "CORO_CREATED"
@@ -599,7 +599,7 @@ CORO_SUSPENDED: Final = "CORO_SUSPENDED"
CORO_CLOSED: Final = "CORO_CLOSED"
def getcoroutinestate(
coroutine: Coroutine[Any, Any, Any],
coroutine: Coroutine[Any, Any, Any]
) -> Literal["CORO_CREATED", "CORO_RUNNING", "CORO_SUSPENDED", "CORO_CLOSED"]: ...
def getgeneratorlocals(generator: Generator[Any, Any, Any]) -> dict[str, Any]: ...
def getcoroutinelocals(coroutine: Coroutine[Any, Any, Any]) -> dict[str, Any]: ...

View File

@@ -18,7 +18,7 @@ def ip_network(
address: _RawIPAddress | _RawNetworkPart | tuple[_RawIPAddress] | tuple[_RawIPAddress, int], strict: bool = True
) -> IPv4Network | IPv6Network: ...
def ip_interface(
address: _RawIPAddress | _RawNetworkPart | tuple[_RawIPAddress] | tuple[_RawIPAddress, int],
address: _RawIPAddress | _RawNetworkPart | tuple[_RawIPAddress] | tuple[_RawIPAddress, int]
) -> IPv4Interface | IPv6Interface: ...
class _IPAddressBase:

View File

@@ -2,11 +2,11 @@ from collections.abc import Callable, Iterator
from re import Pattern
from typing import Any, Final
ESCAPE: Final[Pattern[str]] # undocumented
ESCAPE_ASCII: Final[Pattern[str]] # undocumented
HAS_UTF8: Final[Pattern[bytes]] # undocumented
ESCAPE_DCT: Final[dict[str, str]] # undocumented
INFINITY: Final[float] # undocumented
ESCAPE: Final[Pattern[str]]
ESCAPE_ASCII: Final[Pattern[str]]
HAS_UTF8: Final[Pattern[bytes]]
ESCAPE_DCT: Final[dict[str, str]]
INFINITY: Final[float]
def py_encode_basestring(s: str) -> str: ... # undocumented
def py_encode_basestring_ascii(s: str) -> str: ... # undocumented

View File

@@ -1,7 +1,3 @@
from _json import make_scanner as make_scanner
from re import Pattern
from typing import Final
__all__ = ["make_scanner"]
NUMBER_RE: Final[Pattern[str]] # undocumented

Some files were not shown because too many files have changed in this diff Show More