Compare commits

...

1 Commit

Author SHA1 Message Date
Charlie Marsh
7fcf4c067c Remove string from comment 2024-01-06 16:19:48 -05:00
25 changed files with 34 additions and 61 deletions

View File

@@ -61,7 +61,7 @@ pub(crate) fn check_tokens(
}
}
Tok::FStringMiddle { .. } => Context::String,
Tok::Comment(_) => Context::Comment,
Tok::Comment => Context::Comment,
_ => continue,
};
ruff::rules::ambiguous_unicode_character(

View File

@@ -39,7 +39,7 @@ impl Iterator for DocLines<'_> {
let (tok, range) = self.inner.next()?;
match tok {
Tok::Comment(..) => {
Tok::Comment => {
if at_start_of_line {
break Some(range.start());
}

View File

@@ -174,7 +174,7 @@ impl<'a> Insertion<'a> {
// Once we've seen the colon, we're looking for a newline; otherwise, there's no
// block body (e.g. `if True: pass`).
Awaiting::Newline => match tok {
Tok::Comment(..) => {}
Tok::Comment => {}
Tok::Newline => {
state = Awaiting::Indent;
}
@@ -185,7 +185,7 @@ impl<'a> Insertion<'a> {
},
// Once we've seen the newline, we're looking for the indentation of the block body.
Awaiting::Indent => match tok {
Tok::Comment(..) => {}
Tok::Comment => {}
Tok::NonLogicalNewline => {}
Tok::Indent => {
// This is like:

View File

@@ -33,11 +33,9 @@ pub(crate) struct StateMachine {
impl StateMachine {
pub(crate) fn consume(&mut self, tok: &Tok) -> bool {
match tok {
Tok::NonLogicalNewline
| Tok::Newline
| Tok::Indent
| Tok::Dedent
| Tok::Comment(..) => false,
Tok::NonLogicalNewline | Tok::Newline | Tok::Indent | Tok::Dedent | Tok::Comment => {
false
}
Tok::String { .. } => {
if matches!(

View File

@@ -242,7 +242,7 @@ pub(crate) fn trailing_commas(
.flatten()
.filter_map(|spanned @ (tok, tok_range)| match tok {
// Completely ignore comments -- they just interfere with the logic.
Tok::Comment(_) => None,
Tok::Comment => None,
// F-strings are handled as `String` token type with the complete range
// of the outermost f-string. This means that the expression inside the
// f-string is not checked for trailing commas.

View File

@@ -461,7 +461,7 @@ pub(crate) fn check_string_quotes(
// range to the sequence.
sequence.push(fstring_range_builder.finish());
}
Tok::Comment(..) | Tok::NonLogicalNewline => continue,
Tok::Comment | Tok::NonLogicalNewline => continue,
_ => {
// Otherwise, consume the sequence.
if !sequence.is_empty() {

View File

@@ -26,7 +26,7 @@ pub(super) fn trailing_comma(
if count == 1 {
if matches!(
tok,
Tok::NonLogicalNewline | Tok::Indent | Tok::Dedent | Tok::Comment(_)
Tok::NonLogicalNewline | Tok::Indent | Tok::Dedent | Tok::Comment
) {
continue;
} else if matches!(tok, Tok::Comma) {

View File

@@ -239,7 +239,7 @@ pub(crate) fn compound_statements(
semi = Some((range.start(), range.end()));
allow_ellipsis = false;
}
Tok::Comment(..) | Tok::Indent | Tok::Dedent | Tok::NonLogicalNewline => {}
Tok::Comment | Tok::Indent | Tok::Dedent | Tok::NonLogicalNewline => {}
_ => {
if let Some((start, end)) = semi {
diagnostics.push(Diagnostic::new(
@@ -347,7 +347,7 @@ fn has_non_trivia_tokens_till<'a>(
}
if !matches!(
tok,
Tok::Newline | Tok::Comment(_) | Tok::EndOfFile | Tok::NonLogicalNewline
Tok::Newline | Tok::Comment | Tok::EndOfFile | Tok::NonLogicalNewline
) {
return true;
}

View File

@@ -48,7 +48,7 @@ fn match_extraneous_parentheses(tokens: &[LexResult], mut i: usize) -> Option<(u
return None;
};
match tok {
Tok::Comment(..) | Tok::NonLogicalNewline => {
Tok::Comment | Tok::NonLogicalNewline => {
i += 1;
}
Tok::Lpar => {
@@ -88,12 +88,7 @@ fn match_extraneous_parentheses(tokens: &[LexResult], mut i: usize) -> Option<(u
let end = i;
// Verify that we're not in an empty tuple.
if (start + 1..i).all(|i| {
matches!(
tokens[i],
Ok((Tok::Comment(..) | Tok::NonLogicalNewline, _))
)
}) {
if (start + 1..i).all(|i| matches!(tokens[i], Ok((Tok::Comment | Tok::NonLogicalNewline, _)))) {
return None;
}
@@ -107,7 +102,7 @@ fn match_extraneous_parentheses(tokens: &[LexResult], mut i: usize) -> Option<(u
return None;
};
match tok {
Tok::Comment(..) | Tok::NonLogicalNewline => {
Tok::Comment | Tok::NonLogicalNewline => {
i += 1;
}
_ => {

View File

@@ -426,7 +426,7 @@ pub(crate) fn f_strings(
if !lexer::lex_starts_at(rest, Mode::Expression, prev_end)
.flatten()
.all(|(token, _)| match token {
Tok::Comment(_) | Tok::Newline | Tok::NonLogicalNewline | Tok::Indent | Tok::Dedent => {
Tok::Comment | Tok::Newline | Tok::NonLogicalNewline | Tok::Indent | Tok::Dedent => {
true
}
Tok::String { value, .. } => value.is_empty(),

View File

@@ -411,7 +411,7 @@ impl<'source> Lexer<'source> {
let offset = memchr::memchr2(b'\n', b'\r', bytes).unwrap_or(bytes.len());
self.cursor.skip_bytes(offset);
Tok::Comment(self.token_text().to_string())
Tok::Comment
}
/// Lex a single IPython escape command.

View File

@@ -176,7 +176,7 @@ pub fn locate_cmp_ops(expr: &Expr, source: &str) -> Vec<LocatedCmpOp> {
.flatten()
.skip(1)
.map(|(tok, range)| (tok, range - TextSize::from(1)))
.filter(|(tok, _)| !matches!(tok, Tok::NonLogicalNewline | Tok::Comment(_)))
.filter(|(tok, _)| !matches!(tok, Tok::NonLogicalNewline | Tok::Comment))
.peekable();
let mut ops: Vec<LocatedCmpOp> = vec![];

View File

@@ -2076,6 +2076,6 @@ extern {
},
"\n" => token::Tok::Newline,
";" => token::Tok::Semi,
// "#" => token::Tok::Comment(_),
// "#" => token::Tok::Comment,
}
}

View File

@@ -1,5 +1,5 @@
// auto-generated: "lalrpop 0.20.0"
// sha3: 031689e389556292d9dbd8a1b1ff8ca29bac76d83f1b345630481d620b89e1c2
// sha3: 64183bfe2809b4e883c796c3b5125541e4be6bf9022efb1374d4bf2b842236a7
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use ruff_python_ast::{self as ast, Int, IpyEscapeKind};
use crate::{

View File

@@ -10,9 +10,7 @@ expression: comment_until_eol(MAC_EOL)
0..3,
),
(
Comment(
"# Foo",
),
Comment,
5..10,
),
(

View File

@@ -10,9 +10,7 @@ expression: comment_until_eol(UNIX_EOL)
0..3,
),
(
Comment(
"# Foo",
),
Comment,
5..10,
),
(

View File

@@ -10,9 +10,7 @@ expression: comment_until_eol(WINDOWS_EOL)
0..3,
),
(
Comment(
"# Foo",
),
Comment,
5..10,
),
(

View File

@@ -19,9 +19,7 @@ expression: lex_source(source)
21..22,
),
(
Comment(
"# comment {",
),
Comment,
23..34,
),
(

View File

@@ -10,9 +10,7 @@ expression: lex_source(&source)
0..5,
),
(
Comment(
"#",
),
Comment,
7..8,
),
(

View File

@@ -10,9 +10,7 @@ expression: lex_source(&source)
0..5,
),
(
Comment(
"# foo",
),
Comment,
7..12,
),
(

View File

@@ -10,9 +10,7 @@ expression: lex_source(&source)
0..5,
),
(
Comment(
"# ",
),
Comment,
7..9,
),
(

View File

@@ -10,9 +10,7 @@ expression: lex_source(&source)
0..5,
),
(
Comment(
"# ",
),
Comment,
7..10,
),
(

View File

@@ -4,9 +4,7 @@ expression: lex_source(source)
---
[
(
Comment(
"#Hello",
),
Comment,
0..6,
),
(
@@ -14,9 +12,7 @@ expression: lex_source(source)
6..7,
),
(
Comment(
"#World",
),
Comment,
7..13,
),
(

View File

@@ -66,7 +66,7 @@ pub enum Tok {
kind: IpyEscapeKind,
},
/// Token value for a comment. These are filtered out of the token stream prior to parsing.
Comment(String),
Comment,
/// Token value for a newline.
Newline,
/// Token value for a newline that is not a logical line break. These are filtered out of
@@ -268,7 +268,7 @@ impl fmt::Display for Tok {
Rsqb => f.write_str("']'"),
Colon => f.write_str("':'"),
Comma => f.write_str("','"),
Comment(value) => f.write_str(value),
Comment => f.write_str("Comment"),
Semi => f.write_str("';'"),
Plus => f.write_str("'+'"),
Minus => f.write_str("'-'"),
@@ -806,7 +806,7 @@ impl TokenKind {
Tok::FStringMiddle { .. } => TokenKind::FStringMiddle,
Tok::FStringEnd => TokenKind::FStringEnd,
Tok::IpyEscapeCommand { .. } => TokenKind::EscapeCommand,
Tok::Comment(_) => TokenKind::Comment,
Tok::Comment => TokenKind::Comment,
Tok::Newline => TokenKind::Newline,
Tok::NonLogicalNewline => TokenKind::NonLogicalNewline,
Tok::Indent => TokenKind::Indent,

View File

@@ -1019,7 +1019,7 @@ mod tests {
let comment_ranges: Vec<_> = lex(self.source, Mode::Module)
.filter_map(|result| {
let (token, range) = result.expect("Input to be a valid python program.");
if matches!(token, Tok::Comment(_)) {
if matches!(token, Tok::Comment) {
Some(range)
} else {
None