49 changes: 48 additions & 1 deletion compiler/rustc_ast/src/tokenstream.rs
@@ -978,10 +978,56 @@ pub struct DelimSpan {

impl DelimSpan {
pub fn from_single(sp: Span) -> Self {
DelimSpan { open: sp, close: sp }
let default_open = sp.shrink_to_lo();
let default_close = sp.shrink_to_hi();

let Some(sm) = rustc_span::source_map::get_source_map() else {
// No source map available.
return Self { open: default_open, close: default_close };
};

let (open, close) = sm
.span_to_source(sp, |src, start, end| {
let src = match src.get(start..end) {
Some(s) if !s.is_empty() => s.as_bytes(),
_ => return Ok((default_open, default_close)),
};

// Only check the first and last characters.
// If there is whitespace, or any character other than
// the delimiters of `( ... )`, `[ ... ]`, and `{ ... }`,
// assume it was intentionally included in this span, so
// don't shrink the span by searching for the delimiters
// and setting the open and close spans to some more
// interior position.
let first = src[0];
let last = src[src.len() - 1];

// Possible improvement: if the first character is '(', '[', or '{',
// scan through and check that the last character matches it, i.e.
// that it is not some extra, mismatched delimiter.

let open = sp.subspan(0..1).unwrap_or(default_open);
let pos = (sp.hi() - sp.lo()).0.checked_sub(1).unwrap_or(0);
let close = sp.subspan(pos..).unwrap_or(default_close);

Ok(match (first, last) {
Contributor:
I'm not sure why the logic above is so overcomplicated.
`let first = src[0]` and `let last = src[src.len() - 1]`.
`open` is `sp.with_lo(sp.lo() + BytePos(1))` if the source map check passes, and `close` is `sp.with_hi(sp.hi() - BytePos(1))`.
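A minimal sketch of that simpler form (hypothetical helper, not code from this PR; it assumes the first/last-byte check has already confirmed a matching delimiter pair, and computes the same first-byte/last-byte spans as the `subspan(0..1)` / `subspan(pos..)` calls in the diff above):

```rust
use rustc_span::{BytePos, Span};

// Sketch only: the span of the opening delimiter byte and of the closing
// delimiter byte of `sp`, assuming `sp` is at least two bytes long and
// starts/ends with a matching delimiter pair.
fn delim_open_close(sp: Span) -> (Span, Span) {
    let open = sp.with_hi(sp.lo() + BytePos(1)); // just the first byte
    let close = sp.with_lo(sp.hi() - BytePos(1)); // just the last byte
    (open, close)
}
```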

Author:
let first = chars.next().unwrap_or('_');
let last = chars.last().unwrap_or('_');

This won't panic even if the length is zero for some reason, and it also won't panic if the first or last glyph is not an ASCII code point. Still, I could use `src.as_bytes()`, since it is unlikely that a non-ASCII delimiter would ever be added to the language. So it would probably be fair to drop the `len_utf8()` handling and just work on the bytes directly instead of a `&str`.
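For comparison, a minimal sketch of the two extraction styles being discussed (hypothetical free functions over the `src` slice, not code from this PR):

```rust
// Char-based, as proposed above: never panics, even for an empty string or a
// non-ASCII first/last glyph (a single-character string leaves `last` as '_').
fn first_last_chars(src: &str) -> (char, char) {
    let mut chars = src.chars();
    let first = chars.next().unwrap_or('_');
    let last = chars.last().unwrap_or('_');
    (first, last)
}

// Byte-based: also panic-free once the empty case is excluded, and sufficient
// here because every delimiter being checked for is ASCII.
fn first_last_bytes(src: &str) -> Option<(u8, u8)> {
    let bytes = src.as_bytes();
    Some((*bytes.first()?, *bytes.last()?))
}
```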

Contributor:
Everything we are checking for is ASCII, so we can work with byte positions in `src`.
The empty string case needs to be excluded from the start, yeah.

(b'(', b')') | (b'{', b'}') | (b'[', b']') => (open, close),
(b'(', _) | (b'{', _) | (b'[', _) => (open, default_close),
(_, b')') | (_, b'}') | (_, b']') => (default_open, close),
(_, _) => (default_open, default_close),
})
})
.ok()
.unwrap_or((default_open, default_close));

debug_assert!(open.lo() <= close.lo());
DelimSpan { open, close }
}

pub fn from_pair(open: Span, close: Span) -> Self {
debug_assert!(open.lo() <= close.lo());
DelimSpan { open, close }
}

@@ -990,6 +1036,7 @@ impl DelimSpan {
}

pub fn entire(self) -> Span {
debug_assert!(self.open.lo() <= self.close.lo());
self.open.with_hi(self.close.hi())
}
}
2 changes: 1 addition & 1 deletion compiler/rustc_attr_parsing/src/parser.rs
@@ -484,7 +484,7 @@ impl<'a, 'sess> MetaItemListParserContext<'a, 'sess> {

fn expected_lit(&mut self) -> Diag<'sess> {
let mut err = InvalidMetaItem {
span: self.parser.token.span,
span: self.parser.token_diag_span(),
descr: token_descr(&self.parser.token),
quote_ident_sugg: None,
remove_neg_sugg: None,
29 changes: 24 additions & 5 deletions compiler/rustc_expand/src/mbe/macro_parser.rs
@@ -77,7 +77,7 @@ use std::rc::Rc;

pub(crate) use NamedMatch::*;
pub(crate) use ParseResult::*;
use rustc_ast::token::{self, DocComment, NonterminalKind, Token, TokenKind};
use rustc_ast::token::{self, Delimiter, DocComment, NonterminalKind, Token, TokenKind};
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::ErrorGuaranteed;
use rustc_lint_defs::pluralize;
@@ -177,8 +177,25 @@ pub(super) fn compute_locs(matcher: &[TokenTree]) -> Vec<MatcherLoc> {
locs.push(MatcherLoc::Token { token: *token });
}
TokenTree::Delimited(span, _, delimited) => {
let open_token = Token::new(delimited.delim.as_open_token_kind(), span.open);
let close_token = Token::new(delimited.delim.as_close_token_kind(), span.close);
// Is there a better way for MatcherLoc to store these?
// We push a `MatcherLoc::Delimited`; would it be better
// to store the entire span for all delimiter types in
// that variant instead of leaving it as an empty enum
// variant?
// The diagnostics currently use `MatcherLoc::Span()`,
// but for invisible delimiters they seem to expect that
// to be the whole span. It would make more sense to me
// if they got that data from `MatcherLoc::Delimited`; that
// would also let diagnostics for non-invisible delims get
// the entire span information if needed.
// However, for now I am just going to do this to keep the
// existing diagnostics happy.
let (open, close) = match delimited.delim {
Delimiter::Invisible(_) => (span.entire(), span.entire()),
_ => (span.open, span.close),
};
let open_token = Token::new(delimited.delim.as_open_token_kind(), open);
let close_token = Token::new(delimited.delim.as_close_token_kind(), close);

locs.push(MatcherLoc::Delimited);
locs.push(MatcherLoc::Token { token: open_token });
Expand Down Expand Up @@ -479,7 +496,7 @@ impl TtParser {
MatcherLoc::Token { token: t } => {
// If it's a doc comment, we just ignore it and move on to the next tt in the
// matcher. This is a bug, but #95267 showed that existing programs rely on
// this behaviour, and changing it would require some care and a transition
// this behavior, and changing it would require some care and a transition
// period.
//
// If the token matches, we can just advance the parser.
@@ -649,10 +666,12 @@ impl TtParser {
// Error messages here could be improved with links to original rules.
match (self.next_mps.len(), self.bb_mps.len()) {
(0, 0) => {
let mut tok = parser.token;
tok.span = parser.token_diag_span();
// There are no possible next positions AND we aren't waiting for the black-box
// parser: syntax error.
return Failure(T::build_failure(
parser.token,
tok,
parser.approx_token_stream_pos(),
"no rules expected this token in macro call",
));
5 changes: 3 additions & 2 deletions compiler/rustc_parse/src/parser/diagnostics.rs
@@ -334,6 +334,7 @@ impl<'a> Parser<'a> {
// we take this here so that the correct original token is retained in
// the diagnostic, regardless of eager recovery.
let bad_token = self.token;
let bad_span = self.token_diag_span();

// suggest prepending a keyword in identifier position with `r#`
let suggest_raw = if let Some((ident, IdentIsRaw::No)) = self.token.ident()
@@ -358,7 +359,7 @@
recovered_ident = self.ident_or_err(false).ok();
};

Some(SuggRemoveComma { span: bad_token.span })
Some(SuggRemoveComma { span: bad_span })
} else {
None
};
@@ -372,7 +373,7 @@
});

let err = ExpectedIdentifier {
span: bad_token.span,
span: bad_span,
token: bad_token,
suggest_raw,
suggest_remove_comma,
16 changes: 7 additions & 9 deletions compiler/rustc_parse/src/parser/expr.rs
@@ -1414,7 +1414,7 @@ impl<'a> Parser<'a> {
fn parse_expr_bottom(&mut self) -> PResult<'a, Box<Expr>> {
maybe_recover_from_interpolated_ty_qpath!(self, true);

let span = self.token.span;
let span = self.token_diag_span();
if let Some(expr) = self.eat_metavar_seq_with_matcher(
|mv_kind| matches!(mv_kind, MetaVarKind::Expr { .. }),
|this| {
@@ -1910,9 +1910,9 @@ impl<'a> Parser<'a> {
{
self.psess.buffer_lint(
BREAK_WITH_LABEL_AND_LOOP,
lo.to(expr.span),
lo.until(self.token.span),
ast::CRATE_NODE_ID,
BuiltinLintDiag::BreakWithLabelAndLoop(expr.span),
BuiltinLintDiag::BreakWithLabelAndLoop(expr.span.until(self.token.span)),
);
}

@@ -2357,13 +2357,11 @@ impl<'a> Parser<'a> {
}

if self.token.is_metavar_block() {
let sp = self.token_diag_span();
self.dcx().emit_err(errors::InvalidBlockMacroSegment {
span: self.token.span,
context: lo.to(self.token.span),
wrap: errors::WrapInExplicitBlock {
lo: self.token.span.shrink_to_lo(),
hi: self.token.span.shrink_to_hi(),
},
span: sp,
context: lo.to(sp),
wrap: errors::WrapInExplicitBlock { lo: sp.shrink_to_lo(), hi: sp.shrink_to_hi() },
});
}

42 changes: 40 additions & 2 deletions compiler/rustc_parse/src/parser/mod.rs
@@ -155,12 +155,23 @@ macro_rules! maybe_recover_from_interpolated_ty_qpath {
&& let token::MetaVarKind::Ty { .. } = mv_kind
&& $self.check_noexpect_past_close_delim(&token::PathSep)
{
// We want the span before the token gets eaten.
// For example, the current token has an `OpenInvisible`;
// after being eaten, the parser's previous token will
// become a `CloseInvisible`, which only has the close
// span, and the current token will just be a path
// separator.

// We probably only need `$self.token.span` here because of
// the `Span::until(...)` below.
let sp = $self.parent_tree_span().unwrap_or($self.token.span);

// Reparse the type, then move to recovery.
let ty = $self
.eat_metavar_seq(mv_kind, |this| this.parse_ty_no_question_mark_recover())
.expect("metavar seq ty");

return $self.maybe_recover_from_bad_qpath_stage_2($self.prev_token.span, ty);
let span = sp.until($self.token.span);
return $self.maybe_recover_from_bad_qpath_stage_2(span, ty);
}
};
}
@@ -908,7 +919,7 @@ impl<'a> Parser<'a> {
);
expect_err
.with_span_suggestion_verbose(
self.prev_token.span.shrink_to_hi().until(self.token.span),
sp.until(self.token.span),
msg,
" @ ",
Applicability::MaybeIncorrect,
@@ -1665,6 +1676,33 @@ impl<'a> Parser<'a> {
_ => self.prev_token.span,
}
}

/// Returns the full `Span` of the parent enclosing delimited construct, if any.
///
/// The span covers both the opening and closing delimiters as well as everything
/// in between.
///
/// This is primarily used to improve diagnostics that need the outer syntactic context
/// (e.g. the full span of `∅ ... ∅`, `( ... )`, `{ ... }`, or `[ ... ]`).
///
/// Returns `None` if the parser is not currently inside a delimited group.
pub fn parent_tree_span(&self) -> Option<Span> {
//println!("{:#?}", self.token_cursor.stack.last()?);
match self.token_cursor.stack.last()?.curr()? {
TokenTree::Delimited(dspan, _, _, _) => return Some(dspan.entire()),
// I don't think this case is possible
_ => None,
}
}

/// Get the full span of the current token for diagnostic purposes.
pub fn token_diag_span(&self) -> Span {
let sp = self.token.span;
if self.token.is_metavar_seq().is_some() {
return self.parent_tree_span().unwrap_or(sp);
}
sp
}
}

// Metavar captures of various kinds.
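As a usage sketch (hypothetical call site, not part of this diff), the intended pattern at diagnostic sites is to substitute `token_diag_span()` where they previously read `self.token.span`, mirroring the call sites changed in `diagnostics.rs` and `expr.rs` above:

```rust
// Sketch only, inside some `impl<'a> Parser<'a>` block:
fn report_unexpected_token(&mut self) -> Diag<'a> {
    // `self.token.span` can point at a zero-width invisible-delimiter position
    // when the current token comes from a macro metavariable; fall back to the
    // full span of the enclosing delimited group instead.
    let span = self.token_diag_span();
    self.dcx().struct_span_err(span, "unexpected token")
}
```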
33 changes: 32 additions & 1 deletion compiler/rustc_span/src/lib.rs
@@ -77,7 +77,7 @@ use std::cmp::{self, Ordering};
use std::fmt::Display;
use std::hash::Hash;
use std::io::{self, Read};
use std::ops::{Add, Range, Sub};
use std::ops::{Add, Bound, Range, RangeBounds, Sub};
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::Arc;
@@ -1176,6 +1176,37 @@ impl Span {
pub fn normalize_to_macro_rules(self) -> Span {
self.map_ctxt(|ctxt| ctxt.normalize_to_macro_rules())
}

/// This function is similar to `Span::from_inner`, but it
/// returns `None` if the given relative range exceeds the
/// bounds of the span.
pub fn subspan<I: Copy, R: RangeBounds<I>>(self, subspan: R) -> Option<Span>
where
u32: TryFrom<I>,
{
let lo = self.lo().0;
let hi = self.hi().0;

let start = match subspan.start_bound() {
Bound::Included(s) => u32::try_from(*s).ok()?,
Bound::Excluded(s) => u32::try_from(*s).ok()?.checked_add(1)?,
Bound::Unbounded => 0,
};

let end = match subspan.end_bound() {
Bound::Included(e) => u32::try_from(*e).ok()?.checked_add(1)?,
Bound::Excluded(e) => u32::try_from(*e).ok()?,
Bound::Unbounded => hi - lo,
};

let new_lo = lo.checked_add(start)?;
let new_hi = lo.checked_add(end)?;
if new_lo > hi || new_hi > hi {
return None;
}

Some(self.with_lo(BytePos(new_lo)).with_hi(BytePos(new_hi)))
}
}

impl Default for Span {
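A few illustrative calls for the new helper (sketch only; `sp` is assumed to be a `Span` covering the five bytes of `(a+b)`):

```rust
// Assuming `sp` covers the 5 bytes of `(a+b)`:
let open = sp.subspan(0..1);    // Some(..): just the `(` byte
let close = sp.subspan(4..);    // Some(..): just the `)` byte
let whole = sp.subspan(0..=4);  // Some(..): the entire span, via an inclusive bound
let none = sp.subspan(3..9);    // None: the requested end exceeds the span's length
```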
@@ -65,13 +65,13 @@ LL | x.unwrap();
| ^^^^^^^^^^

error: called `unwrap` on `x` after checking its variant with `is_some`
--> tests/ui/checked_unwrap/simple_conditionals.rs:13:13
--> tests/ui/checked_unwrap/simple_conditionals.rs:13:15
|
LL | if $a.is_some() {
| --------------- help: try: `if let Some(<item>) = x`
LL | // unnecessary
LL | $a.unwrap();
| ^^^^^^^^^^^
| ^^^^^^^^^
...
LL | m!(x);
| ----- in this macro invocation
10 changes: 9 additions & 1 deletion src/tools/clippy/tests/ui/cognitive_complexity.stderr
@@ -16,6 +16,14 @@ LL | fn kaboom() {
|
= help: you could split it up into multiple smaller functions

error: the function has a cognitive complexity of (2/1)
--> tests/ui/cognitive_complexity.rs:121:4
|
LL | fn bloo() {
| ^^^^
|
= help: you could split it up into multiple smaller functions

error: the function has a cognitive complexity of (2/1)
--> tests/ui/cognitive_complexity.rs:158:4
|
@@ -176,5 +184,5 @@ LL | fn bar() {
|
= help: you could split it up into multiple smaller functions

error: aborting due to 22 previous errors
error: aborting due to 23 previous errors

2 changes: 1 addition & 1 deletion src/tools/clippy/tests/ui/string_lit_as_bytes.fixed
@@ -8,7 +8,7 @@ extern crate macro_rules;

macro_rules! b {
($b:literal) => {
const B: &[u8] = b"warning";
const B: &[u8] = $bb"warning";
//~^ string_lit_as_bytes
};
}
4 changes: 2 additions & 2 deletions src/tools/clippy/tests/ui/string_lit_as_bytes.stderr
@@ -26,10 +26,10 @@ LL | let bs = "lit to owned".to_owned().into_bytes();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: consider using a byte string literal instead: `b"lit to owned".to_vec()`

error: calling `as_bytes()` on a string literal
--> tests/ui/string_lit_as_bytes.rs:11:26
--> tests/ui/string_lit_as_bytes.rs:11:28
|
LL | const B: &[u8] = $b.as_bytes();
| ^^^^^^^^^^^^^ help: consider using a byte string literal instead: `b"warning"`
| ^^^^^^^^^^^ help: consider using a byte string literal instead: `b"warning"`
...
LL | b!("warning");
| ------------- in this macro invocation
4 changes: 2 additions & 2 deletions tests/ui/borrowck/issue-25793.stderr
@@ -1,8 +1,8 @@
error[E0503]: cannot use `self.width` because it was mutably borrowed
--> $DIR/issue-25793.rs:4:9
--> $DIR/issue-25793.rs:4:14
|
LL | $this.width.unwrap()
| ^^^^^^^^^^^ use of borrowed `*self`
| ^^^^^^ use of borrowed `*self`
...
LL | let r = &mut *self;
| ---------- `*self` is borrowed here
2 changes: 1 addition & 1 deletion tests/ui/issues/issue-26237.stderr
@@ -2,7 +2,7 @@ error[E0618]: expected function, found `{integer}`
--> $DIR/issue-26237.rs:10:18
|
LL | $not_a_function($some_argument)
| ------------------------------- call expression requires function
| ---------------- call expression requires function
...
LL | let mut value_a = 0;
| ----------- `value_a` has type `{integer}`