@@ -2739,8 +2739,11 @@ impl<'a> Parser<'a> {
             return infix;
         }
 
-        let mut tok = self.next_token();
-        let regular_binary_operator = match &mut tok.token {
+        let dialect = self.dialect;
+
+        let (tok, tok_index) = self.next_token_ref_with_index();
+        let span = tok.span;
+        let regular_binary_operator = match &tok.token {
             Token::Spaceship => Some(BinaryOperator::Spaceship),
             Token::DoubleEq => Some(BinaryOperator::Eq),
             Token::Eq => Some(BinaryOperator::Eq),
@@ -2758,30 +2761,30 @@ impl<'a> Parser<'a> {
             Token::Caret => {
                 // In PostgreSQL, ^ stands for the exponentiation operation,
                 // and # stands for XOR. See https://www.postgresql.org/docs/current/functions-math.html
-                if dialect_of!(self is PostgreSqlDialect) {
+                if dialect_is!(dialect is PostgreSqlDialect) {
                     Some(BinaryOperator::PGExp)
                 } else {
                     Some(BinaryOperator::BitwiseXor)
                 }
             }
             Token::Ampersand => Some(BinaryOperator::BitwiseAnd),
             Token::Div => Some(BinaryOperator::Divide),
-            Token::DuckIntDiv if dialect_of!(self is DuckDbDialect | GenericDialect) => {
+            Token::DuckIntDiv if dialect_is!(dialect is DuckDbDialect | GenericDialect) => {
                 Some(BinaryOperator::DuckIntegerDivide)
             }
-            Token::ShiftLeft if dialect_of!(self is PostgreSqlDialect | DuckDbDialect | GenericDialect) => {
+            Token::ShiftLeft if dialect_is!(dialect is PostgreSqlDialect | DuckDbDialect | GenericDialect) => {
                 Some(BinaryOperator::PGBitwiseShiftLeft)
             }
-            Token::ShiftRight if dialect_of!(self is PostgreSqlDialect | DuckDbDialect | GenericDialect) => {
+            Token::ShiftRight if dialect_is!(dialect is PostgreSqlDialect | DuckDbDialect | GenericDialect) => {
                 Some(BinaryOperator::PGBitwiseShiftRight)
             }
-            Token::Sharp if dialect_of!(self is PostgreSqlDialect) => {
+            Token::Sharp if dialect_is!(dialect is PostgreSqlDialect) => {
                 Some(BinaryOperator::PGBitwiseXor)
             }
-            Token::Overlap if dialect_of!(self is PostgreSqlDialect | GenericDialect) => {
+            Token::Overlap if dialect_is!(dialect is PostgreSqlDialect | GenericDialect) => {
                 Some(BinaryOperator::PGOverlap)
             }
-            Token::CaretAt if dialect_of!(self is PostgreSqlDialect | GenericDialect) => {
+            Token::CaretAt if dialect_is!(dialect is PostgreSqlDialect | GenericDialect) => {
                 Some(BinaryOperator::PGStartsWith)
             }
             Token::Tilde => Some(BinaryOperator::PGRegexMatch),
@@ -2804,21 +2807,21 @@ impl<'a> Parser<'a> {
             Token::Question => Some(BinaryOperator::Question),
             Token::QuestionAnd => Some(BinaryOperator::QuestionAnd),
             Token::QuestionPipe => Some(BinaryOperator::QuestionPipe),
-            Token::CustomBinaryOperator(s) => Some(BinaryOperator::Custom(core::mem::take(s))),
+            Token::CustomBinaryOperator(s) => Some(BinaryOperator::Custom(s.clone())),
 
             Token::Word(w) => match w.keyword {
                 Keyword::AND => Some(BinaryOperator::And),
                 Keyword::OR => Some(BinaryOperator::Or),
                 Keyword::XOR => Some(BinaryOperator::Xor),
-                Keyword::OPERATOR if dialect_of!(self is PostgreSqlDialect | GenericDialect) => {
+                Keyword::OPERATOR if dialect_is!(dialect is PostgreSqlDialect | GenericDialect) => {
                     self.expect_token(&Token::LParen)?;
                     // there are special rules for operator names in
                     // postgres so we can not use 'parse_object'
                     // or similar.
                     // See https://www.postgresql.org/docs/current/sql-createoperator.html
                     let mut idents = vec![];
                     loop {
-                        idents.push(self.next_token().to_string());
+                        idents.push(self.next_token_ref().to_string());
                         if !self.consume_token(&Token::Period) {
                             break;
                         }
@@ -2831,6 +2834,7 @@ impl<'a> Parser<'a> {
             _ => None,
         };
 
+        let tok = self.token_at(tok_index);
         if let Some(op) = regular_binary_operator {
             if let Some(keyword) =
                 self.parse_one_of_keywords(&[Keyword::ANY, Keyword::ALL, Keyword::SOME])
@@ -2861,7 +2865,7 @@ impl<'a> Parser<'a> {
                     format!(
                         "Expected one of [=, >, <, =>, =<, !=] as comparison operator, found: {op}"
                     ),
-                    tok.span.start
+                    span.start
                 );
             };
 
@@ -2990,19 +2994,19 @@ impl<'a> Parser<'a> {
                     tok.span.start
                 ),
             }
-        } else if Token::DoubleColon == tok {
+        } else if Token::DoubleColon == *tok {
             Ok(Expr::Cast {
                 kind: CastKind::DoubleColon,
                 expr: Box::new(expr),
                 data_type: self.parse_data_type()?,
                 format: None,
             })
-        } else if Token::ExclamationMark == tok && self.dialect.supports_factorial_operator() {
+        } else if Token::ExclamationMark == *tok && self.dialect.supports_factorial_operator() {
             Ok(Expr::UnaryOp {
                 op: UnaryOperator::PGPostfixFactorial,
                 expr: Box::new(expr),
             })
-        } else if Token::LBracket == tok {
+        } else if Token::LBracket == *tok {
             if dialect_of!(self is PostgreSqlDialect | DuckDbDialect | GenericDialect) {
                 self.parse_subscript(expr)
             } else if dialect_of!(self is SnowflakeDialect) || self.dialect.supports_partiql() {
@@ -3011,7 +3015,7 @@ impl<'a> Parser<'a> {
             } else {
                 self.parse_map_access(expr)
             }
-        } else if dialect_of!(self is SnowflakeDialect | GenericDialect) && Token::Colon == tok {
+        } else if dialect_of!(self is SnowflakeDialect | GenericDialect) && Token::Colon == *tok {
             self.prev_token();
             self.parse_json_access(expr)
         } else {
@@ -3282,6 +3286,12 @@ impl<'a> Parser<'a> {
         self.dialect.get_next_precedence_default(self)
     }
 
+    /// Return the token at the given location, or EOF if the index is beyond
+    /// the length of the current set of tokens.
+    pub fn token_at(&self, index: usize) -> &TokenWithSpan {
+        self.tokens.get(index).unwrap_or(&EOF_TOKEN)
+    }
+
     /// Return the first non-whitespace token that has not yet been processed
     /// or Token::EOF
     pub fn peek_token(&self) -> TokenWithSpan {
@@ -3398,18 +3408,22 @@ impl<'a> Parser<'a> {
3398
3408
self.next_token_ref().clone()
3399
3409
}
3400
3410
3411
+ pub fn next_token_ref(&mut self) -> &TokenWithSpan {
3412
+ self.next_token_ref_with_index().0
3413
+ }
3414
+
3401
3415
/// Return the first non-whitespace token that has not yet been processed
3402
3416
/// (or None if reached end-of-file) and mark it as processed. OK to call
3403
3417
/// repeatedly after reaching EOF.
3404
- pub fn next_token_ref (&mut self) -> &TokenWithSpan {
3418
+ pub fn next_token_ref_with_index (&mut self) -> ( &TokenWithSpan, usize) {
3405
3419
loop {
3406
3420
self.index += 1;
3407
3421
match self.tokens.get(self.index - 1) {
3408
3422
Some(TokenWithSpan {
3409
3423
token: Token::Whitespace(_),
3410
3424
span: _,
3411
3425
}) => continue,
3412
- token => return token.unwrap_or(&EOF_TOKEN),
3426
+ token => return ( token.unwrap_or(&EOF_TOKEN), self.index ),
3413
3427
}
3414
3428
}
3415
3429
}
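
For context, a minimal sketch (not part of the diff) of how the two new helpers are meant to compose: `next_token_ref_with_index` hands back a borrowed token together with the index it was read from, and `token_at` lets the caller re-borrow that same token later instead of cloning it up front, which is what `parse_infix` above does across its intervening `&mut self` calls. The `Parser::new`/`try_with_sql` setup is existing sqlparser API; the two helpers exist only on this branch, and the query string is illustrative.

// Sketch only: `next_token_ref_with_index` and `token_at` are the helpers
// introduced in this diff; everything else is existing sqlparser API.
use sqlparser::dialect::GenericDialect;
use sqlparser::parser::{Parser, ParserError};

fn demo() -> Result<(), ParserError> {
    let dialect = GenericDialect {};
    let mut parser = Parser::new(&dialect).try_with_sql("1 + 2")?;

    // Borrow the next non-whitespace token without cloning it, and remember
    // the index it came from.
    let (tok, tok_index) = parser.next_token_ref_with_index();
    let span = tok.span; // copy out the cheap pieces while the borrow is live

    // ...further `&mut self` calls here would end the borrow of `tok`...

    // Re-borrow the same token by index rather than having cloned it earlier.
    let tok = parser.token_at(tok_index);
    println!("token {:?} starts at {:?}", tok.token, span.start);
    Ok(())
}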