cargo fmt

vcfxb committed Feb 26, 2024
1 parent ca1dfc2 commit 99f19c8
Showing 2 changed files with 22 additions and 22 deletions.
40 changes: 20 additions & 20 deletions wright/src/parser/lexer.rs
@@ -12,9 +12,9 @@ use std::{iter::Peekable, ptr};
 use token::{Token, TokenTy};
 use unicode_ident::{is_xid_continue, is_xid_start};
 
+pub mod comments;
 pub mod token;
 pub mod trivial;
-pub mod comments;
 
 /// The lexical analyser for wright. This produces a series of tokens that make up the larger elements of the language.
 #[derive(Debug, Clone, Copy)]
@@ -112,13 +112,13 @@ impl<'src> Lexer<'src> {
         *self
     }
 
-    /// Get the number of bytes between the origin's [remaining](Lexer::remaining) and
-    /// this [Lexer]'s [remaining](Lexer::remaining) using [`Fragment::offset_from`].
-    ///
+    /// Get the number of bytes between the origin's [remaining](Lexer::remaining) and
+    /// this [Lexer]'s [remaining](Lexer::remaining) using [`Fragment::offset_from`].
+    ///
     /// # Panics
     /// - This function panics under the same conditions as [`Fragment::offset_from`].
-    /// - Generally the best way to avoid panics is to only call this function on
-    ///     [Lexer]s created using [Lexer::fork] on the `origin` lexer.
+    /// - Generally the best way to avoid panics is to only call this function on
+    ///     [Lexer]s created using [Lexer::fork] on the `origin` lexer.
     #[inline]
     fn offset_from(&self, origin: &Self) -> usize {
         self.remaining.offset_from(&origin.remaining)
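For context on the hunk above (an editorial sketch, not part of this commit): the `fork`/`offset_from` contract can be modeled with a plain `&str` standing in for wright's `Fragment`. The names below mirror the diff, but the internals are assumptions.

```rust
/// Stand-in for wright's `Lexer`; `remaining` is modeled as a `&str`
/// sub-slice of one shared source buffer (the real type wraps a `Fragment`).
#[derive(Debug, Clone, Copy)]
struct Lexer<'src> {
    remaining: &'src str,
}

impl<'src> Lexer<'src> {
    /// Cheap copy of this lexer, used to speculatively scan ahead.
    fn fork(&self) -> Self {
        *self
    }

    /// Number of bytes `self` has consumed relative to `origin`. The asserts
    /// panic unless `self.remaining` is a sub-slice of `origin.remaining`,
    /// which is guaranteed when `self` was created by `origin.fork()`.
    fn offset_from(&self, origin: &Self) -> usize {
        let origin_start = origin.remaining.as_ptr() as usize;
        let self_start = self.remaining.as_ptr() as usize;
        assert!(self_start >= origin_start);
        assert!(self_start - origin_start <= origin.remaining.len());
        self_start - origin_start
    }

    /// Advance by `bytes`; slice indexing panics on bad boundaries.
    fn advance(&mut self, bytes: usize) {
        self.remaining = &self.remaining[bytes..];
    }
}

fn main() {
    let origin = Lexer { remaining: "// comment\nfn main() {}" };
    let mut fork = origin.fork();
    fork.advance(11); // consume "// comment\n"
    assert_eq!(fork.offset_from(&origin), 11);
}
```

The pointer comparison is why `offset_from` is panic-prone: it is only meaningful while both lexers view sub-slices of the same buffer, which forking preserves.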
@@ -171,23 +171,23 @@ impl<'src> Lexer<'src> {
         }
     }
 
-    /// Advance this lexer by the specified number of bytes.
-    ///
+    /// Advance this lexer by the specified number of bytes.
+    ///
     /// # Panics
-    /// - If the lexer is not on a unicode character boundary after advancing.
-    /// - If the number of bytes is greater than the length of the [remaining](Lexer::remaining) fragment.
+    /// - If the lexer is not on a unicode character boundary after advancing.
+    /// - If the number of bytes is greater than the length of the [remaining](Lexer::remaining) fragment.
     fn advance(&mut self, bytes: usize) {
        self.remaining.inner = &self.remaining.inner[bytes..];
     }
 
-    /// Unsafe version of [Lexer::advance].
+    /// Unsafe version of [Lexer::advance].
     /// Advances this lexer by the specified number of bytes.
-    ///
+    ///
     /// # Safety
     /// - This lexer will be left in an invalid/undefined state if the number of bytes is greater than the length
     ///     of the [Lexer::remaining] fragment.
-    /// - This lexer will be left in an invalid/undefined state if after advancing, the next byte in the
-    ///     [Lexer::remaining] fragment is not the start of a unicode code point.
+    /// - This lexer will be left in an invalid/undefined state if after advancing, the next byte in the
+    ///     [Lexer::remaining] fragment is not the start of a unicode code point.
     unsafe fn advance_unchecked(&mut self, bytes: usize) {
         self.remaining.inner = self.remaining.inner.get_unchecked(bytes..);
     }
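Likewise, the `advance`/`advance_unchecked` split documented in this hunk can be sketched over a plain `&str` (again an assumption; wright's `Fragment` wraps more state):

```rust
struct Lexer<'src> {
    remaining: &'src str,
}

impl<'src> Lexer<'src> {
    /// Safe version: slice indexing panics if `bytes` is out of bounds or not
    /// on a char boundary, matching the `# Panics` list in the diff.
    fn advance(&mut self, bytes: usize) {
        self.remaining = &self.remaining[bytes..];
    }

    /// Unsafe version: skips both checks. Sound only when `bytes` is at most
    /// `self.remaining.len()` and lands on a UTF-8 char boundary.
    unsafe fn advance_unchecked(&mut self, bytes: usize) {
        self.remaining = self.remaining.get_unchecked(bytes..);
    }
}

fn main() {
    let mut lexer = Lexer { remaining: "é + 1" };
    // "é" occupies bytes 0..2, so 2 is a valid char boundary here; byte 1
    // would not be, and `advance(1)` on this input would panic.
    unsafe { lexer.advance_unchecked(2) };
    assert_eq!(lexer.remaining, " + 1");
    lexer.advance(1); // checked version, consumes the space
    assert_eq!(lexer.remaining, "+ 1");
}
```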
@@ -202,27 +202,27 @@ impl<'src> Lexer<'src> {
             return None;
         }
 
-        // Attempt to parse a single line comment and then attempt a multi-line comment.
+        // Attempt to parse a single line comment and then attempt a multi-line comment.
         for comment_match_fn in [try_match_single_line_comment, try_match_block_comment] {
             // Attempt to parse a comment using the given match function. Return it if it's documentation or unterminated.
             // Get a new token and return that if there was a comment and it was ignored successfully.
             match (comment_match_fn)(self) {
-                // A comment was parsed, consume and return it.
+                // A comment was parsed, consume and return it.
                 (bytes, Some(comment_variant)) => {
                     // Split the token.
                     let token: Token = self.split_token(bytes, comment_variant);
                     // Return it.
                     return Some(token);
-                },
+                }
 
-                // There was a comment, advance the lexer and ignore it. Re-start this function.
+                // There was a comment, advance the lexer and ignore it. Re-start this function.
                 (bytes @ 1.., None) => {
                     self.advance(bytes);
                     return self.next_token();
                 }
 
-                // There was no comment, keep trying to match other tokens.
-                (0, None) => {},
+                // There was no comment, keep trying to match other tokens.
+                (0, None) => {}
             }
         }
 
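The loop in the final hunk of this file drives matcher functions with the shape `fn(&Lexer) -> (usize, Option<TokenTy>)`. Below is a self-contained sketch of that control flow, with stub matchers over `&str` and stand-in `Token`/`TokenTy` types (all assumptions, not wright's real definitions):

```rust
#[derive(Debug, Clone, Copy)]
enum TokenTy {
    InnerDocComment,
}

#[derive(Debug)]
struct Token {
    variant: TokenTy,
    length: usize,
}

struct Lexer<'src> {
    remaining: &'src str,
}

impl<'src> Lexer<'src> {
    fn advance(&mut self, bytes: usize) {
        self.remaining = &self.remaining[bytes..];
    }

    /// Consume `bytes` and wrap them in a token of the given variant.
    fn split_token(&mut self, bytes: usize, variant: TokenTy) -> Token {
        self.advance(bytes);
        Token { variant, length: bytes }
    }

    fn next_token(&mut self) -> Option<Token> {
        if self.remaining.is_empty() {
            return None;
        }

        // Stub matchers: "//!" comments become tokens, "//" comments are skipped.
        let matchers: [fn(&str) -> (usize, Option<TokenTy>); 2] = [
            |s| match s.starts_with("//!") {
                true => (s.find('\n').map_or(s.len(), |i| i + 1), Some(TokenTy::InnerDocComment)),
                false => (0, None),
            },
            |s| match s.starts_with("//") {
                true => (s.find('\n').map_or(s.len(), |i| i + 1), None),
                false => (0, None),
            },
        ];

        for matcher in matchers {
            match matcher(self.remaining) {
                // A comment that should be produced as a token.
                (bytes, Some(variant)) => return Some(self.split_token(bytes, variant)),
                // A plain comment: skip it and restart this function.
                (bytes @ 1.., None) => {
                    self.advance(bytes);
                    return self.next_token();
                }
                // No comment matched: fall through to the next matcher.
                (0, None) => {}
            }
        }

        None // the real lexer continues matching other token kinds here
    }
}

fn main() {
    let mut lexer = Lexer { remaining: "// skip me\n//! doc" };
    let token = lexer.next_token().unwrap();
    assert_eq!(token.length, 7); // the "//! doc" comment became a token
}
```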
4 changes: 2 additions & 2 deletions wright/src/parser/lexer/comments.rs
@@ -17,7 +17,7 @@ pub const MULTI_LINE_COMMENT_END: &str = "*/";
 ///
 /// If the [TokenTy] is not [None], the lexer should consume the specified number of bytes (by the [usize]) and
 /// Produce a token with the [variant](super::token::Token::variant) from this function.
-///
+///
 /// Generally I'm trying to follow the [rust comment spec] here.
 ///
 /// [rust comment spec]: https://doc.rust-lang.org/reference/comments.html
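A rough illustration of this contract (stand-in `TokenTy` variants; wright's actual enum and rules may differ): a single-line matcher following the Rust comment spec's `//` / `///` / `//!` distinction, ignoring edge cases such as `////`.

```rust
#[derive(Debug, PartialEq)]
enum TokenTy {
    OuterDocComment,
    InnerDocComment,
}

/// `(0, None)`: no comment here. `(n, None)`: n bytes of ignorable comment.
/// `(n, Some(variant))`: n bytes that should be produced as a token.
fn try_match_single_line_comment(input: &str) -> (usize, Option<TokenTy>) {
    if !input.starts_with("//") {
        return (0, None);
    }
    // The comment runs up to (not including) the next newline.
    let len = input.find('\n').unwrap_or(input.len());
    let variant = match input.as_bytes().get(2).copied() {
        Some(b'/') => Some(TokenTy::OuterDocComment),
        Some(b'!') => Some(TokenTy::InnerDocComment),
        _ => None,
    };
    (len, variant)
}

fn main() {
    assert_eq!(try_match_single_line_comment("let x = 1;"), (0, None));
    assert_eq!(try_match_single_line_comment("// plain\n"), (8, None));
    assert_eq!(
        try_match_single_line_comment("/// docs"),
        (8, Some(TokenTy::OuterDocComment))
    );
}
```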
@@ -93,7 +93,7 @@ pub fn try_match_block_comment(lexer: &Lexer) -> (usize, Option<TokenTy>) {
             let (nested_comment_bytes, _) = try_match_block_comment(&fork);
 
             // SAFETY: the return from this function should never be on a char boundary or out of bounds.
-            // This is because the return value is always either 0 or calculated using `offset_from`.
+            // This is because the return value is always either 0 or calculated using `offset_from`.
             unsafe { fork.advance_unchecked(nested_comment_bytes) };
 
             // Restart the loop to keep consuming this comment.
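The recursion in this hunk, forking the lexer and calling `try_match_block_comment` on the fork to measure a nested comment, can be sketched over a plain `&str` (hedged: the real matcher also distinguishes doc-comment and unterminated variants):

```rust
/// How many bytes of `input` form the block comment starting at byte 0,
/// nesting included; 0 if `input` does not start with "/*", the whole input
/// if the comment is unterminated.
fn match_block_comment(input: &str) -> usize {
    if !input.starts_with("/*") {
        return 0;
    }
    let mut pos = 2;
    while pos < input.len() {
        let rest = &input[pos..];
        if rest.starts_with("*/") {
            return pos + 2;
        }
        if rest.starts_with("/*") {
            // Nested comment: recurse on the remainder, like the forked
            // lexer above. The result is nonzero since `rest` starts "/*".
            pos += match_block_comment(rest);
        } else {
            // Step over one UTF-8 character.
            pos += rest.chars().next().map_or(1, char::len_utf8);
        }
    }
    input.len() // unterminated: the comment consumes the rest of the input
}

fn main() {
    assert_eq!(match_block_comment("/* a /* b */ c */ x"), 17);
    assert_eq!(match_block_comment("not a comment"), 0);
}
```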
