author    Martin Fischer <martin@push-f.com>    2023-08-29 13:09:44 +0200
committer Martin Fischer <martin@push-f.com>    2023-09-28 10:36:01 +0200
commit    826907487e2b593f1c54e98b59fe2f6eb8cb6937 (patch)
tree      de48a91090a240033a6f02eb8e984da133b71025 /tests
parent    2b4c52758c503b08d3299ad2d1ee369ad5f597f1 (diff)
break!: remove Token::Error
An error isn't a token (in general, and also according to the spec). You shouldn't have to filter out errors when you're only interested in tokens. Most importantly, having errors in the Token enum is annoying when implementing tree construction, since the spec conditions exhaustively cover all Token variants except Token::Error.
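For context, this is the pattern the tests switch to, shown as a minimal sketch assembled only from calls visible in the diff below (the `parser` test helper and the surrounding setup are assumptions taken from tests/test_spans.rs, not a definitive API reference):

    // Hypothetical sketch; `parser` is the test helper from tests/test_spans.rs.
    let mut parser = parser(html);

    // Exhaust the token stream first; it now only ever yields real tokens.
    for _ in parser.by_ref() {}

    // Errors are no longer Token variants: after iteration they are drained
    // from the emitter as (error, span) pairs.
    let mut labels = Vec::new();
    for (error, span) in parser.emitter_mut().drain_errors() {
        labels.push((span, error.code()));
    }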
Diffstat (limited to 'tests')
-rw-r--r--    tests/test_spans.rs    46
1 file changed, 12 insertions(+), 34 deletions(-)
diff --git a/tests/test_spans.rs b/tests/test_spans.rs
index f2cdc5f..64cc250 100644
--- a/tests/test_spans.rs
+++ b/tests/test_spans.rs
@@ -221,12 +221,7 @@ fn comment_data_span() {
let mut annotated = String::new();
for case in cases {
let labeler = |parser: Parser| {
- let Token::Comment(comment) = parser
- .flatten()
- .filter(|t| !matches!(t, Token::Error { .. }))
- .next()
- .unwrap()
- else {
+ let Token::Comment(comment) = parser.flatten().next().unwrap() else {
panic!("expected comment");
};
vec![(comment.data_span(), "")]
@@ -265,12 +260,7 @@ fn comment_data_span() {
"###);
for (idx, case) in cases.iter().enumerate() {
- let Token::Comment(comment) = parser(*case)
- .flatten()
- .filter(|t| !matches!(t, Token::Error { .. }))
- .next()
- .unwrap()
- else {
+ let Token::Comment(comment) = parser(*case).flatten().next().unwrap() else {
panic!("expected comment");
};
assert_eq!(case[comment.data_span()], comment.data, "case {idx}");
@@ -287,12 +277,7 @@ fn doctype_span() {
let mut annotated = String::new();
for case in cases {
let labeler = |parser: Parser| {
- let Token::Doctype(doctype) = parser
- .flatten()
- .filter(|t| !matches!(t, Token::Error { .. }))
- .next()
- .unwrap()
- else {
+ let Token::Doctype(doctype) = parser.flatten().next().unwrap() else {
panic!("expected doctype");
};
vec![(doctype.span, "")]
@@ -316,12 +301,7 @@ fn doctype_id_spans() {
let mut annotated = String::new();
for case in cases {
let labeler = |parser: Parser| {
- let Token::Doctype(doctype) = parser
- .flatten()
- .filter(|t| !matches!(t, Token::Error { .. }))
- .next()
- .unwrap()
- else {
+ let Token::Doctype(doctype) = parser.flatten().next().unwrap() else {
panic!("expected doctype");
};
@@ -351,10 +331,11 @@ fn doctype_id_spans() {
}
fn annotate_errors(html: &'static str) -> String {
- for token in parser(html).flatten() {
- let Token::Error { span, .. } = token else {
- continue;
- };
+ let mut parser = parser(html);
+ for _ in parser.by_ref() {}
+ let errors: Vec<_> = parser.emitter_mut().drain_errors().collect();
+
+ for (_, span) in errors {
if span.start == span.end {
if span.start != html.len() {
panic!("empty error spans are only allowed at the very end of the source (for eof errors)");
@@ -365,13 +346,10 @@ fn annotate_errors(html: &'static str) -> String {
}
}
- let labeler = |parser: Parser| {
+ let labeler = |mut parser: Parser| {
let mut labels = Vec::new();
- for token in parser.flatten() {
- let Token::Error { error, span } = token else {
- continue;
- };
-
+ for _ in parser.by_ref() {}
+ for (error, span) in parser.emitter_mut().drain_errors() {
labels.push((span, error.code()));
}
labels