summaryrefslogtreecommitdiff
path: root/tests
diff options
context:
space:
mode:
authorMartin Fischer <martin@push-f.com>2023-09-10 19:37:34 +0200
committerMartin Fischer <martin@push-f.com>2023-09-28 10:36:08 +0200
commit852d5c6f2e65a5ab466662ae1c649a0ed25c70a9 (patch)
tree96d6bcdb2f2274f1081a0b6cfbde314f319159a1 /tests
parenta03cea75d9d120a7519be91ec872b143b5d74276 (diff)
break!: move offsets out of Token
Previously the Token enum contained the offsets using the O generic type parameter, which could be a usize if you're tracking offsets or a zero-sized type if you didn't care about offsets. This commit moves all the byte offset and syntax information to a new Trace enum, which has several advantages: * Traces can now easily be stored separately, while the tokens are fed to the tree builder. (The tree builder only has to keep track of which tree nodes originate from which tokens.) * No needless generics for functions that take a token but don't care about offsets (a tree construction implementation is bound to have many such functions). * The FromIterator<(String, String)> impl for AttributeMap no longer has to specify arbitrary values for the spans and the value_syntax. * The PartialEq implementation of Token is now much more useful (since it no longer includes all the offsets). * The Debug formatting of Token is now more readable (since it no longer includes all the offsets). * Function pointers to functions accepting tokens are possible. (Since function pointer types may not have generic parameters.)
Diffstat (limited to 'tests')
-rw-r--r--tests/test_spans.rs78
1 file changed, 48 insertions, 30 deletions
diff --git a/tests/test_spans.rs b/tests/test_spans.rs
index 71a6c4b..0e95be0 100644
--- a/tests/test_spans.rs
+++ b/tests/test_spans.rs
@@ -10,7 +10,8 @@ use codespan_reporting::{
use html5tokenizer::{
offset::PosTrackingReader,
reader::{IntoReader, Reader},
- NaiveParser, Token, TracingEmitter,
+ trace::Trace,
+ NaiveParser, Token,
};
use insta::assert_snapshot;
use similar_asserts::assert_eq;
@@ -31,7 +32,7 @@ where
PosTrackingReader::new(
Box::new(reader.into_reader()) as Box<dyn Reader<Error = Infallible>>
),
- TracingEmitter::default(),
+ html5tokenizer::TracingEmitter::default(),
)
}
@@ -76,9 +77,9 @@ fn start_tag_span() {
let html = "<x> <xyz> <xyz > <xyz/>";
let labeler = |parser: Parser| {
let mut labels = Vec::new();
- for token in parser.flatten() {
- if let Token::StartTag(tag) = token {
- labels.push((tag.span, ""));
+ for (_, trace) in parser.flatten() {
+ if let Trace::StartTag(trace) = trace {
+ labels.push((trace.span, ""));
}
}
labels
@@ -94,9 +95,9 @@ fn end_tag_span() {
let html = "</x> </xyz> </xyz > </xyz/>";
let labeler = |parser: Parser| {
let mut labels = Vec::new();
- for token in parser.flatten() {
- if let Token::EndTag(tag) = token {
- labels.push((tag.span, ""));
+ for (_, trace) in parser.flatten() {
+ if let Trace::EndTag(trace) = trace {
+ labels.push((trace.span, ""));
}
}
labels
@@ -112,9 +113,9 @@ fn start_tag_name_span() {
let html = "<x> <xyz> <xyz > <xyz/>";
let labeler = |parser: Parser| {
let mut labels = Vec::new();
- for token in parser.flatten() {
- if let Token::StartTag(tag) = token {
- labels.push((tag.name_span, ""));
+ for (_, trace) in parser.flatten() {
+ if let Trace::StartTag(trace) = trace {
+ labels.push((trace.name_span, ""));
}
}
labels
@@ -130,9 +131,9 @@ fn end_tag_name_span() {
let html = "</x> </xyz> </xyz > </xyz/>";
let labeler = |parser: Parser| {
let mut labels = Vec::new();
- for token in parser.flatten() {
- if let Token::EndTag(tag) = token {
- labels.push((tag.name_span, ""));
+ for (_, trace) in parser.flatten() {
+ if let Trace::EndTag(trace) = trace {
+ labels.push((trace.name_span, ""));
}
}
labels
@@ -148,11 +149,15 @@ fn attribute_name_span() {
let html = "<test x xyz y=VAL xy=VAL z = VAL yzx = VAL>";
let labeler = |parser: Parser| {
let mut labels = Vec::new();
- let Token::StartTag(tag) = parser.flatten().next().unwrap() else {
+ let (Token::StartTag(tag), Trace::StartTag(trace)) = parser.flatten().next().unwrap()
+ else {
panic!("expected start tag")
};
for attr in &tag.attributes {
- labels.push((attr.name_span(), ""));
+ labels.push((
+ trace.attribute_traces[attr.trace_idx().unwrap()].name_span(),
+ "",
+ ));
}
labels
};
@@ -167,11 +172,17 @@ fn attribute_value_span() {
let html = "<test x=unquoted y = unquoted z='single-quoted' zz=\"double-quoted\" empty=''>";
let labeler = |parser: Parser| {
let mut labels = Vec::new();
- let Token::StartTag(tag) = parser.flatten().next().unwrap() else {
+ let (Token::StartTag(tag), Trace::StartTag(trace)) = parser.flatten().next().unwrap()
+ else {
panic!("expected start tag")
};
for attr in &tag.attributes {
- labels.push((attr.value_span().unwrap(), ""));
+ labels.push((
+ trace.attribute_traces[attr.trace_idx().unwrap()]
+ .value_span()
+ .unwrap(),
+ "",
+ ));
}
labels
};
@@ -186,11 +197,17 @@ fn attribute_value_with_char_ref() {
let html = "<test x=&amp; y='&amp;' z=\"&amp;\">";
let labeler = |parser: Parser| {
let mut labels = Vec::new();
- let Token::StartTag(tag) = parser.flatten().next().unwrap() else {
+ let (Token::StartTag(tag), Trace::StartTag(trace)) = parser.flatten().next().unwrap()
+ else {
panic!("expected start tag")
};
for attr in &tag.attributes {
- labels.push((attr.value_span().unwrap(), ""));
+ labels.push((
+ trace.attribute_traces[attr.trace_idx().unwrap()]
+ .value_span()
+ .unwrap(),
+ "",
+ ));
}
labels
};
@@ -224,10 +241,10 @@ fn comment_data_span() {
let mut annotated = String::new();
for case in cases {
let labeler = |parser: Parser| {
- let Token::Comment(comment) = parser.flatten().next().unwrap() else {
+ let (_, Trace::Comment(comment)) = parser.flatten().next().unwrap() else {
panic!("expected comment");
};
- vec![(comment.data_span(), "")]
+ vec![(comment.data_span, "")]
};
annotated.push_str(&test_and_annotate(case, labeler));
@@ -263,10 +280,11 @@ fn comment_data_span() {
"###);
for (idx, case) in cases.iter().enumerate() {
- let Token::Comment(comment) = parser(*case).flatten().next().unwrap() else {
+ let (Token::Comment(data), Trace::Comment(trace)) = parser(*case).flatten().next().unwrap()
+ else {
panic!("expected comment");
};
- assert_eq!(case[comment.data_span()], comment.data, "case {idx}");
+ assert_eq!(case[trace.data_span], data, "case {idx}");
}
}
@@ -280,10 +298,10 @@ fn doctype_span() {
let mut annotated = String::new();
for case in cases {
let labeler = |parser: Parser| {
- let Token::Doctype(doctype) = parser.flatten().next().unwrap() else {
+ let (_, Trace::Doctype(trace)) = parser.flatten().next().unwrap() else {
panic!("expected doctype");
};
- vec![(doctype.span, "")]
+ vec![(trace.span(), "")]
};
annotated.push_str(&test_and_annotate(case, labeler));
}
@@ -304,18 +322,18 @@ fn doctype_id_spans() {
let mut annotated = String::new();
for case in cases {
let labeler = |parser: Parser| {
- let Token::Doctype(doctype) = parser.flatten().next().unwrap() else {
+ let (_, Trace::Doctype(trace)) = parser.flatten().next().unwrap() else {
panic!("expected doctype");
};
let mut labels = Vec::new();
- if let Some(name_span) = doctype.name_span() {
+ if let Some(name_span) = trace.name_span() {
labels.push((name_span, "name"));
}
- if let Some(public_id_span) = doctype.public_id_span() {
+ if let Some(public_id_span) = trace.public_id_span() {
labels.push((public_id_span, "public id"));
}
- if let Some(system_id_span) = doctype.system_id_span() {
+ if let Some(system_id_span) = trace.system_id_span() {
labels.push((system_id_span, "system id"));
}
labels