// NOTE(review): This file is an insta-snapshot test suite for html5tokenizer
// span and error reporting, but it has been damaged by a text-extraction
// pass: every `<...>` sequence — Rust generic parameter lists as well as the
// HTML tags inside the test-input strings and the snapshot bodies — has been
// stripped, and original newlines were collapsed, so several `//` comments
// had swallowed the code that followed them. All surviving tokens are kept
// exactly as found; only comments were added, and line breaks after `//`
// comments were restored so the code they had swallowed is visible again.
// The stripped content must be recovered from version control before this
// file can compile — TODO confirm against the html5tokenizer repository.

use std::convert::Infallible; use std::ops::Range; use codespan_reporting::{ self, diagnostic::{Diagnostic, Label}, files::SimpleFiles, term::{self, termcolor::Buffer}, }; use html5tokenizer::{ offset::PosTrackingReader, reader::{IntoReader, Reader}, trace::Trace, NaiveParser, Token, }; use insta::assert_snapshot; use similar_asserts::assert_eq;

/// Just a convenient type alias for labeler closures since Rust
/// apparently cannot infer the type (requiring an annotation).
// NOTE(review): the generic parameters of this alias were stripped by
// extraction — presumably something like
// `NaiveParser<PosTrackingReader<Box<dyn Reader<Error = Infallible>>>, usize, TracingEmitter>`;
// verify against the repository before repairing.
type Parser = NaiveParser< PosTrackingReader>>, usize, html5tokenizer::TracingEmitter, >;

// Builds a NaiveParser over `reader`, wrapping it in a PosTrackingReader so
// emitted traces carry byte offsets, with a TracingEmitter for span capture.
// NOTE(review): the `dyn Reader<...>` trait-object type in the cast and the
// `<R>` generic parameter list were stripped by extraction.
fn parser(reader: impl IntoReader<'static, Reader = R>) -> Parser where R: Reader + 'static, { NaiveParser::new_with_emitter( PosTrackingReader::new( Box::new(reader.into_reader()) as Box> ), html5tokenizer::TracingEmitter::default(), ) }

// Runs `labeler` over a parser for `html`, then renders the produced
// (span, message) labels as an annotated source listing for snapshotting.
// `assert_char_encoding_independence` is defined outside the visible chunk;
// presumably it re-runs the labeler on a re-encoded input to check spans are
// byte-based — TODO confirm.
fn test_and_annotate + Clone>( html: &'static str, labeler: impl Fn(Parser) -> Vec<(Range, S)>, ) -> String { let labels = labeler(parser(html)); assert_char_encoding_independence(html, labeler); annotate(html, labels) }

// Renders `labels` over `html` via codespan_reporting into a plain-text
// annotation block, then strips the diagnostic header and the line-number
// gutter so snapshots contain only source lines and caret markers.
fn annotate(html: &str, labels: Vec<(Range, impl AsRef)>) -> String { let mut files = SimpleFiles::new(); let file_id = files.add("test.html", html); let diagnostic = Diagnostic::note().with_labels( labels .into_iter() .map(|(span, text)| Label::primary(file_id, span).with_message(text.as_ref())) .collect(), ); let mut writer = Buffer::no_color(); let config = codespan_reporting::term::Config::default(); term::emit(&mut writer, &config, &files, &diagnostic).unwrap(); let msg = std::str::from_utf8(writer.as_slice()).unwrap();
// strip the filename and the line numbers since we don't need them
// (apparently they cannot be disabled in codespan_reporting)
msg.lines() .skip(3) .flat_map(|l| l.split_once("│ ").map(|s| format!("{}\n", s.1.trim_end()))) .collect::>() .join("") }

// Start-tag full-span test. NOTE(review): the HTML input string and the
// snapshot's source line were stripped of their `<...>` tag content.
#[test] fn char_span() { let html = "X & &doesntexist; ѣ "; let labeler = |parser: Parser| { let mut labels = Vec::new(); for (_, trace) in parser.flatten() { if let Trace::StartTag(trace) = trace { labels.push((trace.span, "")); } } labels }; assert_snapshot!(test_and_annotate(html, labeler), @r###" ^^^ ^^^^^ ^^^^^^^ ^^^^^^ "###); }

// End-tag full-span test (input/snapshot tag content stripped as above).
#[test] fn end_tag_span() { let html = " "; let labeler = |parser: Parser| { let mut labels = Vec::new(); for (_, trace) in parser.flatten() { if let Trace::EndTag(trace) = trace { labels.push((trace.span, "")); } } labels }; assert_snapshot!(test_and_annotate(html, labeler), @r###" ^^^^ ^^^^^^ ^^^^^^^^ ^^^^^^^ "###); }

// Start-tag name-span test (input/snapshot tag content stripped).
#[test] fn start_tag_name_span() { let html = " "; let labeler = |parser: Parser| { let mut labels = Vec::new(); for (_, trace) in parser.flatten() { if let Trace::StartTag(trace) = trace { labels.push((trace.name_span, "")); } } labels }; assert_snapshot!(test_and_annotate(html, labeler), @r###" ^ ^^^ ^^^ ^^^ "###); }

// End-tag name-span test (input/snapshot tag content stripped).
#[test] fn end_tag_name_span() { let html = " "; let labeler = |parser: Parser| { let mut labels = Vec::new(); for (_, trace) in parser.flatten() { if let Trace::EndTag(trace) = trace { labels.push((trace.name_span, "")); } } labels }; assert_snapshot!(test_and_annotate(html, labeler), @r###" ^ ^^^ ^^^ ^^^ "###); }

// Attribute name-span test: takes the first token (must be a start tag) and
// labels each attribute's name span via its trace index.
#[test] fn attribute_name_span() { let html = ""; let labeler = |parser: Parser| { let mut labels = Vec::new(); let (Token::StartTag(tag), Trace::StartTag(trace)) = parser.flatten().next().unwrap() else { panic!("expected start tag") }; for attr in &tag.attributes { labels.push(( trace.attribute_traces[attr.trace_idx().unwrap()].name_span(), "", )); } labels }; assert_snapshot!(test_and_annotate(html, labeler), @r###" ^ ^^^ ^ ^^ ^ ^^^ "###); }

// Attribute value-span test: same pattern as above, labeling value spans.
#[test] fn attribute_value_span() { let html = ""; let labeler = |parser: Parser| { let mut labels = Vec::new(); let (Token::StartTag(tag), Trace::StartTag(trace)) = parser.flatten().next().unwrap() else { panic!("expected start tag") }; for attr in &tag.attributes { labels.push(( trace.attribute_traces[attr.trace_idx().unwrap()] .value_span() .unwrap(), "", )); } labels }; assert_snapshot!(test_and_annotate(html, labeler), @r###" ^^^^^^^^
 ^^^^^^^^ ^^^^^^^^^^^^^ ^^^^^^^^^^^^^ ^ "###); }

// Attribute value spans when the value contains character references.
#[test] fn attribute_value_with_char_ref() { let html = ""; let labeler = |parser: Parser| { let mut labels = Vec::new(); let (Token::StartTag(tag), Trace::StartTag(trace)) = parser.flatten().next().unwrap() else { panic!("expected start tag") }; for attr in &tag.attributes { labels.push(( trace.attribute_traces[attr.trace_idx().unwrap()] .value_span() .unwrap(), "", )); } labels }; assert_snapshot!(test_and_annotate(html, labeler), @r###" ^^^^^ ^^^^^ ^^^^^ "###); }

// NOTE(review): the body of this test is visibly garbled — the `cases` array
// lost its comment-markup strings, and what follows (a Doctype-trace labeler)
// looks like it belonged to a separate doctype-span test that was fused into
// this one when the `<...>` content between them was stripped. Recover both
// tests from version control rather than repairing in place.
#[test] fn comment_data_span() { #[rustfmt::skip] let cases = [ "", "", "", "", "", " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^ ^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ "#, ]; let mut annotated = String::new(); for case in cases { let labeler = |parser: Parser| { let (_, Trace::Doctype(trace)) = parser.flatten().next().unwrap() else { panic!("expected doctype"); }; vec![(trace.span(), "")] }; annotated.push_str(&test_and_annotate(case, labeler)); } assert_snapshot!(annotated, @r###" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ "###); }

// Doctype name / public id / system id span test; the doctype case string
// itself was stripped (empty raw string remains).
#[test] fn doctype_id_spans() { #[rustfmt::skip] let cases = [ r#""#, ]; let mut annotated = String::new(); for case in cases { let labeler = |parser: Parser| { let (_, Trace::Doctype(trace)) = parser.flatten().next().unwrap() else { panic!("expected doctype"); }; let mut labels = Vec::new(); if let Some(name_span) = trace.name_span() { labels.push((name_span, "name")); } if let Some(public_id_span) = trace.public_id_span() { labels.push((public_id_span, "public id")); } if let Some(system_id_span) = trace.system_id_span() { labels.push((system_id_span, "system id")); } labels }; annotated.push_str(&test_and_annotate(case, labeler)); } assert_snapshot!(annotated, @r###" ^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ system id │ │ │ public id name "###); }

// End-of-file offset test: labels the zero-width span at the EOF position.
// The input string deliberately spans two lines.
#[test] fn eof_offset() { let html = "Where
does it end?"; let labeler = |parser: Parser| { let mut labels = Vec::new(); for (_, trace) in parser.flatten() { if let Trace::EndOfFile(offset) = trace { labels.push((offset..offset, "here")); } } labels }; assert_snapshot!(test_and_annotate(html, labeler), @r###" Where does it end? ^ here "###); }

// Drains tokenizer errors for `html`, sanity-checks every error span (empty
// spans only allowed exactly at EOF; otherwise start < end <= len), then
// renders the errors as annotated snapshot text via test_and_annotate.
fn annotate_errors(html: &'static str) -> String { let mut parser = parser(html); for _ in parser.by_ref() {} let errors: Vec<_> = parser.emitter_mut().drain_errors().collect(); for (_, span) in errors { if span.start == span.end { if span.start != html.len() { panic!("empty error spans are only allowed at the very end of the source (for eof errors)"); } } else { assert!(span.start < span.end); assert!(span.end <= html.len()); } } let labeler = |mut parser: Parser| { let mut labels = Vec::new(); for _ in parser.by_ref() {} for (error, span) in parser.emitter_mut().drain_errors() { labels.push((span, error.code())); } labels }; test_and_annotate(html, labeler) }

// Meta-test: reads this very source file via file!() and asserts the
// `fn error_*` tests appear in sorted order. Note this depends on the
// on-disk formatting (one `fn error_...` per line at column 0).
#[test] fn tests_for_errors_are_sorted() { let source_of_this_file = std::fs::read_to_string(file!()).unwrap(); let mut error_tests: Vec<_> = source_of_this_file .lines() .filter(|l| l.starts_with("fn error_")) .collect(); let error_tests_found_order = error_tests.join("\n"); error_tests.sort(); let error_tests_sorted = error_tests.join("\n"); assert_eq!(error_tests_found_order, error_tests_sorted); }

// Parse-error snapshot tests. NOTE(review): several of the inputs below show
// already-decoded characters (e.g. "¬", "�") where the source presumably
// contained literal character references — the extraction pass appears to
// have decoded them; restore the originals from version control.
#[test] fn error_char_ref_absence_of_digits() { let html = "&#qux;"; assert_snapshot!(annotate_errors(html), @r###" &#qux; ^^^ absence-of-digits-in-numeric-character-reference "###); }

#[test] fn error_char_ref_control_char() { let html = ""; assert_snapshot!(annotate_errors(html), @r###"  ^^^^^^ control-character-reference "###); }

#[test] fn error_char_ref_missing_semicolon() { let html = "¬"; assert_snapshot!(annotate_errors(html), @r###" ¬ ^ missing-semicolon-after-character-reference "###); }

#[test] fn error_char_ref_noncharacter() { let html = "﷐"; assert_snapshot!(annotate_errors(html), @r###" ﷐ ^^^^^^^^
noncharacter-character-reference "###); }

#[test] fn error_char_ref_null_char() { let html = "�"; assert_snapshot!(annotate_errors(html), @r###" � ^^^^ null-character-reference "###); }

#[test] fn error_char_ref_outside_unicode_range() { let html = "�"; assert_snapshot!(annotate_errors(html), @r###" � ^^^^^^^^^^ character-reference-outside-unicode-range "###); }

#[test] fn error_char_ref_surrogate() { let html = "�"; assert_snapshot!(annotate_errors(html), @r###" � ^^^^^^^^ surrogate-character-reference "###); }

#[test] fn error_char_ref_unknown_named() { let html = "The pirate says &arrrrr;"; assert_snapshot!(annotate_errors(html), @r###" The pirate says &arrrrr; ^^^^^^^^ unknown-named-character-reference "###); }

// NOTE(review): inputs for the tag-error tests below lost their `<...>` tag
// content to extraction, same as the span tests above.
#[test] fn error_duplicate_attribute() { let html = "Does this open two pages? "; assert_snapshot!(annotate_errors(html), @r###" Does this open two pages? ^^^^ duplicate-attribute "###); }

#[test] fn error_end_tag_with_attributes() { let html = ""; assert_snapshot!(annotate_errors(html), @r###" ^^^^^^ end-tag-with-attributes "###); }

#[test] fn error_end_tag_with_trailing_solidus() { let html = "Do you start or do you end? "; assert_snapshot!(annotate_errors(html), @r###" Do you start or do you end? ^ end-tag-with-trailing-solidus "###); }

#[test] fn error_eof_before_tag_name() { let html = "<"; assert_snapshot!(annotate_errors(html), @r###" < ^ eof-before-tag-name "###); }

// TODO: add error_eof_in_cdata test
// blocked by lack of proper tree constructor (NaiveParser doesn't parse CDATA sections)
// NOTE(review): the test below is truncated mid-string-literal — this chunk
// ends here; the remainder of the function lies outside the visible source.
#[test] fn error_eof_in_comment() { let html = "