diff options
author | Martin Fischer <martin@push-f.com> | 2023-08-16 17:07:06 +0200 |
---|---|---|
committer | Martin Fischer <martin@push-f.com> | 2023-08-19 06:41:55 +0200 |
commit | 681404e5036841ec45356f56f77cc5377f3640d9 (patch) | |
tree | 568f6cdd34e0366372ba1ad3c2d13f1b8e2b530c /integration_tests/tests/test_html5lib.rs | |
parent | c021afde2c2bd4b8bdd9b3eec4b1660cb0a896e5 (diff) |
refactor: decouple html5lib_tests from html5tokenizer
Previously we mapped the test tokens to our own token type.
Now we do the reverse, which makes more sense as it enables us
to easily add more detailed fields to our own token variants
without having to worry about these fields not being present
in the html5lib test data.
(An alternative would have been to normalize the values of these
fields to some arbitrary value so that PartialEq still holds,
but seeing such normalized fields in the diff printed by
pretty_assertions on a test failure would be quite confusing.)
Diffstat (limited to 'integration_tests/tests/test_html5lib.rs')
-rw-r--r-- | integration_tests/tests/test_html5lib.rs | 44 |
1 file changed, 31 insertions, 13 deletions
diff --git a/integration_tests/tests/test_html5lib.rs b/integration_tests/tests/test_html5lib.rs index 3236f0f..23adec0 100644 --- a/integration_tests/tests/test_html5lib.rs +++ b/integration_tests/tests/test_html5lib.rs @@ -1,6 +1,8 @@ use std::{fs::File, io::BufReader, path::Path}; -use html5lib_tests::{parse_tests, InitialState, ParseError, ParseErrorInner, Test}; +use html5lib_tests::{ + parse_tests, Error as TestError, InitialState, Output, Test, Token as TestToken, +}; use html5tokenizer::{InternalState, Reader, Token, Tokenizer}; use pretty_assertions::assert_eq; @@ -107,22 +109,38 @@ fn run_test_inner<R: Reader>( tokenizer.set_last_start_tag(last_start_tag); } - let mut actual_tokens = Vec::new(); - let mut actual_errors = Vec::new(); + let mut actual = Output { + errors: Vec::new(), + tokens: Vec::new(), + }; for token in tokenizer { let token = token.unwrap(); - if let Token::Error { error, .. } = token { - actual_errors.push(ParseError { - code: ParseErrorInner(error), - }); - } else { - actual_tokens.push(token); - } + match token { + Token::Error { error, .. 
} => actual.errors.push(TestError { + code: error.to_string(), + }), + Token::StartTag(tag) => actual.tokens.push(TestToken::StartTag { + name: tag.name, + attributes: tag + .attributes + .into_iter() + .map(|(name, map_val)| (name, map_val.value)) + .collect(), + self_closing: tag.self_closing, + }), + Token::EndTag(tag) => actual.tokens.push(TestToken::EndTag { name: tag.name }), + Token::String(data) => actual.tokens.push(TestToken::Character(data)), + Token::Comment(data) => actual.tokens.push(TestToken::Comment(data)), + Token::Doctype(doctype) => actual.tokens.push(TestToken::Doctype { + name: Some(doctype.name).filter(|name| !name.is_empty()), + public_id: doctype.public_identifier, + system_id: doctype.system_identifier, + force_quirks: doctype.force_quirks, + }), + }; } - assert_eq!(test.output.0, actual_tokens); - - assert_eq!(test.errors, actual_errors); + assert_eq!(test.output, actual); } |