From a1718ae350c0afcd9dbf5d89d2ebdce7d9a6407c Mon Sep 17 00:00:00 2001
From: Andreas Ley
Date: Sun, 12 Jan 2020 16:21:02 +0100
Subject: [PATCH] Fix for incorrect tokenization due to index difference of
 Unicode character/scalar (#286)

* Fix: `Scanner` now uses indices of the respective UnicodeScalarView
* Fix: `Scanner` now uses indices of the respective UnicodeScalarView
* Extended test for Unicode `Combining Diaeresis`
* Fixed test for combining diaeresis
* Inlined template for testing Unicode combining diaeresis

Co-authored-by: Ilya Puchka
---
 Sources/Lexer.swift                | 16 ++++++++--------
 Tests/StencilTests/LexerSpec.swift | 10 ++++++++++
 2 files changed, 18 insertions(+), 8 deletions(-)

diff --git a/Sources/Lexer.swift b/Sources/Lexer.swift
index 47465f5..f6fe0a2 100644
--- a/Sources/Lexer.swift
+++ b/Sources/Lexer.swift
@@ -114,7 +114,7 @@ struct Lexer {
 class Scanner {
   let originalContent: String
   var content: String
-  var range: Range<String.Index>
+  var range: Range<String.UnicodeScalarView.Index>
 
   /// The start delimiter for a token.
   private static let tokenStartDelimiter: Unicode.Scalar = "{"
@@ -124,7 +124,7 @@ class Scanner {
   init(_ content: String) {
     self.originalContent = content
     self.content = content
-    range = content.startIndex..<content.startIndex
+    range = content.unicodeScalars.startIndex..<content.unicodeScalars.startIndex
   }

[NOTE(review): this copy of the patch was damaged by angle-bracket stripping —
every span between a `<` and the following `>` was deleted (author e-mail
addresses, `Range<…>` generic parameters, and everything from the `..<` range
operator above through the `->` of the test helper below). The two restored
`Range<String.Index>` / `Range<String.UnicodeScalarView.Index>` lines and the
`unicodeScalars` init lines are reconstructed from this patch's own changelog
bullets. The remaining six changed line-pairs of Sources/Lexer.swift and the
Tests/StencilTests/LexerSpec.swift hunk header are lost here and must be
recovered from upstream commit a1718ae350c0afcd9dbf5d89d2ebdce7d9a6407c.
The surviving tail of the LexerSpec.swift hunk follows.]

 -> SourceMap {
     guard let range = lexer.templateString.range(of: token, options: options) else {
       fatalError("Token not found")
     }
     return SourceMap(location: lexer.rangeLocation(range))