Diffstat (limited to 'test/core/utils')

-rw-r--r--  test/core/utils/CodeTokenizerTest.cpp  30
-rw-r--r--  test/core/utils/TokenizerTest.cpp      34
2 files changed, 64 insertions(+), 0 deletions(-)
diff --git a/test/core/utils/CodeTokenizerTest.cpp b/test/core/utils/CodeTokenizerTest.cpp
new file mode 100644
index 0000000..d0f9a17
--- /dev/null
+++ b/test/core/utils/CodeTokenizerTest.cpp
@@ -0,0 +1,30 @@
+/*
+ Ousía
+ Copyright (C) 2014, 2015 Benjamin Paaßen, Andreas Stöckel
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <gtest/gtest.h>
+
+#include <core/utils/CodeTokenizer.hpp>
+
+namespace ousia {
+namespace utils {
+TEST(CodeTokenizer, testTokenizer)
+{
+	// TODO: exercise the CodeTokenizer here; this test is an empty stub.
+}
+}
+}
diff --git a/test/core/utils/TokenizerTest.cpp b/test/core/utils/TokenizerTest.cpp
index f441fd8..ba06c33 100644
--- a/test/core/utils/TokenizerTest.cpp
+++ b/test/core/utils/TokenizerTest.cpp
@@ -18,6 +18,8 @@
 #include <gtest/gtest.h>
 
+#include <core/utils/BufferedCharReader.hpp>
+
 #include <core/utils/Tokenizer.hpp>
 
 namespace ousia {
 namespace utils {
@@ -59,5 +61,37 @@ TEST(TokenTreeNode, testConstructor)
 	ASSERT_EQ(4, abd.tokenId);
 	ASSERT_EQ(0, abd.children.size());
 }
+
+TEST(Tokenizer, testTokenization)
+{
+	TokenTreeNode root{{{"/", 1}, {"/*", 2}, {"*/", 3}}};
+
+	BufferedCharReader reader;
+	reader.feed("Test/Test /* Block Comment */");
+	//           12345678901234567890123456789
+	//           0        1         2
+
+	std::vector<Token> expected = {
+	    {TOKEN_TEXT, "Test", 1, 1, 5, 1},
+	    {1, "/", 5, 1, 6, 1},
+	    {TOKEN_TEXT, "Test ", 6, 1, 11, 1},
+	    {2, "/*", 11, 1, 13, 1},
+	    {TOKEN_TEXT, " Block Comment ", 13, 1, 28, 1},
+	    {3, "*/", 28, 1, 30, 1}};
+
+	Tokenizer tokenizer{reader, root};
+
+	Token t;
+	for (auto &te : expected) {
+		ASSERT_TRUE(tokenizer.next(t));
+		ASSERT_EQ(te.tokenId, t.tokenId);
+		ASSERT_EQ(te.content, t.content);
+		ASSERT_EQ(te.startColumn, t.startColumn);
+		ASSERT_EQ(te.startLine, t.startLine);
+		ASSERT_EQ(te.endColumn, t.endColumn);
+		ASSERT_EQ(te.endLine, t.endLine);
+	}
+	ASSERT_FALSE(tokenizer.next(t));
+}
 }
 }
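
For reference, the Tokenizer API exercised by the new test can be driven outside of gtest roughly like this. This is a minimal sketch that assumes only what the test itself shows (the TokenTreeNode initializer-list constructor, BufferedCharReader::feed, Tokenizer's two-argument constructor and next(), and the Token fields asserted above); the main() driver and the printing are illustrative additions, not part of the library:

#include <iostream>

#include <core/utils/BufferedCharReader.hpp>
#include <core/utils/Tokenizer.hpp>

using namespace ousia::utils;

int main()
{
	// Token ids are chosen by the caller; TOKEN_TEXT marks the plain
	// text between matched tokens (as in the expectations above).
	TokenTreeNode root{{{"/", 1}, {"/*", 2}, {"*/", 3}}};

	BufferedCharReader reader;
	reader.feed("Test/Test /* Block Comment */");

	Tokenizer tokenizer{reader, root};

	// next() fills in the Token and returns false once the input is
	// exhausted, so a plain while loop consumes the whole stream.
	Token t;
	while (tokenizer.next(t)) {
		std::cout << t.tokenId << " \"" << t.content << "\" at line "
		          << t.startLine << ", column " << t.startColumn
		          << std::endl;
	}
	return 0;
}

Because next() doubles as the end-of-input signal, no separate EOF check on the reader is needed; the test above relies on the same property when it asserts ASSERT_FALSE(tokenizer.next(t)) after the expected tokens are consumed.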