From 2a270c5e0ec49442fa65f699fbfb30c6bdad69ae Mon Sep 17 00:00:00 2001 From: Benjamin Paassen Date: Wed, 19 Nov 2014 11:50:12 +0100 Subject: added documentation to the Tokenizer header. --- src/core/utils/Tokenizer.hpp | 123 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 122 insertions(+), 1 deletion(-) (limited to 'src/core/utils/Tokenizer.hpp') diff --git a/src/core/utils/Tokenizer.hpp b/src/core/utils/Tokenizer.hpp index eb8eed4..3b1405a 100644 --- a/src/core/utils/Tokenizer.hpp +++ b/src/core/utils/Tokenizer.hpp @@ -28,6 +28,11 @@ namespace ousia { namespace utils { + /** + * This exception is currently only thrown if errors are made during the + * initialization of the Tokenizer. Have a closer look at the documentation + * of the TokenTreeNode constructor for more information. + */ class TokenizerException : public std::exception { public: const std::string msg; @@ -37,17 +42,83 @@ public: virtual const char *what() const noexcept override { return msg.c_str(); } }; + +/** + * The Tokenizer internally uses a TokenTree to be efficiently able to identify + * the longest consecutive token in the text. This is equivalent to a prefix + * trie. + * + * The TokenTree is a construct that structures all special tokens this + * Tokenizer recognizes. Consider the Tokens "aab", "a" and "aac". Then + * the TokenTree would look like this: + * + * a + * | \ + * a $ + * | \ + * b c + * | | + * $ $ + * + * Every node in the TokenTree is a valid end state that has a $ attached to it. + * During the search algorithm the Tokenizer goes through the tree and stores + * the last valid position. If a character follows that does not lead to a new + * node in the TokenTree the search ends (and starts again at this character). + * The token corresponding to the last valid position is returned. + * + * This allows us to uniquely identify the matching token given a certain + * input text. 
Note that this is a greedy matching approach that does not + * work if you're using truly ambiguous tokens (that have the same text). + * + * It is also not allowed that tokens have common middle parts but varying + * pre- and suffixes. Consider the example of two tokens "abd" and "bc" and + * the input string "abc". In that case we start looking for "abd" at the + * start, won't find it when we hit "c", and start the scanning process + * anew. Thus the "bc" token is not found. + * + * For most (well-behaved) tokenization schemes this is not the case, + * though. + */ class TokenTreeNode { public: const std::map children; const int tokenId; + /** + * The TokenTreeNode constructor builds a TokenTree from the given token + * specifications. The node returned by this constructor then is the root of + * said TokenTree. + * @param inputs Specifications of tokens in map form. Each specification + * is a tuple of the text that should be matched and some unique ID (>= 0) + * that is returned to you if that Token is found in the text. + * An example for such a map would be + * { + * { "#" , 1}, + * { "##", 2}, + * { "/" , 3} + * } + * Note that IDs below zero are reserved for system IDs, mainly TOKEN_NONE + * (-1) and TOKEN_TEXT (-2). + */ TokenTreeNode(const std::map &inputs); }; +/** + * This is a reserved constant for the empty token. + */ static const int TOKEN_NONE = -1; +/** + * This is a reserved constant for every part of the input text that is not a + * specified token. + */ static const int TOKEN_TEXT = -2; +/** + * A token for us is identified by an integer tokenID (either one of the + * constants TOKEN_NONE or TOKEN_TEXT or one of the user-defined constants). + * Additionally we return the matched text (which should only be really interesting + * in case of TOKEN_TEXT tokens) and the position in the input text.
+ */ struct Token { int tokenId; std::string content; @@ -70,6 +141,25 @@ struct Token { Token() : tokenId(TOKEN_NONE) {} }; +/** + * A Tokenizer has the purpose of subdividing an input text into tokens. In our + * definition here we distinguish between two kinds of tokens: + * 1.) User-specified tokens that match a fixed text. + * 2.) Any other text between those tokens. + * The user might want to specify the tokens '#{' and '#}' for example, because + * they have some meaning in her code. The user sets the IDs to 1 and 2. + * Given the input text + * "some text #{ special command #} some text" + * the tokenizer would return the tokens: + * 1.) "some text " with the id TOKEN_TEXT (-2). + * 2.) "#{" with the id 1. + * 3.) " special command " with the id TOKEN_TEXT (-2). + * 4.) "#}" with the id 2. + * 5.) " some text" with the id TOKEN_TEXT (-2). + * This makes the subsequent parsing of files of a specific type easier. + * Note that in case of tokens that are prefixes of other tokens the + * longest possible match is returned. + */ class Tokenizer { private: BufferedCharReader &input; @@ -95,14 +185,45 @@ protected: virtual bool doPrepare(const Token &t, std::deque &peeked); public: + /** + * @param input The input of a Tokenizer is given in the form of a + * BufferedCharReader. Please refer to the respective documentation. + * @param root This is meant to be the root of a TokenTree giving the + * specification of user-defined tokens this Tokenizer should recognize. + * The Tokenizer promises to not change the TokenTree such that you can + * re-use the same specification for multiple inputs. + * Please refer to the TokenTreeNode documentation for more information. + */ Tokenizer(BufferedCharReader &input, const TokenTreeNode &root); + /** + * The next method consumes one Token from the input stream and gives + * it to the user (stored in the input argument). + * + * @param t a Token reference that is set to the next found token.
+ * @return true if a next token was found and false if the input is at its + * end. + */ bool next(Token &t); - + /** + * The peek method does not consume the next Token but buffers it and + * shows it to the user (stored in the input argument). + * + * @param t a Token reference that is set to the next found token. + * @return true if a next token was found and false if the input is at its + * end. + */ bool peek(Token &t); + /** + * Resets the peek pointer to the current position in the stream (to the + * beginning of the buffer). + */ void resetPeek(); + /** + * Clears the peek buffer, such that all peeked Tokens are consumed. + */ void consumePeek(); }; } -- cgit v1.2.3 From f333dd3f7c88ba93e9ac726fd1cfee6b817edd45 Mon Sep 17 00:00:00 2001 From: Benjamin Paassen Date: Wed, 19 Nov 2014 11:53:54 +0100 Subject: autoformat on Tokenizer docu. --- src/core/utils/Tokenizer.hpp | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) (limited to 'src/core/utils/Tokenizer.hpp') diff --git a/src/core/utils/Tokenizer.hpp b/src/core/utils/Tokenizer.hpp index 3b1405a..2debc75 100644 --- a/src/core/utils/Tokenizer.hpp +++ b/src/core/utils/Tokenizer.hpp @@ -28,11 +28,11 @@ namespace ousia { namespace utils { - /** - * This exception is currently only thrown if errors are made during the - * initialization of the Tokenizer. Have a closer look at the documentation - * of the TokenTreeNode constructor for more information. - */ +/** + * This exception is currently only thrown if errors are made during the + * initialization of the Tokenizer. Have a closer look at the documentation + * of the TokenTreeNode constructor for more information. + */ class TokenizerException : public std::exception { public: const std::string msg; @@ -42,7 +42,6 @@ public: virtual const char *what() const noexcept override { return msg.c_str(); } }; - /** * The Tokenizer internally uses a TokenTree to be efficiently able to identify * the longest consecutive token in the text. 
This is equivalent to a prefix @@ -59,7 +58,7 @@ public: * b c * | | * $ $ - * + * * Every node in the TokenTree is a valid end state that has a $ attached to it. * During the search algorithm the Tokenizer goes through the tree and stores * the last valid position. If a character follows that does not lead to a new @@ -116,8 +115,8 @@ static const int TOKEN_TEXT = -2; /** * A token for us is identified by an integer tokenID (either one of the * constants TOKEN_NONE or TOKEN_TEXT or one of the user-defined constants). - * Additionally we return the matched text (which should only be really interesting - * in case of TOKEN_TEXT tokens) and the position in the input text. + * Additionally we return the matched text (which should only be really + * interesting in case of TOKEN_TEXT tokens) and the position in the input text. */ struct Token { int tokenId; @@ -146,7 +145,7 @@ struct Token { * definition here we distinguish between two kinds of tokens: * 1.) User-specified tokens that match a fixed text. * 2.) Any other text between those tokens. - * The user might want to specify the tokens '#{' and '#}' for example, because + * The user might want to specify the tokens '#{' and '#}' for example, because * they have some meaning in her code. The user sets the IDs to 1 and 2. * Given the input text * "some text #{ special command #} some text" @@ -199,7 +198,7 @@ public: /** * The next method consumes one Token from the input stream and gives * it to the user (stored in the input argument). - * + * * @param t a Token reference that is set to the next found token. * @return true if a next token was found and false if the input is at its * end. @@ -208,7 +207,7 @@ public: /** * The peek method does not consume the next Token but buffers it and * shows it to the user (stored in the input argument). - * + * * @param t a Token reference that is set to the next found token. * @return true if a next token was found and false if the input is at its * end. -- cgit v1.2.3