00001 #ifndef LM_WRAPPERS_NPLM_H
00002 #define LM_WRAPPERS_NPLM_H
00003
00004 #include "lm/facade.hh"
00005 #include "lm/max_order.hh"
00006 #include "util/string_piece.hh"
00007
00008 #include <boost/thread/tss.hpp>
00009 #include <boost/scoped_ptr.hpp>
00010
00011
00012
00013
00014
00015
00016 namespace nplm {
00017 class vocabulary;
00018 class neuralLM;
00019 }
00020
00021 namespace lm {
00022 namespace np {
00023
// Adapter exposing an nplm::vocabulary through KenLM's base::Vocabulary
// interface.  Holds a const reference to the underlying NPLM vocabulary,
// so that vocabulary must outlive this object.
class Vocabulary : public base::Vocabulary {
public:
Vocabulary(const nplm::vocabulary &vocab);

~Vocabulary();

// Look up the index for a word given as std::string.
// NOTE(review): defined out of line; presumably delegates to
// vocab_.lookup_word with some unknown-word handling — confirm in the .cc.
WordIndex Index(const std::string &str) const;


// StringPiece overload: copies into a std::string and delegates to the
// std::string overload above (NPLM's API takes std::string).
WordIndex Index(const StringPiece &str) const {
  return Index(std::string(str.data(), str.size()));
}

// Index used to pad context positions that have no real word (see
// Model::null_word_ and State::words).
lm::WordIndex NullWord() const { return null_word_; }

private:
// Borrowed, not owned: the NPLM model's vocabulary.
const nplm::vocabulary &vocab_;

// Cached at construction; returned by NullWord().
const lm::WordIndex null_word_;
};
00044
00045
// Compile-time cap on the NPLM model order.  State must be a fixed-size POD
// (KenLM copies states by value), so the context array is sized for the
// maximum supported order rather than the loaded model's actual order.
#define NPLM_MAX_ORDER 7

// Left-to-right decoding state: the most recent (order - 1) words of
// context, as NPLM vocabulary indices.  Plain aggregate by design.
struct State {
  WordIndex words[NPLM_MAX_ORDER - 1];
};
00051
00052 class Backend;
00053
// KenLM-facing wrapper around an nplm::neuralLM.  Implements the
// ModelFacade CRTP interface (FullScore / FullScoreForgotState) so NPLM
// models can be used wherever KenLM language models are.
class Model : public lm::base::ModelFacade<Model, State, Vocabulary> {
private:
typedef lm::base::ModelFacade<Model, State, Vocabulary> P;

public:

// Returns true if the file looks like an NPLM model (format sniffing;
// see the .cc for the exact check).
static bool Recognize(const std::string &file);

// Loads the NPLM model from |file|.  |cache_size| is the per-thread
// score cache size (default 1 MiB-ish entry budget: 1 << 20);
// NOTE(review): exact cache semantics live in Backend — confirm in the .cc.
explicit Model(const std::string &file, std::size_t cache_size = 1 << 20);

~Model();

// Score |new_word| given the context in |from|; writes the advanced
// context into |out_state|.
FullScoreReturn FullScore(const State &from, const WordIndex new_word, State &out_state) const;

// Same as FullScore but takes the context as a reversed word range
// (most recent word first) instead of a State.
FullScoreReturn FullScoreForgotState(const WordIndex *context_rbegin, const WordIndex *context_rend, const WordIndex new_word, State &out_state) const;

private:
// The master model instance loaded from disk; owned.
boost::scoped_ptr<nplm::neuralLM> base_instance_;

// Per-thread Backend (mutable so const scoring calls can lazily create
// their thread's copy).  nplm::neuralLM is presumably not thread-safe,
// hence one backend per thread.
mutable boost::thread_specific_ptr<Backend> backend_;

Vocabulary vocab_;

// Index used to pad short contexts; mirrors Vocabulary::NullWord().
lm::WordIndex null_word_;

// Stored so each lazily-created per-thread Backend gets the same cache size.
const std::size_t cache_size_;
};
00081
00082 }
00083 }
00084
00085 #endif // LM_WRAPPERS_NPLM_H