#pragma warning(disable:4244)  // Conversion warnings

      probability_in_context_(&tesseract::Dict::def_probability_in_context),
      params_model_classify_(NULL),
      STRING_MEMBER(user_words_file, "", "A filename of user-provided words.",
                    getCCUtil()->params()),
                    "A suffix of user-provided words located in tessdata.",
                    getCCUtil()->params()),
                    "A filename of user-provided patterns.",
                    getCCUtil()->params()),
                    "A suffix of user-provided patterns located in "
                    "tessdata.",
                    getCCUtil()->params()),
                    getCCUtil()->params()),
                    getCCUtil()->params()),
                    getCCUtil()->params()),
57 "Load dawg with punctuation" 59 getCCUtil()->params()),
61 "Load dawg with number" 63 getCCUtil()->params()),
65 "Load dawg with special word " 67 getCCUtil()->params()),
69 "Score penalty (0.1 = 10%) added if there are subscripts " 70 "or superscripts in a word, but it is otherwise OK.",
71 getCCUtil()->params()),
73 "Score penalty (0.1 = 10%) added if an xheight is " 75 getCCUtil()->params()),
77 "Score multiplier for word matches which have good case and" 78 "are frequent in the given language (lower is better).",
79 getCCUtil()->params()),
81 "Score multiplier for word matches that have good case " 83 getCCUtil()->params()),
85 "Default score multiplier for word matches, which may have " 86 "case issues (lower is better).",
87 getCCUtil()->params()),
89 "Multipler to for the best choice from the ngram model.",
90 getCCUtil()->params()),
92 "Score multiplier for glyph fragment segmentations which " 93 "do not match a dictionary word (lower is better).",
94 getCCUtil()->params()),
96 "Score multiplier for poorly cased strings that are not in" 97 " the dictionary and generally look like garbage (lower is" 99 getCCUtil()->params()),
101 "Output file for ambiguities found in the dictionary",
102 getCCUtil()->params()),
104 "Set to 1 for general debug info" 105 ", to 2 for more details, to 3 to see all the debug messages",
106 getCCUtil()->params()),
      INT_MEMBER(hyphen_debug_level, 0, "Debug level for hyphenated words.",
                 getCCUtil()->params()),
      INT_MEMBER(max_viterbi_list_size, 10, "Maximum size of viterbi list.",
                 getCCUtil()->params()),
                  "Use only the first UTF8 step of the given string"
                  " when computing log probabilities.",
                  getCCUtil()->params()),
      double_MEMBER(certainty_scale, 20.0, "Certainty scaling factor",
                    getCCUtil()->params()),
                    "Certainty threshold for non-dict words",
                    getCCUtil()->params()),
      double_MEMBER(stopper_phase2_certainty_rejection_offset, 1.0,
                    "Reject certainty offset", getCCUtil()->params()),
                 "Size of dict word to be treated as non-dict word",
                 getCCUtil()->params()),
                    " for each dict char above small word size.",
                    getCCUtil()->params()),
                    "Max certainty variation allowed in a word (in sigma)",
                    getCCUtil()->params()),
      INT_MEMBER(stopper_debug_level, 0, "Stopper debug level",
                 getCCUtil()->params()),
                  "Make AcceptableChoice() always return false. Useful"
                  " when there is a need to explore all segmentations",
                  getCCUtil()->params()),
                  "Deprecated - backward compatibility only",
                  getCCUtil()->params()),
      INT_MEMBER(tessedit_truncate_wordchoice_log, 10,
                 "Max words to keep in list", getCCUtil()->params()),
144 "Word for which stopper debug" 145 " information should be printed to stdout",
146 getCCUtil()->params()),
148 "Lengths of unichars in word_to_debug",
149 getCCUtil()->params()),
150 INT_MEMBER(fragments_debug, 0,
"Debug character fragments",
151 getCCUtil()->params()),
153 "Don't use any alphabetic-specific tricks." 154 "Set to true in the traineddata config file for" 155 " scripts that are cursive or inherently fixed-pitch",
156 getCCUtil()->params()),
157 BOOL_MEMBER(save_doc_words, 0,
"Save Document Words",
158 getCCUtil()->params()),
160 "Worst certainty for using pending dictionary",
161 getCCUtil()->params()),
163 "Worst certainty for words that can be inserted into the" 164 "document dictionary",
165 getCCUtil()->params()),
167 "Maximum number of different" 168 " character choices to consider during permutation." 169 " This limit is especially useful when user patterns" 170 " are specified, since overly generic patterns can result in" 171 " dawg search exploring an overly large number of options.",
172 getCCUtil()->params()) {
  dang_ambigs_table_ = NULL;
  replace_ambigs_table_ = NULL;
  reject_offset_ = 0.0;
  last_word_on_line_ = false;
  hyphen_unichar_id_ = INVALID_UNICHAR_ID;
  document_words_ = NULL;
  dawg_cache_is_ours_ = false;
  pending_words_ = NULL;
  unambig_dawg_ = NULL;
  wordseg_rating_adjust_factor_ = -1.0f;
  output_ambig_words_file_ = NULL;
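// ---------------------------------------------------------------------------
// Illustrative sketch (not from dict.cpp): the *_MEMBER macros above pair a
// typed member variable with a default value and description and register it
// in a shared parameter list so it can later be enumerated or set from config
// files. The DoubleParam/ParamList/DOUBLE_MEMBER names below are hypothetical
// stand-ins, not Tesseract's actual params.h API.
// ---------------------------------------------------------------------------
#include <iostream>
#include <string>
#include <vector>

class DoubleParam;
using ParamList = std::vector<DoubleParam*>;

// A parameter that registers itself in a list when constructed, so generic
// code can print or override every parameter by name.
class DoubleParam {
 public:
  DoubleParam(double value, const char* name, const char* comment,
              ParamList* vec)
      : value_(value), name_(name), comment_(comment) {
    vec->push_back(this);
  }
  operator double() const { return value_; }
  const std::string& name() const { return name_; }
  const std::string& comment() const { return comment_; }

 private:
  double value_;
  std::string name_;
  std::string comment_;
};

// Hypothetical member macro in the spirit of double_MEMBER(name, val, comment, vec).
#define DOUBLE_MEMBER(name, val, comment, vec) name(val, #name, comment, vec)

struct DictLike {
  ParamList params;            // must be declared before the parameter members
  DoubleParam certainty_scale;
  DictLike()
      : DOUBLE_MEMBER(certainty_scale, 20.0, "Certainty scaling factor",
                      &params) {}
};

int main() {
  DictLike d;
  for (const DoubleParam* p : d.params)
    std::cout << p->name() << " = " << static_cast<double>(*p) << "  // "
              << p->comment() << "\n";
  return 0;
}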
  if (output_ambig_words_file_ != NULL) fclose(output_ambig_words_file_);
  if (dawg_cache != NULL) {
    dawg_cache_ = dawg_cache;
    dawg_cache_is_ours_ = false;
  } else {
    // ... (a cache owned by this Dict instance is created here)
    dawg_cache_is_ours_ = true;
  }
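// ---------------------------------------------------------------------------
// Illustrative sketch (not from dict.cpp): the dawg_cache_is_ours_ flag above
// implements a simple "borrowed or owned" resource pattern -- use a cache the
// caller supplies, or allocate one and remember to free it later. The
// Cache/CacheUser names below are hypothetical.
// ---------------------------------------------------------------------------
#include <iostream>

struct Cache {
  // Stand-in for a shared dawg cache.
};

class CacheUser {
 public:
  // If `shared` is non-null we borrow it; otherwise we allocate our own.
  explicit CacheUser(Cache* shared) {
    if (shared != nullptr) {
      cache_ = shared;
      cache_is_ours_ = false;
    } else {
      cache_ = new Cache();
      cache_is_ours_ = true;
    }
  }
  // Only destroy the cache if this object created it.
  ~CacheUser() {
    if (cache_is_ours_) delete cache_;
  }

 private:
  Cache* cache_ = nullptr;
  bool cache_is_ours_ = false;
};

int main() {
  Cache shared;
  CacheUser borrows(&shared);  // uses the caller's cache, never deletes it
  CacheUser owns(nullptr);     // allocates and later frees its own cache
  std::cout << "ok\n";
  return 0;
}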
  if (punc_dawg_) dawgs_ += punc_dawg_;
  if (system_dawg) dawgs_ += system_dawg;
  if (number_dawg) dawgs_ += number_dawg;
  if (bigram_dawg_) dawgs_ += bigram_dawg_;
  if (freq_dawg_) dawgs_ += freq_dawg_;
  if (unambig_dawg_) dawgs_ += unambig_dawg_;

  if (((STRING &)user_words_file).length() > 0) {

  dawgs_ += document_words_;

  if (punc_dawg_) dawgs_ += punc_dawg_;
  if (system_dawg) dawgs_ += system_dawg;
  if (number_dawg) dawgs_ += number_dawg;

  if (dawgs_.empty()) return false;
  for (int i = 0; i < dawgs_.length(); ++i) {
    const Dawg *dawg = dawgs_[i];
    for (int j = 0; j < dawgs_.length(); ++j) {
      const Dawg *other = dawgs_[j];
      if (dawg != NULL && other != NULL &&
          kDawgSuccessors[dawg->type()][other->type()]) *lst += j;
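// ---------------------------------------------------------------------------
// Illustrative sketch (not from dict.cpp): the loop above consults a
// type-by-type boolean matrix (kDawgSuccessors) to decide which dawgs may
// legally follow which, and records the indices of the allowed successors.
// The enum values, matrix contents and names below are hypothetical.
// ---------------------------------------------------------------------------
#include <iostream>
#include <vector>

enum DawgKind { KIND_PUNC = 0, KIND_WORD = 1, KIND_NUMBER = 2, KIND_COUNT = 3 };

// kSuccessors[a][b] is true if a dawg of kind b may follow a dawg of kind a.
const bool kSuccessors[KIND_COUNT][KIND_COUNT] = {
    {false, true,  true },   // punctuation -> word or number
    {true,  false, false},   // word -> punctuation only
    {true,  false, false},   // number -> punctuation only
};

// Collects, for dawg i, the indices of dawgs that may follow it.
std::vector<int> SuccessorsOf(int i, const std::vector<DawgKind>& dawgs) {
  std::vector<int> successors;
  for (int j = 0; j < static_cast<int>(dawgs.size()); ++j) {
    if (kSuccessors[dawgs[i]][dawgs[j]]) successors.push_back(j);
  }
  return successors;
}

int main() {
  std::vector<DawgKind> dawgs = {KIND_PUNC, KIND_WORD, KIND_NUMBER};
  for (int i = 0; i < static_cast<int>(dawgs.size()); ++i) {
    std::cout << "dawg " << i << " ->";
    for (int j : SuccessorsOf(i, dawgs)) std::cout << ' ' << j;
    std::cout << '\n';
  }
  return 0;
}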
  for (int i = 0; i < dawgs_.size(); i++) {
    if (!dawg_cache_->FreeDawg(dawgs_[i])) {

  if (dawg_cache_is_ours_) {

  document_words_ = NULL;
  delete pending_words_;
  pending_words_ = NULL;

                              bool word_end) const {

    tprintf("def_letter_is_okay: current unichar=%s word_end=%d"
            " num active dawgs=%d\n",

      unichar_id == INVALID_UNICHAR_ID) {

  if (!dawg && !punc_dawg) {
    tprintf("Received DawgPosition with no dawg or punc_dawg. wth?\n");
  if (punc_transition_edge != NO_EDGE) {
    for (int s = 0; s < slist.length(); ++s) {
      int sdawg_index = slist[s];
      const Dawg *sdawg = dawgs_[sdawg_index];
      if (dawg_edge != NO_EDGE) {
        tprintf("Letter found in dawg %d\n", sdawg_index);
        "Append transition from punc dawg to current dawgs: ");

  if (punc_edge != NO_EDGE) {
    tprintf("Letter found in punctuation dawg\n");
    "Extend punctuation dawg: ");

  EDGE_REF punc_edge = punc_node == NO_EDGE
      ? NO_EDGE
      : punc_dawg->edge_char_of(punc_node, unichar_id, word_end);
  if (punc_edge != NO_EDGE) {
    "Return to punctuation dawg: ");

  EDGE_REF edge = (node == NO_EDGE) ? NO_EDGE
  if (edge != NO_EDGE) {
    tprintf("Punctuation constraint not satisfied at end of word.\n");
    "Append current dawg to updated active dawgs: ");

  tprintf("Returning %d for permuter code for this character.\n",
  unichar_id_patterns.push_back(unichar_id);
                               &unichar_id_patterns);
  for (int i = 0; i < unichar_id_patterns.size(); ++i) {
    for (int k = 0; k < 2; ++k) {
          ? dawg->edge_char_of(node, unichar_id_patterns[i], word_end)
      if (edge == NO_EDGE) continue;
        "Append current dawg to updated active dawgs: ");

                                bool ambigs_mode) const {
    *active_dawgs = hyphen_active_dawgs_;
    for (i = 0; i < hyphen_active_dawgs_.size(); ++i) {
              hyphen_active_dawgs_[i].dawg_index,
              hyphen_active_dawgs_[i].dawg_ref);
                          bool suppress_patterns) const {
  bool punc_dawg_available =
      (punc_dawg_ != NULL) &&
  for (int i = 0; i < dawgs_.length(); i++) {
    if (dawgs_[i] != NULL &&
      int dawg_ty = dawgs_[i]->type();
        *dawg_pos_vec += DawgPosition(-1, NO_EDGE, i, NO_EDGE, false);
      } else if (!punc_dawg_available || !subsumed_by_punc) {
        *dawg_pos_vec += DawgPosition(i, NO_EDGE, -1, NO_EDGE, false);
  if (hyphen_word_) return;
  int stringlen = best_choice.length();

  if (best_choice.length() >= kDocDictMaxRepChars) {
    int num_rep_chars = 1;
    for (int i = 1; i < best_choice.length(); ++i) {
        if (num_rep_chars == kDocDictMaxRepChars) return;
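// ---------------------------------------------------------------------------
// Illustrative sketch (not from dict.cpp): the fragment above rejects a word
// from the document dictionary when it contains a long run of the same
// character. A minimal version of that run-length check, with a hypothetical
// kMaxRepChars threshold, might look like this.
// ---------------------------------------------------------------------------
#include <iostream>
#include <string>

const int kMaxRepChars = 4;  // hypothetical threshold

// Returns true if `word` contains kMaxRepChars (or more) identical
// consecutive characters, e.g. "baaaad" with kMaxRepChars = 4.
bool HasLongRepeat(const std::string& word) {
  if (static_cast<int>(word.size()) < kMaxRepChars) return false;
  int num_rep_chars = 1;
  for (size_t i = 1; i < word.size(); ++i) {
    if (word[i] == word[i - 1]) {
      if (++num_rep_chars == kMaxRepChars) return true;
    } else {
      num_rep_chars = 1;
    }
  }
  return false;
}

int main() {
  std::cout << HasLongRepeat("mississippi") << ' '  // 0
            << HasLongRepeat("baaaad") << '\n';     // 1
  return 0;
}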
  strcpy(filename, getCCUtil()->imagefile.string());
  strcat(filename, ".doc");
  doc_word_file = open_file(filename, "a");
  fprintf(doc_word_file, "%s\n",
  fclose(doc_word_file);
                        float additional_adjust,

  float adjust_factor = additional_adjust;
  float new_rating = word->rating();
  new_rating += kRatingPad;
  const char *xheight_triggered = "";

  switch (xheight_consistency) {
      xheight_triggered = ", xhtBAD";
      xheight_triggered = ", xhtSUB";
      tprintf("Consistency could not be calculated.\n");

    tprintf("%sWord: %s %4.2f%s", nonword ? "Non-" : "",

  if (case_is_ok && punc_is_ok) {
      new_rating *= adjust_factor;
      new_rating *= adjust_factor;
      if (!case_is_ok) tprintf(", C");
      if (!punc_is_ok) tprintf(", P");

  if (!is_han && freq_dawg_ != NULL && freq_dawg_->word_in_dawg(*word)) {
      new_rating *= adjust_factor;
      new_rating *= adjust_factor;
      new_rating *= adjust_factor;

  new_rating -= kRatingPad;
  if (modify_rating) word->set_rating(new_rating);
  if (debug) tprintf(" %4.2f --> %4.2f\n", adjust_factor, new_rating);
    word_ptr = &temp_word;

  int last_index = word_ptr->length() - 1;
        i == last_index))) break;

  delete[] active_dawgs;

  if (bigram_dawg_ == NULL) return false;

  int w1start, w1end, w2start, w2end;
  if (w1start >= w1end) return word1.length() < 3;
  if (w2start >= w2end) return word2.length() < 3;
  bigram_string.reserve(w1end + w2end + 1);
  for (int i = w1start; i < w1end; i++) {
      bigram_string.push_back(question_unichar_id_);
      bigram_string += normed_ids;
  for (int i = w2start; i < w2end; i++) {
      bigram_string.push_back(question_unichar_id_);
      bigram_string += normed_ids;
  for (int i = 0; i < bigram_string.size(); ++i) {
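// ---------------------------------------------------------------------------
// Illustrative sketch (not from dict.cpp): valid_bigram() above concatenates
// the normalized character ids of two words, substituting a single "question
// mark" id for digits, so that e.g. "page 3" and "page 7" collapse to the
// same key before the bigram-dawg lookup. The id values and helper names
// below are hypothetical.
// ---------------------------------------------------------------------------
#include <cctype>
#include <iostream>
#include <string>
#include <vector>

const int kQuestionId = -1;  // hypothetical wildcard id for any digit
const int kSpaceId = -2;     // hypothetical id separating the two words

// Maps a character to a normalized id: digits collapse to kQuestionId.
int NormalizedId(char c) {
  return std::isdigit(static_cast<unsigned char>(c)) ? kQuestionId
                                                     : static_cast<int>(c);
}

// Builds the lookup key for a word pair.
std::vector<int> BigramKey(const std::string& w1, const std::string& w2) {
  std::vector<int> key;
  key.reserve(w1.size() + w2.size() + 1);
  for (char c : w1) key.push_back(NormalizedId(c));
  key.push_back(kSpaceId);
  for (char c : w2) key.push_back(NormalizedId(c));
  return key;
}

int main() {
  // Both pairs produce identical keys because digits are wildcarded.
  std::cout << (BigramKey("page", "3") == BigramKey("page", "7")) << '\n';  // 1
  return 0;
}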
  int last_index = word.length() - 1;
  for (i = 0; i <= last_index; ++i) {
      new_word.append_unichar_id(unichar_id, 1, 0.0, 0.0);
    } else if ((new_len = new_word.length()) == 0 ||

  for (i = 0; i < dawgs_.size(); ++i) {
    if (dawgs_[i] != NULL &&
        dawgs_[i]->word_in_dawg(new_word)) return true;

  if (u_set.han_sid() > 0) return false;
  if (u_set.thai_sid() > 0) return false;