Tesseract 3.02
fixspace.cpp
1 /******************************************************************
2  * File: fixspace.cpp (Formerly fixspace.c)
3  * Description: Implements a pass over the page result, exploring the
4  * alternative spacing possibilities and using context to
5  * improve the word spacing.
6 * Author: Phil Cheatle
7 * Created: Thu Oct 21 11:38:43 BST 1993
8 *
9 * (C) Copyright 1993, Hewlett-Packard Ltd.
10 ** Licensed under the Apache License, Version 2.0 (the "License");
11 ** you may not use this file except in compliance with the License.
12 ** You may obtain a copy of the License at
13 ** http://www.apache.org/licenses/LICENSE-2.0
14 ** Unless required by applicable law or agreed to in writing, software
15 ** distributed under the License is distributed on an "AS IS" BASIS,
16 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 ** See the License for the specific language governing permissions and
18 ** limitations under the License.
19 *
20 **********************************************************************/
21 
22 #include "mfcpch.h"
23 #include <ctype.h>
24 #include "reject.h"
25 #include "statistc.h"
26 #include "control.h"
27 #include "fixspace.h"
28 #include "genblob.h"
29 #include "tessvars.h"
30 #include "tessbox.h"
31 #include "secname.h"
32 #include "globals.h"
33 #include "tesseractclass.h"
34 
35 #define PERFECT_WERDS 999
36 #define MAXSPACING 128 /*max expected spacing in pix */
37 
38 namespace tesseract {
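/**
 * fix_fuzzy_spaces()
 * Walk the page results looking for runs of words separated by fuzzy
 * (W_FUZZY_NON / W_FUZZY_SP) spaces. Each run is extracted as a sublist,
 * re-evaluated with alternative spacings by fix_fuzzy_space_list(), and the
 * winning arrangement is spliced back into the row. Runs containing blobless
 * words are left alone, and progress/cancellation are reported through the
 * ETEXT_DESC monitor.
 */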
49 void Tesseract::fix_fuzzy_spaces(ETEXT_DESC *monitor,
50  inT32 word_count,
51  PAGE_RES *page_res) {
52  BLOCK_RES_IT block_res_it;
53  ROW_RES_IT row_res_it;
54  WERD_RES_IT word_res_it_from;
55  WERD_RES_IT word_res_it_to;
56  WERD_RES *word_res;
57  WERD_RES_LIST fuzzy_space_words;
58  inT16 new_length;
59  BOOL8 prevent_null_wd_fixsp; // Don't process blobless words
60  inT32 word_index; // current word
61 
62  block_res_it.set_to_list(&page_res->block_res_list);
63  word_index = 0;
64  for (block_res_it.mark_cycle_pt(); !block_res_it.cycled_list();
65  block_res_it.forward()) {
66  row_res_it.set_to_list(&block_res_it.data()->row_res_list);
67  for (row_res_it.mark_cycle_pt(); !row_res_it.cycled_list();
68  row_res_it.forward()) {
69  word_res_it_from.set_to_list(&row_res_it.data()->word_res_list);
70  while (!word_res_it_from.at_last()) {
71  word_res = word_res_it_from.data();
72  while (!word_res_it_from.at_last() &&
73  !(word_res->combination ||
74  word_res_it_from.data_relative(1)->word->flag(W_FUZZY_NON) ||
75  word_res_it_from.data_relative(1)->word->flag(W_FUZZY_SP))) {
76  fix_sp_fp_word(word_res_it_from, row_res_it.data()->row,
77  block_res_it.data()->block);
78  word_res = word_res_it_from.forward();
79  word_index++;
80  if (monitor != NULL) {
81  monitor->ocr_alive = TRUE;
82  monitor->progress = 90 + 5 * word_index / word_count;
83  if (monitor->deadline_exceeded() ||
84  (monitor->cancel != NULL &&
85  (*monitor->cancel)(monitor->cancel_this, stats_.dict_words)))
86  return;
87  }
88  }
89 
90  if (!word_res_it_from.at_last()) {
91  word_res_it_to = word_res_it_from;
92  prevent_null_wd_fixsp =
93  word_res->word->cblob_list()->empty();
94  if (check_debug_pt(word_res, 60))
95  debug_fix_space_level.set_value(10);
96  word_res_it_to.forward();
97  word_index++;
98  if (monitor != NULL) {
99  monitor->ocr_alive = TRUE;
100  monitor->progress = 90 + 5 * word_index / word_count;
101  if (monitor->deadline_exceeded() ||
102  (monitor->cancel != NULL &&
103  (*monitor->cancel)(monitor->cancel_this, stats_.dict_words)))
104  return;
105  }
106  while (!word_res_it_to.at_last () &&
107  (word_res_it_to.data_relative(1)->word->flag(W_FUZZY_NON) ||
108  word_res_it_to.data_relative(1)->word->flag(W_FUZZY_SP))) {
109  if (check_debug_pt(word_res, 60))
110  debug_fix_space_level.set_value(10);
111  if (word_res->word->cblob_list()->empty())
112  prevent_null_wd_fixsp = TRUE;
113  word_res = word_res_it_to.forward();
114  }
115  if (check_debug_pt(word_res, 60))
116  debug_fix_space_level.set_value(10);
117  if (word_res->word->cblob_list()->empty())
118  prevent_null_wd_fixsp = TRUE;
119  if (prevent_null_wd_fixsp) {
120  word_res_it_from = word_res_it_to;
121  } else {
122  fuzzy_space_words.assign_to_sublist(&word_res_it_from,
123  &word_res_it_to);
124  fix_fuzzy_space_list(fuzzy_space_words,
125  row_res_it.data()->row,
126  block_res_it.data()->block);
127  new_length = fuzzy_space_words.length();
128  word_res_it_from.add_list_before(&fuzzy_space_words);
129  for (;
130  !word_res_it_from.at_last() && new_length > 0;
131  new_length--) {
132  word_res_it_from.forward();
133  }
134  }
135  if (test_pt)
136  debug_fix_space_level.set_value(0);
137  }
138  fix_sp_fp_word(word_res_it_from, row_res_it.data()->row,
139  block_res_it.data()->block);
140  // Last word in row
141  }
142  }
143  }
144 }
145 
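/**
 * fix_fuzzy_space_list()
 * Treat the given word list as the current best permutation, then repeatedly
 * merge words across the smallest remaining gap (transform_to_next_perm),
 * re-classify the result with match_current_words() and re-score it with
 * eval_word_spacing(), keeping the highest scoring arrangement in best_perm.
 * Stops on a perfect score or when no further permutations are possible.
 */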
146 void Tesseract::fix_fuzzy_space_list(WERD_RES_LIST &best_perm,
147  ROW *row,
148  BLOCK* block) {
149  inT16 best_score;
150  WERD_RES_LIST current_perm;
151  inT16 current_score;
152  BOOL8 improved = FALSE;
153 
154  best_score = eval_word_spacing(best_perm); // default score
155  dump_words(best_perm, best_score, 1, improved);
156 
157  if (best_score != PERFECT_WERDS)
158  initialise_search(best_perm, current_perm);
159 
160  while ((best_score != PERFECT_WERDS) && !current_perm.empty()) {
161  match_current_words(current_perm, row, block);
162  current_score = eval_word_spacing(current_perm);
163  dump_words(current_perm, current_score, 2, improved);
164  if (current_score > best_score) {
165  best_perm.clear();
166  best_perm.deep_copy(&current_perm, &WERD_RES::deep_copy);
167  best_score = current_score;
168  improved = TRUE;
169  }
170  if (current_score < PERFECT_WERDS)
171  transform_to_next_perm(current_perm);
172  }
173  dump_words(best_perm, best_score, 3, improved);
174 }
175 
176 } // namespace tesseract
177 
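/**
 * initialise_search()
 * Copy the non-combination words of src_list into new_list as the starting
 * permutation for the fuzzy space search, clearing the combination and
 * part_of_combo flags on the copies.
 */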
178 void initialise_search(WERD_RES_LIST &src_list, WERD_RES_LIST &new_list) {
179  WERD_RES_IT src_it(&src_list);
180  WERD_RES_IT new_it(&new_list);
181  WERD_RES *src_wd;
182  WERD_RES *new_wd;
183 
184  for (src_it.mark_cycle_pt(); !src_it.cycled_list(); src_it.forward()) {
185  src_wd = src_it.data();
186  if (!src_wd->combination) {
187  new_wd = new WERD_RES(*src_wd);
188  new_wd->combination = FALSE;
189  new_wd->part_of_combo = FALSE;
190  new_it.add_after_then_move(new_wd);
191  }
192  }
193 }
194 
195 
196 namespace tesseract {
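/**
 * match_current_words()
 * Run the pass-2 word classifier over every word in the list that is not part
 * of a combination and has not already been recognized (box_word == NULL).
 */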
197 void Tesseract::match_current_words(WERD_RES_LIST &words, ROW *row,
198  BLOCK* block) {
199  WERD_RES_IT word_it(&words);
200  WERD_RES *word;
201  // Since we are not using PAGE_RES to iterate over words, we need to update
202  // prev_word_best_choice_ before calling classify_word_pass2().
203  prev_word_best_choice_ = NULL;
204  for (word_it.mark_cycle_pt(); !word_it.cycled_list(); word_it.forward()) {
205  word = word_it.data();
206  if ((!word->part_of_combo) && (word->box_word == NULL)) {
207  classify_word_and_language(&Tesseract::classify_word_pass2,
208  block, row, word);
209  }
210  prev_word_best_choice_ = word->best_choice;
211  }
212 }
213 
214 
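/**
 * eval_word_spacing()
 * Score a spacing permutation. The basic measure is the number of characters
 * in contextually confirmed ("done") words; if every word is done the score
 * is PERFECT_WERDS. Words whose boundary pairs a digit with a "1/I/l" across
 * the space are not counted, because "1"s are a very frequent cause of fuzzy
 * spaces. Bonus points reward "1"s and punctuation characters that stay
 * joined to a neighbouring character within a word.
 */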
240 inT16 Tesseract::eval_word_spacing(WERD_RES_LIST &word_res_list) {
241  WERD_RES_IT word_res_it(&word_res_list);
242  inT16 total_score = 0;
243  inT16 word_count = 0;
244  inT16 done_word_count = 0;
245  inT16 word_len;
246  inT16 i;
247  inT16 offset;
248  WERD_RES *word; // current word
249  inT16 prev_word_score = 0;
250  BOOL8 prev_word_done = FALSE;
251  BOOL8 prev_char_1 = FALSE; // prev ch a "1/I/l"?
252  BOOL8 prev_char_digit = FALSE; // prev ch 2..9 or 0
253  BOOL8 current_char_1 = FALSE;
254  BOOL8 current_word_ok_so_far;
255  STRING punct_chars = "!\"`',.:;";
256  BOOL8 prev_char_punct = FALSE;
257  BOOL8 current_char_punct = FALSE;
258  BOOL8 word_done = FALSE;
259 
260  do {
261  word = word_res_it.data();
262  word_done = fixspace_thinks_word_done(word);
263  word_count++;
264  if (word->tess_failed) {
265  total_score += prev_word_score;
266  if (prev_word_done)
267  done_word_count++;
268  prev_word_score = 0;
269  prev_char_1 = FALSE;
270  prev_char_digit = FALSE;
271  prev_word_done = FALSE;
272  } else {
273  /*
274  Can we add the prev word score and potentially count this word?
275  Yes IF it didn't end in a 1 when the first char of this word is a digit
276  AND it didn't end in a digit when the first char of this word is a 1
277  */
278  word_len = word->reject_map.length();
279  current_word_ok_so_far = FALSE;
280  if (!((prev_char_1 && digit_or_numeric_punct(word, 0)) ||
281  (prev_char_digit && (
282  (word_done &&
283  word->best_choice->unichar_lengths().string()[0] == 1 &&
284  word->best_choice->unichar_string()[0] == '1') ||
285  (!word_done && STRING(conflict_set_I_l_1).contains(
286  word->best_choice->unichar_string()[0])))))) {
287  total_score += prev_word_score;
288  if (prev_word_done)
289  done_word_count++;
290  current_word_ok_so_far = word_done;
291  }
292 
293  if (current_word_ok_so_far) {
294  prev_word_done = TRUE;
295  prev_word_score = word_len;
296  } else {
297  prev_word_done = FALSE;
298  prev_word_score = 0;
299  }
300 
301  /* Add 1 to total score for every joined 1 regardless of context and
302  rejection */
303  for (i = 0, prev_char_1 = FALSE; i < word_len; i++) {
304  current_char_1 = word->best_choice->unichar_string()[i] == '1';
305  if (prev_char_1 || (current_char_1 && (i > 0)))
306  total_score++;
307  prev_char_1 = current_char_1;
308  }
309 
310  /* Add 1 to total score for every joined punctuation regardless of context
311  and rejection */
312  if (tessedit_prefer_joined_punct) {
313  for (i = 0, offset = 0, prev_char_punct = FALSE; i < word_len;
314  offset += word->best_choice->unichar_lengths()[i++]) {
315  current_char_punct =
316  punct_chars.contains(word->best_choice->unichar_string()[offset]);
317  if (prev_char_punct || (current_char_punct && i > 0))
318  total_score++;
319  prev_char_punct = current_char_punct;
320  }
321  }
322  prev_char_digit = digit_or_numeric_punct(word, word_len - 1);
323  for (i = 0, offset = 0; i < word_len - 1;
324  offset += word->best_choice->unichar_lengths()[i++]);
325  prev_char_1 =
326  ((word_done && (word->best_choice->unichar_string()[offset] == '1'))
327  || (!word_done && STRING(conflict_set_I_l_1).contains(
328  word->best_choice->unichar_string()[offset])));
329  }
330  /* Find next word */
331  do {
332  word_res_it.forward();
333  } while (word_res_it.data()->part_of_combo);
334  } while (!word_res_it.at_first());
335  total_score += prev_word_score;
336  if (prev_word_done)
337  done_word_count++;
338  if (done_word_count == word_count)
339  return PERFECT_WERDS;
340  else
341  return total_score;
342 }
343 
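/**
 * digit_or_numeric_punct()
 * Return TRUE if the unichar at char_position in the word's best choice is a
 * digit, or is numeric punctuation while the word was recognized as a number
 * (NUMBER_PERM).
 */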
344 BOOL8 Tesseract::digit_or_numeric_punct(WERD_RES *word, int char_position) {
345  int i;
346  int offset;
347 
348  for (i = 0, offset = 0; i < char_position;
349  offset += word->best_choice->unichar_lengths()[i++]);
350  return (
351  word->uch_set->get_isdigit(
352  word->best_choice->unichar_string().string() + offset,
353  word->best_choice->unichar_lengths()[i]) ||
354  (word->best_choice->permuter() == NUMBER_PERM &&
355  STRING(numeric_punctuation).contains(
356  word->best_choice->unichar_string().string()[offset])));
357 }
358 
359 } // namespace tesseract
360 
361 
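/**
 * transform_to_next_perm()
 * Generate the next permutation by joining the words on either side of the
 * smallest remaining gap(s) into a single combination WERD_RES. When no gap
 * is left to close, the list is cleared to signal that the search is
 * finished.
 */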
373 void transform_to_next_perm(WERD_RES_LIST &words) {
374  WERD_RES_IT word_it(&words);
375  WERD_RES_IT prev_word_it(&words);
376  WERD_RES *word;
377  WERD_RES *prev_word;
378  WERD_RES *combo;
379  WERD *copy_word;
380  inT16 prev_right = -MAX_INT16;
381  TBOX box;
382  inT16 gap;
383  inT16 min_gap = MAX_INT16;
384 
385  for (word_it.mark_cycle_pt(); !word_it.cycled_list(); word_it.forward()) {
386  word = word_it.data();
387  if (!word->part_of_combo) {
388  box = word->word->bounding_box();
389  if (prev_right > -MAX_INT16) {
390  gap = box.left() - prev_right;
391  if (gap < min_gap)
392  min_gap = gap;
393  }
394  prev_right = box.right();
395  }
396  }
397  if (min_gap < MAX_INT16) {
398  prev_right = -MAX_INT16; // back to start
399  word_it.set_to_list(&words);
400  // Note: we can't use cycle_pt due to inserted combos at start of list.
401  for (; (prev_right == -MAX_INT16) || !word_it.at_first();
402  word_it.forward()) {
403  word = word_it.data();
404  if (!word->part_of_combo) {
405  box = word->word->bounding_box();
406  if (prev_right > -MAX_INT16) {
407  gap = box.left() - prev_right;
408  if (gap <= min_gap) {
409  prev_word = prev_word_it.data();
410  if (prev_word->combination) {
411  combo = prev_word;
412  } else {
413  /* Make a new combination and insert before
414  * the first word being joined. */
415  copy_word = new WERD;
416  *copy_word = *(prev_word->word);
417  // deep copy
418  combo = new WERD_RES(copy_word);
419  combo->combination = TRUE;
420  combo->x_height = prev_word->x_height;
421  prev_word->part_of_combo = TRUE;
422  prev_word_it.add_before_then_move(combo);
423  }
424  combo->word->set_flag(W_EOL, word->word->flag(W_EOL));
425  if (word->combination) {
426  combo->word->join_on(word->word);
427  // Move blobs to combo
428  // old combo no longer needed
429  delete word_it.extract();
430  } else {
431  // Copy current wd to combo
432  combo->copy_on(word);
433  word->part_of_combo = TRUE;
434  }
435  combo->done = FALSE;
436  combo->ClearResults();
437  } else {
438  prev_word_it = word_it; // catch up
439  }
440  }
441  prev_right = box.right();
442  }
443  }
444  } else {
445  words.clear(); // signal termination
446  }
447 }
448 
449 namespace tesseract {
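/**
 * dump_words()
 * Debug output for fix space, gated by debug_fix_space_level: mode 1 records
 * and prints the extracted word string, mode 2 prints each tested
 * permutation, mode 3 prints the returned result.
 */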
450 void Tesseract::dump_words(WERD_RES_LIST &perm, inT16 score,
451  inT16 mode, BOOL8 improved) {
452  WERD_RES_IT word_res_it(&perm);
453 
454  if (debug_fix_space_level > 0) {
455  if (mode == 1) {
456  stats_.dump_words_str = "";
457  for (word_res_it.mark_cycle_pt(); !word_res_it.cycled_list();
458  word_res_it.forward()) {
459  if (!word_res_it.data()->part_of_combo) {
460  stats_.dump_words_str +=
461  word_res_it.data()->best_choice->unichar_string();
462  stats_.dump_words_str += ' ';
463  }
464  }
465  }
466 
467  #ifndef SECURE_NAMES
468  if (debug_fix_space_level > 1) {
469  switch (mode) {
470  case 1:
471  tprintf("EXTRACTED (%d): \"", score);
472  break;
473  case 2:
474  tprintf("TESTED (%d): \"", score);
475  break;
476  case 3:
477  tprintf("RETURNED (%d): \"", score);
478  break;
479  }
480 
481  for (word_res_it.mark_cycle_pt(); !word_res_it.cycled_list();
482  word_res_it.forward()) {
483  if (!word_res_it.data()->part_of_combo) {
484  tprintf("%s/%1d ",
485  word_res_it.data()->best_choice->unichar_string().string(),
486  (int)word_res_it.data()->best_choice->permuter());
487  }
488  }
489  tprintf("\"\n");
490  } else if (improved) {
491  tprintf("FIX SPACING \"%s\" => \"", stats_.dump_words_str.string());
492  for (word_res_it.mark_cycle_pt(); !word_res_it.cycled_list();
493  word_res_it.forward()) {
494  if (!word_res_it.data()->part_of_combo) {
495  tprintf("%s/%1d ",
496  word_res_it.data()->best_choice->unichar_string().string(),
497  (int)word_res_it.data()->best_choice->permuter());
498  }
499  }
500  tprintf("\"\n");
501  }
502  #endif
503  }
504 }
505 
506 
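/**
 * Decide whether the blobs of a (baseline-normalised) word are uniformly
 * spaced, i.e. whether the largest inter-blob gap is small enough, relative
 * to the row's space/kern estimate and to the mean/median of the other gaps,
 * that it should not be treated as a real space. Gaps next to punctuation
 * characters are ignored.
 */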
515 BOOL8 Tesseract::uniformly_spaced(WERD_RES *word) {
516  TBOX box;
517  inT16 prev_right = -MAX_INT16;
518  inT16 gap;
519  inT16 max_gap = -MAX_INT16;
520  inT16 max_gap_count = 0;
521  STATS gap_stats(0, MAXSPACING);
522  BOOL8 result;
523  const ROW *row = word->denorm.row();
524  float max_non_space;
525  float normalised_max_nonspace;
526  inT16 i = 0;
527  inT16 offset = 0;
528  STRING punct_chars = "\"`',.:;";
529 
530  for (TBLOB* blob = word->rebuild_word->blobs; blob != NULL;
531  blob = blob->next) {
532  box = blob->bounding_box();
533  if ((prev_right > -MAX_INT16) &&
534  (!punct_chars.contains(
535  word->best_choice->unichar_string()
536  [offset - word->best_choice->unichar_lengths()[i - 1]]) &&
537  !punct_chars.contains(
538  word->best_choice->unichar_string()[offset]))) {
539  gap = box.left() - prev_right;
540  if (gap < max_gap) {
541  gap_stats.add(gap, 1);
542  } else if (gap == max_gap) {
543  max_gap_count++;
544  } else {
545  if (max_gap_count > 0)
546  gap_stats.add(max_gap, max_gap_count);
547  max_gap = gap;
548  max_gap_count = 1;
549  }
550  }
551  prev_right = box.right();
552  offset += word->best_choice->unichar_lengths()[i++];
553  }
554 
555  max_non_space = (row->space() + 3 * row->kern()) / 4;
556  normalised_max_nonspace = max_non_space * kBlnXHeight / row->x_height();
557 
558  result = (
559  gap_stats.get_total() == 0 ||
560  max_gap <= normalised_max_nonspace ||
561  (gap_stats.get_total() > 2 && max_gap <= 2 * gap_stats.median()) ||
562  (gap_stats.get_total() <= 2 && max_gap <= 2 * gap_stats.mean()));
563  #ifndef SECURE_NAMES
564  if ((debug_fix_space_level > 1)) {
565  if (result) {
566  tprintf(
567  "ACCEPT SPACING FOR: \"%s\" norm_maxnon = %f max=%d maxcount=%d "
568  "total=%d mean=%f median=%f\n",
569  word->best_choice->unichar_string().string(), normalised_max_nonspace,
570  max_gap, max_gap_count, gap_stats.get_total(), gap_stats.mean(),
571  gap_stats.median());
572  } else {
573  tprintf(
574  "REJECT SPACING FOR: \"%s\" norm_maxnon = %f max=%d maxcount=%d "
575  "total=%d mean=%f median=%f\n",
576  word->best_choice->unichar_string().string(), normalised_max_nonspace,
577  max_gap, max_gap_count, gap_stats.get_total(), gap_stats.mean(),
578  gap_stats.median());
579  }
580  }
581  #endif
582 
583  return result;
584 }
585 
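/**
 * fixspace_thinks_word_done()
 * Return TRUE if the word is already good enough for spacing purposes: either
 * its done flag is set, or (under fixsp_done_mode) it was accepted by Tess or
 * has no rejects, its best choice came from a dictionary or number permuter,
 * and it contains no embedded space.
 */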
586 BOOL8 Tesseract::fixspace_thinks_word_done(WERD_RES *word) {
587  if (word->done)
588  return TRUE;
589 
590  /*
591  Use all the standard pass 2 conditions for mode 5 in set_done() in
592  reject.c BUT DON'T REJECT IF THE WERD IS AMBIGUOUS - FOR SPACING WE DON'T
593  CARE WHETHER WE HAVE of/at on/an etc.
594  */
595  if (fixsp_done_mode > 0 &&
596  (word->tess_accepted ||
597  (fixsp_done_mode == 2 && word->reject_map.reject_count() == 0) ||
598  fixsp_done_mode == 3) &&
599  (strchr(word->best_choice->unichar_string().string(), ' ') == NULL) &&
600  ((word->best_choice->permuter() == SYSTEM_DAWG_PERM) ||
601  (word->best_choice->permuter() == FREQ_DAWG_PERM) ||
602  (word->best_choice->permuter() == USER_DAWG_PERM) ||
603  (word->best_choice->permuter() == NUMBER_PERM))) {
604  return TRUE;
605  } else {
606  return FALSE;
607  }
608 }
609 
610 
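/**
 * fix_sp_fp_word()
 * For a fixed-pitch (W_DONT_CHOP) word that is not part of a combination,
 * look for a noise blob worth removing; if one exists, hand the word to
 * fix_noisy_space_list() and splice the resulting word(s) back into the row
 * in its place.
 */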
618 void Tesseract::fix_sp_fp_word(WERD_RES_IT &word_res_it, ROW *row,
619  BLOCK* block) {
620  WERD_RES *word_res;
621  WERD_RES_LIST sub_word_list;
622  WERD_RES_IT sub_word_list_it(&sub_word_list);
623  inT16 blob_index;
624  inT16 new_length;
625  float junk;
626 
627  word_res = word_res_it.data();
628  if (word_res->word->flag(W_REP_CHAR) ||
629  word_res->combination ||
630  word_res->part_of_combo ||
631  !word_res->word->flag(W_DONT_CHOP))
632  return;
633 
634  blob_index = worst_noise_blob(word_res, &junk);
635  if (blob_index < 0)
636  return;
637 
638  if (debug_fix_space_level > 1) {
639  tprintf("FP fixspace working on \"%s\"\n",
640  word_res->best_choice->unichar_string().string());
641  }
642  word_res->word->rej_cblob_list()->sort(c_blob_comparator);
643  sub_word_list_it.add_after_stay_put(word_res_it.extract());
644  fix_noisy_space_list(sub_word_list, row, block);
645  new_length = sub_word_list.length();
646  word_res_it.add_list_before(&sub_word_list);
647  for (; !word_res_it.at_last() && new_length > 1; new_length--) {
648  word_res_it.forward();
649  }
650 }
651 
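/**
 * fix_noisy_space_list()
 * Like fix_fuzzy_space_list(), but the alternative permutations are produced
 * by breaking the word at its noisiest blob (break_noisiest_blob_word) and
 * scored with fp_eval_word_spacing(). Keeps the best scoring arrangement in
 * best_perm.
 */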
652 void Tesseract::fix_noisy_space_list(WERD_RES_LIST &best_perm, ROW *row,
653  BLOCK* block) {
654  inT16 best_score;
655  WERD_RES_IT best_perm_it(&best_perm);
656  WERD_RES_LIST current_perm;
657  WERD_RES_IT current_perm_it(&current_perm);
658  WERD_RES *old_word_res;
659  WERD_RES *new_word_res;
660  inT16 current_score;
661  BOOL8 improved = FALSE;
662 
663  best_score = fp_eval_word_spacing(best_perm); // default score
664 
665  dump_words(best_perm, best_score, 1, improved);
666 
667  new_word_res = new WERD_RES;
668  old_word_res = best_perm_it.data();
669  old_word_res->combination = TRUE; // Kludge to force deep copy
670  *new_word_res = *old_word_res; // deep copy
671  old_word_res->combination = FALSE; // Undo kludge
672  current_perm_it.add_to_end(new_word_res);
673 
674  break_noisiest_blob_word(current_perm);
675 
676  while (best_score != PERFECT_WERDS && !current_perm.empty()) {
677  match_current_words(current_perm, row, block);
678  current_score = fp_eval_word_spacing(current_perm);
679  dump_words(current_perm, current_score, 2, improved);
680  if (current_score > best_score) {
681  best_perm.clear();
682  best_perm.deep_copy(&current_perm, &WERD_RES::deep_copy);
683  best_score = current_score;
684  improved = TRUE;
685  }
686  if (current_score < PERFECT_WERDS) {
687  break_noisiest_blob_word(current_perm);
688  }
689  }
690  dump_words(best_perm, best_score, 3, improved);
691 }
692 
693 
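/**
 * break_noisiest_blob_word()
 * Find the word in the list containing the lowest scoring (noisiest) blob,
 * delete that blob and split the word in two at that point. If no word has a
 * suitable noise blob, the list is cleared to signal termination.
 */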
699 void Tesseract::break_noisiest_blob_word(WERD_RES_LIST &words) {
700  WERD_RES_IT word_it(&words);
701  WERD_RES_IT worst_word_it;
702  float worst_noise_score = 9999;
703  int worst_blob_index = -1; // Noisiest blob of noisiest wd
704  int blob_index; // of wds noisiest blob
705  float noise_score; // of wds noisiest blob
706  WERD_RES *word_res;
707  C_BLOB_IT blob_it;
708  C_BLOB_IT rej_cblob_it;
709  C_BLOB_LIST new_blob_list;
710  C_BLOB_IT new_blob_it;
711  C_BLOB_IT new_rej_cblob_it;
712  WERD *new_word;
713  inT16 start_of_noise_blob;
714  inT16 i;
715 
716  for (word_it.mark_cycle_pt(); !word_it.cycled_list(); word_it.forward()) {
717  blob_index = worst_noise_blob(word_it.data(), &noise_score);
718  if (blob_index > -1 && worst_noise_score > noise_score) {
719  worst_noise_score = noise_score;
720  worst_blob_index = blob_index;
721  worst_word_it = word_it;
722  }
723  }
724  if (worst_blob_index < 0) {
725  words.clear(); // signal termination
726  return;
727  }
728 
729  /* Now split the worst_word_it */
730 
731  word_res = worst_word_it.data();
732 
733  /* Move blobs before noise blob to a new bloblist */
734 
735  new_blob_it.set_to_list(&new_blob_list);
736  blob_it.set_to_list(word_res->word->cblob_list());
737  for (i = 0; i < worst_blob_index; i++, blob_it.forward()) {
738  new_blob_it.add_after_then_move(blob_it.extract());
739  }
740  start_of_noise_blob = blob_it.data()->bounding_box().left();
741  delete blob_it.extract(); // throw out noise blob
742 
743  new_word = new WERD(&new_blob_list, word_res->word);
744  new_word->set_flag(W_EOL, FALSE);
745  word_res->word->set_flag(W_BOL, FALSE);
746  word_res->word->set_blanks(1); // After break
747 
748  new_rej_cblob_it.set_to_list(new_word->rej_cblob_list());
749  rej_cblob_it.set_to_list(word_res->word->rej_cblob_list());
750  for (;
751  (!rej_cblob_it.empty() &&
752  (rej_cblob_it.data()->bounding_box().left() < start_of_noise_blob));
753  rej_cblob_it.forward()) {
754  new_rej_cblob_it.add_after_then_move(rej_cblob_it.extract());
755  }
756 
757  WERD_RES* new_word_res = new WERD_RES(new_word);
758  new_word_res->combination = TRUE;
759  worst_word_it.add_before_then_move(new_word_res);
760 
761  word_res->ClearResults();
762 }
763 
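/**
 * worst_noise_blob()
 * Return the index of the noisiest blob in the word that is sufficiently far
 * from both ends (at least fixsp_non_noise_limit non-noise blobs on each
 * side), passing its noise score back through worst_noise_score. Returns -1
 * if the word is too short, has no rebuild_word, or no suitable blob exists.
 */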
764 inT16 Tesseract::worst_noise_blob(WERD_RES *word_res,
765  float *worst_noise_score) {
766  float noise_score[512];
767  int i;
768  int min_noise_blob; // 1st contender
769  int max_noise_blob; // last contender
770  int non_noise_count;
771  int worst_noise_blob; // Worst blob
772  float small_limit = kBlnXHeight * fixsp_small_outlines_size;
773  float non_noise_limit = kBlnXHeight * 0.8;
774 
775  if (word_res->rebuild_word == NULL)
776  return -1; // Can't handle cube words.
777 
778  TBLOB* blob = word_res->rebuild_word->blobs;
779  // Normalised.
780  int blob_count = word_res->box_word->length();
781  ASSERT_HOST(blob_count <= 512);
782  if (blob_count < 5)
783  return -1; // too short to split
784 
785  /* Get the noise scores for all blobs */
786 
787  #ifndef SECURE_NAMES
788  if (debug_fix_space_level > 5)
789  tprintf("FP fixspace Noise metrics for \"%s\": ",
790  word_res->best_choice->unichar_string().string());
791  #endif
792 
793  for (i = 0; i < blob_count && blob != NULL; i++, blob = blob->next) {
794  if (word_res->reject_map[i].accepted())
795  noise_score[i] = non_noise_limit;
796  else
797  noise_score[i] = blob_noise_score(blob);
798 
799  if (debug_fix_space_level > 5)
800  tprintf("%1.1f ", noise_score[i]);
801  }
802  if (debug_fix_space_level > 5)
803  tprintf("\n");
804 
805  /* Now find the worst one which is far enough away from the end of the word */
806 
807  non_noise_count = 0;
808  for (i = 0; i < blob_count && non_noise_count < fixsp_non_noise_limit; i++) {
809  if (noise_score[i] >= non_noise_limit) {
810  non_noise_count++;
811  }
812  }
813  if (non_noise_count < fixsp_non_noise_limit)
814  return -1;
815 
816  min_noise_blob = i;
817 
818  non_noise_count = 0;
819  for (i = blob_count - 1; i >= 0 && non_noise_count < fixsp_non_noise_limit;
820  i--) {
821  if (noise_score[i] >= non_noise_limit) {
822  non_noise_count++;
823  }
824  }
825  if (non_noise_count < fixsp_non_noise_limit)
826  return -1;
827 
828  max_noise_blob = i;
829 
830  if (min_noise_blob > max_noise_blob)
831  return -1;
832 
833  *worst_noise_score = small_limit;
834  worst_noise_blob = -1;
835  for (i = min_noise_blob; i <= max_noise_blob; i++) {
836  if (noise_score[i] < *worst_noise_score) {
837  worst_noise_blob = i;
838  *worst_noise_score = noise_score[i];
839  }
840  }
841  return worst_noise_blob;
842 }
843 
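/**
 * blob_noise_score()
 * Noise metric for a blob: the largest dimension of any of its outlines,
 * doubled if the blob has more than five outlines and halved if the blob sits
 * well above or below the baseline band. Small values therefore indicate
 * likely noise.
 */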
844 float Tesseract::blob_noise_score(TBLOB *blob) {
845  TBOX box; // BB of outline
846  inT16 outline_count = 0;
847  inT16 max_dimension;
848  inT16 largest_outline_dimension = 0;
849 
850  for (TESSLINE* ol = blob->outlines; ol != NULL; ol= ol->next) {
851  outline_count++;
852  box = ol->bounding_box();
853  if (box.height() > box.width()) {
854  max_dimension = box.height();
855  } else {
856  max_dimension = box.width();
857  }
858 
859  if (largest_outline_dimension < max_dimension)
860  largest_outline_dimension = max_dimension;
861  }
862 
863  if (outline_count > 5) {
864  // penalise LOTS of blobs
865  largest_outline_dimension *= 2;
866  }
867 
868  box = blob->bounding_box();
869  if (box.bottom() > kBlnBaselineOffset * 4 ||
870  box.top() < kBlnBaselineOffset / 2) {
871  // Be lax if the blob is unusually high or low
872  largest_outline_dimension /= 2;
873  }
874 
875  return largest_outline_dimension;
876 }
877 } // namespace tesseract
878 
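/**
 * fixspace_dbg()
 * Print debugging information for a word: bounding box, best choice string,
 * blob counts, and the reject map (optionally per character).
 */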
879 void fixspace_dbg(WERD_RES *word) {
880  TBOX box = word->word->bounding_box();
881  BOOL8 show_map_detail = FALSE;
882  inT16 i;
883 
884  box.print();
885  tprintf(" \"%s\" ", word->best_choice->unichar_string().string());
886  tprintf("Blob count: %d (word); %d/%d (rebuild word)\n",
887  word->word->cblob_list()->length(),
888  word->rebuild_word->NumBlobs(),
889  word->box_word->length());
890  word->reject_map.print(debug_fp);
891  tprintf("\n");
892  if (show_map_detail) {
893  tprintf("\"%s\"\n", word->best_choice->unichar_string().string());
894  for (i = 0; word->best_choice->unichar_string()[i] != '\0'; i++) {
895  tprintf("**** \"%c\" ****\n", word->best_choice->unichar_string()[i]);
896  word->reject_map[i].full_print(debug_fp);
897  }
898  }
899 
900  tprintf("Tess Accepted: %s\n", word->tess_accepted ? "TRUE" : "FALSE");
901  tprintf("Done flag: %s\n\n", word->done ? "TRUE" : "FALSE");
902 }
903 
904 
913 namespace tesseract {
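/**
 * fp_eval_word_spacing()
 * Score a fixed-pitch spacing permutation: for each word that is done,
 * accepted, or a dictionary/number word, add a point per accepted character
 * and subtract a point for each space character or tiny (noise-sized) blob.
 * The result is clipped at zero.
 */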
914 inT16 Tesseract::fp_eval_word_spacing(WERD_RES_LIST &word_res_list) {
915  WERD_RES_IT word_it(&word_res_list);
916  WERD_RES *word;
917  inT16 word_length;
918  inT16 score = 0;
919  inT16 i;
920  float small_limit = kBlnXHeight * fixsp_small_outlines_size;
921 
922  for (word_it.mark_cycle_pt(); !word_it.cycled_list(); word_it.forward()) {
923  word = word_it.data();
924  if (word->rebuild_word == NULL)
925  continue; // Can't handle cube words.
926  word_length = word->reject_map.length();
927  if (word->done ||
928  word->tess_accepted ||
929  word->best_choice->permuter() == SYSTEM_DAWG_PERM ||
930  word->best_choice->permuter() == FREQ_DAWG_PERM ||
931  word->best_choice->permuter() == USER_DAWG_PERM ||
932  safe_dict_word(word) > 0) {
933  TBLOB* blob = word->rebuild_word->blobs;
934  UNICHAR_ID space = word->uch_set->unichar_to_id(" ");
935  for (i = 0; i < word->best_choice->length() && blob != NULL;
936  ++i, blob = blob->next) {
937  if (word->best_choice->unichar_id(i) == space ||
938  blob_noise_score(blob) < small_limit) {
939  score -= 1; // penalise possibly erroneous non-space
940  } else if (word->reject_map[i].accepted()) {
941  score++;
942  }
943  }
944  }
945  }
946  if (score < 0)
947  score = 0;
948  return score;
949 }
950 
951 } // namespace tesseract