tesseract 4.1.1
weightmatrix.cpp
///////////////////////////////////////////////////////////////////////
// File:        weightmatrix.cpp
// Description: Hides distinction between float/int implementations.
// Author:      Ray Smith
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////

#include "weightmatrix.h"

#include <cassert>  // for assert
#include "intsimdmatrix.h"
#include "simddetect.h"  // for DotProduct
#include "statistc.h"
#include "tprintf.h"

namespace tesseract {

#if defined(ANDROID)
static inline double log2(double n) {
  return log(n) / log(2.0);
}
#endif  // ANDROID

// Number of iterations after which the correction effectively becomes unity.
const int kAdamCorrectionIterations = 200000;
// Epsilon in Adam to prevent division by zero.
const double kAdamEpsilon = 1e-8;
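// Both constants are used in Update() below: the learning-rate correction is
// applied only while pow(adam_beta, num_samples) and pow(momentum, num_samples)
// are still noticeably non-zero, i.e. for roughly the first
// kAdamCorrectionIterations samples, and kAdamEpsilon keeps the denominator of
// the Adam step away from zero.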

// Computes matrix.vector v = Wu.
// u is of size W.dim2() - add_bias_fwd and the output v is of size
// W.dim1() - skip_bias_back.
// If add_bias_fwd, u is imagined to have an extra element at the end with value
// 1, to implement the bias weight.
// If skip_bias_back, we are actually performing the backwards product on a
// transposed matrix, so we need to drop the v output corresponding to the last
// element in dim1.
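// For example, with add_bias_fwd and W of size no x (ni + 1), each output is
//   v[i] = sum_{j < ni} W[i][j] * u[j] + W[i][ni],
// so the last column of W acts as the bias added to the plain dot product.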
static inline void MatrixDotVectorInternal(const GENERIC_2D_ARRAY<double>& w,
                                           bool add_bias_fwd,
                                           bool skip_bias_back, const double* u,
                                           double* v) {
  int num_results = w.dim1() - skip_bias_back;
  int extent = w.dim2() - add_bias_fwd;
  for (int i = 0; i < num_results; ++i) {
    const double* wi = w[i];
    double total = DotProduct(wi, u, extent);
    if (add_bias_fwd) total += wi[extent];  // The bias value.
    v[i] = total;
  }
}

// Copies the whole input transposed, converted to double, into *this.
void TransposedArray::Transpose(const GENERIC_2D_ARRAY<double>& input) {
  int width = input.dim1();
  int num_features = input.dim2();
  ResizeNoInit(num_features, width);
  for (int t = 0; t < width; ++t) WriteStrided(t, input[t]);
}

// Destructor.
// It is defined here, so the compiler can create a single vtable
// instead of weak vtables in every compilation unit.
TransposedArray::~TransposedArray() = default;

// Sets up the network for training. Initializes weights using weights of
// scale `range` picked according to the random number generator `randomizer`.
int WeightMatrix::InitWeightsFloat(int no, int ni, bool use_adam,
                                   float weight_range, TRand* randomizer) {
  int_mode_ = false;
  wf_.Resize(no, ni, 0.0);
  if (randomizer != nullptr) {
    for (int i = 0; i < no; ++i) {
      for (int j = 0; j < ni; ++j) {
        wf_[i][j] = randomizer->SignedRand(weight_range);
      }
    }
  }
  use_adam_ = use_adam;
  InitBackward();
  return ni * no;
}

// Changes the number of outputs to the size of the given code_map, copying
// the old weight matrix entries for each output from code_map[output] where
// non-negative, and uses the mean (over all outputs) of the existing weights
// for all outputs with negative code_map entries. Returns the new number of
// weights.
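// For example, code_map = {2, -1, 0} produces three outputs: new output 0
// copies the old weights of output 2, output 1 is filled with the per-input
// means, and output 2 copies the old weights of output 0.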
int WeightMatrix::RemapOutputs(const std::vector<int>& code_map) {
  GENERIC_2D_ARRAY<double> old_wf(wf_);
  int old_no = wf_.dim1();
  int new_no = code_map.size();
  int ni = wf_.dim2();
  std::vector<double> means(ni, 0.0);
  for (int c = 0; c < old_no; ++c) {
    const double* weights = wf_[c];
    for (int i = 0; i < ni; ++i) means[i] += weights[i];
  }
  for (double& mean : means) mean /= old_no;
  wf_.ResizeNoInit(new_no, ni);
  InitBackward();
  for (int dest = 0; dest < new_no; ++dest) {
    int src = code_map[dest];
    const double* src_data = src >= 0 ? old_wf[src] : means.data();
    memcpy(wf_[dest], src_data, ni * sizeof(*src_data));
  }
  return ni * new_no;
}

// Converts a float network to an int network. Each set of input weights that
// corresponds to a single output weight is converted independently:
// Compute the max absolute value of the weight set.
// Scale so the max absolute value becomes INT8_MAX.
// Round to integer.
// Store a multiplicative scale factor (as a double) that will reproduce
// the original value, subject to rounding errors.
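// For example, a row whose largest magnitude is 0.5 gets scale = 0.5 / 127, so
// a weight of 0.25 is stored as IntCastRounded(0.25 / scale) = 64 and is
// recovered at run time as 64 * scale, approximately 0.252.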
void WeightMatrix::ConvertToInt() {
  wi_.ResizeNoInit(wf_.dim1(), wf_.dim2());
  scales_.init_to_size(wi_.dim1(), 0.0);
  int dim2 = wi_.dim2();
  for (int t = 0; t < wi_.dim1(); ++t) {
    double* f_line = wf_[t];
    int8_t* i_line = wi_[t];
    double max_abs = 0.0;
    for (int f = 0; f < dim2; ++f) {
      double abs_val = fabs(f_line[f]);
      if (abs_val > max_abs) max_abs = abs_val;
    }
    double scale = max_abs / INT8_MAX;
    scales_[t] = scale;
    if (scale == 0.0) scale = 1.0;
    for (int f = 0; f < dim2; ++f) {
      i_line[f] = IntCastRounded(f_line[f] / scale);
    }
  }
  wf_.Resize(1, 1, 0.0);
  int_mode_ = true;
  if (IntSimdMatrix::intSimdMatrix != nullptr) {
    IntSimdMatrix::intSimdMatrix->Init(wi_, shaped_w_);
  }
}

// Allocates any needed memory for running Backward, and zeroes the deltas,
// thus eliminating any existing momentum.
void WeightMatrix::InitBackward() {
  int no = int_mode_ ? wi_.dim1() : wf_.dim1();
  int ni = int_mode_ ? wi_.dim2() : wf_.dim2();
  dw_.Resize(no, ni, 0.0);
  updates_.Resize(no, ni, 0.0);
  wf_t_.Transpose(wf_);
  if (use_adam_) dw_sq_sum_.Resize(no, ni, 0.0);
}

// Flag on mode to indicate that this weightmatrix uses int8_t.
const int kInt8Flag = 1;
// Flag on mode to indicate that this weightmatrix uses adam.
const int kAdamFlag = 4;
// Flag on mode to indicate that this weightmatrix uses double. Set
// independently of kInt8Flag as even in int mode the scales can
// be float or double.
const int kDoubleFlag = 128;
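// For example, a double-precision float network trained with Adam is written
// with mode = kAdamFlag | kDoubleFlag = 132, while a quantized int8_t model is
// written with mode = kInt8Flag | kDoubleFlag = 129 (plus kAdamFlag if it was
// trained with Adam).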

// Writes to the given file. Returns false in case of error.
bool WeightMatrix::Serialize(bool training, TFile* fp) const {
  // For backward compatibility, add kDoubleFlag to mode to indicate the doubles
  // format, without errs, so we can detect and read old format weight matrices.
  uint8_t mode =
      (int_mode_ ? kInt8Flag : 0) | (use_adam_ ? kAdamFlag : 0) | kDoubleFlag;
  if (!fp->Serialize(&mode)) return false;
  if (int_mode_) {
    if (!wi_.Serialize(fp)) return false;
    if (!scales_.Serialize(fp)) return false;
  } else {
    if (!wf_.Serialize(fp)) return false;
    if (training && !updates_.Serialize(fp)) return false;
    if (training && use_adam_ && !dw_sq_sum_.Serialize(fp)) return false;
  }
  return true;
}

// Reads from the given file. Returns false in case of error.

bool WeightMatrix::DeSerialize(bool training, TFile* fp) {
  uint8_t mode;
  if (!fp->DeSerialize(&mode)) return false;
  int_mode_ = (mode & kInt8Flag) != 0;
  use_adam_ = (mode & kAdamFlag) != 0;
  if ((mode & kDoubleFlag) == 0) return DeSerializeOld(training, fp);
  if (int_mode_) {
    if (!wi_.DeSerialize(fp)) return false;
    if (!scales_.DeSerialize(fp)) return false;
    if (IntSimdMatrix::intSimdMatrix != nullptr) {
      IntSimdMatrix::intSimdMatrix->Init(wi_, shaped_w_);
    }
  } else {
    if (!wf_.DeSerialize(fp)) return false;
    if (training) {
      InitBackward();
      if (!updates_.DeSerialize(fp)) return false;
      if (use_adam_ && !dw_sq_sum_.DeSerialize(fp)) return false;
    }
  }
  return true;
}

// As DeSerialize, but reads an old (float) format WeightMatrix for
// backward compatibility.
bool WeightMatrix::DeSerializeOld(bool training, TFile* fp) {
  GENERIC_2D_ARRAY<float> float_array;
  if (int_mode_) {
    if (!wi_.DeSerialize(fp)) return false;
    GenericVector<float> old_scales;
    if (!old_scales.DeSerialize(fp)) return false;
    scales_.resize_no_init(old_scales.size());
    for (int i = 0; i < old_scales.size(); ++i) scales_[i] = old_scales[i];
  } else {
    if (!float_array.DeSerialize(fp)) return false;
    FloatToDouble(float_array, &wf_);
  }
  if (training) {
    InitBackward();
    if (!float_array.DeSerialize(fp)) return false;
    FloatToDouble(float_array, &updates_);
    // Errs was only used in int training, which is now dead.
    if (!float_array.DeSerialize(fp)) return false;
  }
  return true;
}

// Computes matrix.vector v = Wu.
// u is of size W.dim2() - 1 and the output v is of size W.dim1().
// u is imagined to have an extra element at the end with value 1, to
// implement the bias, but it doesn't actually have it.
// Asserts that the call matches what we have.
void WeightMatrix::MatrixDotVector(const double* u, double* v) const {
  assert(!int_mode_);
  MatrixDotVectorInternal(wf_, true, false, u, v);
}

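// MatrixDotVector for the quantized (int8_t) case. When a SIMD implementation
// was detected, it uses the pre-shaped copy of the weights in shaped_w_ (built
// by Init() in ConvertToInt/DeSerialize); otherwise it falls back to the
// generic IntSimdMatrix::MatrixDotVector. scales_ maps the int8_t products
// back to the original float magnitudes.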
void WeightMatrix::MatrixDotVector(const int8_t* u, double* v) const {
  assert(int_mode_);
  if (IntSimdMatrix::intSimdMatrix != nullptr) {
    IntSimdMatrix::intSimdMatrix->matrixDotVectorFunction(
        wi_.dim1(), wi_.dim2(), &shaped_w_[0], &scales_[0], u, v);
  } else {
    IntSimdMatrix::MatrixDotVector(wi_, scales_, u, v);
  }
}

// MatrixDotVector for peep weights, MultiplyAccumulate adds the
// component-wise products of *this[0] and v to inout.
void WeightMatrix::MultiplyAccumulate(const double* v, double* inout) {
  assert(!int_mode_);
  assert(wf_.dim1() == 1);
  int n = wf_.dim2();
  const double* u = wf_[0];
  for (int i = 0; i < n; ++i) {
    inout[i] += u[i] * v[i];
  }
}

// Computes vector.matrix v = uW.
// u is of size W.dim1() and the output v is of size W.dim2() - 1.
// The last result is discarded, as v is assumed to have an imaginary
// last value of 1, as with MatrixDotVector.
void WeightMatrix::VectorDotMatrix(const double* u, double* v) const {
  assert(!int_mode_);
  MatrixDotVectorInternal(wf_t_, false, true, u, v);
}

// Fills dw_[i][j] with the dot product u[i][] . v[j][], using elements from
// u and v. In terms of the neural network, u is the gradients and v is the
// inputs.
// Note that (matching MatrixDotVector) v[last][] is missing, presumed 1.0.
// Runs in parallel if requested. Note that u and v must be transposed.
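// Concretely, dw_[i][j] = sum over t of u[i][t] * v[j][t] for the num_samples
// time steps, and the last column dw_[i][num_inputs] is sum over t of u[i][t],
// i.e. the gradient of the bias weight, since the missing v[last][] is taken
// to be 1.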
void WeightMatrix::SumOuterTransposed(const TransposedArray& u,
                                      const TransposedArray& v,
                                      bool in_parallel) {
  assert(!int_mode_);
  int num_outputs = dw_.dim1();
  assert(u.dim1() == num_outputs);
  assert(u.dim2() == v.dim2());
  int num_inputs = dw_.dim2() - 1;
  int num_samples = u.dim2();
  // v is missing the last element in dim1.
  assert(v.dim1() == num_inputs);
#ifdef _OPENMP
#pragma omp parallel for num_threads(4) if (in_parallel)
#endif
  for (int i = 0; i < num_outputs; ++i) {
    double* dwi = dw_[i];
    const double* ui = u[i];
    for (int j = 0; j < num_inputs; ++j) {
      dwi[j] = DotProduct(ui, v[j], num_samples);
    }
    // The last element of v is missing, presumed 1.0f.
    double total = 0.0;
    for (int k = 0; k < num_samples; ++k) total += ui[k];
    dwi[num_inputs] = total;
  }
}

// Updates the weights using the given learning rate and momentum.
// num_samples is the quotient to be used in the adam computation iff
// use_adam_ is true.
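// The first branch below applies the usual Adam bias correction,
//   learning_rate *= sqrt(1 - adam_beta^num_samples) / (1 - momentum^num_samples),
// with adam_beta playing the role of beta2 and momentum of beta1; once
// num_samples reaches kAdamCorrectionIterations the factor is effectively 1
// and is skipped.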
void WeightMatrix::Update(double learning_rate, double momentum,
                          double adam_beta, int num_samples) {
  assert(!int_mode_);
  if (use_adam_ && num_samples > 0 && num_samples < kAdamCorrectionIterations) {
    learning_rate *= sqrt(1.0 - pow(adam_beta, num_samples));
    learning_rate /= 1.0 - pow(momentum, num_samples);
  }
  if (use_adam_ && num_samples > 0 && momentum > 0.0) {
    dw_sq_sum_.SumSquares(dw_, adam_beta);
    dw_ *= learning_rate * (1.0 - momentum);
    updates_ *= momentum;
    updates_ += dw_;
    wf_.AdamUpdate(updates_, dw_sq_sum_, learning_rate * kAdamEpsilon);
  } else {
    dw_ *= learning_rate;
    updates_ += dw_;
    if (momentum > 0.0) wf_ += updates_;
    if (momentum >= 0.0) updates_ *= momentum;
  }
  wf_t_.Transpose(wf_);
}

// Adds the dw_ in other to the dw_ in *this.
void WeightMatrix::AddDeltas(const WeightMatrix& other) {
  assert(dw_.dim1() == other.dw_.dim1());
  assert(dw_.dim2() == other.dw_.dim2());
  dw_ += other.dw_;
}

// Sums the products of weight updates in *this and other, splitting into
// positive (same direction) in *same and negative (different direction) in
// *changed.
void WeightMatrix::CountAlternators(const WeightMatrix& other, double* same,
                                    double* changed) const {
  int num_outputs = updates_.dim1();
  int num_inputs = updates_.dim2();
  assert(num_outputs == other.updates_.dim1());
  assert(num_inputs == other.updates_.dim2());
  for (int i = 0; i < num_outputs; ++i) {
    const double* this_i = updates_[i];
    const double* other_i = other.updates_[i];
    for (int j = 0; j < num_inputs; ++j) {
      double product = this_i[j] * other_i[j];
      if (product < 0.0)
        *changed -= product;
      else
        *same += product;
    }
  }
}

// Helper computes an integer histogram bucket for a weight and adds it
// to the histogram.
const int kHistogramBuckets = 16;
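// For example, the bucket is ClipToRange(IntCastRounded(-log2(|weight|)), 0,
// kHistogramBuckets - 1): weights near 1.0 land in bucket 0, each bucket spans
// roughly one power of two, and zero (or very small) weights fall in the last
// bucket.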
static void HistogramWeight(double weight, STATS* histogram) {
  int bucket = kHistogramBuckets - 1;
  if (weight != 0.0) {
    double logval = -log2(fabs(weight));
    bucket = ClipToRange(IntCastRounded(logval), 0, kHistogramBuckets - 1);
  }
  histogram->add(bucket, 1);
}

void WeightMatrix::Debug2D(const char* msg) {
  STATS histogram(0, kHistogramBuckets);
  if (int_mode_) {
    for (int i = 0; i < wi_.dim1(); ++i) {
      for (int j = 0; j < wi_.dim2(); ++j) {
        HistogramWeight(wi_[i][j] * scales_[i], &histogram);
      }
    }
  } else {
    for (int i = 0; i < wf_.dim1(); ++i) {
      for (int j = 0; j < wf_.dim2(); ++j) {
        HistogramWeight(wf_[i][j], &histogram);
      }
    }
  }
  tprintf("%s\n", msg);
  histogram.print();
}

// Utility function converts an array of float to the corresponding array
// of double.
/* static */
void WeightMatrix::FloatToDouble(const GENERIC_2D_ARRAY<float>& wf,
                                 GENERIC_2D_ARRAY<double>* wd) {
  int dim1 = wf.dim1();
  int dim2 = wf.dim2();
  wd->ResizeNoInit(dim1, dim2);
  for (int i = 0; i < dim1; ++i) {
    const float* wfi = wf[i];
    double* wdi = (*wd)[i];
    for (int j = 0; j < dim2; ++j) wdi[j] = static_cast<double>(wfi[j]);
  }
}

}  // namespace tesseract.