/*
 * Copyright © 2021  Google, Inc.
 *
 *  This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 */

#ifndef HB_OT_VAR_COMMON_HH
#define HB_OT_VAR_COMMON_HH

#include "hb-ot-layout-common.hh"
#include "hb-priority-queue.hh"
#include "hb-subset-instancer-iup.hh"


namespace OT {


/* https://docs.microsoft.com/en-us/typography/opentype/spec/otvarcommonformats#tuplevariationheader */
struct TupleVariationHeader
{
  friend struct tuple_delta_t;
  unsigned get_size (unsigned axis_count) const
  { return min_size + get_all_tuples (axis_count).get_size (); }

  unsigned get_data_size () const { return varDataSize; }

  const TupleVariationHeader &get_next (unsigned axis_count) const
  { return StructAtOffset<TupleVariationHeader> (this, get_size (axis_count)); }

  bool unpack_axis_tuples (unsigned axis_count,
                           const hb_array_t<const F2DOT14> shared_tuples,
                           const hb_map_t *axes_old_index_tag_map,
                           hb_hashmap_t<hb_tag_t, Triple>& axis_tuples /* OUT */) const
  {
    const F2DOT14 *peak_tuple = nullptr;
    if (has_peak ())
      peak_tuple = get_peak_tuple (axis_count).arrayZ;
    else
    {
      unsigned int index = get_index ();
      if (unlikely ((index + 1) * axis_count > shared_tuples.length))
        return false;
      peak_tuple = shared_tuples.sub_array (axis_count * index, axis_count).arrayZ;
    }

    const F2DOT14 *start_tuple = nullptr;
    const F2DOT14 *end_tuple = nullptr;
    bool has_interm = has_intermediate ();

    if (has_interm)
    {
      start_tuple = get_start_tuple (axis_count).arrayZ;
      end_tuple = get_end_tuple (axis_count).arrayZ;
    }

    for (unsigned i = 0; i < axis_count; i++)
    {
      float peak = peak_tuple[i].to_float ();
      if (peak == 0.f) continue;

      hb_tag_t *axis_tag;
      if (!axes_old_index_tag_map->has (i, &axis_tag))
        return false;

      float start, end;
      if (has_interm)
      {
        start = start_tuple[i].to_float ();
        end = end_tuple[i].to_float ();
      }
      else
      {
        start = hb_min (peak, 0.f);
        end = hb_max (peak, 0.f);
      }
      axis_tuples.set (*axis_tag, Triple ((double) start, (double) peak, (double) end));
    }

    return true;
  }
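
  /* E.g. an axis with peak 0.5 and no intermediate region unpacks to
   * Triple (0.0, 0.5, 0.5), since start = min (peak, 0) and
   * end = max (peak, 0); a peak of -0.5 unpacks to Triple (-0.5, -0.5, 0.0). */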

  double calculate_scalar (hb_array_t<const int> coords, unsigned int coord_count,
                           const hb_array_t<const F2DOT14> shared_tuples,
                           const hb_vector_t<hb_pair_t<int,int>> *shared_tuple_active_idx = nullptr) const
  {
    const F2DOT14 *peak_tuple;

    unsigned start_idx = 0;
    unsigned end_idx = coord_count;
    unsigned step = 1;

    if (has_peak ())
      peak_tuple = get_peak_tuple (coord_count).arrayZ;
    else
    {
      unsigned int index = get_index ();
      if (unlikely ((index + 1) * coord_count > shared_tuples.length))
        return 0.0;
      peak_tuple = shared_tuples.sub_array (coord_count * index, coord_count).arrayZ;

      if (shared_tuple_active_idx)
      {
        if (unlikely (index >= shared_tuple_active_idx->length))
          return 0.0;
        auto _ = (*shared_tuple_active_idx).arrayZ[index];
        if (_.second != -1)
        {
          start_idx = _.first;
          end_idx = _.second + 1;
          step = _.second - _.first;
        }
        else if (_.first != -1)
        {
          start_idx = _.first;
          end_idx = start_idx + 1;
        }
      }
    }

    const F2DOT14 *start_tuple = nullptr;
    const F2DOT14 *end_tuple = nullptr;
    bool has_interm = has_intermediate ();
    if (has_interm)
    {
      start_tuple = get_start_tuple (coord_count).arrayZ;
      end_tuple = get_end_tuple (coord_count).arrayZ;
    }

    double scalar = 1.0;
    for (unsigned int i = start_idx; i < end_idx; i += step)
    {
      int peak = peak_tuple[i].to_int ();
      if (!peak) continue;

      int v = coords[i];
      if (v == peak) continue;

      if (has_interm)
      {
        int start = start_tuple[i].to_int ();
        int end = end_tuple[i].to_int ();
        if (unlikely (start > peak || peak > end ||
                      (start < 0 && end > 0 && peak))) continue;
        if (v < start || v > end) return 0.0;
        if (v < peak)
        { if (peak != start) scalar *= (double) (v - start) / (peak - start); }
        else
        { if (peak != end) scalar *= (double) (end - v) / (end - peak); }
      }
      else if (!v || v < hb_min (0, peak) || v > hb_max (0, peak)) return 0.0;
      else
        scalar *= (double) v / peak;
    }
    return scalar;
  }
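
  /* Worked example: for a single axis with an embedded peak of 0.5
   * (8192 in raw 2.14) and no intermediate region, a normalized coord of
   * 4096 (0.25) gives scalar = 4096 / 8192 = 0.5, while any coord outside
   * [0, 8192] gives 0. */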

  bool           has_peak () const { return tupleIndex & TuppleIndex::EmbeddedPeakTuple; }
  bool   has_intermediate () const { return tupleIndex & TuppleIndex::IntermediateRegion; }
  bool has_private_points () const { return tupleIndex & TuppleIndex::PrivatePointNumbers; }
  unsigned      get_index () const { return tupleIndex & TuppleIndex::TupleIndexMask; }

  protected:
  struct TuppleIndex : HBUINT16
  {
    enum Flags {
      EmbeddedPeakTuple   = 0x8000u,
      IntermediateRegion  = 0x4000u,
      PrivatePointNumbers = 0x2000u,
      TupleIndexMask      = 0x0FFFu
    };

    TuppleIndex& operator = (uint16_t i) { HBUINT16::operator= (i); return *this; }
    DEFINE_SIZE_STATIC (2);
  };
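
  /* E.g. a tupleIndex of 0xA005 has EmbeddedPeakTuple and PrivatePointNumbers
   * set; its low 12 bits (0x005) are ignored, as the shared-tuple index is
   * only meaningful when the peak tuple is not embedded. */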

  hb_array_t<const F2DOT14> get_all_tuples (unsigned axis_count) const
  { return StructAfter<UnsizedArrayOf<F2DOT14>> (tupleIndex).as_array ((has_peak () + has_intermediate () * 2) * axis_count); }
  hb_array_t<const F2DOT14> get_peak_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (0, axis_count); }
  hb_array_t<const F2DOT14> get_start_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (has_peak () * axis_count, axis_count); }
  hb_array_t<const F2DOT14> get_end_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (has_peak () * axis_count + axis_count, axis_count); }

  HBUINT16      varDataSize;    /* The size in bytes of the serialized
                                 * data for this tuple variation table. */
  TuppleIndex   tupleIndex;     /* A packed field. The high 4 bits are flags (see below).
                                   The low 12 bits are an index into a shared tuple
                                   records array. */
  /* UnsizedArrayOf<F2DOT14> peakTuple - optional */
                                /* Peak tuple record for this tuple variation table, optional;
                                 * determined by flags in the tupleIndex value.
                                 *
                                 * Note that this must always be included in the 'cvar' table. */
  /* UnsizedArrayOf<F2DOT14> intermediateStartTuple - optional */
                                /* Intermediate start tuple record for this tuple variation table, optional;
                                   determined by flags in the tupleIndex value. */
  /* UnsizedArrayOf<F2DOT14> intermediateEndTuple - optional */
                                /* Intermediate end tuple record for this tuple variation table, optional;
                                 * determined by flags in the tupleIndex value. */
  public:
  DEFINE_SIZE_MIN (4);
};
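
/* E.g. in a 2-axis font, a header with an embedded peak tuple and an
 * intermediate region occupies min_size (4) + 3 * 2 * F2DOT14::static_size
 * = 16 bytes; get_next () advances by exactly that amount. */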

struct tuple_delta_t
{
  static constexpr bool realloc_move = true;  // Watch out when adding new members!

  public:
  hb_hashmap_t<hb_tag_t, Triple> axis_tuples;

  /* indices_length = point_count; indices[i] = true means point i is referenced */
  hb_vector_t<bool> indices;

  hb_vector_t<double> deltas_x;
  /* empty for cvar tuples */
  hb_vector_t<double> deltas_y;

  /* compiled data: header and deltas.
   * compiled point data is saved in a hashmap within tuple_variations_t because
   * some point sets might be reused by different tuple variations */
  hb_vector_t<unsigned char> compiled_tuple_header;
  hb_vector_t<unsigned char> compiled_deltas;

  /* compiled peak coords, empty for non-gvar tuples */
  hb_vector_t<char> compiled_peak_coords;

  tuple_delta_t () = default;
  tuple_delta_t (const tuple_delta_t& o) = default;

  friend void swap (tuple_delta_t& a, tuple_delta_t& b) noexcept
  {
    hb_swap (a.axis_tuples, b.axis_tuples);
    hb_swap (a.indices, b.indices);
    hb_swap (a.deltas_x, b.deltas_x);
    hb_swap (a.deltas_y, b.deltas_y);
    hb_swap (a.compiled_tuple_header, b.compiled_tuple_header);
    hb_swap (a.compiled_deltas, b.compiled_deltas);
    hb_swap (a.compiled_peak_coords, b.compiled_peak_coords);
  }

  tuple_delta_t (tuple_delta_t&& o) noexcept : tuple_delta_t ()
  { hb_swap (*this, o); }

  tuple_delta_t& operator = (tuple_delta_t&& o) noexcept
  {
    hb_swap (*this, o);
    return *this;
  }

  void remove_axis (hb_tag_t axis_tag)
  { axis_tuples.del (axis_tag); }

  bool set_tent (hb_tag_t axis_tag, Triple tent)
  { return axis_tuples.set (axis_tag, tent); }

  tuple_delta_t& operator += (const tuple_delta_t& o)
  {
    unsigned num = indices.length;
    for (unsigned i = 0; i < num; i++)
    {
      if (indices.arrayZ[i])
      {
        if (o.indices.arrayZ[i])
        {
          deltas_x[i] += o.deltas_x[i];
          if (deltas_y && o.deltas_y)
            deltas_y[i] += o.deltas_y[i];
        }
      }
      else
      {
        if (!o.indices.arrayZ[i]) continue;
        indices.arrayZ[i] = true;
        deltas_x[i] = o.deltas_x[i];
        if (deltas_y && o.deltas_y)
          deltas_y[i] = o.deltas_y[i];
      }
    }
    return *this;
  }

  tuple_delta_t& operator *= (double scalar)
  {
    if (scalar == 1.0)
      return *this;

    unsigned num = indices.length;
    if (deltas_y)
      for (unsigned i = 0; i < num; i++)
      {
        if (!indices.arrayZ[i]) continue;
        deltas_x[i] *= scalar;
        deltas_y[i] *= scalar;
      }
    else
      for (unsigned i = 0; i < num; i++)
      {
        if (!indices.arrayZ[i]) continue;
        deltas_x[i] *= scalar;
      }
    return *this;
  }

  hb_vector_t<tuple_delta_t> change_tuple_var_axis_limit (hb_tag_t axis_tag, Triple axis_limit,
                                                          TripleDistances axis_triple_distances) const
  {
    hb_vector_t<tuple_delta_t> out;
    Triple *tent;
    if (!axis_tuples.has (axis_tag, &tent))
    {
      out.push (*this);
      return out;
    }

    if ((tent->minimum < 0.0 && tent->maximum > 0.0) ||
        !(tent->minimum <= tent->middle && tent->middle <= tent->maximum))
      return out;

    if (tent->middle == 0.0)
    {
      out.push (*this);
      return out;
    }

    rebase_tent_result_t solutions = rebase_tent (*tent, axis_limit, axis_triple_distances);
    for (auto &t : solutions)
    {
      tuple_delta_t new_var = *this;
      if (t.second == Triple ())
        new_var.remove_axis (axis_tag);
      else
        new_var.set_tent (axis_tag, t.second);

      new_var *= t.first;
      out.push (std::move (new_var));
    }

    return out;
  }

  bool compile_peak_coords (const hb_map_t& axes_index_map,
                            const hb_map_t& axes_old_index_tag_map)
  {
    unsigned axis_count = axes_index_map.get_population ();
    if (unlikely (!compiled_peak_coords.alloc (axis_count * F2DOT14::static_size)))
      return false;

    unsigned orig_axis_count = axes_old_index_tag_map.get_population ();
    for (unsigned i = 0; i < orig_axis_count; i++)
    {
      if (!axes_index_map.has (i))
        continue;

      hb_tag_t axis_tag = axes_old_index_tag_map.get (i);
      Triple *coords;
      F2DOT14 peak_coord;
      if (axis_tuples.has (axis_tag, &coords))
        peak_coord.set_float (coords->middle);
      else
        peak_coord.set_int (0);

      /* push the F2DOT14 value into the char vector, big-endian */
      int16_t val = peak_coord.to_int ();
      compiled_peak_coords.push (static_cast<char> (val >> 8));
      compiled_peak_coords.push (static_cast<char> (val & 0xFF));
    }

    return !compiled_peak_coords.in_error ();
  }
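
  /* E.g. a peak coordinate of 0.5 is 0x2000 in F2DOT14 and is pushed as the
   * bytes 0x20, 0x00; -1.0 is 0xC000 and is pushed as 0xC0, 0x00. */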

  /* deltas should already be compiled before we compile the tuple
   * variation header, because we need to fill in the size of the
   * serialized data for this tuple variation */
  bool compile_tuple_var_header (const hb_map_t& axes_index_map,
                                 unsigned points_data_length,
                                 const hb_map_t& axes_old_index_tag_map,
                                 const hb_hashmap_t<const hb_vector_t<char>*, unsigned>* shared_tuples_idx_map)
  {
    /* compiled_deltas could be empty after iup delta optimization; we can skip
     * compiling this tuple and return true */
    if (!compiled_deltas) return true;

    unsigned cur_axis_count = axes_index_map.get_population ();
    /* allocate enough memory: 1 peak + 2 intermediate coords + fixed header size */
    unsigned alloc_len = 3 * cur_axis_count * (F2DOT14::static_size) + 4;
    if (unlikely (!compiled_tuple_header.resize (alloc_len))) return false;

    unsigned flag = 0;
    /* skip the first 4 header bytes: varDataSize + tupleIndex */
    F2DOT14* p = reinterpret_cast<F2DOT14 *> (compiled_tuple_header.begin () + 4);
    F2DOT14* end = reinterpret_cast<F2DOT14 *> (compiled_tuple_header.end ());
    hb_array_t<F2DOT14> coords (p, end - p);

    /* encode peak coords */
    unsigned peak_count = 0;
    unsigned *shared_tuple_idx;
    if (shared_tuples_idx_map &&
        shared_tuples_idx_map->has (&compiled_peak_coords, &shared_tuple_idx))
    {
      flag = *shared_tuple_idx;
    }
    else
    {
      peak_count = encode_peak_coords (coords, flag, axes_index_map, axes_old_index_tag_map);
      if (!peak_count) return false;
    }

    /* encode intermediate coords; they are optional, so the returned count could be 0 */
    unsigned interim_count = encode_interm_coords (coords.sub_array (peak_count), flag, axes_index_map, axes_old_index_tag_map);

    /* point data length = 0 implies "use shared points" */
    if (points_data_length)
      flag |= TupleVariationHeader::TuppleIndex::PrivatePointNumbers;

    unsigned serialized_data_size = points_data_length + compiled_deltas.length;
    TupleVariationHeader *o = reinterpret_cast<TupleVariationHeader *> (compiled_tuple_header.begin ());
    o->varDataSize = serialized_data_size;
    o->tupleIndex = flag;

    unsigned total_header_len = 4 + (peak_count + interim_count) * (F2DOT14::static_size);
    return compiled_tuple_header.resize (total_header_len);
  }

  unsigned encode_peak_coords (hb_array_t<F2DOT14> peak_coords,
                               unsigned& flag,
                               const hb_map_t& axes_index_map,
                               const hb_map_t& axes_old_index_tag_map) const
  {
    unsigned orig_axis_count = axes_old_index_tag_map.get_population ();
    auto it = peak_coords.iter ();
    unsigned count = 0;
    for (unsigned i = 0; i < orig_axis_count; i++)
    {
      if (!axes_index_map.has (i)) /* axis pinned */
        continue;
      hb_tag_t axis_tag = axes_old_index_tag_map.get (i);
      Triple *coords;
      if (!axis_tuples.has (axis_tag, &coords))
        (*it).set_int (0);
      else
        (*it).set_float (coords->middle);
      it++;
      count++;
    }
    flag |= TupleVariationHeader::TuppleIndex::EmbeddedPeakTuple;
    return count;
  }

  /* if there is no need to encode intermediate coords, return 0 */
  unsigned encode_interm_coords (hb_array_t<F2DOT14> coords,
                                 unsigned& flag,
                                 const hb_map_t& axes_index_map,
                                 const hb_map_t& axes_old_index_tag_map) const
  {
    unsigned orig_axis_count = axes_old_index_tag_map.get_population ();
    unsigned cur_axis_count = axes_index_map.get_population ();

    auto start_coords_iter = coords.sub_array (0, cur_axis_count).iter ();
    auto end_coords_iter = coords.sub_array (cur_axis_count).iter ();
    bool encode_needed = false;
    unsigned count = 0;
    for (unsigned i = 0; i < orig_axis_count; i++)
    {
      if (!axes_index_map.has (i)) /* axis pinned */
        continue;
      hb_tag_t axis_tag = axes_old_index_tag_map.get (i);
      Triple *coords;
      float min_val = 0.f, val = 0.f, max_val = 0.f;
      if (axis_tuples.has (axis_tag, &coords))
      {
        min_val = coords->minimum;
        val = coords->middle;
        max_val = coords->maximum;
      }

      (*start_coords_iter).set_float (min_val);
      (*end_coords_iter).set_float (max_val);

      start_coords_iter++;
      end_coords_iter++;
      count += 2;
      /* an intermediate region is only needed when it differs from the
       * region implied by the peak alone */
      if (min_val != hb_min (val, 0.f) || max_val != hb_max (val, 0.f))
        encode_needed = true;
    }

    if (encode_needed)
    {
      flag |= TupleVariationHeader::TuppleIndex::IntermediateRegion;
      return count;
    }
    return 0;
  }

  bool compile_deltas ()
  { return compile_deltas (indices, deltas_x, deltas_y, compiled_deltas); }

  static bool compile_deltas (const hb_vector_t<bool> &point_indices,
                              const hb_vector_t<double> &x_deltas,
                              const hb_vector_t<double> &y_deltas,
                              hb_vector_t<unsigned char> &compiled_deltas /* OUT */)
  {
    hb_vector_t<int> rounded_deltas;
    if (unlikely (!rounded_deltas.alloc (point_indices.length)))
      return false;

    for (unsigned i = 0; i < point_indices.length; i++)
    {
      if (!point_indices[i]) continue;
      int rounded_delta = (int) roundf (x_deltas.arrayZ[i]);
      rounded_deltas.push (rounded_delta);
    }

    if (!rounded_deltas) return true;
    /* allocate enough memory: 5 bytes per delta in the worst case */
    unsigned alloc_len = 5 * rounded_deltas.length;
    if (y_deltas)
      alloc_len *= 2;

    if (unlikely (!compiled_deltas.resize (alloc_len))) return false;

    unsigned encoded_len = compile_deltas (compiled_deltas, rounded_deltas);

    if (y_deltas)
    {
      /* reuse the rounded_deltas vector; check that y_deltas has the same number of deltas as x_deltas */
      unsigned j = 0;
      for (unsigned idx = 0; idx < point_indices.length; idx++)
      {
        if (!point_indices[idx]) continue;
        int rounded_delta = (int) roundf (y_deltas.arrayZ[idx]);

        if (j >= rounded_deltas.length) return false;

        rounded_deltas[j++] = rounded_delta;
      }

      if (j != rounded_deltas.length) return false;
      encoded_len += compile_deltas (compiled_deltas.as_array ().sub_array (encoded_len), rounded_deltas);
    }
    return compiled_deltas.resize (encoded_len);
  }

  static unsigned compile_deltas (hb_array_t<unsigned char> encoded_bytes,
                                  hb_array_t<const int> deltas)
  {
    return TupleValues::compile (deltas, encoded_bytes);
  }
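
  /* E.g. with the standard packed-deltas run format, the deltas
   * [0, 0, 0, 5, -3] encode as 0x82 (zero-run flag | run count 3 - 1),
   * then 0x01 (byte-sized run of 2 values) followed by 0x05 and 0xFD. */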

  bool calc_inferred_deltas (const contour_point_vector_t& orig_points)
  {
    unsigned point_count = orig_points.length;
    if (point_count != indices.length)
      return false;

    unsigned ref_count = 0;
    hb_vector_t<unsigned> end_points;

    for (unsigned i = 0; i < point_count; i++)
    {
      if (indices.arrayZ[i])
        ref_count++;
      if (orig_points.arrayZ[i].is_end_point)
        end_points.push (i);
    }
    /* all points are referenced, nothing to do */
    if (ref_count == point_count)
      return true;
    if (unlikely (end_points.in_error ())) return false;

    hb_set_t inferred_idxes;
    unsigned start_point = 0;
    for (unsigned end_point : end_points)
    {
      /* Check the number of unreferenced points in a contour. If there are no
       * unreferenced points or no referenced points, nothing to do. */
      unsigned unref_count = 0;
      for (unsigned i = start_point; i < end_point + 1; i++)
        unref_count += indices.arrayZ[i];
      unref_count = (end_point - start_point + 1) - unref_count;

      unsigned j = start_point;
      if (unref_count == 0 || unref_count > end_point - start_point)
        goto no_more_gaps;
      for (;;)
      {
        /* Locate the next gap of unreferenced points between two referenced points prev and next.
         * Note that a gap may wrap around at left (start_point) and/or at right (end_point).
         */
        unsigned int prev, next, i;
        for (;;)
        {
          i = j;
          j = next_index (i, start_point, end_point);
          if (indices.arrayZ[i] && !indices.arrayZ[j]) break;
        }
        prev = j = i;
        for (;;)
        {
          i = j;
          j = next_index (i, start_point, end_point);
          if (!indices.arrayZ[i] && indices.arrayZ[j]) break;
        }
        next = j;
        /* Infer deltas for all unreferenced points in the gap between prev and next */
        i = prev;
        for (;;)
        {
          i = next_index (i, start_point, end_point);
          if (i == next) break;
          deltas_x.arrayZ[i] = infer_delta ((double) orig_points.arrayZ[i].x,
                                            (double) orig_points.arrayZ[prev].x,
                                            (double) orig_points.arrayZ[next].x,
                                            deltas_x.arrayZ[prev], deltas_x.arrayZ[next]);
          deltas_y.arrayZ[i] = infer_delta ((double) orig_points.arrayZ[i].y,
                                            (double) orig_points.arrayZ[prev].y,
                                            (double) orig_points.arrayZ[next].y,
                                            deltas_y.arrayZ[prev], deltas_y.arrayZ[next]);
          inferred_idxes.add (i);
          if (--unref_count == 0) goto no_more_gaps;
        }
      }
    no_more_gaps:
      start_point = end_point + 1;
    }

    for (unsigned i = 0; i < point_count; i++)
    {
      /* If a point is not referenced and its delta was not inferred, set the
       * delta to 0; then reference all points for gvar. */
      if (!indices[i])
      {
        if (!inferred_idxes.has (i))
        {
          deltas_x.arrayZ[i] = 0.0;
          deltas_y.arrayZ[i] = 0.0;
        }
        indices[i] = true;
      }
    }
    return true;
  }

  bool optimize (const contour_point_vector_t& contour_points,
                 bool is_composite,
                 double tolerance = 0.5 + 1e-10)
  {
    unsigned count = contour_points.length;
    if (deltas_x.length != count ||
        deltas_y.length != count)
      return false;

    hb_vector_t<bool> opt_indices;
    hb_vector_t<int> rounded_x_deltas, rounded_y_deltas;

    if (unlikely (!rounded_x_deltas.alloc (count) ||
                  !rounded_y_deltas.alloc (count)))
      return false;

    for (unsigned i = 0; i < count; i++)
    {
      int rounded_x_delta = (int) roundf (deltas_x.arrayZ[i]);
      int rounded_y_delta = (int) roundf (deltas_y.arrayZ[i]);
      rounded_x_deltas.push (rounded_x_delta);
      rounded_y_deltas.push (rounded_y_delta);
    }

    if (!iup_delta_optimize (contour_points, rounded_x_deltas, rounded_y_deltas, opt_indices, tolerance))
      return false;

    unsigned ref_count = 0;
    for (bool ref_flag : opt_indices)
      ref_count += ref_flag;

    if (ref_count == count) return true;

    hb_vector_t<double> opt_deltas_x, opt_deltas_y;
    bool is_comp_glyph_wo_deltas = (is_composite && ref_count == 0);
    if (is_comp_glyph_wo_deltas)
    {
      if (unlikely (!opt_deltas_x.resize (count) ||
                    !opt_deltas_y.resize (count)))
        return false;

      opt_indices.arrayZ[0] = true;
      for (unsigned i = 1; i < count; i++)
        opt_indices.arrayZ[i] = false;
    }

    hb_vector_t<unsigned char> opt_point_data;
    if (!compile_point_set (opt_indices, opt_point_data))
      return false;
    hb_vector_t<unsigned char> opt_deltas_data;
    if (!compile_deltas (opt_indices,
                         is_comp_glyph_wo_deltas ? opt_deltas_x : deltas_x,
                         is_comp_glyph_wo_deltas ? opt_deltas_y : deltas_y,
                         opt_deltas_data))
      return false;

    hb_vector_t<unsigned char> point_data;
    if (!compile_point_set (indices, point_data))
      return false;
    hb_vector_t<unsigned char> deltas_data;
    if (!compile_deltas (indices, deltas_x, deltas_y, deltas_data))
      return false;

    /* only keep the optimized version if it is actually smaller */
    if (opt_point_data.length + opt_deltas_data.length < point_data.length + deltas_data.length)
    {
      indices.fini ();
      indices = std::move (opt_indices);

      if (is_comp_glyph_wo_deltas)
      {
        deltas_x.fini ();
        deltas_x = std::move (opt_deltas_x);

        deltas_y.fini ();
        deltas_y = std::move (opt_deltas_y);
      }
    }
    return !indices.in_error () && !deltas_x.in_error () && !deltas_y.in_error ();
  }

  static bool compile_point_set (const hb_vector_t<bool> &point_indices,
                                 hb_vector_t<unsigned char>& compiled_points /* OUT */)
  {
    unsigned num_points = 0;
    for (bool i : point_indices)
      if (i) num_points++;

    /* when iup optimization is enabled, the number of referenced points could be 0 */
    if (!num_points) return true;

    unsigned indices_length = point_indices.length;
    /* If the point set consists of all points in the glyph, it's encoded with a
     * single zero byte */
    if (num_points == indices_length)
      return compiled_points.resize (1);

    /* allocate enough memory: 2 bytes for the count + 3 bytes for each point */
    unsigned num_bytes = 2 + 3 * num_points;
    if (unlikely (!compiled_points.resize (num_bytes, false)))
      return false;

    unsigned pos = 0;
    /* binary data starts with the total number of referenced points */
    if (num_points < 0x80)
      compiled_points.arrayZ[pos++] = num_points;
    else
    {
      compiled_points.arrayZ[pos++] = ((num_points >> 8) | 0x80);
      compiled_points.arrayZ[pos++] = num_points & 0xFF;
    }

    const unsigned max_run_length = 0x7F;
    unsigned i = 0;
    unsigned last_value = 0;
    unsigned num_encoded = 0;
    while (i < indices_length && num_encoded < num_points)
    {
      unsigned run_length = 0;
      unsigned header_pos = pos;
      compiled_points.arrayZ[pos++] = 0;

      bool use_byte_encoding = false;
      bool new_run = true;
      while (i < indices_length && num_encoded < num_points &&
             run_length <= max_run_length)
      {
        // find the next referenced point index
        while (i < indices_length && !point_indices[i])
          i++;

        if (i >= indices_length) break;

        unsigned cur_value = i;
        unsigned delta = cur_value - last_value;

        if (new_run)
        {
          use_byte_encoding = (delta <= 0xFF);
          new_run = false;
        }

        if (use_byte_encoding && delta > 0xFF)
          break;

        if (use_byte_encoding)
          compiled_points.arrayZ[pos++] = delta;
        else
        {
          compiled_points.arrayZ[pos++] = delta >> 8;
          compiled_points.arrayZ[pos++] = delta & 0xFF;
        }
        i++;
        last_value = cur_value;
        run_length++;
        num_encoded++;
      }

      if (use_byte_encoding)
        compiled_points.arrayZ[header_pos] = run_length - 1;
      else
        compiled_points.arrayZ[header_pos] = (run_length - 1) | 0x80;
    }
    return compiled_points.resize (pos, false);
  }
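
  /* E.g. referencing points {2, 4, 5} out of 10 compiles to 0x03 (point
   * count), 0x02 (byte-encoded run of 3 values) and the deltas 0x02, 0x02,
   * 0x01, since each point number is stored as a delta from the previous one. */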

  static double infer_delta (double target_val, double prev_val, double next_val, double prev_delta, double next_delta)
  {
    if (prev_val == next_val)
      return (prev_delta == next_delta) ? prev_delta : 0.0;
    else if (target_val <= hb_min (prev_val, next_val))
      return (prev_val < next_val) ? prev_delta : next_delta;
    else if (target_val >= hb_max (prev_val, next_val))
      return (prev_val > next_val) ? prev_delta : next_delta;

    /* linear interpolation */
    double r = (target_val - prev_val) / (next_val - prev_val);
    return prev_delta + r * (next_delta - prev_delta);
  }
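
  /* E.g. prev_val = 100, next_val = 200, target_val = 150, prev_delta = 10,
   * next_delta = 20 gives r = 0.5 and an inferred delta of 15; a target
   * outside [100, 200] snaps to the delta of the nearer reference point. */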

  static unsigned int next_index (unsigned int i, unsigned int start, unsigned int end)
  { return (i >= end) ? start : (i + 1); }
};

struct TupleVariationData
{
  bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    // We only check min_size here; TupleVariationHeader and the variation
    // data are checked while accessing them through the iterator.
    return_trace (c->check_struct (this));
  }

  unsigned get_size (unsigned axis_count) const
  {
    unsigned total_size = min_size;
    unsigned count = tupleVarCount.get_count ();
    const TupleVariationHeader *tuple_var_header = &(get_tuple_var_header ());
    for (unsigned i = 0; i < count; i++)
    {
      total_size += tuple_var_header->get_size (axis_count) + tuple_var_header->get_data_size ();
      tuple_var_header = &tuple_var_header->get_next (axis_count);
    }

    return total_size;
  }

  const TupleVariationHeader &get_tuple_var_header (void) const
  { return StructAfter<TupleVariationHeader> (data); }

  struct tuple_iterator_t;
  struct tuple_variations_t
  {
    hb_vector_t<tuple_delta_t> tuple_vars;

    private:
    /* referenced point set -> compiled point data map */
    hb_hashmap_t<const hb_vector_t<bool>*, hb_vector_t<char>> point_data_map;
    /* referenced point set -> count map, used in finding shared points */
    hb_hashmap_t<const hb_vector_t<bool>*, unsigned> point_set_count_map;

    /* empty for non-gvar tuples.
     * shared_points_bytes is a pointer to some value in the point_data_map,
     * which will be freed during map destruction. Save it for serialization,
     * so there's no need to do find_shared_points () again */
    hb_vector_t<char> *shared_points_bytes = nullptr;

    /* total compiled byte size as TupleVariationData format, initialized to its
     * min_size: 4 */
    unsigned compiled_byte_size = 4;

    /* for gvar iup delta optimization: whether this is a composite glyph */
    bool is_composite = false;

    public:
    tuple_variations_t () = default;
    tuple_variations_t (const tuple_variations_t&) = delete;
    tuple_variations_t& operator=(const tuple_variations_t&) = delete;
    tuple_variations_t (tuple_variations_t&&) = default;
    tuple_variations_t& operator=(tuple_variations_t&&) = default;
    ~tuple_variations_t () = default;

    explicit operator bool () const { return bool (tuple_vars); }
    unsigned get_var_count () const
    {
      unsigned count = 0;
      /* when iup delta optimization is enabled, compiled_deltas could be empty
       * and we should skip such tuples */
      for (auto& tuple : tuple_vars)
        if (tuple.compiled_deltas) count++;

      if (shared_points_bytes && shared_points_bytes->length)
        count |= TupleVarCount::SharedPointNumbers;
      return count;
    }

    unsigned get_compiled_byte_size () const
    { return compiled_byte_size; }

    bool create_from_tuple_var_data (tuple_iterator_t iterator,
                                     unsigned tuple_var_count,
                                     unsigned point_count,
                                     bool is_gvar,
                                     const hb_map_t *axes_old_index_tag_map,
                                     const hb_vector_t<unsigned> &shared_indices,
                                     const hb_array_t<const F2DOT14> shared_tuples,
                                     bool is_composite_glyph)
    {
      do
      {
        const HBUINT8 *p = iterator.get_serialized_data ();
        unsigned int length = iterator.current_tuple->get_data_size ();
        if (unlikely (!iterator.var_data_bytes.check_range (p, length)))
          return false;

        hb_hashmap_t<hb_tag_t, Triple> axis_tuples;
        if (!iterator.current_tuple->unpack_axis_tuples (iterator.get_axis_count (), shared_tuples, axes_old_index_tag_map, axis_tuples)
            || axis_tuples.is_empty ())
          return false;

        hb_vector_t<unsigned> private_indices;
        bool has_private_points = iterator.current_tuple->has_private_points ();
        const HBUINT8 *end = p + length;
        if (has_private_points &&
            !TupleVariationData::decompile_points (p, private_indices, end))
          return false;

        const hb_vector_t<unsigned> &indices = has_private_points ? private_indices : shared_indices;
        bool apply_to_all = (indices.length == 0);
        unsigned num_deltas = apply_to_all ? point_count : indices.length;

        hb_vector_t<int> deltas_x;

        if (unlikely (!deltas_x.resize (num_deltas, false) ||
                      !TupleVariationData::decompile_deltas (p, deltas_x, end)))
          return false;

        hb_vector_t<int> deltas_y;
        if (is_gvar)
        {
          if (unlikely (!deltas_y.resize (num_deltas, false) ||
                        !TupleVariationData::decompile_deltas (p, deltas_y, end)))
            return false;
        }

        tuple_delta_t var;
        var.axis_tuples = std::move (axis_tuples);
        if (unlikely (!var.indices.resize (point_count) ||
                      !var.deltas_x.resize (point_count, false)))
          return false;

        if (is_gvar && unlikely (!var.deltas_y.resize (point_count, false)))
          return false;

        for (unsigned i = 0; i < num_deltas; i++)
        {
          unsigned idx = apply_to_all ? i : indices[i];
          if (idx >= point_count) continue;
          var.indices[idx] = true;
          var.deltas_x[idx] = deltas_x[i];
          if (is_gvar)
            var.deltas_y[idx] = deltas_y[i];
        }
        tuple_vars.push (std::move (var));
      } while (iterator.move_to_next ());

      is_composite = is_composite_glyph;
      return true;
    }

    bool create_from_item_var_data (const VarData &var_data,
                                    const hb_vector_t<hb_hashmap_t<hb_tag_t, Triple>>& regions,
                                    const hb_map_t& axes_old_index_tag_map,
                                    unsigned& item_count,
                                    const hb_inc_bimap_t* inner_map = nullptr)
    {
      /* NULL offset; to keep the original varidx valid, just return */
      if (&var_data == &Null (VarData))
        return true;

      unsigned num_regions = var_data.get_region_index_count ();
      if (!tuple_vars.alloc (num_regions)) return false;

      item_count = inner_map ? inner_map->get_population () : var_data.get_item_count ();
      if (!item_count) return true;
      unsigned row_size = var_data.get_row_size ();
      const HBUINT8 *delta_bytes = var_data.get_delta_bytes ();

      for (unsigned r = 0; r < num_regions; r++)
      {
        /* In VarData, deltas are organized in rows; convert them into
         * column (region) based tuples. Resize deltas_x first. */
        tuple_delta_t tuple;
        if (!tuple.deltas_x.resize (item_count, false) ||
            !tuple.indices.resize (item_count, false))
          return false;

        for (unsigned i = 0; i < item_count; i++)
        {
          tuple.indices.arrayZ[i] = true;
          tuple.deltas_x.arrayZ[i] = var_data.get_item_delta_fast (inner_map ? inner_map->backward (i) : i,
                                                                   r, delta_bytes, row_size);
        }

        unsigned region_index = var_data.get_region_index (r);
        if (region_index >= regions.length) return false;
        tuple.axis_tuples = regions.arrayZ[region_index];

        tuple_vars.push (std::move (tuple));
      }
      return !tuple_vars.in_error ();
    }

    private:
    static int _cmp_axis_tag (const void *pa, const void *pb)
    {
      const hb_tag_t *a = (const hb_tag_t*) pa;
      const hb_tag_t *b = (const hb_tag_t*) pb;
      return (int)(*a) - (int)(*b);
    }

    bool change_tuple_variations_axis_limits (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location,
                                              const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances)
    {
      /* sort the axis tags so the result is deterministic */
      hb_vector_t<hb_tag_t> axis_tags;
      if (!axis_tags.alloc (normalized_axes_location.get_population ()))
        return false;
      for (auto t : normalized_axes_location.keys ())
        axis_tags.push (t);

      axis_tags.qsort (_cmp_axis_tag);
      for (auto axis_tag : axis_tags)
      {
        Triple *axis_limit;
        if (!normalized_axes_location.has (axis_tag, &axis_limit))
          return false;
        TripleDistances axis_triple_distances{1.0, 1.0};
        if (axes_triple_distances.has (axis_tag))
          axis_triple_distances = axes_triple_distances.get (axis_tag);

        hb_vector_t<tuple_delta_t> new_vars;
        for (const tuple_delta_t& var : tuple_vars)
        {
          hb_vector_t<tuple_delta_t> out = var.change_tuple_var_axis_limit (axis_tag, *axis_limit, axis_triple_distances);
          if (!out) continue;

          unsigned new_len = new_vars.length + out.length;

          if (unlikely (!new_vars.alloc (new_len, false)))
            return false;

          for (unsigned i = 0; i < out.length; i++)
            new_vars.push (std::move (out[i]));
        }
        tuple_vars.fini ();
        tuple_vars = std::move (new_vars);
      }
      return true;
    }

    /* merge tuple variations with overlapping tents; if iup delta optimization
     * is enabled, add default deltas to contour_points */
    bool merge_tuple_variations (contour_point_vector_t* contour_points = nullptr)
    {
      hb_vector_t<tuple_delta_t> new_vars;
      hb_hashmap_t<const hb_hashmap_t<hb_tag_t, Triple>*, unsigned> m;
      unsigned i = 0;
      for (const tuple_delta_t& var : tuple_vars)
      {
        /* if all axes are pinned, drop the tuple variation */
        if (var.axis_tuples.is_empty ())
        {
          /* if iup_delta_optimize is enabled, add deltas to contour coords */
          if (contour_points && !contour_points->add_deltas (var.deltas_x,
                                                             var.deltas_y,
                                                             var.indices))
            return false;
          continue;
        }

        unsigned *idx;
        if (m.has (&(var.axis_tuples), &idx))
        {
          new_vars[*idx] += var;
        }
        else
        {
          new_vars.push (var);
          if (!m.set (&(var.axis_tuples), i))
            return false;
          i++;
        }
      }
      tuple_vars.fini ();
      tuple_vars = std::move (new_vars);
      return true;
    }

    /* Compile each point set and store the byte data in a
     * point_set -> hb_bytes_t hashmap; also update the point_set -> count map,
     * which will be used in finding the shared point set. */
    bool compile_all_point_sets ()
    {
      for (const auto& tuple : tuple_vars)
      {
        const hb_vector_t<bool>* points_set = &(tuple.indices);
        if (point_data_map.has (points_set))
        {
          unsigned *count;
          if (unlikely (!point_set_count_map.has (points_set, &count) ||
                        !point_set_count_map.set (points_set, (*count) + 1)))
            return false;
          continue;
        }

        hb_vector_t<unsigned char> compiled_point_data;
        if (!tuple_delta_t::compile_point_set (*points_set, compiled_point_data))
          return false;

        if (!point_data_map.set (points_set, std::move (compiled_point_data)) ||
            !point_set_count_map.set (points_set, 1))
          return false;
      }
      return true;
    }

    /* find the shared point set that saves the most bytes */
    void find_shared_points ()
    {
      unsigned max_saved_bytes = 0;

      for (const auto& _ : point_data_map.iter_ref ())
      {
        const hb_vector_t<bool>* points_set = _.first;
        unsigned data_length = _.second.length;
        if (!data_length) continue;
        unsigned *count;
        if (unlikely (!point_set_count_map.has (points_set, &count) ||
                      *count <= 1))
        {
          shared_points_bytes = nullptr;
          return;
        }

        unsigned saved_bytes = data_length * ((*count) - 1);
        if (saved_bytes > max_saved_bytes)
        {
          max_saved_bytes = saved_bytes;
          shared_points_bytes = &(_.second);
        }
      }
    }

    bool calc_inferred_deltas (const contour_point_vector_t& contour_points)
    {
      for (tuple_delta_t& var : tuple_vars)
        if (!var.calc_inferred_deltas (contour_points))
          return false;

      return true;
    }

    bool iup_optimize (const contour_point_vector_t& contour_points)
    {
      for (tuple_delta_t& var : tuple_vars)
      {
        if (!var.optimize (contour_points, is_composite))
          return false;
      }
      return true;
    }

    public:
    bool instantiate (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location,
                      const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances,
                      contour_point_vector_t* contour_points = nullptr,
                      bool optimize = false)
    {
      if (!tuple_vars) return true;
      if (!change_tuple_variations_axis_limits (normalized_axes_location, axes_triple_distances))
        return false;
      /* compute inferred deltas only for gvar */
      if (contour_points)
        if (!calc_inferred_deltas (*contour_points))
          return false;

      /* if iup delta optimization is on, contour_points can't be null */
      if (optimize && !contour_points)
        return false;

      if (!merge_tuple_variations (optimize ? contour_points : nullptr))
        return false;

      if (optimize && !iup_optimize (*contour_points)) return false;
      return !tuple_vars.in_error ();
    }

    bool compile_bytes (const hb_map_t& axes_index_map,
                        const hb_map_t& axes_old_index_tag_map,
                        bool use_shared_points,
                        const hb_hashmap_t<const hb_vector_t<char>*, unsigned>* shared_tuples_idx_map = nullptr)
    {
      // compile the point sets and store the data in a hashmap
      if (!compile_all_point_sets ())
        return false;

      if (use_shared_points)
      {
        find_shared_points ();
        if (shared_points_bytes)
          compiled_byte_size += shared_points_bytes->length;
      }
      // compile the deltas and the tuple variation header for each tuple variation
      for (auto& tuple : tuple_vars)
      {
        const hb_vector_t<bool>* points_set = &(tuple.indices);
        hb_vector_t<char> *points_data;
        if (unlikely (!point_data_map.has (points_set, &points_data)))
          return false;

        /* when iup optimization is enabled, the number of referenced points
         * could be 0 and thus the compiled point bytes are empty; we should
         * skip compiling this tuple */
        if (!points_data->length)
          continue;
        if (!tuple.compile_deltas ())
          return false;

        unsigned points_data_length = (points_data != shared_points_bytes) ? points_data->length : 0;
        if (!tuple.compile_tuple_var_header (axes_index_map, points_data_length, axes_old_index_tag_map,
                                             shared_tuples_idx_map))
          return false;
        compiled_byte_size += tuple.compiled_tuple_header.length + points_data_length + tuple.compiled_deltas.length;
      }
      return true;
    }

    bool serialize_var_headers (hb_serialize_context_t *c, unsigned& total_header_len) const
    {
      TRACE_SERIALIZE (this);
      for (const auto& tuple : tuple_vars)
      {
        tuple.compiled_tuple_header.as_array ().copy (c);
        if (c->in_error ()) return_trace (false);
        total_header_len += tuple.compiled_tuple_header.length;
      }
      return_trace (true);
    }

    bool serialize_var_data (hb_serialize_context_t *c, bool is_gvar) const
    {
      TRACE_SERIALIZE (this);
      if (is_gvar && shared_points_bytes)
      {
        hb_bytes_t s (shared_points_bytes->arrayZ, shared_points_bytes->length);
        s.copy (c);
      }

      for (const auto& tuple : tuple_vars)
      {
        const hb_vector_t<bool>* points_set = &(tuple.indices);
        hb_vector_t<char> *point_data;
        if (!point_data_map.has (points_set, &point_data))
          return_trace (false);

        if (!is_gvar || point_data != shared_points_bytes)
        {
          hb_bytes_t s (point_data->arrayZ, point_data->length);
          s.copy (c);
        }

        tuple.compiled_deltas.as_array ().copy (c);
        if (c->in_error ()) return_trace (false);
      }

      /* padding for gvar */
      if (is_gvar && (compiled_byte_size % 2))
      {
        HBUINT8 pad;
        pad = 0;
        if (!c->embed (pad)) return_trace (false);
      }
      return_trace (true);
    }
  };

  struct tuple_iterator_t
  {
    unsigned get_axis_count () const { return axis_count; }

    void init (hb_bytes_t var_data_bytes_, unsigned int axis_count_, const void *table_base_)
    {
      var_data_bytes = var_data_bytes_;
      var_data = var_data_bytes_.as<TupleVariationData> ();
      index = 0;
      axis_count = axis_count_;
      current_tuple = &var_data->get_tuple_var_header ();
      data_offset = 0;
      table_base = table_base_;
    }

    bool get_shared_indices (hb_vector_t<unsigned int> &shared_indices /* OUT */)
    {
      if (var_data->has_shared_point_numbers ())
      {
        const HBUINT8 *base = &(table_base+var_data->data);
        const HBUINT8 *p = base;
        if (!decompile_points (p, shared_indices, (const HBUINT8 *) (var_data_bytes.arrayZ + var_data_bytes.length))) return false;
        data_offset = p - base;
      }
      return true;
    }

    bool is_valid () const
    {
      return (index < var_data->tupleVarCount.get_count ()) &&
             var_data_bytes.check_range (current_tuple, TupleVariationHeader::min_size) &&
             var_data_bytes.check_range (current_tuple, hb_max (current_tuple->get_data_size (),
                                                                current_tuple->get_size (axis_count)));
    }

    bool move_to_next ()
    {
      data_offset += current_tuple->get_data_size ();
      current_tuple = &current_tuple->get_next (axis_count);
      index++;
      return is_valid ();
    }

    const HBUINT8 *get_serialized_data () const
    { return &(table_base+var_data->data) + data_offset; }

    private:
    const TupleVariationData *var_data;
    unsigned int index;
    unsigned int axis_count;
    unsigned int data_offset;
    const void *table_base;

    public:
    hb_bytes_t var_data_bytes;
    const TupleVariationHeader *current_tuple;
  };
1365 
get_tuple_iteratorOT::TupleVariationData1366   static bool get_tuple_iterator (hb_bytes_t var_data_bytes, unsigned axis_count,
1367                                   const void *table_base,
1368                                   hb_vector_t<unsigned int> &shared_indices /* OUT */,
1369                                   tuple_iterator_t *iterator /* OUT */)
1370   {
1371     iterator->init (var_data_bytes, axis_count, table_base);
1372     if (!iterator->get_shared_indices (shared_indices))
1373       return false;
1374     return iterator->is_valid ();
1375   }
1376 
has_shared_point_numbersOT::TupleVariationData1377   bool has_shared_point_numbers () const { return tupleVarCount.has_shared_point_numbers (); }
1378 
decompile_pointsOT::TupleVariationData1379   static bool decompile_points (const HBUINT8 *&p /* IN/OUT */,
1380 				hb_vector_t<unsigned int> &points /* OUT */,
1381 				const HBUINT8 *end)
1382   {
1383     enum packed_point_flag_t
1384     {
1385       POINTS_ARE_WORDS     = 0x80,
1386       POINT_RUN_COUNT_MASK = 0x7F
1387     };
1388 
1389     if (unlikely (p + 1 > end)) return false;
1390 
1391     unsigned count = *p++;
1392     if (count & POINTS_ARE_WORDS)
1393     {
1394       if (unlikely (p + 1 > end)) return false;
1395       count = ((count & POINT_RUN_COUNT_MASK) << 8) | *p++;
1396     }
1397     if (unlikely (!points.resize (count, false))) return false;
1398 
1399     unsigned n = 0;
1400     unsigned i = 0;
1401     while (i < count)
1402     {
1403       if (unlikely (p + 1 > end)) return false;
1404       unsigned control = *p++;
1405       unsigned run_count = (control & POINT_RUN_COUNT_MASK) + 1;
1406       unsigned stop = i + run_count;
1407       if (unlikely (stop > count)) return false;
1408       if (control & POINTS_ARE_WORDS)
1409       {
1410         if (unlikely (p + run_count * HBUINT16::static_size > end)) return false;
1411         for (; i < stop; i++)
1412         {
1413           n += *(const HBUINT16 *)p;
1414           points.arrayZ[i] = n;
1415           p += HBUINT16::static_size;
1416         }
1417       }
1418       else
1419       {
1420         if (unlikely (p + run_count > end)) return false;
1421         for (; i < stop; i++)
1422         {
1423           n += *p++;
1424           points.arrayZ[i] = n;
1425         }
1426       }
1427     }
1428     return true;
1429   }
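  /* Worked example (illustrative): the byte sequence
   *   05 02 02 03 01 01 04 05
   * decodes as: count = 5; control byte 0x02 starts a run of 3 byte-sized
   * deltas (2, 3, 1) accumulating to points 2, 5, 6; control byte 0x01 then
   * starts a run of 2 byte-sized deltas (4, 5) accumulating to points 10, 15.
   * Result: points = [2, 5, 6, 10, 15]. */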
1430 
1431   template <typename T>
1432   static bool decompile_deltas (const HBUINT8 *&p /* IN/OUT */,
1433 				hb_vector_t<T> &deltas /* IN/OUT */,
1434 				const HBUINT8 *end,
1435 				bool consume_all = false)
1436   {
1437     return TupleValues::decompile (p, deltas, end, consume_all);
1438   }
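  /* TupleValues::decompile (defined elsewhere) is expected to follow the
   * spec's packed-delta format: each run starts with a control byte whose
   * 0x80 bit (DELTAS_ARE_ZERO) and 0x40 bit (DELTAS_ARE_WORDS) are flags,
   * with the low 6 bits holding run length - 1.  Illustrative example:
   *   81 02 02 03 FF
   * is a run of 2 implicit zero deltas followed by 3 byte-sized deltas,
   * i.e. [0, 0, 2, 3, -1]. */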
1439 
1440   bool has_data () const { return tupleVarCount; }
1441 
1442   bool decompile_tuple_variations (unsigned point_count,
1443                                    bool is_gvar,
1444                                    tuple_iterator_t iterator,
1445                                    const hb_map_t *axes_old_index_tag_map,
1446                                    const hb_vector_t<unsigned> &shared_indices,
1447                                    const hb_array_t<const F2DOT14> shared_tuples,
1448                                    tuple_variations_t& tuple_variations, /* OUT */
1449                                    bool is_composite_glyph = false) const
1450   {
1451     return tuple_variations.create_from_tuple_var_data (iterator, tupleVarCount,
1452                                                         point_count, is_gvar,
1453                                                         axes_old_index_tag_map,
1454                                                         shared_indices,
1455                                                         shared_tuples,
1456                                                         is_composite_glyph);
1457   }
1458 
1459   bool serialize (hb_serialize_context_t *c,
1460                   bool is_gvar,
1461                   const tuple_variations_t& tuple_variations) const
1462   {
1463     TRACE_SERIALIZE (this);
1464     /* empty tuple variations, just return and skip serialization. */
1465     if (!tuple_variations) return_trace (true);
1466 
1467     auto *out = c->start_embed (this);
1468     if (unlikely (!c->extend_min (out))) return_trace (false);
1469 
1470     if (!c->check_assign (out->tupleVarCount, tuple_variations.get_var_count (),
1471                           HB_SERIALIZE_ERROR_INT_OVERFLOW)) return_trace (false);
1472 
1473     unsigned total_header_len = 0;
1474 
1475     if (!tuple_variations.serialize_var_headers (c, total_header_len))
1476       return_trace (false);
1477 
1478     unsigned data_offset = min_size + total_header_len;
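    /* For cvar (non-gvar) the data offset is measured from the start of the
     * cvar table, which per the spec begins with 4 bytes of version fields
     * before this struct; the adjustment below accounts for that. */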
1479     if (!is_gvar) data_offset += 4;
1480     if (!c->check_assign (out->data, data_offset, HB_SERIALIZE_ERROR_INT_OVERFLOW)) return_trace (false);
1481 
1482     return tuple_variations.serialize_var_data (c, is_gvar);
1483   }
1484 
1485   protected:
1486   struct TupleVarCount : HBUINT16
1487   {
1488     friend struct tuple_variations_t;
1489     bool has_shared_point_numbers () const { return ((*this) & SharedPointNumbers); }
1490     unsigned int get_count () const { return (*this) & CountMask; }
1491     TupleVarCount& operator = (uint16_t i) { HBUINT16::operator= (i); return *this; }
1492     explicit operator bool () const { return get_count (); }
1493 
1494     protected:
1495     enum Flags
1496     {
1497       SharedPointNumbers= 0x8000u,
1498       CountMask         = 0x0FFFu
1499     };
1500     public:
1501     DEFINE_SIZE_STATIC (2);
1502   };
1503 
1504   TupleVarCount tupleVarCount;  /* A packed field. The high 4 bits are flags, and the
1505                                  * low 12 bits are the number of tuple variation tables
1506                                  * for this glyph. The number of tuple variation tables
1507                                  * can be any number between 1 and 4095. */
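  /* Illustrative: a raw tupleVarCount value of 0x8003 has SharedPointNumbers
   * set and a count of 3, so has_shared_point_numbers () returns true and
   * get_count () returns 3. */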
1508   Offset16To<HBUINT8>
1509                 data;           /* Offset from the start of the base table
1510                                  * to the serialized data. */
1511   /* TupleVariationHeader tupleVariationHeaders[] *//* Array of tuple variation headers. */
1512   public:
1513   DEFINE_SIZE_MIN (4);
1514 };
1515 
1516 using tuple_variations_t = TupleVariationData::tuple_variations_t;
1517 struct item_variations_t
1518 {
1519   using region_t = const hb_hashmap_t<hb_tag_t, Triple>*;
1520   private:
1521   /* each subtable is decompiled into a tuple_variations_t, in which all tuples
1522    * have the same number of deltas (rows) */
1523   hb_vector_t<tuple_variations_t> vars;
1524 
1525   /* number of retained rows for each subtable; there are 2 cases in which var_data is empty:
1526    * 1. the retained item_count is zero
1527    * 2. the region list is empty while item_count is non-zero.
1528    * when converting to tuples, both are dropped because the tuple is empty;
1529    * however, case 2 must be retained as all-zero rows to keep the original
1530    * varidx valid, so we remember the number of rows for each subtable */
1531   hb_vector_t<unsigned> var_data_num_rows;
1532 
1533   /* original region list, decompiled from item varstore, used when rebuilding
1534    * region list after instantiation */
1535   hb_vector_t<hb_hashmap_t<hb_tag_t, Triple>> orig_region_list;
1536 
1537   /* region list: vector of regions, maintaining the original order for the
1538    * regions that existed before instantiate () and appending new regions at
1539    * the end.  Regions are already stored in each tuple, so only pointers are
1540    * saved here.  When converting back to an item varstore, unused regions are pruned */
1541   hb_vector_t<region_t> region_list;
1542 
1543   /* region -> idx map after instantiation and pruning unused regions */
1544   hb_hashmap_t<region_t, unsigned> region_map;
1545 
1546   /* all delta rows after instantiation */
1547   hb_vector_t<hb_vector_t<int>> delta_rows;
1548   /* final optimized vector of encoding objects used to assemble the varstore */
1549   hb_vector_t<delta_row_encoding_t> encodings;
1550 
1551   /* old varidx -> new varidx map */
1552   hb_map_t varidx_map;
1553 
1554   /* has long words */
1555   bool has_long = false;
1556 
1557   public:
1558   bool has_long_word () const
1559   { return has_long; }
1560 
1561   const hb_vector_t<region_t>& get_region_list () const
1562   { return region_list; }
1563 
1564   const hb_vector_t<delta_row_encoding_t>& get_vardata_encodings () const
1565   { return encodings; }
1566 
1567   const hb_map_t& get_varidx_map () const
1568   { return varidx_map; }
1569 
1570   bool instantiate (const ItemVariationStore& varStore,
1571                     const hb_subset_plan_t *plan,
1572                     bool optimize=true,
1573                     bool use_no_variation_idx=true,
1574                     const hb_array_t <const hb_inc_bimap_t> inner_maps = hb_array_t<const hb_inc_bimap_t> ())
1575   {
1576     if (!create_from_item_varstore (varStore, plan->axes_old_index_tag_map, inner_maps))
1577       return false;
1578     if (!instantiate_tuple_vars (plan->axes_location, plan->axes_triple_distances))
1579       return false;
1580     return as_item_varstore (optimize, use_no_variation_idx);
1581   }
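  /* Illustrative call sequence (a sketch, not part of the upstream header;
   * `var_store', `plan' and `inner_maps' come from the caller):
   *
   *   item_variations_t item_vars;
   *   if (item_vars.instantiate (var_store, plan, true, true, inner_maps))
   *   {
   *     const hb_map_t &varidx_map = item_vars.get_varidx_map ();
   *     // remap VariationIndex values through varidx_map, then rebuild the
   *     // varstore from get_vardata_encodings () / get_region_list ()
   *   }
   */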
1582 
1583   /* the APIs below are kept public only for unit testing: test-item-varstore */
1584   bool create_from_item_varstore (const ItemVariationStore& varStore,
1585                                   const hb_map_t& axes_old_index_tag_map,
1586                                   const hb_array_t <const hb_inc_bimap_t> inner_maps = hb_array_t<const hb_inc_bimap_t> ())
1587   {
1588     const VarRegionList& regionList = varStore.get_region_list ();
1589     if (!regionList.get_var_regions (axes_old_index_tag_map, orig_region_list))
1590       return false;
1591 
1592     unsigned num_var_data = varStore.get_sub_table_count ();
1593     if (inner_maps && inner_maps.length != num_var_data) return false;
1594     if (!vars.alloc (num_var_data) ||
1595         !var_data_num_rows.alloc (num_var_data)) return false;
1596 
1597     for (unsigned i = 0; i < num_var_data; i++)
1598     {
1599       if (inner_maps && !inner_maps.arrayZ[i].get_population ())
1600           continue;
1601       tuple_variations_t var_data_tuples;
1602       unsigned item_count = 0;
1603       if (!var_data_tuples.create_from_item_var_data (varStore.get_sub_table (i),
1604                                                       orig_region_list,
1605                                                       axes_old_index_tag_map,
1606                                                       item_count,
1607                                                       inner_maps ? &(inner_maps.arrayZ[i]) : nullptr))
1608         return false;
1609 
1610       var_data_num_rows.push (item_count);
1611       vars.push (std::move (var_data_tuples));
1612     }
1613     return !vars.in_error () && !var_data_num_rows.in_error () && vars.length == var_data_num_rows.length;
1614   }
1615 
1616   bool instantiate_tuple_vars (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location,
1617                                const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances)
1618   {
1619     for (tuple_variations_t& tuple_vars : vars)
1620       if (!tuple_vars.instantiate (normalized_axes_location, axes_triple_distances))
1621         return false;
1622 
1623     if (!build_region_list ()) return false;
1624     return true;
1625   }
1626 
1627   bool build_region_list ()
1628   {
1629     /* scan all tuples, collect all unique regions, and prune unused ones */
1630     hb_hashmap_t<region_t, unsigned> all_regions;
1631     hb_hashmap_t<region_t, unsigned> used_regions;
1632 
1633     /* record newly seen regions in a vector so the result is deterministic */
1634     hb_vector_t<region_t> all_unique_regions;
1635     for (const tuple_variations_t& sub_table : vars)
1636     {
1637       for (const tuple_delta_t& tuple : sub_table.tuple_vars)
1638       {
1639         region_t r = &(tuple.axis_tuples);
1640         if (!used_regions.has (r))
1641         {
1642           bool all_zeros = true;
1643           for (float d : tuple.deltas_x)
1644           {
1645             int delta = (int) roundf (d);
1646             if (delta != 0)
1647             {
1648               all_zeros = false;
1649               break;
1650             }
1651           }
1652           if (!all_zeros)
1653           {
1654             if (!used_regions.set (r, 1))
1655               return false;
1656           }
1657         }
1658         if (all_regions.has (r))
1659           continue;
1660         if (!all_regions.set (r, 1))
1661           return false;
1662         all_unique_regions.push (r);
1663       }
1664     }
1665 
1666     /* regions are empty means no variation data, return true */
1667     if (!all_regions || !all_unique_regions) return true;
1668 
1669     if (!region_list.alloc (all_regions.get_population ()))
1670       return false;
1671 
1672     unsigned idx = 0;
1673     /* append the original regions that pre-existed */
1674     for (const auto& r : orig_region_list)
1675     {
1676       if (!all_regions.has (&r) || !used_regions.has (&r))
1677         continue;
1678 
1679       region_list.push (&r);
1680       if (!region_map.set (&r, idx))
1681         return false;
1682       all_regions.del (&r);
1683       idx++;
1684     }
1685 
1686     /* append the new regions at the end */
1687     for (const auto& r: all_unique_regions)
1688     {
1689       if (!all_regions.has (r) || !used_regions.has (r))
1690         continue;
1691       region_list.push (r);
1692       if (!region_map.set (r, idx))
1693         return false;
1694       all_regions.del (r);
1695       idx++;
1696     }
1697     return (!region_list.in_error ()) && (!region_map.in_error ());
1698   }
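  /* For instance (illustrative): tuples whose axis_tuples maps compare equal
   * dedupe to a single region here (hb_hashmap_t hashes pointer keys by the
   * pointed-to contents), while a region whose deltas all round to 0 never
   * enters used_regions, so it is pruned from region_list even though it may
   * appear in orig_region_list. */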
1699 
1700   /* main algorithm ported from the fonttools VarStore_optimize() method;
1701    * the varstore is optimized by default */
1702 
1703   struct combined_gain_idx_tuple_t
1704   {
1705     int gain;
1706     unsigned idx_1;
1707     unsigned idx_2;
1708 
1709     combined_gain_idx_tuple_t () = default;
1710     combined_gain_idx_tuple_t (int gain_, unsigned i, unsigned j)
1711         :gain (gain_), idx_1 (i), idx_2 (j) {}
1712 
1713     bool operator < (const combined_gain_idx_tuple_t& o)
1714     {
1715       if (gain != o.gain)
1716         return gain < o.gain;
1717 
1718       if (idx_1 != o.idx_1)
1719         return idx_1 < o.idx_1;
1720 
1721       return idx_2 < o.idx_2;
1722     }
1723 
1724     bool operator <= (const combined_gain_idx_tuple_t& o)
1725     {
1726       if (*this < o) return true;
1727       return gain == o.gain && idx_1 == o.idx_1 && idx_2 == o.idx_2;
1728     }
1729   };
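  /* Note (illustrative): as_item_varstore () below inserts these tuples with
   * a negated gain, so pop_minimum () on the priority queue always yields the
   * pair with the largest merging gain, with (idx_1, idx_2) as deterministic
   * tie-breakers.  E.g. combined_gain_idx_tuple_t (-10, 0, 3) sorts before
   * combined_gain_idx_tuple_t (-7, 1, 2), so the gain-10 pair is popped first. */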
1730 
1731   bool as_item_varstore (bool optimize=true, bool use_no_variation_idx=true)
1732   {
1733     /* return true if no variation data */
1734     if (!region_list) return true;
1735     unsigned num_cols = region_list.length;
1736     /* pre-allocate a 2D vector for all subtables' VarData rows */
1737     unsigned total_rows = 0;
1738     for (unsigned major = 0; major < var_data_num_rows.length; major++)
1739       total_rows += var_data_num_rows[major];
1740 
1741     if (!delta_rows.resize (total_rows)) return false;
1742     /* init all rows to [0]*num_cols */
1743     for (unsigned i = 0; i < total_rows; i++)
1744       if (!(delta_rows[i].resize (num_cols))) return false;
1745 
1746     /* old VarIdxes -> full encoding_row mapping */
1747     hb_hashmap_t<unsigned, const hb_vector_t<int>*> front_mapping;
1748     unsigned start_row = 0;
1749     hb_vector_t<delta_row_encoding_t> encoding_objs;
1750     hb_hashmap_t<hb_vector_t<uint8_t>, unsigned> chars_idx_map;
1751 
1752     /* delta_rows map, used for filtering out duplicate rows */
1753     hb_hashmap_t<const hb_vector_t<int>*, unsigned> delta_rows_map;
1754     for (unsigned major = 0; major < vars.length; major++)
1755     {
1756       /* deltas are stored in tuples (column-based); convert them back into
1757        * item (row-based) deltas */
1758       const tuple_variations_t& tuples = vars[major];
1759       unsigned num_rows = var_data_num_rows[major];
1760       for (const tuple_delta_t& tuple: tuples.tuple_vars)
1761       {
1762         if (tuple.deltas_x.length != num_rows)
1763           return false;
1764 
1765         /* skip unused regions */
1766         unsigned *col_idx;
1767         if (!region_map.has (&(tuple.axis_tuples), &col_idx))
1768           continue;
1769 
1770         for (unsigned i = 0; i < num_rows; i++)
1771         {
1772           int rounded_delta = roundf (tuple.deltas_x[i]);
1773           delta_rows[start_row + i][*col_idx] += rounded_delta;
1774           if ((!has_long) && (rounded_delta < -65536 || rounded_delta > 65535))
1775             has_long = true;
1776         }
1777       }
1778 
1779       if (!optimize)
1780       {
1781         /* assemble a delta_row_encoding_t for this subtable; optimization is
1782          * skipped, so chars is left uninitialized: only the delta rows are
1783          * needed for serialization */
1783         delta_row_encoding_t obj;
1784         for (unsigned r = start_row; r < start_row + num_rows; r++)
1785           obj.add_row (&(delta_rows.arrayZ[r]));
1786 
1787         encodings.push (std::move (obj));
1788         start_row += num_rows;
1789         continue;
1790       }
1791 
1792       for (unsigned minor = 0; minor < num_rows; minor++)
1793       {
1794         const hb_vector_t<int>& row = delta_rows[start_row + minor];
1795         if (use_no_variation_idx)
1796         {
1797           bool all_zeros = true;
1798           for (int delta : row)
1799           {
1800             if (delta != 0)
1801             {
1802               all_zeros = false;
1803               break;
1804             }
1805           }
1806           if (all_zeros)
1807             continue;
1808         }
1809 
1810         if (!front_mapping.set ((major<<16) + minor, &row))
1811           return false;
1812 
1813         hb_vector_t<uint8_t> chars = delta_row_encoding_t::get_row_chars (row);
1814         if (!chars) return false;
1815 
1816         if (delta_rows_map.has (&row))
1817           continue;
1818 
1819         delta_rows_map.set (&row, 1);
1820         unsigned *obj_idx;
1821         if (chars_idx_map.has (chars, &obj_idx))
1822         {
1823           delta_row_encoding_t& obj = encoding_objs[*obj_idx];
1824           if (!obj.add_row (&row))
1825             return false;
1826         }
1827         else
1828         {
1829           if (!chars_idx_map.set (chars, encoding_objs.length))
1830             return false;
1831           delta_row_encoding_t obj (std::move (chars), &row);
1832           encoding_objs.push (std::move (obj));
1833         }
1834       }
1835 
1836       start_row += num_rows;
1837     }
1838 
1839     /* if not optimizing, return directly; the original VariationIndex values
1840      * are maintained, so varidx_map stays empty */
1841     if (!optimize) return !encodings.in_error ();
1842 
1843     /* sort encoding_objs */
1844     encoding_objs.qsort ();
1845 
1846     /* main algorithm: repeatedly pick the pair of encodings whose merge
1847      * yields the largest gain, and combine them */
1848     hb_priority_queue_t<combined_gain_idx_tuple_t> queue;
1849     unsigned num_todos = encoding_objs.length;
1850     for (unsigned i = 0; i < num_todos; i++)
1851     {
1852       for (unsigned j = i + 1; j < num_todos; j++)
1853       {
1854         int combining_gain = encoding_objs.arrayZ[i].gain_from_merging (encoding_objs.arrayZ[j]);
1855         if (combining_gain > 0)
1856           queue.insert (combined_gain_idx_tuple_t (-combining_gain, i, j), 0);
1857       }
1858     }
1859 
1860     hb_set_t removed_todo_idxes;
1861     while (queue)
1862     {
1863       auto t = queue.pop_minimum ().first;
1864       unsigned i = t.idx_1;
1865       unsigned j = t.idx_2;
1866 
1867       if (removed_todo_idxes.has (i) || removed_todo_idxes.has (j))
1868         continue;
1869 
1870       delta_row_encoding_t& encoding = encoding_objs.arrayZ[i];
1871       delta_row_encoding_t& other_encoding = encoding_objs.arrayZ[j];
1872 
1873       removed_todo_idxes.add (i);
1874       removed_todo_idxes.add (j);
1875 
1876       hb_vector_t<uint8_t> combined_chars;
1877       if (!combined_chars.alloc (encoding.chars.length))
1878         return false;
1879 
1880       for (unsigned idx = 0; idx < encoding.chars.length; idx++)
1881       {
1882         uint8_t v = hb_max (encoding.chars.arrayZ[idx], other_encoding.chars.arrayZ[idx]);
1883         combined_chars.push (v);
1884       }
1885 
1886       delta_row_encoding_t combined_encoding_obj (std::move (combined_chars));
1887       for (const auto& row : hb_concat (encoding.items, other_encoding.items))
1888         combined_encoding_obj.add_row (row);
1889 
1890       for (unsigned idx = 0; idx < encoding_objs.length; idx++)
1891       {
1892         if (removed_todo_idxes.has (idx)) continue;
1893 
1894         const delta_row_encoding_t& obj = encoding_objs.arrayZ[idx];
1895         if (obj.chars == combined_chars)
1896         {
1897           for (const auto& row : obj.items)
1898             combined_encoding_obj.add_row (row);
1899 
1900           removed_todo_idxes.add (idx);
1901           continue;
1902         }
1903 
1904         int combined_gain = combined_encoding_obj.gain_from_merging (obj);
1905         if (combined_gain > 0)
1906           queue.insert (combined_gain_idx_tuple_t (-combined_gain, idx, encoding_objs.length), 0);
1907       }
1908 
1909       encoding_objs.push (std::move (combined_encoding_obj));
1910     }
1911 
1912     int num_final_encodings = (int) encoding_objs.length - (int) removed_todo_idxes.get_population ();
1913     if (num_final_encodings <= 0) return false;
1914 
1915     if (!encodings.alloc (num_final_encodings)) return false;
1916     for (unsigned i = 0; i < encoding_objs.length; i++)
1917     {
1918       if (removed_todo_idxes.has (i)) continue;
1919       encodings.push (std::move (encoding_objs.arrayZ[i]));
1920     }
1921 
1922     /* sort again by width to make the result deterministic */
1923     encodings.qsort (delta_row_encoding_t::cmp_width);
1924 
1925     return compile_varidx_map (front_mapping);
1926   }
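  /* Merging sketch (illustrative): two encodings whose per-column width
   * characteristics are chars = [1, 0, 2] and [0, 1, 2] combine, column by
   * column via hb_max, into [1, 1, 2]; a merge is only queued when
   * gain_from_merging () reports a positive byte saving. */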
1927 
1928   private:
1929   /* compile the old varidx -> new varidx map across all final encodings */
1930   bool compile_varidx_map (const hb_hashmap_t<unsigned, const hb_vector_t<int>*>& front_mapping)
1931   {
1932     /* full encoding_row -> new VarIdxes mapping */
1933     hb_hashmap_t<const hb_vector_t<int>*, unsigned> back_mapping;
1934 
1935     for (unsigned major = 0; major < encodings.length; major++)
1936     {
1937       delta_row_encoding_t& encoding = encodings[major];
1938       /* just sanity check, this shouldn't happen */
1939       if (encoding.is_empty ())
1940         return false;
1941 
1942       unsigned num_rows = encoding.items.length;
1943 
1944       /* sort rows, make result deterministic */
1945       encoding.items.qsort (_cmp_row);
1946 
1947       /* compile the old -> new varidx mapping */
1948       for (unsigned minor = 0; minor < num_rows; minor++)
1949       {
1950         unsigned new_varidx = (major << 16) + minor;
1951         back_mapping.set (encoding.items.arrayZ[minor], new_varidx);
1952       }
1953     }
1954 
1955     for (auto _ : front_mapping.iter ())
1956     {
1957       unsigned old_varidx = _.first;
1958       unsigned *new_varidx;
1959       if (back_mapping.has (_.second, &new_varidx))
1960         varidx_map.set (old_varidx, *new_varidx);
1961       else
1962         varidx_map.set (old_varidx, HB_OT_LAYOUT_NO_VARIATIONS_INDEX);
1963     }
1964     return !varidx_map.in_error ();
1965   }
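  /* Variation indices pack a (major, minor) pair into one value as
   * (major << 16) + minor; e.g. (illustrative) major 2 / minor 5 becomes
   * varidx 0x00020005.  Rows without a surviving encoding map to
   * HB_OT_LAYOUT_NO_VARIATIONS_INDEX instead. */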
1966 
1967   static int _cmp_row (const void *pa, const void *pb)
1968   {
1969     /* lexicographically compare the rows (const hb_vector_t<int> *) that the pointers refer to */
1970     const hb_vector_t<int>** a = (const hb_vector_t<int>**) pa;
1971     const hb_vector_t<int>** b = (const hb_vector_t<int>**) pb;
1972 
1973     for (unsigned i = 0; i < (*b)->length; i++)
1974     {
1975       int va = (*a)->arrayZ[i];
1976       int vb = (*b)->arrayZ[i];
1977       if (va != vb)
1978         return va < vb ? -1 : 1;
1979     }
1980     return 0;
1981   }
1982 };
1983 
1984 
1985 } /* namespace OT */
1986 
1987 
1988 #endif /* HB_OT_VAR_COMMON_HH */
1989