Upgrade HarfBuzz to 10.4.0

[ChangeLog][Third-Party Code] Upgraded HarfBuzz to version 10.4.0.

Fixes: QTBUG-134282
Pick-to: dev 6.9 6.8 6.5 5.15
Change-Id: Ib36df2ae6b0fa4cf667b25d460473d45e8b34350
Reviewed-by: Volker Hilsheimer <volker.hilsheimer@qt.io>
Author: Eskil Abrahamsen Blomfeldt, 2025-03-04 14:45:36 +01:00
parent 5634bc529c
commit ecc8ca605c
30 changed files with 731 additions and 251 deletions

View File

@ -1,8 +1,9 @@
[![Linux CI Status](https://github.com/harfbuzz/harfbuzz/workflows/linux-ci/badge.svg)](https://github.com/harfbuzz/harfbuzz/workflows/linux-ci/badge.svg)
[![Linux CI Status](https://github.com/harfbuzz/harfbuzz/actions/workflows/linux-ci.yml/badge.svg)](https://github.com/harfbuzz/harfbuzz/actions/workflows/linux-ci.yml)
[![macOS CI Status](https://github.com/harfbuzz/harfbuzz/actions/workflows/macos-ci.yml/badge.svg)](https://github.com/harfbuzz/harfbuzz/actions/workflows/macos-ci.yml)
[![Windows CI Status](https://github.com/harfbuzz/harfbuzz/actions/workflows/msvc-ci.yml/badge.svg)](https://github.com/harfbuzz/harfbuzz/actions/workflows/msvc-ci.yml)
[![CircleCI Build Status](https://circleci.com/gh/harfbuzz/harfbuzz/tree/main.svg?style=svg)](https://circleci.com/gh/harfbuzz/harfbuzz/tree/main)
[![OSS-Fuzz Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/harfbuzz.svg)](https://oss-fuzz-build-logs.storage.googleapis.com/index.html)
[![Coverity Scan Build Status](https://scan.coverity.com/projects/15166/badge.svg)](https://scan.coverity.com/projects/harfbuzz)
[![Codacy Badge](https://app.codacy.com/project/badge/Grade/89c872f5ce1c42af802602bfcd15d90a)](https://app.codacy.com/gh/harfbuzz/harfbuzz/dashboard?utm_source=gh&utm_medium=referral&utm_content=&utm_campaign=Badge_grade)
[![Packaging status](https://repology.org/badge/tiny-repos/harfbuzz.svg)](https://repology.org/project/harfbuzz/versions)
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/harfbuzz/harfbuzz/badge)](https://securityscorecards.dev/viewer/?uri=github.com/harfbuzz/harfbuzz)

View File

@ -7,8 +7,8 @@
"Description": "HarfBuzz is an OpenType text shaping engine.",
"Homepage": "http://harfbuzz.org",
"Version": "10.3.0",
"DownloadLocation": "https://github.com/harfbuzz/harfbuzz/releases/tag/10.3.0",
"Version": "10.4.0",
"DownloadLocation": "https://github.com/harfbuzz/harfbuzz/releases/tag/10.4.0",
"PURL": "pkg:github/harfbuzz/harfbuzz@$<VERSION>",
"CPE": "cpe:2.3:a:harfbuzz_project:harfbuzz:$<VERSION>:*:*:*:*:*:*:*",
"License": "MIT License",

View File

@ -941,10 +941,12 @@ struct CBDT
}
}
bool has_data () const { return cbdt.get_length (); }
bool has_data () const { return cbdt->version.major; }
bool paint_glyph (hb_font_t *font, hb_codepoint_t glyph, hb_paint_funcs_t *funcs, void *data) const
{
if (!has_data ()) return false;
hb_glyph_extents_t extents;
hb_glyph_extents_t pixel_extents;
hb_blob_t *blob = reference_png (font, glyph);

View File

@ -137,14 +137,14 @@ VarComponent::get_path_at (hb_font_t *font,
hb_decycler_t *decycler,
signed *edges_left,
signed depth_left,
hb_glyf_scratch_t &scratch,
VarRegionList::cache_t *cache) const
{
const unsigned char *end = total_record.arrayZ + total_record.length;
const unsigned char *record = total_record.arrayZ;
auto &VARC = *font->face->table.VARC;
auto &VARC = *font->face->table.VARC->table;
auto &varStore = &VARC+VARC.varStore;
auto instancer = MultiItemVarStoreInstancer(&varStore, nullptr, coords, cache);
#define READ_UINT32VAR(name) \
HB_STMT_START { \
@ -187,22 +187,25 @@ VarComponent::get_path_at (hb_font_t *font,
unsigned conditionIndex;
READ_UINT32VAR (conditionIndex);
const auto &condition = (&VARC+VARC.conditionList)[conditionIndex];
auto instancer = MultiItemVarStoreInstancer(&varStore, nullptr, coords, cache);
show = condition.evaluate (coords.arrayZ, coords.length, &instancer);
}
// Axis values
hb_vector_t<unsigned> axisIndices;
hb_vector_t<float> axisValues;
auto &axisIndices = scratch.axisIndices;
axisIndices.clear ();
auto &axisValues = scratch.axisValues;
axisValues.clear ();
if (flags & (unsigned) flags_t::HAVE_AXES)
{
unsigned axisIndicesIndex;
READ_UINT32VAR (axisIndicesIndex);
axisIndices = (&VARC+VARC.axisIndicesList)[axisIndicesIndex];
axisIndices.extend ((&VARC+VARC.axisIndicesList)[axisIndicesIndex]);
axisValues.resize (axisIndices.length);
const HBUINT8 *p = (const HBUINT8 *) record;
TupleValues::decompile (p, axisValues, (const HBUINT8 *) end);
record += (const unsigned char *) p - record;
record = (const unsigned char *) p;
}
// Apply variations if any
@ -319,7 +322,8 @@ VarComponent::get_path_at (hb_font_t *font,
VARC.get_path_at (font, gid,
draw_session, component_coords, total_transform,
parent_gid,
decycler, edges_left, depth_left - 1);
decycler, edges_left, depth_left - 1,
scratch);
}
#undef PROCESS_TRANSFORM_COMPONENTS
@ -337,7 +341,8 @@ VARC::get_path_at (hb_font_t *font,
hb_codepoint_t parent_glyph,
hb_decycler_t *decycler,
signed *edges_left,
signed depth_left) const
signed depth_left,
hb_glyf_scratch_t &scratch) const
{
// Don't recurse on the same glyph.
unsigned idx = glyph == parent_glyph ?
@ -354,7 +359,7 @@ VARC::get_path_at (hb_font_t *font,
hb_draw_session_t transformer_session {transformer_funcs, &context};
hb_draw_session_t &shape_draw_session = transform.is_identity () ? draw_session : transformer_session;
if (!font->face->table.glyf->get_path_at (font, glyph, shape_draw_session, coords))
if (!font->face->table.glyf->get_path_at (font, glyph, shape_draw_session, coords, scratch))
#ifndef HB_NO_CFF
if (!font->face->table.cff2->get_path_at (font, glyph, shape_draw_session, coords))
if (!font->face->table.cff1->get_path (font, glyph, shape_draw_session)) // Doesn't have variations
@ -376,9 +381,8 @@ VARC::get_path_at (hb_font_t *font,
hb_ubytes_t record = (this+glyphRecords)[idx];
VarRegionList::cache_t *cache = record.length >= 64 ? // Heuristic
(this+varStore).create_cache ()
: nullptr;
float static_cache[sizeof (void *) * 16];
VarRegionList::cache_t *cache = (this+varStore).create_cache (hb_array (static_cache));
transform.scale (font->x_multf, font->y_multf);
@ -386,9 +390,10 @@ VARC::get_path_at (hb_font_t *font,
draw_session, coords, transform,
record,
decycler, edges_left, depth_left,
scratch,
cache);
(this+varStore).destroy_cache (cache);
(this+varStore).destroy_cache (cache, hb_array (static_cache));
return true;
}

View File

@ -53,6 +53,7 @@ struct VarComponent
hb_decycler_t *decycler,
signed *edges_left,
signed depth_left,
hb_glyf_scratch_t &scratch,
VarRegionList::cache_t *cache = nullptr) const;
};
@ -68,6 +69,7 @@ struct VarCompositeGlyph
hb_decycler_t *decycler,
signed *edges_left,
signed depth_left,
hb_glyf_scratch_t &scratch,
VarRegionList::cache_t *cache = nullptr)
{
while (record)
@ -76,7 +78,7 @@ struct VarCompositeGlyph
record = comp.get_path_at (font, glyph,
draw_session, coords, transform,
record,
decycler, edges_left, depth_left, cache);
decycler, edges_left, depth_left, scratch, cache);
}
}
};
@ -98,10 +100,14 @@ struct VARC
hb_codepoint_t parent_glyph,
hb_decycler_t *decycler,
signed *edges_left,
signed depth_left) const;
signed depth_left,
hb_glyf_scratch_t &scratch) const;
bool
get_path (hb_font_t *font, hb_codepoint_t gid, hb_draw_session_t &draw_session) const
get_path (hb_font_t *font,
hb_codepoint_t gid,
hb_draw_session_t &draw_session,
hb_glyf_scratch_t &scratch) const
{
hb_decycler_t decycler;
signed edges = HB_MAX_GRAPH_EDGE_COUNT;
@ -114,7 +120,9 @@ struct VARC
HB_CODEPOINT_INVALID,
&decycler,
&edges,
HB_MAX_NESTING_LEVEL); }
HB_MAX_NESTING_LEVEL,
scratch);
}
bool sanitize (hb_sanitize_context_t *c) const
{
@ -129,6 +137,63 @@ struct VARC
glyphRecords.sanitize (c, this));
}
struct accelerator_t
{
friend struct VarComponent;
accelerator_t (hb_face_t *face)
{
table = hb_sanitize_context_t ().reference_table<VARC> (face);
}
~accelerator_t ()
{
auto *scratch = cached_scratch.get_relaxed ();
if (scratch)
{
scratch->~hb_glyf_scratch_t ();
hb_free (scratch);
}
table.destroy ();
}
bool
get_path (hb_font_t *font, hb_codepoint_t gid, hb_draw_session_t &draw_session) const
{
if (!table->has_data ()) return false;
hb_glyf_scratch_t *scratch;
// Borrow the cached scratch buffer.
{
scratch = cached_scratch.get_acquire ();
if (!scratch || unlikely (!cached_scratch.cmpexch (scratch, nullptr)))
{
scratch = (hb_glyf_scratch_t *) hb_calloc (1, sizeof (hb_glyf_scratch_t));
if (unlikely (!scratch))
return true;
}
}
bool ret = table->get_path (font, gid, draw_session, *scratch);
// Put it back.
if (!cached_scratch.cmpexch (nullptr, scratch))
{
scratch->~hb_glyf_scratch_t ();
hb_free (scratch);
}
return ret;
}
private:
hb_blob_ptr_t<VARC> table;
hb_atomic_ptr_t<hb_glyf_scratch_t> cached_scratch;
};
bool has_data () const { return version.major != 0; }
protected:
FixedVersion<> version; /* Version identifier */
Offset32To<Coverage> coverage;
@ -140,6 +205,10 @@ struct VARC
DEFINE_SIZE_STATIC (24);
};
struct VARC_accelerator_t : VARC::accelerator_t {
VARC_accelerator_t (hb_face_t *face) : VARC::accelerator_t (face) {}
};
#endif
//}

View File

@ -11,22 +11,48 @@ namespace OT {
struct coord_setter_t
{
coord_setter_t (hb_array_t<const int> coords) :
coords (coords) {}
coord_setter_t (hb_array_t<const int> coords_)
{
length = coords_.length;
if (length <= ARRAY_LENGTH (static_coords))
hb_memcpy (static_coords, coords_.arrayZ, length * sizeof (int));
else
dynamic_coords.extend (coords_);
}
int& operator [] (unsigned idx)
{
if (unlikely (idx >= HB_VAR_COMPOSITE_MAX_AXES))
return Crap(int);
if (coords.length < idx + 1)
coords.resize (idx + 1);
return coords[idx];
if (length <= ARRAY_LENGTH (static_coords))
{
if (idx < ARRAY_LENGTH (static_coords))
{
while (length <= idx)
static_coords[length++] = 0;
return static_coords[idx];
}
else
dynamic_coords.extend (hb_array (static_coords, length));
}
if (dynamic_coords.length <= idx)
{
if (unlikely (!dynamic_coords.resize (idx + 1)))
return Crap(int);
length = idx + 1;
}
return dynamic_coords.arrayZ[idx];
}
hb_array_t<int> get_coords ()
{ return coords.as_array (); }
{ return length <= ARRAY_LENGTH (static_coords) ? hb_array (static_coords, length) : dynamic_coords.as_array (); }
hb_vector_t<int> coords;
private:
hb_vector_t<int> dynamic_coords;
unsigned length;
int static_coords[sizeof (void *) * 8];
};
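
The rewritten coord_setter_t is a small-buffer optimization: up to sizeof (void *) * 8 coordinates (64 on 64-bit targets) live in an on-stack array that is zero-filled on demand, and only an axis index beyond that forces a one-time spill into a heap vector. A reduced sketch of the same shape, with hypothetical names:

#include <vector>

struct small_coords_t
{
  static constexpr unsigned N = 64;  // stand-in for ARRAY_LENGTH (static_coords)
  int stack[N];
  std::vector<int> heap;             // used only after a spill
  unsigned len = 0;
  bool spilled = false;

  int &at (unsigned idx)
  {
    if (!spilled && idx < N)
    {
      while (len <= idx) stack[len++] = 0;  // zero-fill the gap, as above
      return stack[idx];
    }
    if (!spilled)                           // first oversized index: copy out
    {
      heap.assign (stack, stack + len);
      spilled = true;
    }
    if (heap.size () <= idx) heap.resize (idx + 1, 0);
    return heap[idx];
  }
};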

View File

@ -143,7 +143,7 @@ struct CompositeGlyphRecord
float matrix[4];
contour_point_t trans;
get_transformation (matrix, trans);
if (unlikely (!points.alloc (points.length + 4))) return false; // For phantom points
if (unlikely (!points.alloc (points.length + 1 + 4))) return false; // For phantom points
points.push (trans);
return true;
}

View File

@ -251,7 +251,8 @@ struct Glyph
composite_contours_p = nullptr;
}
if (!get_points (font, glyf, all_points, &points_with_deltas, head_maxp_info_p, composite_contours_p, false, false))
hb_glyf_scratch_t scratch;
if (!get_points (font, glyf, all_points, scratch, &points_with_deltas, head_maxp_info_p, composite_contours_p, false, false))
return false;
// .notdef, set type to empty so we only update metrics and don't compile bytes for
@ -305,6 +306,7 @@ struct Glyph
template <typename accelerator_t>
bool get_points (hb_font_t *font, const accelerator_t &glyf_accelerator,
contour_point_vector_t &all_points /* OUT */,
hb_glyf_scratch_t &scratch,
contour_point_vector_t *points_with_deltas = nullptr, /* OUT */
head_maxp_info_t * head_maxp_info = nullptr, /* OUT */
unsigned *composite_contours = nullptr, /* OUT */
@ -312,7 +314,6 @@ struct Glyph
bool use_my_metrics = true,
bool phantom_only = false,
hb_array_t<const int> coords = hb_array_t<const int> (),
hb_map_t *current_glyphs = nullptr,
unsigned int depth = 0,
unsigned *edge_count = nullptr) const
{
@ -322,10 +323,6 @@ struct Glyph
if (unlikely (*edge_count > HB_MAX_GRAPH_EDGE_COUNT)) return false;
(*edge_count)++;
hb_map_t current_glyphs_stack;
if (current_glyphs == nullptr)
current_glyphs = &current_glyphs_stack;
if (head_maxp_info)
{
head_maxp_info->maxComponentDepth = hb_max (head_maxp_info->maxComponentDepth, depth);
@ -334,8 +331,7 @@ struct Glyph
if (!coords)
coords = hb_array (font->coords, font->num_coords);
contour_point_vector_t stack_points;
contour_point_vector_t &points = type == SIMPLE ? all_points : stack_points;
contour_point_vector_t &points = type == SIMPLE ? all_points : scratch.comp_points;
unsigned old_length = points.length;
switch (type) {
@ -388,36 +384,53 @@ struct Glyph
#ifndef HB_NO_VAR
if (coords)
glyf_accelerator.gvar->apply_deltas_to_points (gid,
coords,
points.as_array ().sub_array (old_length),
phantom_only && type == SIMPLE);
{
#ifndef HB_NO_BEYOND_64K
if (glyf_accelerator.GVAR->has_data ())
glyf_accelerator.GVAR->apply_deltas_to_points (gid,
coords,
points.as_array ().sub_array (old_length),
scratch,
phantom_only && type == SIMPLE);
else
#endif
glyf_accelerator.gvar->apply_deltas_to_points (gid,
coords,
points.as_array ().sub_array (old_length),
scratch,
phantom_only && type == SIMPLE);
}
#endif
// Mainly used by CompositeGlyph when calculating the new X/Y offset values, so no need
// to extend it with child glyphs' points.
if (points_with_deltas != nullptr && depth == 0 && type == COMPOSITE)
{
if (unlikely (!points_with_deltas->resize (points.length))) return false;
assert (old_length == 0);
*points_with_deltas = points;
}
float shift = 0;
switch (type) {
case SIMPLE:
if (depth == 0 && head_maxp_info)
head_maxp_info->maxPoints = hb_max (head_maxp_info->maxPoints, all_points.length - old_length - 4);
shift = phantoms[PHANTOM_LEFT].x;
break;
case COMPOSITE:
{
hb_decycler_node_t decycler_node (scratch.decycler);
unsigned int comp_index = 0;
for (auto &item : get_composite_iterator ())
{
hb_codepoint_t item_gid = item.get_gid ();
if (unlikely (current_glyphs->has (item_gid)))
if (unlikely (!decycler_node.visit (item_gid)))
{
comp_index++;
continue;
current_glyphs->add (item_gid);
}
unsigned old_count = all_points.length;
@ -426,6 +439,7 @@ struct Glyph
.get_points (font,
glyf_accelerator,
all_points,
scratch,
points_with_deltas,
head_maxp_info,
composite_contours,
@ -433,14 +447,16 @@ struct Glyph
use_my_metrics,
phantom_only,
coords,
current_glyphs,
depth + 1,
edge_count)))
{
current_glyphs->del (item_gid);
points.resize (old_length);
return false;
}
// points might have been reallocated. Relocate phantoms.
phantoms = points.as_array ().sub_array (points.length - PHANTOM_COUNT, PHANTOM_COUNT);
auto comp_points = all_points.as_array ().sub_array (old_count);
/* Copy phantom points from component if USE_MY_METRICS flag set */
@ -455,7 +471,7 @@ struct Glyph
item.get_transformation (matrix, default_trans);
/* Apply component transformation & translation (with deltas applied) */
item.transform_points (comp_points, matrix, points[comp_index]);
item.transform_points (comp_points, matrix, points[old_length + comp_index]);
}
if (item.is_anchored () && !phantom_only)
@ -476,12 +492,11 @@ struct Glyph
if (all_points.length > HB_GLYF_MAX_POINTS)
{
current_glyphs->del (item_gid);
points.resize (old_length);
return false;
}
comp_index++;
current_glyphs->del (item_gid);
}
if (head_maxp_info && depth == 0)
@ -492,9 +507,13 @@ struct Glyph
head_maxp_info->maxComponentElements = hb_max (head_maxp_info->maxComponentElements, comp_index);
}
all_points.extend (phantoms);
shift = phantoms[PHANTOM_LEFT].x;
points.resize (old_length);
} break;
case EMPTY:
all_points.extend (phantoms);
shift = phantoms[PHANTOM_LEFT].x;
points.resize (old_length);
break;
}
@ -503,10 +522,9 @@ struct Glyph
/* Undocumented rasterizer behavior:
* Shift points horizontally by the updated left side bearing
*/
float v = -phantoms[PHANTOM_LEFT].x;
if (v)
if (shift)
for (auto &point : all_points)
point.x += v;
point.x -= shift;
}
return !all_points.in_error ();
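
Note that the left side bearing is now captured into shift inside each case, before scratch.comp_points is truncated back to old_length (which would invalidate the phantoms view), instead of being read from the phantom array after the switch; the arithmetic is unchanged, point.x += -lsb simply became point.x -= lsb.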

View File

@ -127,19 +127,20 @@ struct SimpleGlyph
hb_array_t<contour_point_t> points_ /* IN/OUT */,
const HBUINT8 *end)
{
auto *points = points_.arrayZ;
unsigned count = points_.length;
for (unsigned int i = 0; i < count;)
{
if (unlikely (p + 1 > end)) return false;
uint8_t flag = *p++;
points_.arrayZ[i++].flag = flag;
points[i++].flag = flag;
if (flag & FLAG_REPEAT)
{
if (unlikely (p + 1 > end)) return false;
unsigned int repeat_count = *p++;
unsigned stop = hb_min (i + repeat_count, count);
for (; i < stop; i++)
points_.arrayZ[i].flag = flag;
points[i].flag = flag;
}
}
return true;
@ -160,10 +161,7 @@ struct SimpleGlyph
if (flag & short_flag)
{
if (unlikely (p + 1 > end)) return false;
if (flag & same_flag)
v += *p++;
else
v -= *p++;
v += (bool(flag & same_flag) * 2 - 1) * *p++;
}
else
{
@ -190,7 +188,7 @@ struct SimpleGlyph
unsigned int num_points = endPtsOfContours[num_contours - 1] + 1;
unsigned old_length = points.length;
points.alloc_exact (points.length + num_points + 4); // Allocate for phantom points, to avoid a possible copy
points.alloc (points.length + num_points + 4); // Allocate for phantom points, to avoid a possible copy
if (unlikely (!points.resize (points.length + num_points, false))) return false;
auto points_ = points.as_array ().sub_array (old_length);
if (!phantom_only)
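
Two micro-optimizations in the SimpleGlyph readers above: the flag loop hoists points_.arrayZ into a local pointer, and the short-coordinate reader replaces an if/else on same_flag with branchless sign selection — bool (flag & same_flag) is 0 or 1, so * 2 - 1 maps it to -1 or +1. A self-contained check of that identity:

#include <cassert>
#include <cstdint>

static int apply_short_delta (int v, uint8_t flag, uint8_t same_flag, uint8_t byte)
{
  // Equivalent to: flag & same_flag ? v + byte : v - byte, without a branch.
  return v + (int (bool (flag & same_flag)) * 2 - 1) * int (byte);
}

int main ()
{
  assert (apply_short_delta (10, 0x02, 0x02, 3) == 13);  // flag set: +3
  assert (apply_short_delta (10, 0x00, 0x02, 3) == 7);   // flag clear: -3
  return 0;
}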

View File

@ -172,6 +172,9 @@ struct glyf_accelerator_t
glyf_table = nullptr;
#ifndef HB_NO_VAR
gvar = nullptr;
#ifndef HB_NO_BEYOND_64K
GVAR = nullptr;
#endif
#endif
hmtx = nullptr;
#ifndef HB_NO_VERTICAL
@ -187,6 +190,9 @@ struct glyf_accelerator_t
glyf_table = hb_sanitize_context_t ().reference_table<glyf> (face);
#ifndef HB_NO_VAR
gvar = face->table.gvar;
#ifndef HB_NO_BEYOND_64K
GVAR = face->table.GVAR;
#endif
#endif
hmtx = face->table.hmtx;
#ifndef HB_NO_VERTICAL
@ -198,6 +204,13 @@ struct glyf_accelerator_t
}
~glyf_accelerator_t ()
{
auto *scratch = cached_scratch.get_relaxed ();
if (scratch)
{
scratch->~hb_glyf_scratch_t ();
hb_free (scratch);
}
glyf_table.destroy ();
}
@ -206,21 +219,16 @@ struct glyf_accelerator_t
protected:
template<typename T>
bool get_points (hb_font_t *font, hb_codepoint_t gid, T consumer,
hb_array_t<const int> coords = hb_array_t<const int> ()) const
hb_array_t<const int> coords,
hb_glyf_scratch_t &scratch) const
{
if (!coords)
coords = hb_array (font->coords, font->num_coords);
if (gid >= num_glyphs) return false;
/* Making this allocfree is not that easy
https://github.com/harfbuzz/harfbuzz/issues/2095
mostly because of gvar handling in VF fonts,
perhaps a separate path for non-VF fonts can be considered */
contour_point_vector_t all_points;
auto &all_points = scratch.all_points;
all_points.resize (0);
bool phantom_only = !consumer.is_consuming_contour_points ();
if (unlikely (!glyph_for_gid (gid).get_points (font, *this, all_points, nullptr, nullptr, nullptr, true, true, phantom_only, coords)))
if (unlikely (!glyph_for_gid (gid).get_points (font, *this, all_points, scratch, nullptr, nullptr, nullptr, true, true, phantom_only, coords)))
return false;
unsigned count = all_points.length;
@ -372,7 +380,12 @@ struct glyf_accelerator_t
contour_point_t phantoms[glyf_impl::PHANTOM_COUNT];
if (font->num_coords)
success = get_points (font, gid, points_aggregator_t (font, nullptr, phantoms, false));
{
hb_glyf_scratch_t scratch;
success = get_points (font, gid, points_aggregator_t (font, nullptr, phantoms, false),
hb_array (font->coords, font->num_coords),
scratch);
}
if (unlikely (!success))
return
@ -392,9 +405,11 @@ struct glyf_accelerator_t
if (unlikely (gid >= num_glyphs)) return false;
hb_glyph_extents_t extents;
hb_glyf_scratch_t scratch;
contour_point_t phantoms[glyf_impl::PHANTOM_COUNT];
if (unlikely (!get_points (font, gid, points_aggregator_t (font, &extents, phantoms, false))))
if (unlikely (!get_points (font, gid, points_aggregator_t (font, &extents, phantoms, false),
hb_array (font->coords, font->num_coords),
scratch)))
return false;
*lsb = is_vertical
@ -420,7 +435,12 @@ struct glyf_accelerator_t
#ifndef HB_NO_VAR
if (font->num_coords)
return get_points (font, gid, points_aggregator_t (font, extents, nullptr, true));
{
hb_glyf_scratch_t scratch;
return get_points (font, gid, points_aggregator_t (font, extents, nullptr, true),
hb_array (font->coords, font->num_coords),
scratch);
}
#endif
return glyph_for_gid (gid).get_extents_without_var_scaled (font, *this, extents);
}
@ -455,15 +475,52 @@ struct glyf_accelerator_t
bool
get_path (hb_font_t *font, hb_codepoint_t gid, hb_draw_session_t &draw_session) const
{ return get_points (font, gid, glyf_impl::path_builder_t (font, draw_session)); }
{
if (!has_data ()) return false;
hb_glyf_scratch_t *scratch;
// Borrow the cached scratch buffer.
{
scratch = cached_scratch.get_acquire ();
if (!scratch || unlikely (!cached_scratch.cmpexch (scratch, nullptr)))
{
scratch = (hb_glyf_scratch_t *) hb_calloc (1, sizeof (hb_glyf_scratch_t));
if (unlikely (!scratch))
return true;
}
}
bool ret = get_points (font, gid, glyf_impl::path_builder_t (font, draw_session),
hb_array (font->coords, font->num_coords),
*scratch);
// Put it back.
if (!cached_scratch.cmpexch (nullptr, scratch))
{
scratch->~hb_glyf_scratch_t ();
hb_free (scratch);
}
return ret;
}
bool
get_path_at (hb_font_t *font, hb_codepoint_t gid, hb_draw_session_t &draw_session,
hb_array_t<const int> coords) const
{ return get_points (font, gid, glyf_impl::path_builder_t (font, draw_session), coords); }
hb_array_t<const int> coords,
hb_glyf_scratch_t &scratch) const
{
if (!has_data ()) return false;
return get_points (font, gid, glyf_impl::path_builder_t (font, draw_session),
coords,
scratch);
}
#ifndef HB_NO_VAR
const gvar_accelerator_t *gvar;
#ifndef HB_NO_BEYOND_64K
const GVAR_accelerator_t *GVAR;
#endif
#endif
const hmtx_accelerator_t *hmtx;
#ifndef HB_NO_VERTICAL
@ -475,6 +532,7 @@ struct glyf_accelerator_t
unsigned int num_glyphs;
hb_blob_ptr_t<loca> loca_table;
hb_blob_ptr_t<glyf> glyf_table;
hb_atomic_ptr_t<hb_glyf_scratch_t> cached_scratch;
};

View File

@ -42,7 +42,7 @@ struct path_builder_t
{
bool is_on_curve = point.flag & glyf_impl::SimpleGlyph::FLAG_ON_CURVE;
#ifdef HB_NO_CUBIC_GLYF
bool is_cubic = false;
constexpr bool is_cubic = false;
#else
bool is_cubic = !is_on_curve && (point.flag & glyf_impl::SimpleGlyph::FLAG_CUBIC);
#endif

View File

@ -38,11 +38,11 @@
* For Floyd's tortoise and hare algorithm, see:
* https://en.wikipedia.org/wiki/Cycle_detection#Floyd's_tortoise_and_hare
*
* Like Floyd's algorithm, hb_decycler_t is O(n) in the number of nodes
* in the graph. Unlike Floyd's algorithm, hb_decycler_t is designed
* to be used in a DFS traversal, where the graph is not a simple
* linked list, but a tree with cycles. Like Floyd's algorithm, it is
* constant-memory (just two pointers).
* hb_decycler_t is O(n) in the number of nodes in the DFS traversal
* if there are no cycles. Unlike Floyd's algorithm, hb_decycler_t
* can be used in a DFS traversal, where the graph is not a simple
* linked list, but a tree with possible cycles. Like Floyd's algorithm,
* it is constant-memory (~three pointers).
*
* The decycler works by creating an implicit linked-list on the stack,
* of the path from the root to the current node, and apply Floyd's
@ -89,7 +89,7 @@ struct hb_decycler_t
friend struct hb_decycler_node_t;
private:
bool tortoise_asleep = true;
bool tortoise_awake = false;
hb_decycler_node_t *tortoise = nullptr;
hb_decycler_node_t *hare = nullptr;
};
@ -100,15 +100,18 @@ struct hb_decycler_node_t
{
u.decycler = &decycler;
decycler.tortoise_asleep = !decycler.tortoise_asleep;
decycler.tortoise_awake = !decycler.tortoise_awake;
if (!decycler.tortoise)
{
// First node.
assert (decycler.tortoise_awake);
assert (!decycler.hare);
decycler.tortoise = decycler.hare = this;
return;
}
if (!decycler.tortoise_asleep)
if (decycler.tortoise_awake)
decycler.tortoise = decycler.tortoise->u.next; // Time to move.
this->prev = decycler.hare;
@ -128,10 +131,10 @@ struct hb_decycler_node_t
prev->u.decycler = &decycler;
assert (decycler.tortoise);
if (!decycler.tortoise_asleep)
if (decycler.tortoise_awake)
decycler.tortoise = decycler.tortoise->prev;
decycler.tortoise_asleep = !decycler.tortoise_asleep;
decycler.tortoise_awake = !decycler.tortoise_awake;
}
bool visit (uintptr_t value_)
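
In use, the decycler costs one node per active recursion frame plus two pointer updates per visit. One typical shape, modeled on the call sites this commit adds to glyf and VARC (children_of () is a hypothetical stand-in for the real component iterators):

#include <vector>
#include "hb-decycler.hh"

static std::vector<unsigned> children_of (unsigned gid) { return {}; }  // stub

static void traverse (unsigned gid, hb_decycler_t &decycler)
{
  hb_decycler_node_t node (decycler);  // links this frame into the implicit path list
  if (!node.visit (gid))               // false: gid is already on the path, i.e. a cycle
    return;
  for (unsigned child : children_of (gid))
    traverse (child, decycler);
}                                      // node's destructor unlinks the frame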

View File

@ -277,7 +277,7 @@ void
_hb_directwrite_shaper_font_data_destroy (hb_directwrite_font_data_t *data)
{
if (data != HB_SHAPER_DATA_SUCCEEDED)
((IDWriteFont *) data)->Release();
((IDWriteFont *) (const void *) data)->Release();
}
@ -867,6 +867,24 @@ hb_directwrite_face_create (IDWriteFontFace *dw_face)
_hb_directwrite_face_release);
}
/**
* hb_directwrite_face_get_dw_font_face:
* @face: a #hb_face_t object
*
* Gets the DirectWrite IDWriteFontFace associated with @face.
*
* Return value: DirectWrite IDWriteFontFace object corresponding to the given input
*
* Since: 10.4.0
**/
IDWriteFontFace *
hb_directwrite_face_get_dw_font_face (hb_face_t *face)
{
return face->data.directwrite->fontFace;
}
#ifndef HB_DISABLE_DEPRECATED
/**
* hb_directwrite_face_get_font_face:
* @face: a #hb_face_t object
@ -876,13 +894,16 @@ hb_directwrite_face_create (IDWriteFontFace *dw_face)
* Return value: DirectWrite IDWriteFontFace object corresponding to the given input
*
* Since: 2.5.0
* Deprecated: 10.4.0: Use hb_directwrite_face_get_dw_font_face() instead
**/
IDWriteFontFace *
hb_directwrite_face_get_font_face (hb_face_t *face)
{
return face->data.directwrite->fontFace;
return hb_directwrite_face_get_dw_font_face (face);
}
#endif
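
Migration for DirectWrite users is mechanical; a hedged sketch, assuming a face created through the DirectWrite integration:

// Before (deprecated as of 10.4.0):
IDWriteFontFace *dw_face = hb_directwrite_face_get_font_face (face);
// After (same behaviour, new name):
IDWriteFontFace *dw_face2 = hb_directwrite_face_get_dw_font_face (face);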
/**
* hb_directwrite_font_create:
* @dw_font: a DirectWrite IDWriteFont object.

View File

@ -33,7 +33,7 @@ HB_EXTERN hb_face_t *
hb_directwrite_face_create (IDWriteFontFace *dw_face);
HB_EXTERN IDWriteFontFace *
hb_directwrite_face_get_font_face (hb_face_t *face);
hb_directwrite_face_get_dw_font_face (hb_face_t *face);
HB_EXTERN hb_font_t *
hb_directwrite_font_create (IDWriteFont *dw_font);
@ -41,6 +41,14 @@ hb_directwrite_font_create (IDWriteFont *dw_font);
HB_EXTERN IDWriteFont *
hb_directwrite_font_get_dw_font (hb_font_t *font);
#ifndef HB_DISABLE_DEPRECATED
HB_DEPRECATED_FOR (hb_directwrite_face_get_dw_font_face)
HB_EXTERN IDWriteFontFace *
hb_directwrite_face_get_font_face (hb_face_t *face);
#endif
HB_END_DECLS
#endif /* HB_DIRECTWRITE_H */

View File

@ -522,11 +522,15 @@ hb_face_reference_blob (hb_face_t *face)
for (unsigned offset = 0; offset < total_count; offset += count)
{
hb_face_get_table_tags (face, offset, &count, tags);
if (unlikely (!count))
break; // Allocation error
for (unsigned i = 0; i < count; i++)
{
hb_blob_t *table = hb_face_reference_table (face, tags[i]);
hb_face_builder_add_table (builder, tags[i], table);
hb_blob_destroy (table);
if (unlikely (!tags[i]))
continue;
hb_blob_t *table = hb_face_reference_table (face, tags[i]);
hb_face_builder_add_table (builder, tags[i], table);
hb_blob_destroy (table);
}
}
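
The two added guards matter because hb_face_get_table_tags () reports a zero count when its buffer allocation fails — which previously left offset stuck and the loop spinning forever — and a face's table-tags callback may hand back empty (zero) tags. A defensive pagination loop in the spirit of the fix (process_table () is hypothetical):

hb_tag_t tags[16];
unsigned count;
for (unsigned offset = 0; ; offset += count)
{
  count = 16;                         // in: capacity; out: tags actually written
  hb_face_get_table_tags (face, offset, &count, tags);
  if (!count) break;                  // done, or allocation error: stop, don't spin
  for (unsigned i = 0; i < count; i++)
    if (tags[i])                      // skip empty (HB_TAG_NONE) entries
      process_table (face, tags[i]);
}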

View File

@ -278,6 +278,33 @@ hb_ft_font_get_load_flags (hb_font_t *font)
return ft_font->load_flags;
}
/**
* hb_ft_font_get_ft_face: (skip)
* @font: #hb_font_t to work upon
*
* Fetches the FT_Face associated with the specified #hb_font_t
* font object.
*
* This function works with #hb_font_t objects created by
* hb_ft_font_create() or hb_ft_font_create_referenced().
*
* Return value: (nullable): the FT_Face found or `NULL`
*
* Since: 10.4.0
**/
FT_Face
hb_ft_font_get_ft_face (hb_font_t *font)
{
if (unlikely (font->destroy != (hb_destroy_func_t) _hb_ft_font_destroy))
return nullptr;
const hb_ft_font_t *ft_font = (const hb_ft_font_t *) font->user_data;
return ft_font->ft_face;
}
#ifndef HB_DISABLE_DEPRECATED
/**
* hb_ft_font_get_face: (skip)
* @font: #hb_font_t to work upon
@ -291,18 +318,16 @@ hb_ft_font_get_load_flags (hb_font_t *font)
* Return value: (nullable): the FT_Face found or `NULL`
*
* Since: 0.9.2
* Deprecated: 10.4.0: Use hb_ft_font_get_ft_face() instead.
**/
FT_Face
hb_ft_font_get_face (hb_font_t *font)
{
if (unlikely (font->destroy != (hb_destroy_func_t) _hb_ft_font_destroy))
return nullptr;
const hb_ft_font_t *ft_font = (const hb_ft_font_t *) font->user_data;
return ft_font->ft_face;
return hb_ft_font_get_ft_face (font);
}
#endif
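
The FreeType getter follows the same deprecation pattern; a hedged migration sketch:

hb_font_t *font = hb_ft_font_create_referenced (ft_face);
FT_Face f1 = hb_ft_font_get_face (font);     // before: deprecated as of 10.4.0
FT_Face f2 = hb_ft_font_get_ft_face (font);  // after: same behaviour, NULL unless font is FT-backed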
/**
* hb_ft_font_lock_face: (skip)
* @font: #hb_font_t to work upon

View File

@ -111,7 +111,7 @@ HB_EXTERN hb_font_t *
hb_ft_font_create_referenced (FT_Face ft_face);
HB_EXTERN FT_Face
hb_ft_font_get_face (hb_font_t *font);
hb_ft_font_get_ft_face (hb_font_t *font);
HB_EXTERN FT_Face
hb_ft_font_lock_face (hb_font_t *font);
@ -142,6 +142,13 @@ hb_ft_hb_font_changed (hb_font_t *font);
HB_EXTERN void
hb_ft_font_set_funcs (hb_font_t *font);
#ifndef HB_DISABLE_DEPRECATED
HB_DEPRECATED_FOR (hb_ft_font_get_ft_face)
HB_EXTERN FT_Face
hb_ft_font_get_face (hb_font_t *font);
#endif
HB_END_DECLS

View File

@ -1733,6 +1733,19 @@ struct TupleValues
else if ((control & VALUES_SIZE_MASK) == VALUES_ARE_WORDS)
{
if (unlikely (p + run_count * HBINT16::static_size > end)) return false;
#ifndef HB_OPTIMIZE_SIZE
for (; i + 3 < stop; i += 4)
{
values.arrayZ[i] = * (const HBINT16 *) p;
p += HBINT16::static_size;
values.arrayZ[i + 1] = * (const HBINT16 *) p;
p += HBINT16::static_size;
values.arrayZ[i + 2] = * (const HBINT16 *) p;
p += HBINT16::static_size;
values.arrayZ[i + 3] = * (const HBINT16 *) p;
p += HBINT16::static_size;
}
#endif
for (; i < stop; i++)
{
values.arrayZ[i] = * (const HBINT16 *) p;
@ -1751,10 +1764,17 @@ struct TupleValues
else if ((control & VALUES_SIZE_MASK) == VALUES_ARE_BYTES)
{
if (unlikely (p + run_count > end)) return false;
#ifndef HB_OPTIMIZE_SIZE
for (; i + 3 < stop; i += 4)
{
values.arrayZ[i] = * (const HBINT8 *) p++;
values.arrayZ[i + 1] = * (const HBINT8 *) p++;
values.arrayZ[i + 2] = * (const HBINT8 *) p++;
values.arrayZ[i + 3] = * (const HBINT8 *) p++;
}
#endif
for (; i < stop; i++)
{
values.arrayZ[i] = * (const HBINT8 *) p++;
}
}
}
return true;
@ -1763,12 +1783,12 @@ struct TupleValues
struct iter_t : hb_iter_with_fallback_t<iter_t, int>
{
iter_t (const unsigned char *p_, unsigned len_)
: p (p_), end (p_ + len_)
: p (p_), endp (p_ + len_)
{ if (ensure_run ()) read_value (); }
private:
const unsigned char *p;
const unsigned char * const end;
const unsigned char * const endp;
int current_value = 0;
signed run_count = 0;
unsigned width = 0;
@ -1777,7 +1797,7 @@ struct TupleValues
{
if (likely (run_count > 0)) return true;
if (unlikely (p >= end))
if (unlikely (p >= endp))
{
run_count = 0;
current_value = 0;
@ -1796,7 +1816,7 @@ struct TupleValues
default: assert (false);
}
if (unlikely (p + run_count * width > end))
if (unlikely (p + run_count * width > endp))
{
run_count = 0;
current_value = 0;
@ -1823,7 +1843,7 @@ struct TupleValues
__item_t__ __item__ () const
{ return current_value; }
bool __more__ () const { return run_count || p < end; }
bool __more__ () const { return run_count || p < endp; }
void __next__ ()
{
run_count--;
@ -1850,10 +1870,146 @@ struct TupleValues
{ return p != o.p || run_count != o.run_count; }
iter_t __end__ () const
{
iter_t it (end, 0);
iter_t it (endp, 0);
return it;
}
};
struct fetcher_t
{
fetcher_t (const unsigned char *p_, unsigned len_)
: p (p_), end (p_ + len_) {}
private:
const unsigned char *p;
const unsigned char * const end;
signed run_count = 0;
unsigned width = 0;
bool ensure_run ()
{
if (likely (run_count > 0)) return true;
if (unlikely (p >= end))
{
run_count = 0;
return false;
}
unsigned control = *p++;
run_count = (control & VALUE_RUN_COUNT_MASK) + 1;
width = control & VALUES_SIZE_MASK;
switch (width)
{
case VALUES_ARE_ZEROS: width = 0; break;
case VALUES_ARE_BYTES: width = HBINT8::static_size; break;
case VALUES_ARE_WORDS: width = HBINT16::static_size; break;
case VALUES_ARE_LONGS: width = HBINT32::static_size; break;
default: assert (false);
}
if (unlikely (p + run_count * width > end))
{
run_count = 0;
return false;
}
return true;
}
void skip (unsigned n)
{
while (n)
{
if (unlikely (!ensure_run ()))
return;
unsigned i = hb_min (n, (unsigned) run_count);
run_count -= i;
n -= i;
p += i * width;
}
}
template <bool scaled>
void _add_to (hb_array_t<float> out, float scale = 1.0f)
{
unsigned n = out.length;
float *arrayZ = out.arrayZ;
for (unsigned i = 0; i < n;)
{
if (unlikely (!ensure_run ()))
break;
unsigned count = hb_min (n - i, (unsigned) run_count);
switch (width)
{
case 1:
{
const auto *pp = (const HBINT8 *) p;
unsigned j = 0;
#ifndef HB_OPTIMIZE_SIZE
for (; j + 3 < count; j += 4)
{
*arrayZ++ += scaled ? *pp++ * scale : *pp++;
*arrayZ++ += scaled ? *pp++ * scale : *pp++;
*arrayZ++ += scaled ? *pp++ * scale : *pp++;
*arrayZ++ += scaled ? *pp++ * scale : *pp++;
}
#endif
for (; j < count; j++)
*arrayZ++ += scaled ? *pp++ * scale : *pp++;
}
break;
case 2:
{
const auto *pp = (const HBINT16 *) p;
unsigned j = 0;
#ifndef HB_OPTIMIZE_SIZE
for (; j + 3 < count; j += 4)
{
*arrayZ++ += scaled ? *pp++ * scale : *pp++;
*arrayZ++ += scaled ? *pp++ * scale : *pp++;
*arrayZ++ += scaled ? *pp++ * scale : *pp++;
*arrayZ++ += scaled ? *pp++ * scale : *pp++;
}
#endif
for (; j < count; j++)
*arrayZ++ += scaled ? *pp++ * scale : *pp++;
}
break;
case 4:
{
const auto *pp = (const HBINT32 *) p;
for (unsigned j = 0; j < count; j++)
*arrayZ++ += scaled ? *pp++ * scale : *pp++;
}
break;
}
p += count * width;
run_count -= count;
i += count;
}
}
public:
void add_to (hb_array_t<float> out, float scale = 1.0f)
{
unsigned n = out.length;
if (scale == 0.0f)
{
skip (n);
return;
}
#ifndef HB_OPTIMIZE_SIZE
if (scale == 1.0f)
_add_to<false> (out);
else
#endif
_add_to<true> (out, scale);
}
};
};
struct TupleList : CFF2Index
@ -1863,6 +2019,12 @@ struct TupleList : CFF2Index
auto bytes = CFF2Index::operator [] (i);
return TupleValues::iter_t (bytes.arrayZ, bytes.length);
}
TupleValues::fetcher_t fetcher (unsigned i) const
{
auto bytes = CFF2Index::operator [] (i);
return TupleValues::fetcher_t (bytes.arrayZ, bytes.length);
}
};
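
Two hot-loop techniques show up in this file: the word- and byte-sized runs are unrolled four at a time unless HB_OPTIMIZE_SIZE is defined, and fetcher_t::add_to () hoists the per-value scale test out of the loop by making scaled a compile-time template parameter, so the compiler emits separate scaled and unscaled loop bodies. The second trick in isolation (sketch):

#include <cstddef>

template <bool scaled>
static void add_values (float *out, const short *in, size_t n, float scale)
{
  for (size_t i = 0; i < n; i++)
    out[i] += scaled ? in[i] * scale : in[i];  // the ternary folds away per instantiation
}

static void accumulate (float *out, const short *in, size_t n, float scale)
{
  if (scale == 0.0f) return;  // nothing to add (the real fetcher still skips the run)
  if (scale == 1.0f) add_values<false> (out, in, n, 1.0f);
  else               add_values<true>  (out, in, n, scale);
}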

View File

@ -95,9 +95,12 @@ HB_OT_CORE_TABLE (OT, fvar)
HB_OT_CORE_TABLE (OT, avar)
HB_OT_CORE_TABLE (OT, cvar)
HB_OT_ACCELERATOR (OT, gvar)
#ifndef HB_NO_BEYOND_64K
HB_OT_ACCELERATOR (OT, GVAR)
#endif
HB_OT_CORE_TABLE (OT, MVAR)
#ifndef HB_NO_VAR_COMPOSITES
HB_OT_CORE_TABLE (OT, VARC)
HB_OT_ACCELERATOR (OT, VARC)
#endif
#endif

View File

@ -41,6 +41,7 @@
#include "hb-ot-layout-gdef-table.hh"
#include "hb-ot-layout-gsub-table.hh"
#include "hb-ot-layout-gpos-table.hh"
#include "hb-ot-var-varc-table.hh"
#include "hb-aat-layout-kerx-table.hh"
#include "hb-aat-layout-morx-table.hh"

View File

@ -95,7 +95,7 @@ struct hdmx
bool serialize (hb_serialize_context_t *c,
unsigned version,
Iterator it,
const hb_vector_t<hb_codepoint_pair_t> &new_to_old_gid_list,
hb_array_t<const hb_codepoint_pair_t> new_to_old_gid_list,
unsigned num_glyphs)
{
TRACE_SERIALIZE (this);

View File

@ -182,7 +182,7 @@ struct hmtxvmtx
hb_requires (hb_is_iterator (Iterator))>
void serialize (hb_serialize_context_t *c,
Iterator it,
const hb_vector_t<hb_codepoint_pair_t> new_to_old_gid_list,
hb_array_t<const hb_codepoint_pair_t> new_to_old_gid_list,
unsigned num_long_metrics,
unsigned total_num_metrics)
{

View File

@ -460,7 +460,7 @@ struct BaseScript
{ return (this+baseValues).get_base_coord (baseline_tag_index); }
bool has_values () const { return baseValues; }
bool has_min_max () const { return defaultMinMax; /* TODO What if only per-language is present? */ }
bool has_min_max () const { return defaultMinMax || baseLangSysRecords; }
void collect_variation_indices (const hb_subset_plan_t* plan,
hb_set_t& varidx_set /* OUT */) const

View File

@ -3147,23 +3147,14 @@ struct MultiVarData
{
auto &deltaSets = StructAfter<decltype (deltaSetsX)> (regionIndices);
auto values_iter = deltaSets[inner];
auto values_iter = deltaSets.fetcher (inner);
unsigned regionCount = regionIndices.len;
unsigned count = out.length;
for (unsigned regionIndex = 0; regionIndex < regionCount; regionIndex++)
{
float scalar = regions.evaluate (regionIndices.arrayZ[regionIndex],
coords, coord_count,
cache);
if (scalar == 1.f)
for (unsigned i = 0; i < count; i++)
out.arrayZ[i] += *values_iter++;
else if (scalar)
for (unsigned i = 0; i < count; i++)
out.arrayZ[i] += *values_iter++ * scalar;
else
values_iter += count;
values_iter.add_to (out, scalar);
}
}
@ -3449,7 +3440,7 @@ struct MultiItemVariationStore
{
using cache_t = SparseVarRegionList::cache_t;
cache_t *create_cache () const
cache_t *create_cache (hb_array_t<float> static_cache = hb_array_t<float> ()) const
{
#ifdef HB_NO_VAR
return nullptr;
@ -3457,8 +3448,14 @@ struct MultiItemVariationStore
auto &r = this+regions;
unsigned count = r.regions.len;
float *cache = (float *) hb_malloc (sizeof (float) * count);
if (unlikely (!cache)) return nullptr;
float *cache;
if (count <= static_cache.length)
cache = static_cache.arrayZ;
else
{
cache = (float *) hb_malloc (sizeof (float) * count);
if (unlikely (!cache)) return nullptr;
}
for (unsigned i = 0; i < count; i++)
cache[i] = REGION_CACHE_ITEM_CACHE_INVALID;
@ -3466,7 +3463,12 @@ struct MultiItemVariationStore
return cache;
}
static void destroy_cache (cache_t *cache) { hb_free (cache); }
static void destroy_cache (cache_t *cache,
hb_array_t<float> static_cache = hb_array_t<float> ())
{
if (cache != static_cache.arrayZ)
hb_free (cache);
}
private:
void get_delta (unsigned int outer, unsigned int inner,
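
create_cache () / destroy_cache () now take caller-provided stack storage: the heap is touched only when the region count outgrows the caller's buffer (VARC passes a stack array of sizeof (void *) * 16 floats above), and destroy frees the pointer only when it is not that buffer. The shape of the pattern in isolation (the invalid marker is a stand-in):

#include <cstdlib>

static float *create_cache (unsigned count, float *static_buf, unsigned static_len)
{
  float *cache = count <= static_len
               ? static_buf
               : (float *) malloc (sizeof (float) * count);
  if (!cache) return nullptr;
  for (unsigned i = 0; i < count; i++)
    cache[i] = -2.0f;                // stand-in for REGION_CACHE_ITEM_CACHE_INVALID
  return cache;
}

static void destroy_cache (float *cache, float *static_buf)
{
  if (cache && cache != static_buf)  // only heap allocations are freed
    free (cache);
}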

View File

@ -231,9 +231,9 @@ struct tuple_delta_t
/* indices_length = point_count, indices[i] = 1 means point i is referenced */
hb_vector_t<bool> indices;
hb_vector_t<double> deltas_x;
hb_vector_t<float> deltas_x;
/* empty for cvar tuples */
hb_vector_t<double> deltas_y;
hb_vector_t<float> deltas_y;
/* compiled data: header and deltas
* compiled point data is saved in a hashmap within tuple_variations_t cause
@ -299,9 +299,9 @@ struct tuple_delta_t
return *this;
}
tuple_delta_t& operator *= (double scalar)
tuple_delta_t& operator *= (float scalar)
{
if (scalar == 1.0)
if (scalar == 1.0f)
return *this;
unsigned num = indices.length;
@ -514,9 +514,9 @@ struct tuple_delta_t
bool compile_deltas ()
{ return compile_deltas (indices, deltas_x, deltas_y, compiled_deltas); }
static bool compile_deltas (const hb_vector_t<bool> &point_indices,
const hb_vector_t<double> &x_deltas,
const hb_vector_t<double> &y_deltas,
static bool compile_deltas (hb_array_t<const bool> point_indices,
hb_array_t<const float> x_deltas,
hb_array_t<const float> y_deltas,
hb_vector_t<unsigned char> &compiled_deltas /* OUT */)
{
hb_vector_t<int> rounded_deltas;
@ -629,11 +629,11 @@ struct tuple_delta_t
deltas_x.arrayZ[i] = infer_delta ((double) orig_points.arrayZ[i].x,
(double) orig_points.arrayZ[prev].x,
(double) orig_points.arrayZ[next].x,
deltas_x.arrayZ[prev], deltas_x.arrayZ[next]);
(double) deltas_x.arrayZ[prev], (double) deltas_x.arrayZ[next]);
deltas_y.arrayZ[i] = infer_delta ((double) orig_points.arrayZ[i].y,
(double) orig_points.arrayZ[prev].y,
(double) orig_points.arrayZ[next].y,
deltas_y.arrayZ[prev], deltas_y.arrayZ[next]);
(double) deltas_y.arrayZ[prev], (double) deltas_y.arrayZ[next]);
inferred_idxes.add (i);
if (--unref_count == 0) goto no_more_gaps;
}
@ -692,7 +692,7 @@ struct tuple_delta_t
if (ref_count == count) return true;
hb_vector_t<double> opt_deltas_x, opt_deltas_y;
hb_vector_t<float> opt_deltas_x, opt_deltas_y;
bool is_comp_glyph_wo_deltas = (is_composite && ref_count == 0);
if (is_comp_glyph_wo_deltas)
{
@ -841,6 +841,7 @@ struct tuple_delta_t
{ return (i >= end) ? start : (i + 1); }
};
template <typename OffType = HBUINT16>
struct TupleVariationData
{
bool sanitize (hb_sanitize_context_t *c) const
@ -875,7 +876,7 @@ struct TupleVariationData
private:
/* referenced point set->compiled point data map */
hb_hashmap_t<const hb_vector_t<bool>*, hb_vector_t<char>> point_data_map;
hb_hashmap_t<const hb_vector_t<bool>*, hb_vector_t<unsigned char>> point_data_map;
/* referenced point set-> count map, used in finding shared points */
hb_hashmap_t<const hb_vector_t<bool>*, unsigned> point_set_count_map;
@ -883,7 +884,7 @@ struct TupleVariationData
* shared_points_bytes is a pointer to some value in the point_data_map,
* which will be freed during map destruction. Save it for serialization, so
* no need to do find_shared_points () again */
hb_vector_t<char> *shared_points_bytes = nullptr;
hb_vector_t<unsigned char> *shared_points_bytes = nullptr;
/* total compiled byte size as TupleVariationData format, initialized to 0 */
unsigned compiled_byte_size = 0;
@ -1244,7 +1245,7 @@ struct TupleVariationData
for (auto& tuple: tuple_vars)
{
const hb_vector_t<bool>* points_set = &(tuple.indices);
hb_vector_t<char> *points_data;
hb_vector_t<unsigned char> *points_data;
if (unlikely (!point_data_map.has (points_set, &points_data)))
return false;
@ -1289,20 +1290,20 @@ struct TupleVariationData
TRACE_SERIALIZE (this);
if (is_gvar && shared_points_bytes)
{
hb_bytes_t s (shared_points_bytes->arrayZ, shared_points_bytes->length);
hb_ubytes_t s (shared_points_bytes->arrayZ, shared_points_bytes->length);
s.copy (c);
}
for (const auto& tuple: tuple_vars)
{
const hb_vector_t<bool>* points_set = &(tuple.indices);
hb_vector_t<char> *point_data;
hb_vector_t<unsigned char> *point_data;
if (!point_data_map.has (points_set, &point_data))
return_trace (false);
if (!is_gvar || point_data != shared_points_bytes)
{
hb_bytes_t s (point_data->arrayZ, point_data->length);
hb_ubytes_t s (point_data->arrayZ, point_data->length);
s.copy (c);
}
@ -1521,15 +1522,16 @@ struct TupleVariationData
* low 12 bits are the number of tuple variation tables
* for this glyph. The number of tuple variation tables
* can be any number between 1 and 4095. */
Offset16To<HBUINT8>
OffsetTo<HBUINT8, OffType>
data; /* Offset from the start of the base table
* to the serialized data. */
/* TupleVariationHeader tupleVariationHeaders[] *//* Array of tuple variation headers. */
public:
DEFINE_SIZE_MIN (4);
DEFINE_SIZE_MIN (2 + OffType::static_size);
};
using tuple_variations_t = TupleVariationData::tuple_variations_t;
// TODO: Move tuple_variations_t to outside of TupleVariationData
using tuple_variations_t = TupleVariationData<HBUINT16>::tuple_variations_t;
struct item_variations_t
{
using region_t = const hb_hashmap_t<hb_tag_t, Triple>*;

View File

@ -50,7 +50,7 @@ struct cvar
tupleVariationData.sanitize (c));
}
const TupleVariationData* get_tuple_var_data (void) const
const TupleVariationData<>* get_tuple_var_data (void) const
{ return &tupleVariationData; }
bool decompile_tuple_variations (unsigned axis_count,
@ -58,12 +58,12 @@ struct cvar
hb_blob_t *blob,
bool is_gvar,
const hb_map_t *axes_old_index_tag_map,
TupleVariationData::tuple_variations_t& tuple_variations /* OUT */) const
TupleVariationData<>::tuple_variations_t& tuple_variations /* OUT */) const
{
hb_vector_t<unsigned> shared_indices;
TupleVariationData::tuple_iterator_t iterator;
TupleVariationData<>::tuple_iterator_t iterator;
hb_bytes_t var_data_bytes = blob->as_bytes ().sub_array (4);
if (!TupleVariationData::get_tuple_iterator (var_data_bytes, axis_count, this,
if (!TupleVariationData<>::get_tuple_iterator (var_data_bytes, axis_count, this,
shared_indices, &iterator))
return false;
@ -77,16 +77,16 @@ struct cvar
static bool calculate_cvt_deltas (unsigned axis_count,
hb_array_t<int> coords,
unsigned num_cvt_item,
const TupleVariationData *tuple_var_data,
const TupleVariationData<> *tuple_var_data,
const void *base,
hb_vector_t<float>& cvt_deltas /* OUT */)
{
if (!coords) return true;
hb_vector_t<unsigned> shared_indices;
TupleVariationData::tuple_iterator_t iterator;
TupleVariationData<>::tuple_iterator_t iterator;
unsigned var_data_length = tuple_var_data->get_size (axis_count);
hb_bytes_t var_data_bytes = hb_bytes_t (reinterpret_cast<const char*> (tuple_var_data), var_data_length);
if (!TupleVariationData::get_tuple_iterator (var_data_bytes, axis_count, base,
if (!TupleVariationData<>::get_tuple_iterator (var_data_bytes, axis_count, base,
shared_indices, &iterator))
return true; /* isn't applied at all */
@ -107,14 +107,14 @@ struct cvar
bool has_private_points = iterator.current_tuple->has_private_points ();
if (has_private_points &&
!TupleVariationData::decompile_points (p, private_indices, end))
!TupleVariationData<>::decompile_points (p, private_indices, end))
return false;
const hb_vector_t<unsigned int> &indices = has_private_points ? private_indices : shared_indices;
bool apply_to_all = (indices.length == 0);
unsigned num_deltas = apply_to_all ? num_cvt_item : indices.length;
if (unlikely (!unpacked_deltas.resize (num_deltas, false))) return false;
if (unlikely (!TupleVariationData::decompile_deltas (p, unpacked_deltas, end))) return false;
if (unlikely (!TupleVariationData<>::decompile_deltas (p, unpacked_deltas, end))) return false;
for (unsigned int i = 0; i < num_deltas; i++)
{
@ -129,7 +129,7 @@ struct cvar
}
bool serialize (hb_serialize_context_t *c,
TupleVariationData::tuple_variations_t& tuple_variations) const
TupleVariationData<>::tuple_variations_t& tuple_variations) const
{
TRACE_SERIALIZE (this);
if (!tuple_variations) return_trace (false);
@ -144,7 +144,7 @@ struct cvar
if (c->plan->all_axes_pinned)
return_trace (false);
OT::TupleVariationData::tuple_variations_t tuple_variations;
OT::TupleVariationData<>::tuple_variations_t tuple_variations;
unsigned axis_count = c->plan->axes_old_index_tag_map.get_population ();
const hb_tag_t cvt = HB_TAG('c','v','t',' ');
@ -169,7 +169,7 @@ struct cvar
}
static bool add_cvt_and_apply_deltas (hb_subset_plan_t *plan,
const TupleVariationData *tuple_var_data,
const TupleVariationData<> *tuple_var_data,
const void *base)
{
const hb_tag_t cvt = HB_TAG('c','v','t',' ');
@ -209,7 +209,7 @@ struct cvar
protected:
FixedVersion<>version; /* Version of the CVT variation table
* initially set to 0x00010000u */
TupleVariationData tupleVariationData; /* TupleVariationDate for cvar table */
TupleVariationData<> tupleVariationData; /* TupleVariationDate for cvar table */
public:
DEFINE_SIZE_MIN (8);
};

View File

@ -28,6 +28,7 @@
#ifndef HB_OT_VAR_GVAR_TABLE_HH
#define HB_OT_VAR_GVAR_TABLE_HH
#include "hb-decycler.hh"
#include "hb-open-type.hh"
#include "hb-ot-var-common.hh"
@ -36,15 +37,37 @@
* https://docs.microsoft.com/en-us/typography/opentype/spec/gvar
*/
#define HB_OT_TAG_gvar HB_TAG('g','v','a','r')
#define HB_OT_TAG_GVAR HB_TAG('G','V','A','R')
struct hb_glyf_scratch_t
{
// glyf
contour_point_vector_t all_points;
contour_point_vector_t comp_points;
hb_decycler_t decycler;
// gvar
contour_point_vector_t orig_points;
hb_vector_t<int> x_deltas;
hb_vector_t<int> y_deltas;
contour_point_vector_t deltas;
hb_vector_t<unsigned int> shared_indices;
hb_vector_t<unsigned int> private_indices;
// VARC
hb_vector_t<unsigned> axisIndices;
hb_vector_t<float> axisValues;
};
namespace OT {
struct GlyphVariationData : TupleVariationData
{};
template <typename OffsetType>
struct glyph_variations_t
{
using tuple_variations_t = TupleVariationData::tuple_variations_t;
// TODO: Move tuple_variations_t to outside of TupleVariationData
using tuple_variations_t = typename TupleVariationData<OffsetType>::tuple_variations_t;
using GlyphVariationData = TupleVariationData<OffsetType>;
hb_vector_t<tuple_variations_t> glyph_variations;
hb_vector_t<char> compiled_shared_tuples;
@ -86,10 +109,11 @@ struct glyph_variations_t
hb_bytes_t var_data = new_gid_var_data_map.get (new_gid);
const GlyphVariationData* p = reinterpret_cast<const GlyphVariationData*> (var_data.arrayZ);
hb_vector_t<unsigned> shared_indices;
GlyphVariationData::tuple_iterator_t iterator;
typename GlyphVariationData::tuple_iterator_t iterator;
tuple_variations_t tuple_vars;
hb_vector_t<unsigned> shared_indices;
/* in case variation data is empty, push an empty struct into the vector,
* keep the vector in sync with the new_to_old_gid_list */
if (!var_data || ! p->has_data () || !all_contour_points->length ||
@ -259,7 +283,7 @@ struct glyph_variations_t
hb_codepoint_t last_gid = 0;
unsigned idx = 0;
TupleVariationData* cur_glyph = c->start_embed<TupleVariationData> ();
GlyphVariationData* cur_glyph = c->start_embed<GlyphVariationData> ();
if (!cur_glyph) return_trace (false);
for (auto &_ : it)
{
@ -273,7 +297,7 @@ struct glyph_variations_t
if (idx >= glyph_variations.length) return_trace (false);
if (!cur_glyph->serialize (c, true, glyph_variations[idx])) return_trace (false);
TupleVariationData* next_glyph = c->start_embed<TupleVariationData> ();
GlyphVariationData* next_glyph = c->start_embed<GlyphVariationData> ();
glyph_offset += (char *) next_glyph - (char *) cur_glyph;
if (long_offset)
@ -296,9 +320,14 @@ struct glyph_variations_t
}
};
struct gvar
template <typename GidOffsetType, unsigned TableTag>
struct gvar_GVAR
{
static constexpr hb_tag_t tableTag = HB_OT_TAG_gvar;
static constexpr hb_tag_t tableTag = TableTag;
using GlyphVariationData = TupleVariationData<GidOffsetType>;
bool has_data () const { return version.to_int () != 0; }
bool sanitize_shallow (hb_sanitize_context_t *c) const
{
@ -317,7 +346,7 @@ struct gvar
{ return sanitize_shallow (c); }
bool decompile_glyph_variations (hb_subset_context_t *c,
glyph_variations_t& glyph_vars /* OUT */) const
glyph_variations_t<GidOffsetType>& glyph_vars /* OUT */) const
{
hb_hashmap_t<hb_codepoint_t, hb_bytes_t> new_gid_var_data_map;
auto it = hb_iter (c->plan->new_to_old_gid_list);
@ -344,14 +373,14 @@ struct gvar
template<typename Iterator,
hb_requires (hb_is_iterator (Iterator))>
bool serialize (hb_serialize_context_t *c,
const glyph_variations_t& glyph_vars,
const glyph_variations_t<GidOffsetType>& glyph_vars,
Iterator it,
unsigned axis_count,
unsigned num_glyphs,
bool force_long_offsets) const
{
TRACE_SERIALIZE (this);
gvar *out = c->allocate_min<gvar> ();
gvar_GVAR *out = c->allocate_min<gvar_GVAR> ();
if (unlikely (!out)) return_trace (false);
out->version.major = 1;
@ -393,7 +422,7 @@ struct gvar
bool instantiate (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
glyph_variations_t glyph_vars;
glyph_variations_t<GidOffsetType> glyph_vars;
if (!decompile_glyph_variations (c, glyph_vars))
return_trace (false);
@ -423,7 +452,7 @@ struct gvar
unsigned glyph_count = version.to_int () ? c->plan->source->get_num_glyphs () : 0;
gvar *out = c->serializer->allocate_min<gvar> ();
gvar_GVAR *out = c->serializer->allocate_min<gvar_GVAR> ();
if (unlikely (!out)) return_trace (false);
out->version.major = 1;
@ -557,9 +586,11 @@ struct gvar
public:
struct accelerator_t
{
bool has_data () const { return table->has_data (); }
accelerator_t (hb_face_t *face)
{
table = hb_sanitize_context_t ().reference_table<gvar> (face);
table = hb_sanitize_context_t ().reference_table<gvar_GVAR> (face);
/* If sanitize failed, set glyphCount to 0. */
glyphCount = table->version.to_int () ? face->get_num_glyphs () : 0;
@ -627,35 +658,40 @@ struct gvar
bool apply_deltas_to_points (hb_codepoint_t glyph,
hb_array_t<const int> coords,
const hb_array_t<contour_point_t> points,
hb_glyf_scratch_t &scratch,
bool phantom_only = false) const
{
if (unlikely (glyph >= glyphCount)) return true;
hb_bytes_t var_data_bytes = table->get_glyph_var_data_bytes (table.get_blob (), glyphCount, glyph);
if (!var_data_bytes.as<GlyphVariationData> ()->has_data ()) return true;
hb_vector_t<unsigned int> shared_indices;
GlyphVariationData::tuple_iterator_t iterator;
auto &shared_indices = scratch.shared_indices;
shared_indices.clear ();
typename GlyphVariationData::tuple_iterator_t iterator;
if (!GlyphVariationData::get_tuple_iterator (var_data_bytes, table->axisCount,
var_data_bytes.arrayZ,
shared_indices, &iterator))
return true; /* so isn't applied at all */
/* Save original points for inferred delta calculation */
contour_point_vector_t orig_points_vec; // Populated lazily
auto &orig_points_vec = scratch.orig_points;
orig_points_vec.clear (); // Populated lazily
auto orig_points = orig_points_vec.as_array ();
/* flag is used to indicate referenced point */
contour_point_vector_t deltas_vec; // Populated lazily
auto &deltas_vec = scratch.deltas;
deltas_vec.clear (); // Populated lazily
auto deltas = deltas_vec.as_array ();
hb_vector_t<unsigned> end_points; // Populated lazily
unsigned num_coords = table->axisCount;
hb_array_t<const F2DOT14> shared_tuples = (table+table->sharedTuples).as_array (table->sharedTupleCount * num_coords);
hb_vector_t<unsigned int> private_indices;
hb_vector_t<int> x_deltas;
hb_vector_t<int> y_deltas;
auto &private_indices = scratch.private_indices;
auto &x_deltas = scratch.x_deltas;
auto &y_deltas = scratch.y_deltas;
unsigned count = points.length;
bool flush = false;
do
@ -726,8 +762,8 @@ struct gvar
if (phantom_only && pt_index < count - 4) continue;
auto &delta = deltas.arrayZ[pt_index];
delta.flag = 1; /* this point is referenced, i.e., explicit deltas specified */
delta.x += x_deltas.arrayZ[i] * scalar;
delta.y += y_deltas.arrayZ[i] * scalar;
delta.add_delta (x_deltas.arrayZ[i] * scalar,
y_deltas.arrayZ[i] * scalar);
}
}
else
@ -738,10 +774,9 @@ struct gvar
if (apply_to_all)
for (unsigned int i = phantom_only ? count - 4 : 0; i < count; i++)
{
unsigned int pt_index = i;
auto &delta = deltas.arrayZ[pt_index];
delta.x += x_deltas.arrayZ[i] * scalar;
delta.y += y_deltas.arrayZ[i] * scalar;
auto &delta = deltas.arrayZ[i];
delta.add_delta (x_deltas.arrayZ[i] * scalar,
y_deltas.arrayZ[i] * scalar);
}
else
for (unsigned int i = 0; i < num_deltas; i++)
@ -751,8 +786,8 @@ struct gvar
if (phantom_only && pt_index < count - 4) continue;
auto &delta = deltas.arrayZ[pt_index];
delta.flag = 1; /* this point is referenced, i.e., explicit deltas specified */
delta.x += x_deltas.arrayZ[i] * scalar;
delta.y += y_deltas.arrayZ[i] * scalar;
delta.add_delta (x_deltas.arrayZ[i] * scalar,
y_deltas.arrayZ[i] * scalar);
}
}
else
@ -760,10 +795,9 @@ struct gvar
if (apply_to_all)
for (unsigned int i = phantom_only ? count - 4 : 0; i < count; i++)
{
unsigned int pt_index = i;
auto &delta = deltas.arrayZ[pt_index];
delta.x += x_deltas.arrayZ[i];
delta.y += y_deltas.arrayZ[i];
auto &delta = deltas.arrayZ[i];
delta.add_delta (x_deltas.arrayZ[i],
y_deltas.arrayZ[i]);
}
else
for (unsigned int i = 0; i < num_deltas; i++)
@ -773,8 +807,8 @@ struct gvar
if (phantom_only && pt_index < count - 4) continue;
auto &delta = deltas.arrayZ[pt_index];
delta.flag = 1; /* this point is referenced, i.e., explicit deltas specified */
delta.x += x_deltas.arrayZ[i];
delta.y += y_deltas.arrayZ[i];
delta.add_delta (x_deltas.arrayZ[i],
y_deltas.arrayZ[i]);
}
}
}
@ -782,17 +816,14 @@ struct gvar
/* infer deltas for unreferenced points */
if (!apply_to_all && !phantom_only)
{
if (!end_points)
{
for (unsigned i = 0; i < count; ++i)
if (points.arrayZ[i].is_end_point)
end_points.push (i);
if (unlikely (end_points.in_error ())) return false;
}
unsigned start_point = 0;
for (unsigned end_point : end_points)
unsigned end_point = 0;
while (true)
{
while (end_point < count && !points.arrayZ[end_point].is_end_point)
end_point++;
if (unlikely (end_point == count)) break;
/* Check the number of unreferenced points in a contour. If no unref points or no ref points, nothing to do. */
unsigned unref_count = 0;
for (unsigned i = start_point; i < end_point + 1; i++)
@ -835,7 +866,7 @@ struct gvar
}
}
no_more_gaps:
start_point = end_point + 1;
start_point = end_point = end_point + 1;
}
}
@ -855,7 +886,7 @@ struct gvar
unsigned int get_axis_count () const { return table->axisCount; }
private:
hb_blob_ptr_t<gvar> table;
hb_blob_ptr_t<gvar_GVAR> table;
unsigned glyphCount;
hb_vector_t<hb_pair_t<int, int>> shared_tuple_active_idx;
};
@ -873,7 +904,7 @@ struct gvar
NNOffset32To<UnsizedArrayOf<F2DOT14>>
sharedTuples; /* Offset from the start of this table to the shared tuple records.
* Array of tuple records shared across all glyph variation data tables. */
HBUINT16 glyphCountX; /* The number of glyphs in this font. This must match the number of
GidOffsetType glyphCountX; /* The number of glyphs in this font. This must match the number of
* glyphs stored elsewhere in the font. */
HBUINT16 flags; /* Bit-field that gives the format of the offset array that follows.
* If bit 0 is clear, the offsets are uint16; if bit 0 is set, the
@ -888,9 +919,15 @@ struct gvar
DEFINE_SIZE_ARRAY (20, offsetZ);
};
using gvar = gvar_GVAR<HBUINT16, HB_OT_TAG_gvar>;
using GVAR = gvar_GVAR<HBUINT24, HB_OT_TAG_GVAR>;
struct gvar_accelerator_t : gvar::accelerator_t {
gvar_accelerator_t (hb_face_t *face) : gvar::accelerator_t (face) {}
};
struct GVAR_accelerator_t : GVAR::accelerator_t {
GVAR_accelerator_t (hb_face_t *face) : GVAR::accelerator_t (face) {}
};
} /* namespace OT */
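
In short, the table implementation is now a template over the glyph-id/offset width and table tag, instantiated twice: classic gvar (HBUINT16, tag 'gvar') and the beyond-64K GVAR variant (HBUINT24, tag 'GVAR'). The glyf accelerator earlier in this commit prefers GVAR when it has data and falls back to gvar.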

View File

@ -95,7 +95,6 @@ struct contour_point_t
HB_ALWAYS_INLINE
void translate (const contour_point_t &p) { x += p.x; y += p.y; }
float x;
float y;
uint8_t flag;
@ -104,19 +103,9 @@ struct contour_point_t
struct contour_point_vector_t : hb_vector_t<contour_point_t>
{
void extend (const hb_array_t<contour_point_t> &a)
{
unsigned int old_len = length;
if (unlikely (!resize (old_len + a.length, false)))
return;
auto arrayZ = this->arrayZ + old_len;
unsigned count = a.length;
hb_memcpy (arrayZ, a.arrayZ, count * sizeof (arrayZ[0]));
}
bool add_deltas (const hb_vector_t<float> deltas_x,
const hb_vector_t<float> deltas_y,
const hb_vector_t<bool> indices)
bool add_deltas (hb_array_t<const float> deltas_x,
hb_array_t<const float> deltas_y,
hb_array_t<const bool> indices)
{
if (indices.length != deltas_x.length ||
indices.length != deltas_y.length)

View File

@ -53,12 +53,9 @@ struct hb_vector_t
}
template <typename Iterable,
hb_requires (hb_is_iterable (Iterable))>
hb_vector_t (const Iterable &o) : hb_vector_t ()
explicit hb_vector_t (const Iterable &o) : hb_vector_t ()
{
auto iter = hb_iter (o);
if (iter.is_random_access_iterator || iter.has_fast_len)
alloc (hb_len (iter), true);
hb_copy (iter, *this);
extend (o);
}
hb_vector_t (const hb_vector_t &o) : hb_vector_t ()
{
@ -87,6 +84,35 @@ struct hb_vector_t
}
~hb_vector_t () { fini (); }
template <typename Iterable,
hb_requires (hb_is_iterable (Iterable))>
void extend (const Iterable &o)
{
auto iter = hb_iter (o);
if (iter.is_random_access_iterator || iter.has_fast_len)
alloc (hb_len (iter), true);
while (iter)
{
if (unlikely (!alloc (length + 1)))
return;
unsigned room = allocated - length;
for (unsigned i = 0; i < room && iter; i++)
push_has_room (*iter++);
}
}
void extend (array_t o)
{
alloc (length + o.length);
if (unlikely (in_error ())) return;
copy_array (o);
}
void extend (c_array_t o)
{
alloc (length + o.length);
if (unlikely (in_error ())) return;
copy_array (o);
}
public:
int allocated = 0; /* < 0 means allocation failed. */
unsigned int length = 0;
@ -135,6 +161,7 @@ struct hb_vector_t
alloc_exact (o.length);
if (unlikely (in_error ())) return *this;
length = 0;
copy_array (o.as_array ());
return *this;
@ -218,6 +245,10 @@ struct hb_vector_t
// reference to it.
return std::addressof (Crap (Type));
return push_has_room (std::forward<Args> (args)...);
}
template <typename... Args> Type *push_has_room (Args&&... args)
{
/* Emplace. */
Type *p = std::addressof (arrayZ[length++]);
return new (p) Type (std::forward<Args> (args)...);
@ -311,18 +342,23 @@ struct hb_vector_t
length = size;
}
template <typename T = Type,
hb_enable_if (hb_is_trivially_copyable (T))>
void
copy_array (hb_array_t<Type> other)
{
assert ((int) (length + other.length) <= allocated);
hb_memcpy ((void *) (arrayZ + length), (const void *) other.arrayZ, other.length * item_size);
length += other.length;
}
template <typename T = Type,
hb_enable_if (hb_is_trivially_copyable (T))>
void
copy_array (hb_array_t<const Type> other)
{
length = other.length;
if (!HB_OPTIMIZE_SIZE_VAL && sizeof (T) >= sizeof (long long))
/* This runs faster because of alignment. */
for (unsigned i = 0; i < length; i++)
arrayZ[i] = other.arrayZ[i];
else
hb_memcpy ((void *) arrayZ, (const void *) other.arrayZ, length * item_size);
assert ((int) (length + other.length) <= allocated);
hb_memcpy ((void *) (arrayZ + length), (const void *) other.arrayZ, other.length * item_size);
length += other.length;
}
template <typename T = Type,
hb_enable_if (!hb_is_trivially_copyable (T) &&
@ -330,12 +366,10 @@ struct hb_vector_t
void
copy_array (hb_array_t<const Type> other)
{
length = 0;
while (length < other.length)
{
length++;
new (std::addressof (arrayZ[length - 1])) Type (other.arrayZ[length - 1]);
}
assert ((int) (length + other.length) <= allocated);
for (unsigned i = 0; i < other.length; i++)
new (std::addressof (arrayZ[length + i])) Type (other.arrayZ[i]);
length += other.length;
}
template <typename T = Type,
hb_enable_if (!hb_is_trivially_copyable (T) &&
@ -345,13 +379,13 @@ struct hb_vector_t
void
copy_array (hb_array_t<const Type> other)
{
length = 0;
while (length < other.length)
assert ((int) (length + other.length) <= allocated);
for (unsigned i = 0; i < other.length; i++)
{
length++;
new (std::addressof (arrayZ[length - 1])) Type ();
arrayZ[length - 1] = other.arrayZ[length - 1];
new (std::addressof (arrayZ[length + i])) Type ();
arrayZ[length + i] = other.arrayZ[i];
}
length += other.length;
}
void
@ -437,6 +471,11 @@ struct hb_vector_t
return alloc (size, true);
}
void clear ()
{
resize (0);
}
bool resize (int size_, bool initialize = true, bool exact = false)
{
unsigned int size = size_ < 0 ? 0u : (unsigned int) size_;
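
hb_vector_t's copy_array () helpers now append at length under a capacity assert instead of overwriting from index zero, backing the new extend () family, and the Iterable constructor became explicit so a deep copy must be spelled out. A hypothetical call site:

int other_ints[3] = {1, 2, 3};
hb_vector_t<int> v;
v.extend (hb_array (other_ints, 3));  // append from a view; allocates once up front
v.clear ();                           // new shorthand for resize (0)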

View File

@ -47,7 +47,7 @@ HB_BEGIN_DECLS
*
* The minor component of the library version available at compile-time.
*/
#define HB_VERSION_MINOR 3
#define HB_VERSION_MINOR 4
/**
* HB_VERSION_MICRO:
*
@ -60,7 +60,7 @@ HB_BEGIN_DECLS
*
* A string literal containing the library version available at compile-time.
*/
#define HB_VERSION_STRING "10.3.0"
#define HB_VERSION_STRING "10.4.0"
/**
* HB_VERSION_ATLEAST: