-
Notifications
You must be signed in to change notification settings - Fork 13.9k
ggml-hexagon: fix rope failure at test-backend-ops
#17565
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Open
chraac
wants to merge
11
commits into
ggml-org:master
Choose a base branch
from
chraac:dev-fix-rope
base: master
Could not load branches
Branch not found: {{ refName }}
Loading
Could not load tags
Nothing to show
Loading
Are you sure you want to change the base?
Some commits from the old base branch may be removed from the timeline,
and old review comments may become outdated.
+47
−47
Open
Changes from 7 commits
Commits
Show all changes
11 commits
Select commit
Hold shift + click to select a range
407b408
fix test failure
chraac 4ddb8a4
fix: correct scaling calculations in rope_cache_init
chraac cfca78b
wip
chraac e9a02fd
wip
chraac e324bb0
fix: optimize element copying in rope_hex_f32 using memcpy
chraac 0121291
fix: optimize loop boundaries in rope_hex_f32 for better performance
chraac 010039a
rename
chraac a6ef41f
wip
chraac 0376146
Merge branch 'master' into dev-fix-rope
chraac 8abecfa
Merge tag 'b7207' into dev-fix-rope
chraac b567413
feat: add profiling macros for performance measurement in operations
chraac File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -73,15 +73,15 @@ static float rope_yarn_ramp(const float low, const float high, const int i0) { | |
| return (1 - MIN(1, MAX(0, y))); | ||
| } | ||
|
|
||
| static void rope_cache_init(const float theta_base, | ||
| float freq_scale, | ||
| const float * freq_factors, | ||
| float * corr_dims, | ||
| uint32_t ne0, | ||
| float ext_factor, | ||
| float mscale, | ||
| float * cache, | ||
| float theta_scale) { | ||
| static void rope_cache_init(const float theta_base, | ||
| const float freq_scale, | ||
| const float * freq_factors, | ||
| float * corr_dims, | ||
| const uint32_t ne0, | ||
| const float ext_factor, | ||
| const float mscale, | ||
| float * cache, | ||
| const float theta_scale) { | ||
| // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py | ||
| float theta = theta_base; | ||
|
|
||
|
|
@@ -92,18 +92,19 @@ static void rope_cache_init(const float theta_base, | |
|
|
||
| // Get n-d rotational scaling corrected for extrapolation | ||
| float theta_interp = freq_scale * theta_extrap; | ||
| float theta2 = theta_interp; | ||
| float theta_final = theta_interp; | ||
| float mscale_final = mscale; | ||
|
|
||
| if (ext_factor != 0.0f) { | ||
| float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor; | ||
| theta2 = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; | ||
| theta_final = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; | ||
|
|
||
| // Get n-d magnitude scaling corrected for interpolation | ||
| mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale); | ||
| mscale_final *= 1.0f + 0.1f * logf(1.0f / freq_scale); | ||
| } | ||
|
|
||
| cache[i0 + 0] = cosf(theta2) * mscale; | ||
| cache[i0 + 1] = sinf(theta2) * mscale; | ||
| cache[i0 + 0] = cosf(theta_final) * mscale_final; | ||
| cache[i0 + 1] = sinf(theta_final) * mscale_final; | ||
|
|
||
| theta *= theta_scale; | ||
| } | ||
|
|
@@ -151,9 +152,9 @@ static void init_rope_ctx(struct rope_th_ctx * rope_ctx, struct htp_ops_context | |
| } | ||
|
|
||
| static void hvx_calc_rope_neox_f32(const float * restrict src0, | ||
| float * restrict dst, | ||
| const int num_elems, | ||
| const float * restrict theta_cache) { | ||
| float * restrict dst, | ||
| const int num_elems, | ||
| const float * restrict theta_cache) { | ||
| // for (int i = 0; i < num_elems; i += 2) { | ||
| //const float cos_theta = theta_cache[i + 0]; | ||
| //const float sin_theta = theta_cache[i + 1]; | ||
|
|
@@ -192,7 +193,7 @@ static void hvx_calc_rope_neox_f32(const float * restrict src0, | |
| HVX_Vector v4 = Q6_Vqf32_vsub_Vqf32Vqf32(vx0_c, vx1_s); | ||
| HVX_Vector v5 = Q6_Vqf32_vadd_Vqf32Vqf32(vx0_s, vx1_c); | ||
|
|
||
| *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v4); | ||
| *(HVX_Vector *) dst_curr = Q6_Vsf_equals_Vqf32(v4); | ||
| *(HVX_Vector *) (dst_curr + half_size) = Q6_Vsf_equals_Vqf32(v5); | ||
|
|
||
| src0_curr += VLEN; | ||
|
|
@@ -259,16 +260,16 @@ static void rope_hex_f32(struct rope_th_ctx * rope_ctx, | |
| const uint32_t ir1, | ||
| int nth, | ||
| int ith, | ||
| int opt_path) { | ||
| const int opt_path) { | ||
| struct htp_ops_context * octx = rope_ctx->octx; | ||
|
|
||
| const struct htp_tensor * src0 = &octx->src0; | ||
| const struct htp_tensor * src1 = &octx->src1; | ||
| const struct htp_tensor * src2 = &octx->src2; | ||
| struct htp_tensor * dst = &octx->dst; | ||
|
|
||
| const int32_t mode = rope_ctx->mode; | ||
| const bool is_neox = mode & HTP_ROPE_TYPE_NEOX; | ||
| const int32_t mode = rope_ctx->mode; | ||
| const bool is_neox = mode & HTP_ROPE_TYPE_NEOX; | ||
|
|
||
| htp_rope_preamble; | ||
|
|
||
|
|
@@ -281,23 +282,16 @@ static void rope_hex_f32(struct rope_th_ctx * rope_ctx, | |
| freq_factors = (const float *) src2->data; | ||
| } | ||
|
|
||
| int ir = 0; | ||
|
|
||
| const uint32_t i1_end = MIN(ir1, ne1); | ||
| const int32_t half_dims = rope_ctx->n_dims / 2; | ||
| for (uint32_t i3 = 0; i3 < ne3; i3++) { // batch | ||
| for (uint32_t i2 = 0; i2 < ne2; i2++) { // seq-len | ||
| const int32_t p = pos[i2]; | ||
|
|
||
| rope_cache_init(p, rope_ctx->freq_scale, freq_factors, rope_ctx->corr_dims, ne0, rope_ctx->ext_factor, | ||
| rope_ctx->attn_factor, wp0, rope_ctx->theta_scale); | ||
|
|
||
| for (uint32_t i1 = 0; i1 < ne1; i1++) { // attn-heads | ||
| if (ir++ < ir0) { | ||
| continue; | ||
| } | ||
| if (ir > ir1) { | ||
| break; | ||
| } | ||
|
|
||
| for (uint32_t i1 = ir0; i1 < i1_end; i1++) { // attn-heads | ||
| const float * src = (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); | ||
| float * dst_data = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); | ||
|
|
||
|
|
@@ -310,17 +304,20 @@ static void rope_hex_f32(struct rope_th_ctx * rope_ctx, | |
| } else { | ||
| hvx_calc_rope_f32(src_loc, dst_data_loc, rope_ctx->n_dims, wp0); | ||
| } | ||
|
|
||
| src_loc += rope_ctx->n_dims; | ||
| dst_data_loc += rope_ctx->n_dims; | ||
| } else { | ||
| for (uint32_t i0 = 0; i0 < rope_ctx->n_dims; i0 += 2) { | ||
| const float cos_theta = wp0[i0 + 0]; | ||
| const float sin_theta = wp0[i0 + 1]; | ||
|
|
||
| if (is_neox) { | ||
| const float x0 = src_loc[0]; | ||
| const float x1 = src_loc[rope_ctx->n_dims/2]; | ||
| const float x1 = src_loc[half_dims]; | ||
|
|
||
| dst_data_loc[0] = x0 * cos_theta - x1 * sin_theta; | ||
| dst_data_loc[rope_ctx->n_dims/2] = x0 * sin_theta + x1 * cos_theta; | ||
| dst_data_loc[0] = x0 * cos_theta - x1 * sin_theta; | ||
| dst_data_loc[half_dims] = x0 * sin_theta + x1 * cos_theta; | ||
|
|
||
| src_loc += 1; | ||
| dst_data_loc += 1; | ||
|
|
@@ -335,15 +332,13 @@ static void rope_hex_f32(struct rope_th_ctx * rope_ctx, | |
| dst_data_loc += 2; | ||
| } | ||
| } | ||
| } | ||
|
|
||
| for (uint32_t i0 = rope_ctx->n_dims; i0 < ne0; i0 += 2) { | ||
| dst_data_loc[0] = src_loc[0]; | ||
| dst_data_loc[1] = src_loc[1]; | ||
|
|
||
| src_loc += 2; | ||
| dst_data_loc += 2; | ||
| src_loc += (is_neox ? half_dims : 0); | ||
| dst_data_loc += (is_neox ? half_dims : 0); | ||
| } | ||
|
|
||
| // TODO: use simd to speed up the remaining elements copy | ||
| memcpy(dst_data_loc, src_loc, (ne0 - rope_ctx->n_dims) * sizeof(float)); | ||
|
||
| } | ||
| } | ||
| } | ||
|
|
||
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Those two inner `if` statements can be merged into the `for` loop's condition.