ggml : move rope type enum to ggml.h (llama/8949)
* ggml : move rope type enum to ggml.h
This commit moves the `llama_rope_type` enum from `llama.h` to
`ggml.h` and changes its name to `ggml_rope_type`.
The motivation for this change is to address the TODO in `llama.h` and
use the enum in ggml.
Note: This commit does not change the `mode` parameter to be of type
`enum ggml_rope_type`. The name `mode` and its usage suggest that it
might be more generic and possibly used as a bit field for multiple
flags. Further investigation/discussion may be needed to determine
if `mode` should be restricted to RoPE types.
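To illustrate the concern: if `mode` is a bit field, callers can OR independent flags together and implementations test each bit separately, which a single `enum ggml_rope_type` value could not express. A minimal sketch, with hypothetical `GGML_ROPE_FLAG_*` names that are not part of ggml:

```c
#include <stdbool.h>

// hypothetical flag names for illustration only; ggml names just the NeoX bit
#define GGML_ROPE_FLAG_SKIP_N_PAST 1   // bit 0 (historical, not supported)
#define GGML_ROPE_FLAG_NEOX        2   // bit 1 (GPT-NeoX style)

static bool has_flag(int mode, int flag) {
    // each bit is tested independently, so flags can be combined:
    //   int mode = GGML_ROPE_FLAG_NEOX | GGML_ROPE_FLAG_SKIP_N_PAST;
    return (mode & flag) != 0;
}
```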
* squash! ggml : move rope type enum to ggml.h
This commit removes GGML_ROPE_TYPE_NONE and GGML_ROPE_TYPE_GLM from
`ggml.h`, and brings back the `llama_rope_type` enum.
I've kept the assert for GGML_ROPE_TYPE_GLM as I'm not sure if it is
safe to remove it yet.
* squash! ggml : move rope type enum to ggml.h
This commit removes the enum ggml_rope_type from ggml.h and replaces it
with a define (GGML_ROPE_TYPE_NEOX). This define is used in the code to
check if the mode is set to GPT-NeoX. Also the enum llama_rope_type has
been updated to reflect this change.
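The resulting pattern, visible in the diff below, is a single define tested with a bitwise AND. On the llama.cpp side, `llama_rope_type` can then be defined in terms of it; the enum below is a sketch of that update (`llama.h` is not part of this sync diff):

```c
// ggml/include/ggml.h
#define GGML_ROPE_TYPE_NEOX 2

// assumed shape of the updated enum in llama.h (not shown in this diff):
// the NeoX member now takes its value directly from the ggml define
enum llama_rope_type {
    LLAMA_ROPE_TYPE_NONE = -1,
    LLAMA_ROPE_TYPE_NORM =  0,
    LLAMA_ROPE_TYPE_NEOX = GGML_ROPE_TYPE_NEOX,
};
```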
* squash! ggml : move rope type enum to ggml.h
This commit contains a suggestion to enable the GGML_ROPE_TYPE_NEOX
macro/define to be passed to the shader compiler.
* squash! ggml : move rope type enum to ggml.h
This commit fixes the editorconfig-checker warnings.
* squash! ggml : move rope type enum to ggml.h
Update comment for ggml_rope function.
* Revert "squash! ggml : move rope type enum to ggml.h"
This reverts commit 6261222bd0dc0efd51f0fb0435ad3f16a5b52fd6.
* squash! ggml : move rope type enum to ggml.h
Add GGML_ROPE_TYPE_NEOX to rope_common.comp.
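Since the Vulkan compute shaders are compiled separately and do not include `ggml.h`, the value has to be mirrored in the shader source; presumably the added line in rope_common.comp is the same preprocessor definition, which GLSL shares with C:

```c
// mirrored in rope_common.comp so the shader and the host code
// agree on which bit of `mode` selects the GPT-NeoX style
#define GGML_ROPE_TYPE_NEOX 2
```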
* remove extra line
---------
Co-authored-by: slaren <[email protected]>
- ggml/include/ggml.h +4 -2
- ggml/src/ggml-cann/aclnn_ops.cpp +1 -1
- ggml/src/ggml-cuda/rope.cu +1 -1
- ggml/src/ggml-metal.m +1 -1
- ggml/src/ggml-sycl/rope.cpp +1 -1
- ggml/src/ggml-vulkan.cpp +1 -1
- ggml/src/ggml.c +2 -2
`ggml/include/ggml.h`

```diff
@@ -244,6 +244,8 @@
 #define GGML_EXIT_SUCCESS 0
 #define GGML_EXIT_ABORTED 1
 
+#define GGML_ROPE_TYPE_NEOX 2
+
 #define GGUF_MAGIC "GGUF"
 
 #define GGUF_VERSION 3
@@ -1473,8 +1475,8 @@ extern "C" {
             struct ggml_tensor  * b);
 
     // rotary position embedding
-    // if mode & 1 == 1, skip n_past elements (NOT SUPPORTED)
-    // if mode & 2 == 1, GPT-NeoX style
+    // if (mode & 1) - skip n_past elements (NOT SUPPORTED)
+    // if (mode & GGML_ROPE_TYPE_NEOX) - GPT-NeoX style
     //
     // b is an int32 vector with size a->ne[2], it contains the positions
     GGML_API struct ggml_tensor * ggml_rope(
```
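Per the updated header comment, a caller selects the NeoX style by setting the corresponding bit of `mode`. A minimal usage sketch (the wrapper function and tensor layout are illustrative, not from this commit):

```c
#include "ggml.h"

// a:   the tensor to rotate, with a->ne[2] tokens along dimension 2
// pos: an int32 vector of a->ne[2] positions, as the header comment requires
static struct ggml_tensor * rope_neox_example(struct ggml_context * ctx,
                                              struct ggml_tensor  * a,
                                              struct ggml_tensor  * pos) {
    const int n_dims = (int) a->ne[0];  // rotate all head dimensions
    return ggml_rope(ctx, a, pos, n_dims, GGML_ROPE_TYPE_NEOX);
}
```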
`ggml/src/ggml-cann/aclnn_ops.cpp`

```diff
@@ -2881,7 +2881,7 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) {
     ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast,
                              beta_slow, corr_dims);
 
-    const bool is_neox = mode & 2;
+    const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
 
     // init cos/sin cache
     ggml_cann_pool_alloc sin_allocator(
```
`ggml/src/ggml-cuda/rope.cu`

```diff
@@ -226,7 +226,7 @@ void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     memcpy(&beta_fast,   (int32_t *) dst->op_params +  9, sizeof(float));
     memcpy(&beta_slow,   (int32_t *) dst->op_params + 10, sizeof(float));
 
-    const bool is_neox = mode & 2;
+    const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
 
     const int32_t * pos = (const int32_t *) src1_d;
 
```
`ggml/src/ggml-metal.m`

```diff
@@ -2373,7 +2373,7 @@ static enum ggml_status ggml_metal_graph_compute(
     memcpy(&beta_fast,   (int32_t *) dst->op_params +  9, sizeof(float));
     memcpy(&beta_slow,   (int32_t *) dst->op_params + 10, sizeof(float));
 
-    const bool is_neox = mode & 2;
+    const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
 
     id<MTLComputePipelineState> pipeline = nil;
 
```
`ggml/src/ggml-sycl/rope.cpp`

```diff
@@ -226,7 +226,7 @@ void ggml_sycl_op_rope(
     memcpy(&beta_fast,   (int32_t *) dst->op_params +  9, sizeof(float));
     memcpy(&beta_slow,   (int32_t *) dst->op_params + 10, sizeof(float));
 
-    const bool is_neox = mode & 2;
+    const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
 
     const int32_t * pos = (const int32_t *) src1_dd;
 
```
`ggml/src/ggml-vulkan.cpp`

```diff
@@ -4067,7 +4067,7 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
     case GGML_OP_ROPE:
         {
             const int mode = ((const int32_t *) dst->op_params)[2];
-            const bool is_neox = mode & 2;
+            const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
 
             if (is_neox) {
                 if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
```
`ggml/src/ggml.c`

```diff
@@ -14353,7 +14353,7 @@ static void ggml_compute_forward_rope_f32(
     float corr_dims[2];
     ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);
 
-    const bool is_neox = mode & 2;
+    const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
 
     const float * freq_factors = NULL;
     if (src2 != NULL) {
@@ -14478,7 +14478,7 @@ static void ggml_compute_forward_rope_f16(
     float corr_dims[2];
     ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);
 
-    const bool is_neox = mode & 2;
+    const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
 
     const float * freq_factors = NULL;
     if (src2 != NULL) {
```
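For reference, the bit only changes how the kernels pair dimensions for rotation; the `is_neox` branches above follow this scheme (a simplified sketch of the index pairing, not the actual ggml kernels, which also apply frequency scaling and YaRN correction):

```c
#include <stdbool.h>

// which two elements of a head vector get rotated together,
// for pair index i in [0, n_dims/2)
static void rope_pair(bool is_neox, int i, int n_dims, int * i0, int * i1) {
    if (is_neox) {
        *i0 = i;              // GPT-NeoX style: first and second half
        *i1 = i + n_dims/2;   // are rotated against each other
    } else {
        *i0 = 2*i;            // original style: adjacent elements
        *i1 = 2*i + 1;        // form the rotated pairs
    }
}
```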