ggerganov committed on
Commit d4f82d5
1 Parent(s): 67c5387

metal : fix indent (ggml/0)

Files changed (1)
  1. ggml-metal.m +14 -14
ggml-metal.m CHANGED
@@ -1195,24 +1195,24 @@ static enum ggml_status ggml_metal_graph_compute(
                 [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
             } break;
         case GGML_OP_CLAMP:
-                {
-                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CLAMP].pipeline;
+            {
+                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CLAMP].pipeline;
 
-                    float min;
-                    float max;
-                    memcpy(&min, ((int32_t *) dst->op_params) + 0, sizeof(float));
-                    memcpy(&max, ((int32_t *) dst->op_params) + 1, sizeof(float));
+                float min;
+                float max;
+                memcpy(&min, ((int32_t *) dst->op_params) + 0, sizeof(float));
+                memcpy(&max, ((int32_t *) dst->op_params) + 1, sizeof(float));
 
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                    [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
-                    [encoder setBytes:&min length:sizeof(min) atIndex:2];
-                    [encoder setBytes:&max length:sizeof(max) atIndex:3];
+                [encoder setComputePipelineState:pipeline];
+                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+                [encoder setBytes:&min length:sizeof(min) atIndex:2];
+                [encoder setBytes:&max length:sizeof(max) atIndex:3];
 
-                    const int64_t n = ggml_nelements(dst);
+                const int64_t n = ggml_nelements(dst);
 
-                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } break;
+                [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+            } break;
         case GGML_OP_UNARY:
             switch (ggml_get_unary_op(gf->nodes[i])) {
                 // we are not taking into account the strides, so for now require contiguous tensors
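
For context, the pipeline dispatched in this block runs one thread per element: n threadgroups of a single thread each, so every thread clamps one value of src0 into dst using the min/max bounds bound via setBytes at indices 2 and 3. Below is a minimal sketch of what the shader side of GGML_METAL_KERNEL_TYPE_CLAMP looks like; the name kernel_clamp and the exact parameter declarations follow the usual ggml-metal.metal conventions but should be treated as an assumption rather than a quote of the shader source.

// Hedged sketch of the clamp kernel consumed by this dispatch.
// Buffer/byte indices mirror the encoder setup in the diff above:
//   0 = src0, 1 = dst, 2 = min, 3 = max.
kernel void kernel_clamp(
        device const float * src0,
        device       float * dst,
        constant     float & min,
        constant     float & max,
        uint tpig[[thread_position_in_grid]]) {
    // one thread per element; tpig covers 0..n-1 because the host dispatches
    // MTLSizeMake(n, 1, 1) threadgroups with one thread per threadgroup
    dst[tpig] = src0[tpig] < min ? min : (src0[tpig] > max ? max : src0[tpig]);
}

The host side reads min and max back out of dst->op_params with memcpy because op_params is declared as an int32_t array; the two floats are stored there bit-for-bit when the clamp op is created.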