jeffbolznv committed
Commit 6df82f4 · Parent: e6a209e

vulkan: Use coopmat2 for conv2d (llama/14982)

ggml/src/ggml-vulkan/ggml-vulkan.cpp CHANGED
@@ -3096,6 +3096,10 @@ static void ggml_vk_load_shaders(vk_device& device) {
     uint32_t conv2d_SHMEM_PAD = 4;
     bool conv2d_UNROLL = true;
 
+    if (device->coopmat2) {
+        conv2d_SHMEM_PAD = 8; // 8 float16_t
+    }
+
     if (device->vendor_id == VK_VENDOR_ID_INTEL) {
         conv2d_SHMEM_PAD = 0;
         conv2d_UNROLL = false;
@@ -3154,7 +3158,14 @@ static void ggml_vk_load_shaders(vk_device& device) {
     std::array<uint32_t, 3> wg_denoms = { conv2d_BS_K, conv2d_BS_NPQ, 1 };
     std::vector<uint32_t> spec_constants = { conv2d_WG_SIZE, conv2d_BS_K, conv2d_BS_CRS, conv2d_BS_NPQ, conv2d_TS_K, use_collectives, conv2d_SHMEM_PAD };
 
-    if (conv2d_UNROLL) {
+    if (device->coopmat2) {
+        ggml_vk_create_pipeline(
+            device, device->pipeline_conv2d_f32[s], "conv2d_f32", conv2d_f32_cm2_len, conv2d_f32_cm2_data, "main", 3,
+            sizeof(vk_op_conv2d_push_constants), wg_denoms, spec_constants, 1, true, use_collectives);
+        ggml_vk_create_pipeline(
+            device, device->pipeline_conv2d_f16_f32[s], "conv2d_f16_f32", conv2d_f16_f32_cm2_len, conv2d_f16_f32_cm2_data, "main", 3,
+            sizeof(vk_op_conv2d_push_constants), wg_denoms, spec_constants, 1, true, use_collectives);
+    } else if (conv2d_UNROLL) {
         ggml_vk_create_pipeline(
             device, device->pipeline_conv2d_f32[s], "conv2d_f32", conv2d_f32_unroll_len, conv2d_f32_unroll_data, "main", 3,
             sizeof(vk_op_conv2d_push_constants), wg_denoms, spec_constants, 1, true, use_collectives);
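
Context note (not part of the commit): the spec_constants vector assembled above reaches the shader through Vulkan specialization constants, one constant_id per vector element, which is how conv2d_SHMEM_PAD ends up padding the shared-memory row stride inside conv2d_mm.comp. The sketch below shows how such constants are typically declared on the GLSL side; the constant_id ordering is assumed to follow the vector, and the default values are placeholders rather than the actual defaults in conv2d_mm.comp.

#version 450

// Sketch only: specialization constants mirroring the host-side vector
// { conv2d_WG_SIZE, conv2d_BS_K, conv2d_BS_CRS, conv2d_BS_NPQ, conv2d_TS_K,
//   use_collectives, conv2d_SHMEM_PAD }. Defaults here are illustrative.
layout(constant_id = 0) const uint WG_SIZE         = 256;
layout(constant_id = 1) const uint BS_K            = 128;
layout(constant_id = 2) const uint BS_CRS          = 16;
layout(constant_id = 3) const uint BS_NPQ          = 128;
layout(constant_id = 4) const uint TS_K            = 8;
layout(constant_id = 5) const uint use_collectives = 1;
layout(constant_id = 6) const uint SHMEM_PAD       = 4; // 8 on the coopmat2 path (8 float16_t, i.e. the same 16 bytes as 4 floats)

// The pad widens each shared-memory row so consecutive rows do not map to the
// same shared-memory banks; spec-constant expressions like this are resolved
// at pipeline-creation time.
const uint Ash_stride = BS_CRS + SHMEM_PAD;

layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; // workgroup size = WG_SIZE

void main() {}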
ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp CHANGED
@@ -1,6 +1,11 @@
 #version 450
 
 #extension GL_EXT_control_flow_attributes : enable
+#ifdef COOPMAT2
+#extension GL_NV_cooperative_matrix2 : enable
+#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
+#extension GL_KHR_memory_scope_semantics : enable
+#endif
 
 #ifdef USE_COLLECTIVES
 # extension GL_KHR_shader_subgroup_shuffle : enable
@@ -91,6 +96,12 @@ uint32_t n_elems_out = K * NPQ;
 // Number of blocktiles per input
 uint32_t NB_CRS = splitWork(CRS, BS_CRS);
 
+#ifdef COOPMAT2
+#define SHMEM_TYPE float16_t
+#else
+#define SHMEM_TYPE float
+#endif
+
 const uint32_t Ash_stride = BS_CRS + SHMEM_PAD;
 const uint32_t Bsh_stride = BS_NPQ + SHMEM_PAD;
 
@@ -100,8 +111,8 @@ const uint32_t Bsh_numel = BS_CRS * BS_NPQ;
 const uint32_t Ash_len = BS_K * Ash_stride;
 const uint32_t Bsh_len = BS_CRS * Bsh_stride;
 
-shared float Ash[Ash_len]; // K x CRS
-shared float Bsh[Bsh_len]; // CRS x NPQ
+shared SHMEM_TYPE Ash[Ash_len]; // K x CRS
+shared SHMEM_TYPE Bsh[Bsh_len]; // CRS x NPQ
 
 // Threadtile sizes
 const uint32_t TS_NPQ = BS_K * BS_NPQ / WG_SIZE / TS_K;
@@ -110,10 +121,6 @@ const uint32_t TS_NPQ = BS_K * BS_NPQ / WG_SIZE / TS_K;
 const uint32_t NT_K = BS_K / TS_K;
 const uint32_t NT_NPQ = BS_NPQ / TS_NPQ;
 
-float regA[TS_K];
-float regB[TS_NPQ];
-float regC[TS_K][TS_NPQ];
-
 /*
 Compute
 KxCRS @ CRSxNPQ = K x NPQ
@@ -145,12 +152,36 @@ uint fastdiv(uint n, uint mp, uint L) {
     return (msbs + n) >> L;
 }
 
+#ifdef COOPMAT2
+#define ACC_TYPE float16_t
+
+ACC_TYPE perElemOpStore(const in uint32_t r, const in uint32_t c, const in ACC_TYPE elem)
+{
+    uint32_t K_idx = B_idx_K * BS_K + r;
+    uint32_t NPQ_idx = B_idx_NPQ * BS_NPQ + c;
+    uint32_t N_idx = fastdiv(NPQ_idx, p.OWOHmp, p.OWOHL); // divide by p.OH * p.OW
+    uint32_t OH_idx = fastdiv(NPQ_idx - N_idx * p.OH * p.OW, p.OWmp, p.OWL); // divide by p.OW
+    uint32_t OW_idx = NPQ_idx - N_idx * p.OH * p.OW - OH_idx * p.OW;
+    uint32_t dst_idx = OW_idx + OH_idx * p.nb1 + K_idx * p.nb2 + N_idx * p.nb3;
+    if (K_idx < K && NPQ_idx < NPQ) {
+        dst_data[dst_idx] = D_TYPE(elem);
+    }
+    return elem;
+}
+#endif
+
 void main() {
+#ifdef COOPMAT2
+    coopmat<ACC_TYPE, gl_ScopeWorkgroup, BS_K, BS_NPQ, gl_MatrixUseAccumulator> matC;
+    matC = coopmat<ACC_TYPE, gl_ScopeWorkgroup, BS_K, BS_NPQ, gl_MatrixUseAccumulator>(0.0);
+#else
+    float regC[TS_K][TS_NPQ];
     for (uint32_t T_ly = 0; T_ly < TS_K; T_ly++) {
         for (uint32_t T_lx = 0; T_lx < TS_NPQ; T_lx++) {
             regC[T_ly][T_lx] = 0.0;
         }
     }
+#endif
     /* Advance block in CRS dim */
     for (uint32_t B_idx_CRS = 0; B_idx_CRS < NB_CRS; B_idx_CRS++) {
         uint32_t CRS_idx_a;
@@ -199,7 +230,7 @@ void main() {
             if (K_idx >= K || CRS_idx_a >= CRS) {
                 val = 0.0;
             }
-            Ash[B_ly * Ash_stride + B_lx] = val;
+            Ash[B_ly * Ash_stride + B_lx] = SHMEM_TYPE(val);
         }
         /* Load input to B_block: (BS_CRS x BS_NPQ) */
         UNROLL for (uint32_t r_offset = 0; r_offset < BS_CRS; r_offset += BrpWg) {
@@ -244,11 +275,21 @@ void main() {
             if (CRS_idx_b >= CRS || NPQ_idx >= NPQ || H_idx < 0 || H_idx >= p.H || W_idx < 0 || W_idx >= p.W) {
                 val = 0.0;
             }
-            Bsh[B_ly * Bsh_stride + B_lx] = val;
+            Bsh[B_ly * Bsh_stride + B_lx] = SHMEM_TYPE(val);
         }
         barrier();
+#ifdef COOPMAT2
+        coopmat<float16_t, gl_ScopeWorkgroup, BS_K, BS_CRS, gl_MatrixUseA> matA;
+        coopmat<float16_t, gl_ScopeWorkgroup, BS_CRS, BS_NPQ, gl_MatrixUseB> matB;
+
+        coopMatLoad(matA, Ash, 0, Ash_stride, gl_CooperativeMatrixLayoutRowMajor);
+        coopMatLoad(matB, Bsh, 0, Bsh_stride, gl_CooperativeMatrixLayoutRowMajor);
+        matC = coopMatMulAdd(matA, matB, matC);
+#else
         if (T_y * TS_K < K) {
             UNROLL for (uint32_t CRS_lidx = 0; CRS_lidx < BS_CRS; CRS_lidx++) {
+                float regA[TS_K];
+                float regB[TS_NPQ];
                 for (uint32_t T_ly = 0; T_ly < TS_K; T_ly++) {
                     regA[T_ly] = Ash[(T_y * TS_K + T_ly) * Ash_stride + CRS_lidx];
                 }
@@ -262,9 +303,13 @@ void main() {
                 }
             }
         }
+#endif
         barrier();
     }
     /* Save C* */
+#ifdef COOPMAT2
+    coopMatPerElementNV(matC, matC, perElemOpStore);
+#else
     if (T_y * TS_K < K) {
         for (uint32_t T_ly = 0; T_ly < TS_K; T_ly++) {
             for (uint32_t T_lx = 0; T_lx < TS_NPQ; T_lx++) {
@@ -280,4 +325,5 @@ void main() {
             }
         }
     }
+#endif
 }
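
For readers unfamiliar with GL_NV_cooperative_matrix2, the standalone sketch below condenses the COOPMAT2 path added above to a single tile: fill padded float16_t tiles in shared memory, load them into workgroup-scope cooperative matrices, accumulate with coopMatMulAdd, and scatter the accumulator through a per-element callback. It is illustrative only; the tile sizes, the output buffer, and the storeElem callback are placeholder assumptions rather than the declarations used by conv2d_mm.comp, and it needs a Vulkan 1.3 target with the cooperative-matrix features enabled.

#version 450
// Sketch only: minimal NV_cooperative_matrix2 usage pattern, names and sizes illustrative.
#extension GL_KHR_cooperative_matrix : require
#extension GL_NV_cooperative_matrix2 : enable
#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
#extension GL_KHR_memory_scope_semantics : enable

layout(local_size_x = 128, local_size_y = 1, local_size_z = 1) in;

const uint TILE_M = 16, TILE_N = 16, TILE_K = 16;
const uint PAD = 8; // row padding in float16_t elements, as chosen on the host side

// Workgroup-shared tiles; in the real shader these are filled cooperatively
// from the kernel and input tensors before the matrix multiply.
shared float16_t Ash[TILE_M * (TILE_K + PAD)];
shared float16_t Bsh[TILE_K * (TILE_N + PAD)];

layout(std430, set = 0, binding = 0) buffer D { float dst[]; };

// Callback invoked once per accumulator element by coopMatPerElementNV; the real
// shader computes a strided NCHW destination index here and bounds-checks it.
float16_t storeElem(const in uint32_t r, const in uint32_t c, const in float16_t elem) {
    dst[r * TILE_N + c] = float(elem);
    return elem;
}

void main() {
    // Workgroup-scope accumulator, zero-initialized.
    coopmat<float16_t, gl_ScopeWorkgroup, TILE_M, TILE_N, gl_MatrixUseAccumulator> matC =
        coopmat<float16_t, gl_ScopeWorkgroup, TILE_M, TILE_N, gl_MatrixUseAccumulator>(0.0);

    // In the real shader this sits inside the loop over CRS block tiles, after the
    // workgroup has filled Ash/Bsh and issued a barrier().
    coopmat<float16_t, gl_ScopeWorkgroup, TILE_M, TILE_K, gl_MatrixUseA> matA;
    coopmat<float16_t, gl_ScopeWorkgroup, TILE_K, TILE_N, gl_MatrixUseB> matB;
    coopMatLoad(matA, Ash, 0, TILE_K + PAD, gl_CooperativeMatrixLayoutRowMajor);
    coopMatLoad(matB, Bsh, 0, TILE_N + PAD, gl_CooperativeMatrixLayoutRowMajor);
    matC = coopMatMulAdd(matA, matB, matC);

    // Scatter the accumulator through the per-element callback instead of coopMatStore,
    // which is what lets the conv2d shader bounds-check and reindex each output element.
    coopMatPerElementNV(matC, matC, storeElem);
}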
ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp CHANGED
@@ -661,6 +661,9 @@ void process_shaders() {
     string_to_spv("conv2d_f32", "conv2d_mm.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"USE_COLLECTIVES", "1"}, {"UNROLL", ""}});
     string_to_spv("conv2d_f16_f32", "conv2d_mm.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"USE_COLLECTIVES", "1"}, {"UNROLL", ""}});
 
+    string_to_spv("conv2d_f32", "conv2d_mm.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"USE_COLLECTIVES", "1"}, {"UNROLL", "[[unroll]]"}, {"COOPMAT2", "1"}}, true, false, true);
+    string_to_spv("conv2d_f16_f32", "conv2d_mm.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"USE_COLLECTIVES", "1"}, {"UNROLL", "[[unroll]]"}, {"COOPMAT2", "1"}}, true, false, true);
+
     string_to_spv("conv2d_dw_whcn_f32", "conv2d_dw.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"WHCN", "1"}}));
     string_to_spv("conv2d_dw_cwhn_f32", "conv2d_dw.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"CWHN", "1"}}));
 