Todd committed
Commit a2bce18 · unverified · 1 Parent(s): ca6f4b2

ruby : update bindings (#2154)

* update library files

* update whispercpp

* not needed for gem

bindings/ruby/Rakefile ADDED
@@ -0,0 +1,12 @@
+ require 'rake/clean'
+ require 'rubygems/package'
+
+ desc 'Build gem'
+ task :package do
+   spec_source = File.read File.join(File.dirname(__FILE__),'whispercpp.gemspec')
+   spec = nil
+   # see: http://gist.github.com/16215
+   Thread.new { spec = eval("#{spec_source}") }.join
+   spec.validate
+   Gem::Package.build(spec)
+ end
bindings/ruby/ext/ggml-backend-impl.h CHANGED
@@ -12,31 +12,63 @@ extern "C" {
     // Backend buffer
     //
 
+    // buffer type
+    typedef void * ggml_backend_buffer_type_context_t;
+
+    struct ggml_backend_buffer_type_i {
+        const char * (*GGML_CALL get_name) (ggml_backend_buffer_type_t buft);
+        ggml_backend_buffer_t (*GGML_CALL alloc_buffer) (ggml_backend_buffer_type_t buft, size_t size);
+        size_t (*GGML_CALL get_alignment) (ggml_backend_buffer_type_t buft); // tensor alignment
+        size_t (*GGML_CALL get_max_size) (ggml_backend_buffer_type_t buft); // allocation max size
+        size_t (*GGML_CALL get_alloc_size) (ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
+        bool (*GGML_CALL supports_backend)(ggml_backend_buffer_type_t buft, ggml_backend_t backend); // check if the buffer type is usable by the backend
+        // check if tensor data is in host memory
+        // should be equivalent to supports_backend(buft, ggml_backend_cpu_init())
+        bool (*GGML_CALL is_host) (ggml_backend_buffer_type_t buft);
+    };
+
+    struct ggml_backend_buffer_type {
+        struct ggml_backend_buffer_type_i iface;
+        ggml_backend_buffer_type_context_t context;
+    };
+
+    // buffer
     typedef void * ggml_backend_buffer_context_t;
 
     struct ggml_backend_buffer_i {
-        void (*free_buffer) (ggml_backend_buffer_t buffer);
-        void * (*get_base) (ggml_backend_buffer_t buffer); // get base pointer
-        size_t (*get_alloc_size)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // pre-allocation callback
-        void (*init_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // post-allocation callback
-        void (*free_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // pre-free callback
+        const char * (*GGML_CALL get_name) (ggml_backend_buffer_t buffer);
+        void (*GGML_CALL free_buffer)(ggml_backend_buffer_t buffer);
+        void * (*GGML_CALL get_base) (ggml_backend_buffer_t buffer);
+        void (*GGML_CALL init_tensor)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+        void (*GGML_CALL set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+        void (*GGML_CALL get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+        bool (*GGML_CALL cpy_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst); // dst is in the buffer, src may be in any buffer
+        void (*GGML_CALL clear) (ggml_backend_buffer_t buffer, uint8_t value);
+        void (*GGML_CALL reset) (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras
     };
 
     struct ggml_backend_buffer {
-        struct ggml_backend_buffer_i iface;
-
-        ggml_backend_t backend;
+        struct ggml_backend_buffer_i iface;
+        ggml_backend_buffer_type_t buft;
         ggml_backend_buffer_context_t context;
-
         size_t size;
+        enum ggml_backend_buffer_usage usage;
     };
 
-    GGML_API ggml_backend_buffer_t ggml_backend_buffer_init(
-        struct ggml_backend * backend,
+    GGML_CALL ggml_backend_buffer_t ggml_backend_buffer_init(
+        ggml_backend_buffer_type_t buft,
         struct ggml_backend_buffer_i iface,
         ggml_backend_buffer_context_t context,
         size_t size);
 
+    // do not use directly, use ggml_backend_tensor_copy instead
+    bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst);
+
+    // buffer that contains a collection of buffers
+    GGML_CALL ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_backend_buffer_t * buffers, size_t n_buffers);
+    GGML_CALL bool ggml_backend_buffer_is_multi_buffer(ggml_backend_buffer_t buffer);
+    GGML_CALL void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage);
+
     //
     // Backend
     //
@@ -44,44 +76,66 @@ extern "C" {
     typedef void * ggml_backend_context_t;
 
     struct ggml_backend_i {
-        const char * (*get_name)(ggml_backend_t backend);
+        const char * (*GGML_CALL get_name)(ggml_backend_t backend);
 
-        void (*free)(ggml_backend_t backend);
+        void (*GGML_CALL free)(ggml_backend_t backend);
 
         // buffer allocation
-        ggml_backend_buffer_t (*alloc_buffer)(ggml_backend_t backend, size_t size);
+        ggml_backend_buffer_type_t (*GGML_CALL get_default_buffer_type)(ggml_backend_t backend);
 
-        // get buffer alignment
-        size_t (*get_alignment)(ggml_backend_t backend);
-
-        // tensor data access
-        // these functions can be asynchronous, helper functions are provided for synchronous access that automatically call synchronize
-        void (*set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
-        void (*get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
-        void (*synchronize) (ggml_backend_t backend);
-
-        // (optional) copy tensor between different backends, allow for single-copy tranfers
-        void (*cpy_tensor_from)(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
-        void (*cpy_tensor_to) (ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
+        // (optional) asynchronous tensor data access
+        void (*GGML_CALL set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+        void (*GGML_CALL get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+        bool (*GGML_CALL cpy_tensor_async)(ggml_backend_t backend_src, ggml_backend_t backend_dst, const struct ggml_tensor * src, struct ggml_tensor * dst);
+
+        // (optional) complete all pending operations
+        void (*GGML_CALL synchronize)(ggml_backend_t backend);
+
+        // compute graph with a plan (not used currently)
+        ggml_backend_graph_plan_t (*GGML_CALL graph_plan_create) (ggml_backend_t backend, const struct ggml_cgraph * cgraph);
+        void (*GGML_CALL graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
 
         // compute graph with a plan
-        ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, struct ggml_cgraph * cgraph);
-        void (*graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
-        void (*graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
-
-        // compute graph without a plan
-        bool (*graph_compute)(ggml_backend_t backend, struct ggml_cgraph * cgraph);
+        enum ggml_status (*GGML_CALL graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
+        // compute graph without a plan (async)
+        enum ggml_status (*GGML_CALL graph_compute) (ggml_backend_t backend, struct ggml_cgraph * cgraph);
 
         // check if the backend supports an operation
-        bool (*supports_op)(ggml_backend_t backend, const struct ggml_tensor * op);
+        bool (*GGML_CALL supports_op)(ggml_backend_t backend, const struct ggml_tensor * op);
+
+        // check if the backend wants to run an operation, even if the weights are allocated in a CPU buffer
+        // these should be expensive operations with large batch sizes that may benefit from running on this backend
+        // even if the weight has to be copied from the CPU temporarily
+        bool (*GGML_CALL offload_op)(ggml_backend_t backend, const struct ggml_tensor * op);
+
+        // (optional) event synchronization
+        ggml_backend_event_t (*GGML_CALL event_new) (ggml_backend_t backend);
+        void (*GGML_CALL event_free) (ggml_backend_event_t event);
+        void (*GGML_CALL event_record) (ggml_backend_event_t event);
+        void (*GGML_CALL event_wait) (ggml_backend_t backend, ggml_backend_event_t event);
+        void (*GGML_CALL event_synchronize) (ggml_backend_event_t event);
     };
 
     struct ggml_backend {
-        struct ggml_backend_i iface;
+        ggml_guid_t guid;
 
+        struct ggml_backend_i iface;
         ggml_backend_context_t context;
     };
 
+    struct ggml_backend_event {
+        ggml_backend_t backend;
+        void * context;
+    };
+
+    //
+    // Backend registry
+    //
+
+    typedef ggml_backend_t (*GGML_CALL ggml_backend_init_fn)(const char * params, void * user_data);
+
+    GGML_CALL void ggml_backend_register(const char * name, ggml_backend_init_fn init_fn, ggml_backend_buffer_type_t default_buffer_type, void * user_data);
+
 #ifdef __cplusplus
 }
 #endif
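Note: the change above splits the old per-backend buffer vtable into a buffer *type* interface plus a per-buffer interface. As a rough illustration only (not the actual CPU backend from ggml-backend.c), a minimal host-memory buffer wired into the new ggml_backend_buffer_i could look like the sketch below; the my_host_* names are invented, and the optional callbacks are simply left unset.

// Hypothetical sketch against the new interface; my_host_* is not a real ggml symbol.
#include <stdlib.h>
#include "ggml-backend-impl.h"

static const char * GGML_CALL my_host_buffer_get_name(ggml_backend_buffer_t buffer) {
    (void) buffer;
    return "MyHost";
}

static void GGML_CALL my_host_buffer_free(ggml_backend_buffer_t buffer) {
    free(buffer->context); // in this sketch, context is simply the raw allocation
}

static void * GGML_CALL my_host_buffer_get_base(ggml_backend_buffer_t buffer) {
    return buffer->context;
}

static struct ggml_backend_buffer_i my_host_buffer_iface = {
    /* get_name    */ my_host_buffer_get_name,
    /* free_buffer */ my_host_buffer_free,
    /* get_base    */ my_host_buffer_get_base,
    /* init_tensor */ NULL, // optional callbacks left unset in this sketch
    /* set_tensor  */ NULL,
    /* get_tensor  */ NULL,
    /* cpy_tensor  */ NULL,
    /* clear       */ NULL,
    /* reset       */ NULL,
};

// buft would be the matching ggml_backend_buffer_type_t registered by the backend
static ggml_backend_buffer_t my_host_buffer_alloc(ggml_backend_buffer_type_t buft, size_t size) {
    void * data = malloc(size);
    return ggml_backend_buffer_init(buft, my_host_buffer_iface, data, size);
}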
bindings/ruby/ext/ggml-backend.c CHANGED
The diff for this file is too large to render. See raw diff
 
bindings/ruby/ext/ggml-backend.h CHANGED
@@ -7,69 +7,123 @@
 extern "C" {
 #endif
 
+    typedef struct ggml_backend_buffer_type * ggml_backend_buffer_type_t;
+    typedef struct ggml_backend_buffer * ggml_backend_buffer_t;
+    typedef struct ggml_backend_event * ggml_backend_event_t;
+    typedef struct ggml_backend * ggml_backend_t;
+    typedef void * ggml_backend_graph_plan_t;
+
     //
     // Backend buffer
     //
 
-    struct ggml_backend_buffer;
-    typedef struct ggml_backend_buffer * ggml_backend_buffer_t;
-
-    // backend buffer functions
-    GGML_API void ggml_backend_buffer_free (ggml_backend_buffer_t buffer);
-    GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer);
-    GGML_API void * ggml_backend_buffer_get_base (ggml_backend_buffer_t buffer);
-    GGML_API size_t ggml_backend_buffer_get_size (ggml_backend_buffer_t buffer);
-    GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
-    GGML_API void ggml_backend_buffer_init_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
-    GGML_API void ggml_backend_buffer_free_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+    // buffer type
+    GGML_API const char * ggml_backend_buft_name (ggml_backend_buffer_type_t buft);
+    GGML_API GGML_CALL ggml_backend_buffer_t ggml_backend_buft_alloc_buffer (ggml_backend_buffer_type_t buft, size_t size);
+    GGML_API size_t ggml_backend_buft_get_alignment (ggml_backend_buffer_type_t buft);
+    GGML_API size_t ggml_backend_buft_get_max_size (ggml_backend_buffer_type_t buft);
+    GGML_API GGML_CALL size_t ggml_backend_buft_get_alloc_size (ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor);
+    GGML_API bool ggml_backend_buft_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend);
+    GGML_API bool ggml_backend_buft_is_host (ggml_backend_buffer_type_t buft);
+
+    // buffer
+    enum ggml_backend_buffer_usage {
+        GGML_BACKEND_BUFFER_USAGE_ANY = 0,
+        GGML_BACKEND_BUFFER_USAGE_WEIGHTS = 1,
+    };
+
+    GGML_API const char * ggml_backend_buffer_name (ggml_backend_buffer_t buffer);
+    GGML_API void ggml_backend_buffer_free (ggml_backend_buffer_t buffer);
+    GGML_API void * ggml_backend_buffer_get_base (ggml_backend_buffer_t buffer);
+    GGML_API size_t ggml_backend_buffer_get_size (ggml_backend_buffer_t buffer);
+    GGML_API GGML_CALL void ggml_backend_buffer_init_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+    GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer);
+    GGML_API size_t ggml_backend_buffer_get_max_size (ggml_backend_buffer_t buffer);
+    GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+    GGML_API void ggml_backend_buffer_clear (ggml_backend_buffer_t buffer, uint8_t value);
+    GGML_API bool ggml_backend_buffer_is_host (ggml_backend_buffer_t buffer);
+    GGML_API void ggml_backend_buffer_set_usage (ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage);
+    GGML_API ggml_backend_buffer_type_t ggml_backend_buffer_get_type (ggml_backend_buffer_t buffer);
+    GGML_API void ggml_backend_buffer_reset (ggml_backend_buffer_t buffer);
 
     //
     // Backend
     //
 
-    struct ggml_backend;
-    typedef struct ggml_backend * ggml_backend_t;
-    typedef void * ggml_backend_graph_plan_t;
-
-    GGML_API ggml_backend_t ggml_get_backend(const struct ggml_tensor * tensor);
-
+    GGML_API ggml_guid_t ggml_backend_guid(ggml_backend_t backend);
     GGML_API const char * ggml_backend_name(ggml_backend_t backend);
     GGML_API void ggml_backend_free(ggml_backend_t backend);
 
-    GGML_API ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size);
-
-    GGML_API size_t ggml_backend_get_alignment(ggml_backend_t backend);
+    GGML_API ggml_backend_buffer_type_t ggml_backend_get_default_buffer_type(ggml_backend_t backend);
+    GGML_API ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size);
+    GGML_API size_t ggml_backend_get_alignment(ggml_backend_t backend);
+    GGML_API size_t ggml_backend_get_max_size(ggml_backend_t backend);
 
-    GGML_API void ggml_backend_tensor_set_async( struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
-    GGML_API void ggml_backend_tensor_get_async(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
-
-    GGML_API void ggml_backend_tensor_set( struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
-    GGML_API void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+    GGML_API void ggml_backend_tensor_set_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+    GGML_API void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+
+    GGML_API GGML_CALL void ggml_backend_tensor_set( struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+    GGML_API GGML_CALL void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
 
     GGML_API void ggml_backend_synchronize(ggml_backend_t backend);
 
-    GGML_API ggml_backend_graph_plan_t ggml_backend_graph_plan_create (ggml_backend_t backend, struct ggml_cgraph * cgraph);
-
-    GGML_API void ggml_backend_graph_plan_free (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
-    GGML_API void ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
-    GGML_API bool ggml_backend_graph_compute (ggml_backend_t backend, struct ggml_cgraph * cgraph);
-    GGML_API bool ggml_backend_supports_op (ggml_backend_t backend, const struct ggml_tensor * op);
+    GGML_API ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph);
+    GGML_API void ggml_backend_graph_plan_free (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
+
+    GGML_API enum ggml_status ggml_backend_graph_plan_compute (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
+    GGML_API enum ggml_status ggml_backend_graph_compute (ggml_backend_t backend, struct ggml_cgraph * cgraph);
+    GGML_API enum ggml_status ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph);
+    GGML_API bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op);
+    GGML_API bool ggml_backend_offload_op(ggml_backend_t backend, const struct ggml_tensor * op);
 
     // tensor copy between different backends
     GGML_API void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst);
 
+    // asynchronous copy
+    // the copy is performed after all the currently queued operations in backend_src
+    // backend_dst will wait for the copy to complete before performing other operations
+    // automatic fallback to sync copy if async is not supported
+    GGML_API void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst);
+
+    // events
+    GGML_API ggml_backend_event_t ggml_backend_event_new (ggml_backend_t backend);
+    GGML_API void ggml_backend_event_free (ggml_backend_event_t event);
+    GGML_API void ggml_backend_event_record (ggml_backend_event_t event);
+    GGML_API void ggml_backend_event_synchronize(ggml_backend_event_t event);
+    GGML_API void ggml_backend_event_wait (ggml_backend_t backend, ggml_backend_event_t event); // wait async on event
+
     //
     // CPU backend
     //
 
     GGML_API ggml_backend_t ggml_backend_cpu_init(void);
 
-    GGML_API bool ggml_backend_is_cpu(ggml_backend_t backend);
-    GGML_API void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads);
+    GGML_API GGML_CALL bool ggml_backend_is_cpu (ggml_backend_t backend);
+    GGML_API void ggml_backend_cpu_set_n_threads (ggml_backend_t backend_cpu, int n_threads);
+    GGML_API void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback abort_callback, void * abort_callback_data);
 
     // Create a backend buffer from an existing pointer
-    GGML_API ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(ggml_backend_t backend_cpu, void * ptr, size_t size);
+    GGML_API GGML_CALL ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size);
+
+    GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void);
+
+#ifdef GGML_USE_CPU_HBM
+    GGML_API ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void);
+#endif
+
+    //
+    // Backend registry
+    //
+
+    // The backend registry is a registry of all the available backends, and allows initializing backends in a generic way
+
+    GGML_API size_t ggml_backend_reg_get_count(void);
+    GGML_API size_t ggml_backend_reg_find_by_name(const char * name);
+    GGML_API ggml_backend_t ggml_backend_reg_init_backend_from_str(const char * backend_str); // str is name[:params]
+    GGML_API const char * ggml_backend_reg_get_name(size_t i);
+    GGML_API ggml_backend_t ggml_backend_reg_init_backend(size_t i, const char * params); // params is backend-specific
+    GGML_API ggml_backend_buffer_type_t ggml_backend_reg_get_default_buffer_type(size_t i);
+    GGML_API ggml_backend_buffer_t ggml_backend_reg_alloc_buffer(size_t i, size_t size);
 
     //
     // Backend scheduler
@@ -83,53 +137,96 @@ extern "C" {
     /*
       Example usage:
 
-        sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, num_backends);
-        // sched is initialized with measure allocators and cannot be used until allocated with a measure graph
-
-        // initialize buffers from a measure graph
-        measure_graph = build_graph(sched); // use the allocr to allocate inputs as needed
-
-        // in build_graph:
-        build_graph(...) {
-            // allocating tensors in a specific backend (optional, recommended: pre-allocate inputs in a different buffer)
-            alloc_cpu = ggml_backend_sched_get_allocr(sched, backend_cpu);
-            ggml_allocr_alloc(alloc_cpu, tensor);
-
-            // manually assigning nodes to a backend (optional, shouldn't be needed in most cases)
-            struct ggml_tensor * node = ggml_mul_mat(ctx, ...);
-            ggml_backend_sched_set_node_backend(sched, node, backend_gpu);
-        }
-
-        // allocate backend buffers from measure graph
-        ggml_backend_sched_init_measure(sched, measure_graph);
-
-        // the scheduler is now ready to compute graphs
+        // operations that use tensors allocated in a buffer with USAGE_WEIGHTS will be assigned
+        // preferrably to run on the same backend as the buffer
+        ggml_backend_buffer_set_usage(buf_weights, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
+
+        sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, GGML_DEFAULT_GRAPH_SIZE, false);
+
+        // initialize buffers from a max size graph (optional)
+        reserve_graph = build_graph(sched, max_batch_size);
+
+        // manually assign nodes to a backend (optional, should not be needed in most cases)
+        struct ggml_tensor * node = ggml_mul_mat(ctx, ...);
+        ggml_backend_sched_set_tensor_backend(sched, node, backend_gpu);
+
+        ggml_backend_sched_reserve(sched, reserve_graph);
 
         // compute
         graph = build_graph(sched);
         ggml_backend_sched_graph_compute(sched, graph);
+
+        // if there are graph inputs:
+        ggml_backend_sched_reset(sched);
+        ggml_backend_sched_alloc_graph(sched, graph);
+        ggml_backend_tensor_set(input_tensor, ...);
+        ggml_backend_sched_graph_compute(sched, graph);
+    }
     */
 
     struct ggml_backend_sched;
     typedef struct ggml_backend_sched * ggml_backend_sched_t;
 
-    // Initialize a backend scheduler
-    GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends);
-
-    GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);
+    // when ask == true, the scheduler wants to know if the user wants to observe this node
+    // this allows the scheduler to batch nodes together in order to evaluate them in a single call
+    //
+    // when ask == false, the scheduler is passing the node tensor to the user for observation
+    // if the user returns false, the scheduler will cancel the graph compute
+    //
+    typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data);
+
+    // Initialize a backend scheduler
+    GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel);
+    GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);
 
     // Initialize backend buffers from a measure graph
-    GGML_API void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph);
+    GGML_API bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph);
+
+    // Get the number of splits of the last graph
+    GGML_API int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched);
+    GGML_API int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched);
+
+    GGML_API size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend);
 
-    GGML_API ggml_tallocr_t ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend);
-    GGML_API ggml_backend_buffer_t ggml_backend_sched_get_buffer (ggml_backend_sched_t sched, ggml_backend_t backend);
-
-    GGML_API void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend);
+    GGML_API void ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend);
+    GGML_API ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node);
 
-    // Allocate a graph on the backend scheduler
-    GGML_API void ggml_backend_sched_graph_compute(
-        ggml_backend_sched_t sched,
-        struct ggml_cgraph * graph);
+    // Allocate and compute graph on the backend scheduler
+    GGML_API bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
+    GGML_API enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
+    GGML_API enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph);
+    GGML_API void ggml_backend_sched_synchronize(ggml_backend_sched_t sched);
+
+    // Reset all assignments and allocators - must be called before changing the node backends
+    GGML_API void ggml_backend_sched_reset(ggml_backend_sched_t sched);
+
+    // Set a callback to be called for each resulting node during graph compute
+    GGML_API void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data);
+
+    //
+    // Utils
+    //
+
+    struct ggml_backend_graph_copy {
+        ggml_backend_buffer_t buffer;
+        struct ggml_context * ctx_allocated;
+        struct ggml_context * ctx_unallocated;
+        struct ggml_cgraph * graph;
+    };
+
+    // Copy a graph to a different backend
+    GGML_API struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph);
+    GGML_API void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy);
+
+    typedef bool (*GGML_CALL ggml_backend_eval_callback)(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data);
 
+    // Compare the output of two backends
+    GGML_API bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data);
 
+    // Tensor initialization
+    GGML_API void ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr);
+    GGML_API void ggml_backend_view_init(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+
 #ifdef __cplusplus
 }
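Note: the scheduler API now returns enum ggml_status from the compute entry points and takes buffer types, graph size, and a parallel flag at construction. A hedged consumer sketch of that flow is below; build_graph() is a placeholder for application code and is not part of this header.

// Hypothetical sketch, single CPU backend; assumes GGML_DEFAULT_GRAPH_SIZE and
// GGML_STATUS_SUCCESS from ggml.h, as referenced by the header's own example.
#include <stdio.h>
#include "ggml.h"
#include "ggml-backend.h"

static enum ggml_status run_once(struct ggml_cgraph * (*build_graph)(void)) {
    ggml_backend_t cpu = ggml_backend_cpu_init();

    // NULL buffer types -> use each backend's default; no parallel copies
    ggml_backend_sched_t sched = ggml_backend_sched_new(&cpu, NULL, 1, GGML_DEFAULT_GRAPH_SIZE, false);

    struct ggml_cgraph * graph = build_graph();            // placeholder
    ggml_backend_sched_reserve(sched, graph);              // size the backend buffers once
    enum ggml_status st = ggml_backend_sched_graph_compute(sched, graph);
    if (st != GGML_STATUS_SUCCESS) {
        fprintf(stderr, "graph compute failed: %d\n", (int) st);
    }

    ggml_backend_sched_free(sched);
    ggml_backend_free(cpu);
    return st;
}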
bindings/ruby/ext/ggml-common.h ADDED
The diff for this file is too large to render. See raw diff
 
bindings/ruby/ext/ggml-cuda.h ADDED
@@ -0,0 +1,43 @@
+ #pragma once
+
+ #include "ggml.h"
+ #include "ggml-backend.h"
+
+ #ifdef GGML_USE_HIPBLAS
+ #define GGML_CUDA_NAME "ROCm"
+ #define GGML_CUBLAS_NAME "hipBLAS"
+ #else
+ #define GGML_CUDA_NAME "CUDA"
+ #define GGML_CUBLAS_NAME "cuBLAS"
+ #endif
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ #define GGML_CUDA_MAX_DEVICES 16
+
+ // backend API
+ GGML_API GGML_CALL ggml_backend_t ggml_backend_cuda_init(int device);
+
+ GGML_API GGML_CALL bool ggml_backend_is_cuda(ggml_backend_t backend);
+
+ // device buffer
+ GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device);
+
+ // split tensor buffer that splits matrices by rows across multiple devices
+ GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(const float * tensor_split);
+
+ // pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
+ GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type(void);
+
+ GGML_API GGML_CALL int ggml_backend_cuda_get_device_count(void);
+ GGML_API GGML_CALL void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size);
+ GGML_API GGML_CALL void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total);
+
+ GGML_API GGML_CALL bool ggml_backend_cuda_register_host_buffer(void * buffer, size_t size);
+ GGML_API GGML_CALL void ggml_backend_cuda_unregister_host_buffer(void * buffer);
+
+ #ifdef __cplusplus
+ }
+ #endif
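Note: as a small, hedged usage sketch built only from the declarations above, CUDA devices can be enumerated like this (build against a CUDA-enabled ggml; otherwise these symbols are not available).

// Hedged sketch: list CUDA devices via the ggml-cuda.h API shown above.
#include <stdio.h>
#include "ggml-cuda.h"

static void list_cuda_devices(void) {
    int n = ggml_backend_cuda_get_device_count();
    for (int i = 0; i < n; i++) {
        char desc[128];
        size_t free_mem = 0, total_mem = 0;
        ggml_backend_cuda_get_device_description(i, desc, sizeof(desc));
        ggml_backend_cuda_get_device_memory(i, &free_mem, &total_mem);
        printf("device %d: %s (%zu of %zu bytes free)\n", i, desc, free_mem, total_mem);
    }
}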
bindings/ruby/ext/ggml-impl.h CHANGED
@@ -5,6 +5,7 @@
 // GGML internal header
 
 #include <assert.h>
+#include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
 #include <stddef.h>
 #include <stdbool.h>
 #include <string.h> // memcpy
@@ -18,6 +19,7 @@ extern "C" {
 // fall back to the _Static_assert C11 keyword.
 // if C99 - static_assert is noop
 // ref: https://stackoverflow.com/a/53923785/4039976
+#ifndef __cplusplus
 #ifndef static_assert
 #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
 #define static_assert(cond, msg) _Static_assert(cond, msg)
@@ -25,6 +27,7 @@ extern "C" {
 #define static_assert(cond, msg) struct global_scope_noop_trick
 #endif
 #endif
+#endif
 
 // __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
 #if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
@@ -34,16 +37,17 @@ extern "C" {
 #ifndef __F16C__
 #define __F16C__
 #endif
+#endif
+
+// __SSE3__ and __SSSE3__ are not defined in MSVC, but SSE3/SSSE3 are present when AVX/AVX2/AVX512 are available
+#if defined(_MSC_VER) && (defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__))
 #ifndef __SSE3__
 #define __SSE3__
 #endif
+#ifndef __SSSE3__
+#define __SSSE3__
+#endif
 #endif
-
-#undef MIN
-#undef MAX
-
-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
 
 // 16-bit float
 // on Arm, we use __fp16
@@ -56,14 +60,30 @@ extern "C" {
 //
 #include <arm_neon.h>
 
-#define GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x))
-#define GGML_COMPUTE_FP32_TO_FP16(x) (x)
+typedef __fp16 ggml_fp16_internal_t;
 
-#define GGML_FP16_TO_FP32(x) ((float) (x))
-#define GGML_FP32_TO_FP16(x) (x)
+#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
+#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
+
+#define GGML_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
+
+static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
+    ggml_fp16_internal_t tmp;
+    memcpy(&tmp, &h, sizeof(ggml_fp16_t));
+    return (float)tmp;
+}
+
+static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
+    ggml_fp16_t res;
+    ggml_fp16_internal_t tmp = f;
+    memcpy(&res, &tmp, sizeof(ggml_fp16_t));
+    return res;
+}
 
 #else
 
+typedef uint16_t ggml_fp16_internal_t;
+
 #ifdef __wasm_simd128__
 #include <wasm_simd128.h>
 #else
@@ -217,8 +237,7 @@ extern float ggml_table_f32_f16[1 << 16];
 // On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32,
 // so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON.
 // This is also true for POWER9.
-#if !defined(GGML_FP16_TO_FP32) || !defined(GGML_FP32_TO_FP16)
-
+#if !defined(GGML_FP16_TO_FP32)
 inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
     uint16_t s;
     memcpy(&s, &f, sizeof(uint16_t));
@@ -226,19 +245,23 @@ inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
 }
 
 #define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
-#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
+#endif
 
+#if !defined(GGML_FP32_TO_FP16)
+#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
 #endif
 
 #define GGML_HASHTABLE_FULL ((size_t)-1)
 #define GGML_HASHTABLE_ALREADY_EXISTS ((size_t)-2)
 
+struct ggml_hash_set ggml_hash_set_new(size_t size);
+
 bool ggml_hash_contains (const struct ggml_hash_set hash_set, struct ggml_tensor * key);
 
 // returns GGML_HASHTABLE_FULL if table is full, otherwise the current index of the key or where it should be inserted
 size_t ggml_hash_find (const struct ggml_hash_set hash_set, struct ggml_tensor * key);
 
-// returns GGML_HAHSHTABLE_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
+// returns GGML_HASHTABLE_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
 size_t ggml_hash_insert ( struct ggml_hash_set hash_set, struct ggml_tensor * key);
 
 // return index, asserts if table is full
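Note: the fp16 helpers added above give every platform a GGML_FP32_TO_FP16 / GGML_FP16_TO_FP32 pair (NEON conversion or the lookup-table fallback). A hedged round-trip sketch using only those macros:

// Hedged sketch: round-trip a float through the fp16 macros defined in ggml-impl.h.
#include <stdio.h>
#include "ggml-impl.h"

int main(void) {
    float x = 0.125f;
    ggml_fp16_t h = GGML_FP32_TO_FP16(x); // NEON or table path, chosen by the header
    float y = GGML_FP16_TO_FP32(h);
    printf("%f -> fp16 -> %f\n", x, y);
    return 0;
}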
bindings/ruby/ext/ggml-kompute.h ADDED
@@ -0,0 +1,46 @@
+ #pragma once
+
+ #include "ggml.h"
+ #include "ggml-backend.h"
+
+ #include <stdbool.h>
+ #include <stddef.h>
+ #include <stdint.h>
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ struct ggml_vk_device {
+     int index;
+     int type; // same as VkPhysicalDeviceType
+     size_t heapSize;
+     const char * name;
+     const char * vendor;
+     int subgroupSize;
+     uint64_t bufferAlignment;
+     uint64_t maxAlloc;
+ };
+
+ struct ggml_vk_device * ggml_vk_available_devices(size_t memoryRequired, size_t * count);
+ bool ggml_vk_get_device(struct ggml_vk_device * device, size_t memoryRequired, const char * name);
+ bool ggml_vk_has_vulkan(void);
+ bool ggml_vk_has_device(void);
+ struct ggml_vk_device ggml_vk_current_device(void);
+
+ //
+ // backend API
+ //
+
+ // forward declaration
+ typedef struct ggml_backend * ggml_backend_t;
+
+ GGML_API ggml_backend_t ggml_backend_kompute_init(int device);
+
+ GGML_API bool ggml_backend_is_kompute(ggml_backend_t backend);
+
+ GGML_API ggml_backend_buffer_type_t ggml_backend_kompute_buffer_type(int device);
+
+ #ifdef __cplusplus
+ }
+ #endif
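Note: a hedged sketch of enumerating the Vulkan devices the Kompute backend can see, using only the functions declared above. Ownership of the returned array is not documented in this header, so the sketch does not free it.

// Hedged sketch: print the devices reported by the Kompute backend.
#include <stdio.h>
#include "ggml-kompute.h"

static void list_vk_devices(void) {
    size_t count = 0;
    struct ggml_vk_device * devs = ggml_vk_available_devices(0 /* memoryRequired */, &count);
    for (size_t i = 0; i < count; i++) {
        printf("vk device %d: %s (%s), heap %zu bytes, subgroup %d\n",
               devs[i].index, devs[i].name, devs[i].vendor, devs[i].heapSize, devs[i].subgroupSize);
    }
}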
bindings/ruby/ext/ggml-metal.h ADDED
@@ -0,0 +1,66 @@
+ // An interface allowing to compute ggml_cgraph with Metal
+ //
+ // This is a fully functional interface that extends ggml with GPU support for Apple devices.
+ // A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, OpenCL, etc.)
+ //
+ // How it works?
+ //
+ // As long as your program can create and evaluate a ggml_cgraph on the CPU, you can use this
+ // interface to evaluate the same graph on the GPU. Instead of using ggml_graph_compute(), you
+ // use ggml_metal_graph_compute() (or ggml_vulkan_graph_compute(), etc.)
+ //
+ // You only need to make sure that all memory buffers that you used during the graph creation
+ // are mapped to the device memory with the ggml_metal_add_buffer() function. This mapping is
+ // used during the graph evaluation to determine the arguments of the compute kernels.
+ //
+ // Synchronization between device and host memory (for example for input and output tensors)
+ // is done with the ggml_metal_set_tensor() and ggml_metal_get_tensor() functions.
+ //
+
+ #pragma once
+
+ #include "ggml.h"
+ #include "ggml-backend.h"
+
+ #include <stddef.h>
+ #include <stdbool.h>
+
+ // max memory buffers that can be mapped to the device
+ #define GGML_METAL_MAX_BUFFERS 64
+
+ struct ggml_tensor;
+ struct ggml_cgraph;
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ //
+ // backend API
+ // user-code should use only these functions
+ //
+
+ GGML_API void ggml_backend_metal_log_set_callback(ggml_log_callback log_callback, void * user_data);
+
+ GGML_API ggml_backend_t ggml_backend_metal_init(void);
+
+ GGML_API bool ggml_backend_is_metal(ggml_backend_t backend);
+
+ GGML_API GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size);
+
+ GGML_API void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb);
+
+ GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void);
+
+ // helper to check if the device supports a specific family
+ // ideally, the user code should be doing these checks
+ // ref: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
+ GGML_API bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family);
+
+ // capture all command buffers committed the next time `ggml_backend_graph_compute` is called
+ GGML_API void ggml_backend_metal_capture_next_compute(ggml_backend_t backend);
+
+ #ifdef __cplusplus
+ }
+ #endif
+
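Note: a hedged sketch of bringing up the Metal backend with the functions declared above; the family value is left as a parameter because its mapping to Apple GPU families is backend-defined (see the feature-set tables linked in the header).

// Hedged sketch: initialize Metal, set one command buffer, check a GPU family.
#include <stdio.h>
#include "ggml-metal.h"

static ggml_backend_t init_metal_or_null(int family) {
    ggml_backend_t backend = ggml_backend_metal_init();
    if (backend == NULL) {
        return NULL;
    }
    ggml_backend_metal_set_n_cb(backend, 1); // single command buffer for this sketch
    if (!ggml_backend_metal_supports_family(backend, family)) {
        fprintf(stderr, "warning: requested GPU family not supported\n");
    }
    return backend;
}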
bindings/ruby/ext/ggml-opencl.h ADDED
@@ -0,0 +1,36 @@
+ #pragma once
+
+ #include "ggml.h"
+ #include "ggml-backend.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ GGML_API void ggml_cl_init(void);
+
+ GGML_API void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
+ GGML_API void ggml_cl_add(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
+ GGML_API bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, const struct ggml_tensor * dst);
+ GGML_API size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
+ GGML_API void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);
+
+ // GGML_API void * ggml_cl_host_malloc(size_t size);
+ // GGML_API void ggml_cl_host_free(void * ptr);
+
+ GGML_API void ggml_cl_free_data(const struct ggml_tensor* tensor);
+
+ GGML_API void ggml_cl_transform_tensor(void * data, struct ggml_tensor * tensor);
+
+ // backend API
+
+ // GGML_API ggml_backend_t ggml_backend_opencl_init(void);
+
+ // GGML_API bool ggml_backend_is_opencl(ggml_backend_t backend);
+
+ GGML_API ggml_backend_buffer_type_t ggml_backend_opencl_buffer_type(void);
+ // GGML_API ggml_backend_buffer_type_t ggml_backend_opencl_host_buffer_type(void);
+
+ #ifdef __cplusplus
+ }
+ #endif
bindings/ruby/ext/ggml-quants.c CHANGED
The diff for this file is too large to render. See raw diff
 
bindings/ruby/ext/ggml-quants.h CHANGED
@@ -1,224 +1,133 @@
1
  #pragma once
2
 
3
- #include "ggml-impl.h"
 
4
 
5
- // GGML internal header
6
-
7
- #include <stdint.h>
8
- #include <stddef.h>
9
-
10
- #define QK4_0 32
11
- typedef struct {
12
- ggml_fp16_t d; // delta
13
- uint8_t qs[QK4_0 / 2]; // nibbles / quants
14
- } block_q4_0;
15
- static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding");
16
-
17
- #define QK4_1 32
18
- typedef struct {
19
- ggml_fp16_t d; // delta
20
- ggml_fp16_t m; // min
21
- uint8_t qs[QK4_1 / 2]; // nibbles / quants
22
- } block_q4_1;
23
- static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_fp16_t) + QK4_1 / 2, "wrong q4_1 block size/padding");
24
-
25
- #define QK5_0 32
26
- typedef struct {
27
- ggml_fp16_t d; // delta
28
- uint8_t qh[4]; // 5-th bit of quants
29
- uint8_t qs[QK5_0 / 2]; // nibbles / quants
30
- } block_q5_0;
31
- static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");
32
-
33
- #define QK5_1 32
34
- typedef struct {
35
- ggml_fp16_t d; // delta
36
- ggml_fp16_t m; // min
37
- uint8_t qh[4]; // 5-th bit of quants
38
- uint8_t qs[QK5_1 / 2]; // nibbles / quants
39
- } block_q5_1;
40
- static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding");
41
-
42
- #define QK8_0 32
43
- typedef struct {
44
- ggml_fp16_t d; // delta
45
- int8_t qs[QK8_0]; // quants
46
- } block_q8_0;
47
- static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");
48
-
49
- #define QK8_1 32
50
- typedef struct {
51
- float d; // delta
52
- float s; // d * sum(qs[i])
53
- int8_t qs[QK8_1]; // quants
54
- } block_q8_1;
55
- static_assert(sizeof(block_q8_1) == 2*sizeof(float) + QK8_1, "wrong q8_1 block size/padding");
56
-
57
- //
58
- // Super-block quantization structures
59
- //
60
-
61
- // Super-block size
62
- #ifdef GGML_QKK_64
63
- #define QK_K 64
64
- #define K_SCALE_SIZE 4
65
- #else
66
- #define QK_K 256
67
- #define K_SCALE_SIZE 12
68
- #endif
69
 
70
- // 2-bit quantization
71
- // weight is represented as x = a * q + b
72
- // 16 blocks of 16 elements each
73
- // Effectively 2.5625 bits per weight
74
- typedef struct {
75
- uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
76
- uint8_t qs[QK_K/4]; // quants
77
- ggml_fp16_t d; // super-block scale for quantized scales
78
- ggml_fp16_t dmin; // super-block scale for quantized mins
79
- } block_q2_K;
80
- static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_fp16_t) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding");
81
-
82
- // 3-bit quantization
83
- // weight is represented as x = a * q
84
- // 16 blocks of 16 elements each
85
- // Effectively 3.4375 bits per weight
86
- #ifdef GGML_QKK_64
87
- typedef struct {
88
- uint8_t hmask[QK_K/8]; // quants - high bit
89
- uint8_t qs[QK_K/4]; // quants - low 2 bits
90
- uint8_t scales[2];
91
- ggml_fp16_t d; // super-block scale
92
- } block_q3_K;
93
- static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + 2, "wrong q3_K block size/padding");
94
- #else
95
- typedef struct {
96
- uint8_t hmask[QK_K/8]; // quants - high bit
97
- uint8_t qs[QK_K/4]; // quants - low 2 bits
98
- uint8_t scales[12]; // scales, quantized with 6 bits
99
- ggml_fp16_t d; // super-block scale
100
- } block_q3_K;
101
- static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + 12, "wrong q3_K block size/padding");
102
- #endif
103
-
104
- // 4-bit quantization
105
- // 8 blocks of 32 elements each
106
- // weight is represented as x = a * q + b
107
- // Effectively 4.5 bits per weight
108
- #ifdef GGML_QKK_64
109
- typedef struct {
110
- ggml_fp16_t d[2]; // super-block scales/mins
111
- uint8_t scales[2]; // 4-bit block scales/mins
112
- uint8_t qs[QK_K/2]; // 4--bit quants
113
- } block_q4_K;
114
- static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + QK_K/2 + 2, "wrong q4_K block size/padding");
115
- #else
116
- typedef struct {
117
- ggml_fp16_t d; // super-block scale for quantized scales
118
- ggml_fp16_t dmin; // super-block scale for quantized mins
119
- uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
120
- uint8_t qs[QK_K/2]; // 4--bit quants
121
- } block_q4_K;
122
- static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2, "wrong q4_K block size/padding");
123
- #endif
124
 
125
- // 5-bit quantization
126
- // 8 blocks of 32 elements each
127
- // weight is represented as x = a * q + b
128
- // Effectively 5.5 bits per weight
129
- #ifdef GGML_QKK_64
130
- typedef struct {
131
- ggml_fp16_t d; // super-block scale
132
- int8_t scales[QK_K/16]; // 8-bit block scales
133
- uint8_t qh[QK_K/8]; // quants, high bit
134
- uint8_t qs[QK_K/2]; // quants, low 4 bits
135
- } block_q5_K;
136
- static_assert(sizeof(block_q5_K) == sizeof(ggml_fp16_t) + QK_K/2 + QK_K/8 + QK_K/16, "wrong q5_K block size/padding");
137
- #else
138
- typedef struct {
139
- ggml_fp16_t d; // super-block scale for quantized scales
140
- ggml_fp16_t dmin; // super-block scale for quantized mins
141
- uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
142
- uint8_t qh[QK_K/8]; // quants, high bit
143
- uint8_t qs[QK_K/2]; // quants, low 4 bits
144
- } block_q5_K;
145
- static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding");
146
  #endif
147
 
148
- // 6-bit quantization
149
- // weight is represented as x = a * q
150
- // 16 blocks of 16 elements each
151
- // Effectively 6.5625 bits per weight
152
- typedef struct {
153
- uint8_t ql[QK_K/2]; // quants, lower 4 bits
154
- uint8_t qh[QK_K/4]; // quants, upper 2 bits
155
- int8_t scales[QK_K/16]; // scales, quantized with 8 bits
156
- ggml_fp16_t d; // super-block scale
157
- } block_q6_K;
158
- static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + QK_K / 16 + 3*QK_K/4, "wrong q6_K block size/padding");
159
-
160
- // This is only used for intermediate quantization and dot products
161
- typedef struct {
162
- float d; // delta
163
- int8_t qs[QK_K]; // quants
164
- int16_t bsums[QK_K/16]; // sum of quants in groups of 16
165
- } block_q8_K;
166
- static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_t), "wrong q8_K block size/padding");
167
-
168
-
169
  // Quantization
170
- void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k);
171
- void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k);
172
- void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k);
173
- void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k);
174
- void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k);
175
- void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k);
176
-
177
- void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k);
178
- void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k);
179
- void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k);
180
- void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k);
181
- void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k);
182
- void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k);
183
-
184
- void quantize_row_q4_0(const float * restrict x, void * restrict y, int k);
185
- void quantize_row_q4_1(const float * restrict x, void * restrict y, int k);
186
- void quantize_row_q5_0(const float * restrict x, void * restrict y, int k);
187
- void quantize_row_q5_1(const float * restrict x, void * restrict y, int k);
188
- void quantize_row_q8_0(const float * restrict x, void * restrict y, int k);
189
- void quantize_row_q8_1(const float * restrict x, void * restrict y, int k);
190
-
191
- void quantize_row_q2_K(const float * restrict x, void * restrict y, int k);
192
- void quantize_row_q3_K(const float * restrict x, void * restrict y, int k);
193
- void quantize_row_q4_K(const float * restrict x, void * restrict y, int k);
194
- void quantize_row_q5_K(const float * restrict x, void * restrict y, int k);
195
- void quantize_row_q6_K(const float * restrict x, void * restrict y, int k);
196
- void quantize_row_q8_K(const float * restrict x, void * restrict y, int k);
 
 
 
 
 
 
 
 
 
 
 
 
197
 
198
  // Dequantization
199
- void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k);
200
- void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k);
201
- void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k);
202
- void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k);
203
- void dequantize_row_q8_0(const block_q8_0 * restrict x, float * restrict y, int k);
204
- //void dequantize_row_q8_1(const block_q8_1 * restrict x, float * restrict y, int k);
205
-
206
- void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k);
207
- void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k);
208
- void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k);
209
- void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k);
210
- void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k);
211
- void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k);
 
 
 
 
 
 
 
 
 
 
212
 
213
  // Dot product
214
- void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
215
- void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
216
- void ggml_vec_dot_q5_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
217
- void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
218
- void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
219
-
220
- void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
221
- void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
222
- void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
223
- void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
224
- void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  #pragma once
2
 
3
+ #define GGML_COMMON_DECL_C
4
+ #include "ggml-common.h"
5
 
6
+ #include "ggml.h"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
 
8
+ // GGML internal header
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
+ #ifdef __cplusplus
11
+ extern "C" {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  #endif
13
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  // Quantization
+ void quantize_row_q4_0_reference(const float * GGML_RESTRICT x, block_q4_0 * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q4_1_reference(const float * GGML_RESTRICT x, block_q4_1 * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q5_0_reference(const float * GGML_RESTRICT x, block_q5_0 * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q5_1_reference(const float * GGML_RESTRICT x, block_q5_1 * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q8_0_reference(const float * GGML_RESTRICT x, block_q8_0 * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q8_1_reference(const float * GGML_RESTRICT x, block_q8_1 * GGML_RESTRICT y, int64_t k);
+
+ void quantize_row_q2_K_reference(const float * GGML_RESTRICT x, block_q2_K * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q3_K_reference(const float * GGML_RESTRICT x, block_q3_K * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q4_K_reference(const float * GGML_RESTRICT x, block_q4_K * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q5_K_reference(const float * GGML_RESTRICT x, block_q5_K * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q6_K_reference(const float * GGML_RESTRICT x, block_q6_K * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q8_K_reference(const float * GGML_RESTRICT x, block_q8_K * GGML_RESTRICT y, int64_t k);
+
+ void quantize_row_iq3_xxs_reference(const float * GGML_RESTRICT x, block_iq3_xxs * GGML_RESTRICT y, int64_t k);
+ void quantize_row_iq4_nl_reference (const float * GGML_RESTRICT x, block_iq4_nl * GGML_RESTRICT y, int64_t k);
+ void quantize_row_iq4_xs_reference (const float * GGML_RESTRICT x, block_iq4_xs * GGML_RESTRICT y, int64_t k);
+ void quantize_row_iq3_s_reference (const float * GGML_RESTRICT x, block_iq3_s * GGML_RESTRICT y, int64_t k);
+ void quantize_row_iq2_s_reference (const float * GGML_RESTRICT x, block_iq2_s * GGML_RESTRICT y, int64_t k);
+
+ void quantize_row_q4_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q4_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q5_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q5_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+
+ void quantize_row_q2_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q3_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q4_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q5_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q6_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+ void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+
+ void quantize_row_iq3_xxs(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+ void quantize_row_iq4_nl (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+ void quantize_row_iq4_xs (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+ void quantize_row_iq3_s (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+ void quantize_row_iq2_s (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);

  // Dequantization
+ void dequantize_row_q4_0(const block_q4_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ void dequantize_row_q4_1(const block_q4_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ void dequantize_row_q5_0(const block_q5_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ void dequantize_row_q5_1(const block_q5_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ void dequantize_row_q8_0(const block_q8_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ //void dequantize_row_q8_1(const block_q8_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+
+ void dequantize_row_q2_K(const block_q2_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ void dequantize_row_q3_K(const block_q3_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ void dequantize_row_q4_K(const block_q4_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ void dequantize_row_q5_K(const block_q5_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ void dequantize_row_q6_K(const block_q6_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ void dequantize_row_q8_K(const block_q8_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+
+ void dequantize_row_iq2_xxs(const block_iq2_xxs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ void dequantize_row_iq2_xs (const block_iq2_xs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ void dequantize_row_iq2_s (const block_iq2_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ void dequantize_row_iq3_xxs(const block_iq3_xxs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ void dequantize_row_iq1_s (const block_iq1_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ void dequantize_row_iq1_m (const block_iq1_m * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ void dequantize_row_iq4_nl (const block_iq4_nl * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ void dequantize_row_iq4_xs (const block_iq4_xs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+ void dequantize_row_iq3_s (const block_iq3_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);

  // Dot product
+ void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+ void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+ void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+ void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+ void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+
+ void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+ void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+ void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+ void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+ void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+
+ void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+ void ggml_vec_dot_iq2_xs_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+ void ggml_vec_dot_iq2_s_q8_K  (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+ void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+ void ggml_vec_dot_iq1_s_q8_K  (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+ void ggml_vec_dot_iq1_m_q8_K  (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+ void ggml_vec_dot_iq4_nl_q8_0 (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+ void ggml_vec_dot_iq4_xs_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+ void ggml_vec_dot_iq3_s_q8_K  (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+
+ // Quantization utilizing an importance matrix (a.k.a. "Activation aWare Quantization")
+ size_t quantize_iq2_xxs(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+ size_t quantize_iq2_xs (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+ size_t quantize_iq2_s  (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+ size_t quantize_iq3_xxs(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+ size_t quantize_iq1_s  (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+ size_t quantize_iq1_m  (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+ size_t quantize_iq4_nl (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+ size_t quantize_iq4_xs (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+ size_t quantize_iq3_s  (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+
+ size_t quantize_q2_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+ size_t quantize_q3_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+ size_t quantize_q4_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+ size_t quantize_q5_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+ size_t quantize_q6_K(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+ size_t quantize_q4_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+ size_t quantize_q4_1(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+ size_t quantize_q5_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+ size_t quantize_q5_1(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+ size_t quantize_q8_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+
+ void iq2xs_init_impl(enum ggml_type type);
+ void iq2xs_free_impl(enum ggml_type type);
+ void iq3xs_init_impl(int grid_size);
+ void iq3xs_free_impl(int grid_size);
+
+ #ifdef __cplusplus
+ }
+ #endif
+
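The header above only declares the row-quantization entry points; a minimal round-trip sketch (illustrative, not part of this commit) shows how a pair of them fits together. It assumes the header ships as ggml-quants.h and that block_q8_0 and the block size QK8_0 come from the bundled ggml-common.h included at its top.

// Hypothetical usage sketch: quantize one row of floats to q8_0 blocks with the
// reference path, then dequantize it back. The header name and QK8_0 are assumptions.
#include <stdio.h>
#include "ggml-quants.h"                  // assumed file name of the header above

int main(void) {
    float src[2 * QK8_0];                 // row length must be a multiple of QK8_0
    float dst[2 * QK8_0];
    for (int i = 0; i < 2 * QK8_0; ++i) {
        src[i] = i / 64.0f;
    }

    block_q8_0 blocks[2];                               // (2 * QK8_0) / QK8_0 blocks
    quantize_row_q8_0_reference(src, blocks, 2 * QK8_0);
    dequantize_row_q8_0(blocks, dst, 2 * QK8_0);

    printf("src[5]=%f dst[5]=%f\n", src[5], dst[5]);    // equal up to quantization error
    return 0;
}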
bindings/ruby/ext/ggml-sycl.h ADDED
@@ -0,0 +1,49 @@
+ //
+ // MIT license
+ // Copyright (C) 2024 Intel Corporation
+ // SPDX-License-Identifier: MIT
+ //
+
+ #pragma once
+
+ #include "ggml.h"
+ #include "ggml-backend.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ #define GGML_SYCL_MAX_DEVICES 48
+ #define GGML_SYCL_NAME "SYCL"
+
+ // backend API
+ GGML_API ggml_backend_t ggml_backend_sycl_init(int device);
+
+ // device buffer
+ GGML_API ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(int device);
+
+ // split tensor buffer that splits matrices by rows across multiple devices
+ GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_sycl_split_buffer_type(const float * tensor_split);
+
+ // pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
+ GGML_API ggml_backend_buffer_type_t ggml_backend_sycl_host_buffer_type(void);
+
+ GGML_API void ggml_backend_sycl_print_sycl_devices(void);
+ GGML_API GGML_CALL void ggml_sycl_get_gpu_list(int *id_list, int max_len);
+ GGML_API GGML_CALL void ggml_sycl_get_device_description(int device, char *description, size_t description_size);
+ GGML_API GGML_CALL int ggml_backend_sycl_get_device_count();
+ GGML_API GGML_CALL void ggml_backend_sycl_get_device_memory(int device, size_t *free, size_t *total);
+ GGML_API GGML_CALL int ggml_backend_sycl_get_device_index(int device_id);
+
+ // TODO: these are temporary
+ // ref: https://github.com/ggerganov/llama.cpp/pull/6022#issuecomment-1992615670
+ GGML_API GGML_CALL int ggml_backend_sycl_get_device_id(int device_index);
+ GGML_API GGML_CALL void ggml_backend_sycl_set_single_device_mode(int main_gpu_id);
+ GGML_API GGML_CALL void ggml_backend_sycl_set_mul_device_mode();
+
+ // SYCL doesn't support registering host memory, keep here for reference
+ // GGML_API GGML_CALL bool ggml_backend_sycl_register_host_buffer(void * buffer, size_t size);
+ // GGML_API GGML_CALL void ggml_backend_sycl_unregister_host_buffer(void * buffer);
+ #ifdef __cplusplus
+ }
+ #endif
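ggml-sycl.h carries declarations only; the sketch below (illustrative, not shipped by the gem) enumerates SYCL devices and brings up a backend with the API above. ggml_backend_t and ggml_backend_free come from ggml-backend.h, which is outside this hunk.

// Hypothetical sketch: list SYCL devices, then init/free a backend on device 0.
#include <stdio.h>
#include "ggml-backend.h"                 // for ggml_backend_free (not in this hunk)
#include "ggml-sycl.h"

int main(void) {
    const int n_dev = ggml_backend_sycl_get_device_count();
    for (int i = 0; i < n_dev; ++i) {
        char desc[128];
        size_t free_mem = 0, total_mem = 0;
        ggml_sycl_get_device_description(i, desc, sizeof(desc));
        ggml_backend_sycl_get_device_memory(i, &free_mem, &total_mem);
        printf("%s device %d: %s (%zu/%zu bytes free)\n", GGML_SYCL_NAME, i, desc, free_mem, total_mem);
    }

    ggml_backend_t backend = ggml_backend_sycl_init(0);   // device index 0
    if (backend == NULL) {
        fprintf(stderr, "failed to initialize the SYCL backend\n");
        return 1;
    }
    ggml_backend_free(backend);                           // assumed from ggml-backend.h
    return 0;
}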
bindings/ruby/ext/ggml-vulkan.h ADDED
@@ -0,0 +1,29 @@
+ #pragma once
+
+ #include "ggml.h"
+ #include "ggml-backend.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ #define GGML_VK_NAME "Vulkan"
+ #define GGML_VK_MAX_DEVICES 16
+
+ GGML_API void ggml_vk_instance_init(void);
+
+ // backend API
+ GGML_API GGML_CALL ggml_backend_t ggml_backend_vk_init(size_t dev_num);
+
+ GGML_API GGML_CALL bool ggml_backend_is_vk(ggml_backend_t backend);
+ GGML_API GGML_CALL int ggml_backend_vk_get_device_count(void);
+ GGML_API GGML_CALL void ggml_backend_vk_get_device_description(int device, char * description, size_t description_size);
+ GGML_API GGML_CALL void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total);
+
+ GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num);
+ // pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
+ GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type(void);
+
+ #ifdef __cplusplus
+ }
+ #endif
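The Vulkan header mirrors the SYCL one, so the same illustrative pattern applies (again not part of the commit; ggml_backend_free is assumed to come from ggml-backend.h):

// Hypothetical sketch: probe Vulkan devices and init/free the first one.
#include <stdio.h>
#include "ggml-backend.h"                 // for ggml_backend_free (not in this hunk)
#include "ggml-vulkan.h"

int main(void) {
    const int n_dev = ggml_backend_vk_get_device_count();
    for (int i = 0; i < n_dev; ++i) {
        char desc[128];
        size_t free_mem = 0, total_mem = 0;
        ggml_backend_vk_get_device_description(i, desc, sizeof(desc));
        ggml_backend_vk_get_device_memory(i, &free_mem, &total_mem);
        printf("%s device %d: %s (%zu/%zu bytes free)\n", GGML_VK_NAME, i, desc, free_mem, total_mem);
    }

    if (n_dev > 0) {
        ggml_backend_t backend = ggml_backend_vk_init(0);  // first Vulkan device
        if (backend != NULL && ggml_backend_is_vk(backend)) {
            ggml_backend_free(backend);                    // assumed from ggml-backend.h
        }
    }
    return 0;
}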
bindings/ruby/whispercpp.gemspec ADDED
@@ -0,0 +1,28 @@
+ Gem::Specification.new do |s|
+ s.name = "whispercpp"
+ s.authors = ["Georgi Gerganov", "Todd A. Fisher"]
+ s.version = '1.3.0'
+ s.date = '2024-05-14'
+ s.description = %q{High-performance inference of OpenAI's Whisper automatic speech recognition (ASR) model via Ruby}
+ s.email = '[email protected]'
+ s.extra_rdoc_files = ['LICENSE', 'README.md']
+
+ s.files = ["LICENSE", "README.md", "Rakefile", "ext/extconf.rb", "ext/ggml.c", "ext/ruby_whisper.cpp", "ext/whisper.cpp", "ext/dr_wav.h", "ext/ggml.h", "ext/ruby_whisper.h", "ext/whisper.h"]
+
+ #### Load-time details
+ s.require_paths = ['lib','ext']
+ s.summary = %q{Ruby whisper.cpp bindings}
+ s.test_files = ["tests/test_whisper.rb"]
+
+ s.extensions << 'ext/extconf.rb'
+
+
+ #### Documentation and testing.
+ s.homepage = 'https://github.com/ggerganov/whisper.cpp'
+ s.rdoc_options = ['--main', '../../README.md']
+
+
+ s.platform = Gem::Platform::RUBY
+
+ s.licenses = ['MIT']
+ end