ggerganov committed
Commit ab97f9c · unverified · 1 Parent(s): 036726b

Reduce memory usage even more + better sampling


- The encode/decode memory buffers are now reused (see the sketch below)
- If the 30-second segment goes on too long without a timestamp token, we force one. This improves transcription with the large model
- Stereo support
- Add "micro-machines.wav" sample
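For reference, a minimal sketch of the buffer-reuse idea, distilled from the main.cpp changes further down. The helper `resize_buffers` and its parameters are illustrative only (not part of the commit); the actual code does this inside `whisper_model_load()` using the `MEM_REQ_*` tables:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// One buffer for the model weights and two scratch buffers shared by the
// encoder and decoder. Each scratch buffer is sized once, for the larger of
// the two passes that will use it, so encode/decode calls reuse the same
// memory instead of malloc'ing their own working buffers.
static std::vector<uint8_t> g_buf_model;
static std::vector<uint8_t> g_buf_compute;
static std::vector<uint8_t> g_buf_compute_layer;

// Illustrative helper: size the buffers from per-model memory requirements,
// mirroring what whisper_model_load() now does with the MEM_REQ_* tables.
void resize_buffers(size_t mem_model,
                    size_t mem_encode,       size_t mem_decode,
                    size_t mem_encode_layer, size_t mem_decode_layer) {
    g_buf_model.resize(mem_model);
    g_buf_compute.resize(std::max(mem_encode, mem_decode));
    g_buf_compute_layer.resize(std::max(mem_encode_layer, mem_decode_layer));
}
```

The resized buffers are then handed to `ggml_init()` via `mem_size`/`mem_buffer`, so the encode and decode passes run without allocating at runtime.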

Files changed (3)
  1. Makefile +3 -0
  2. README.md +34 -26
  3. main.cpp +59 -61
Makefile CHANGED
@@ -20,10 +20,13 @@ samples:
 	@wget --quiet --show-progress -O samples/gb0.ogg https://upload.wikimedia.org/wikipedia/commons/2/22/George_W._Bush%27s_weekly_radio_address_%28November_1%2C_2008%29.oga
 	@wget --quiet --show-progress -O samples/gb1.ogg https://upload.wikimedia.org/wikipedia/commons/1/1f/George_W_Bush_Columbia_FINAL.ogg
 	@wget --quiet --show-progress -O samples/hp0.ogg https://upload.wikimedia.org/wikipedia/en/d/d4/En.henryfphillips.ogg
+	@wget --quiet --show-progress -O samples/mm1.wav https://cdn.openai.com/whisper/draft-20220913a/micro-machines.wav
 	@echo "Converting to 16-bit WAV ..."
 	@ffmpeg -loglevel -0 -y -i samples/gb0.ogg -ar 16000 -ac 1 -c:a pcm_s16le samples/gb0.wav
 	@ffmpeg -loglevel -0 -y -i samples/gb1.ogg -ar 16000 -ac 1 -c:a pcm_s16le samples/gb1.wav
 	@ffmpeg -loglevel -0 -y -i samples/hp0.ogg -ar 16000 -ac 1 -c:a pcm_s16le samples/hp0.wav
+	@ffmpeg -loglevel -0 -y -i samples/mm1.wav -ar 16000 -ac 1 -c:a pcm_s16le samples/mm0.wav
+	@rm samples/mm1.wav


 # if not already downloaded, the following targets download the specified model and
README.md CHANGED
@@ -1,12 +1,13 @@
 # whisper.cpp

-C/C++ port of [OpenAI's Whisper](https://github.com/openai/whisper) speech-to-text model
+High-performance inference of [OpenAI's Whisper](https://github.com/openai/whisper) automatic speech recognition (ASR) model:

 - Plain C/C++ implementation without dependencies
 - ARM_NEON and AVX intrinsics support
 - Mixed F16 / F32 support
 - Low memory usage (Flash Attention + Flash Forward)
 - Zero memory allocations at runtime
+- Runs on the CPU (Mac and Linux support)

 ## Usage

@@ -50,7 +51,12 @@ options:

 bash ./download-ggml-model.sh base.en
 Downloading ggml model base.en ...
-Model base.en already exists. Skipping download.
+models/ggml-base.en.bin 100%[=====================================>] 141.11M 8.58MB/s in 22s
+Done! Model 'base.en' saved in 'models/ggml-base.en.bin'
+You can now use it like this:
+
+  $ ./main -m models/ggml-base.en.bin -f samples/jfk.wav
+

 ===============================================
 Running base.en on all samples in ./samples ...
@@ -73,7 +79,7 @@ whisper_model_load: n_text_layer = 6
 whisper_model_load: n_mels = 80
 whisper_model_load: f16 = 1
 whisper_model_load: type = 2
-whisper_model_load: mem_required = 611.00 MB
+whisper_model_load: mem_required = 377.00 MB
 whisper_model_load: adding 1607 extra tokens
 whisper_model_load: ggml ctx size = 163.43 MB
 whisper_model_load: memory size = 22.83 MB
@@ -86,12 +92,12 @@ main: processing 176000 samples (11.0 sec), 4 threads, lang = english, task = tr
 [00:00.000 --> 00:11.000] And so my fellow Americans ask not what your country can do for you. Ask what you can do for your country.


-main: load time = 61.78 ms
-main: mel time = 41.74 ms
-main: sample time = 2.10 ms
-main: encode time = 718.60 ms / 119.77 ms per layer
-main: decode time = 83.55 ms
-main: total time = 908.15 ms
+main: load time = 82.05 ms
+main: mel time = 44.15 ms
+main: sample time = 1.98 ms
+main: encode time = 674.77 ms / 112.46 ms per layer
+main: decode time = 82.91 ms
+main: total time = 886.29 ms
 ```

 The command downloads the `base.en` model converted to custom `ggml` format and runs the inference on all `.wav` samples in the folder `samples`.
@@ -131,10 +137,12 @@ make large

 ## Another example

-Here is another example of transcribing a [3:24 min speech](https://upload.wikimedia.org/wikipedia/commons/1/1f/George_W_Bush_Columbia_FINAL.ogg) in less than a minute, using `medium.en` model:
+Here is another example of transcribing a [3:24 min speech](https://upload.wikimedia.org/wikipedia/commons/1/1f/George_W_Bush_Columbia_FINAL.ogg)
+in less than a minute on a MacBook M1 Pro, using `medium.en` model:

 ```java
 $ ./main -m models/ggml-medium.en.bin -f samples/gb1.wav -t 8
+
 whisper_model_load: loading model from 'models/ggml-medium.en.bin'
 whisper_model_load: n_vocab = 51864
 whisper_model_load: n_audio_ctx = 1500
@@ -148,7 +156,7 @@ whisper_model_load: n_text_layer = 24
 whisper_model_load: n_mels = 80
 whisper_model_load: f16 = 1
 whisper_model_load: type = 4
-whisper_model_load: mem_required = 2786.00 MB
+whisper_model_load: mem_required = 2502.00 MB
 whisper_model_load: adding 1607 extra tokens
 whisper_model_load: ggml ctx size = 1644.97 MB
 whisper_model_load: memory size = 182.62 MB
@@ -187,30 +195,30 @@ main: processing 3179750 samples (198.7 sec), 8 threads, lang = english, task =
 [03:14.000 --> 03:24.000] [Music]


-main: load time = 438.55 ms
-main: mel time = 440.22 ms
-main: sample time = 32.23 ms
-main: encode time = 42329.63 ms / 1763.73 ms per layer
-main: decode time = 15190.00 ms
-main: total time = 58444.63 ms
+main: load time = 522.18 ms
+main: mel time = 423.43 ms
+main: sample time = 31.42 ms
+main: encode time = 41518.51 ms / 1729.94 ms per layer
+main: decode time = 14907.22 ms
+main: total time = 57416.63 ms
 ```

 ## Limitations

 - Very basic greedy sampling scheme - always pick up the top token
+- Only 16-bit WAV at 16 kHz is supported
 - Inference only
-- Runs on the CPU
-- Only mono-channel 16-bit WAV is supported
+- No GPU support

 ## Memory usage

-| Model | Disk | Mem |
-| --- | --- | --- |
-| tiny | 75 MB | ~460 MB |
-| base | 142 MB | ~620 MB |
-| small | 466 MB | ~1.3 GB |
-| medium | 1.5 GB | ~2.8 GB |
-| large | 2.9 GB | ~4.9 GB |
+| Model | Disk | Mem |
+| --- | --- | --- |
+| tiny | 75 MB | ~240 MB |
+| base | 142 MB | ~380 MB |
+| small | 466 MB | ~970 MB |
+| medium | 1.5 GB | ~2.5 GB |
+| large | 2.9 GB | ~4.6 GB |

 ## ggml format

main.cpp CHANGED
@@ -158,11 +158,11 @@ const std::map<e_model, size_t> MEM_REQ_ENCODE_LAYER = {
 };

 const std::map<e_model, size_t> MEM_REQ_DECODE = {
-    { MODEL_TINY, 190ull*MB },
-    { MODEL_BASE, 190ull*MB },
-    { MODEL_SMALL, 190ull*MB },
-    { MODEL_MEDIUM, 200ull*MB },
-    { MODEL_LARGE, 200ull*MB },
+    { MODEL_TINY, 94ull*MB },
+    { MODEL_BASE, 96ull*MB },
+    { MODEL_SMALL, 98ull*MB },
+    { MODEL_MEDIUM, 100ull*MB },
+    { MODEL_LARGE, 102ull*MB },
 };

 const std::map<e_model, size_t> MEM_REQ_DECODE_LAYER = {
@@ -173,6 +173,11 @@ const std::map<e_model, size_t> MEM_REQ_DECODE_LAYER = {
     { MODEL_LARGE, 110ull*MB },
 };

+// the memory buffers used to store the model in memory and perform the inference computations
+std::vector<uint8_t> g_buf_model;
+std::vector<uint8_t> g_buf_compute;
+std::vector<uint8_t> g_buf_compute_layer;
+
 const int SAMPLE_RATE = 16000;
 const int N_FFT = 400;
 const int N_MEL = 80;
@@ -542,13 +547,15 @@ bool whisper_model_load(const std::string & fname, whisper_model & model, whispe
         printf("%s: f16 = %d\n", __func__, hparams.f16);
         printf("%s: type = %d\n", __func__, model.type);

+        g_buf_model.resize(MEM_REQ_MODEL.at(model.type));
+        g_buf_compute.resize(std::max(MEM_REQ_ENCODE.at(model.type), MEM_REQ_DECODE.at(model.type)));
+        g_buf_compute_layer.resize(std::max(MEM_REQ_ENCODE_LAYER.at(model.type), MEM_REQ_DECODE_LAYER.at(model.type)));
+
         // this is the total memory required to run the inference
         const size_t mem_required =
-            MEM_REQ_MODEL.at(model.type) +
-            MEM_REQ_ENCODE.at(model.type) +
-            MEM_REQ_ENCODE_LAYER.at(model.type) +
-            MEM_REQ_DECODE.at(model.type) +
-            MEM_REQ_DECODE_LAYER.at(model.type);
+            g_buf_model.size() +
+            g_buf_compute.size() +
+            g_buf_compute_layer.size();

         printf("%s: mem_required = %.2f MB\n", __func__, mem_required / 1024.0 / 1024.0);
     }
@@ -752,8 +759,8 @@ bool whisper_model_load(const std::string & fname, whisper_model & model, whispe
     // create the ggml context
     {
         struct ggml_init_params params = {
-            .mem_size = ctx_size,
-            .mem_buffer = NULL,
+            .mem_size = g_buf_model.size(),
+            .mem_buffer = g_buf_model.data(),
         };

         model.ctx = ggml_init(params);
@@ -1089,17 +1096,10 @@ bool whisper_encode(
     const int n_mels = hparams.n_mels;
     assert(mel_inp.n_mel == n_mels);

-    struct ggml_init_params params;
-
-    {
-        static size_t buf_size = MEM_REQ_ENCODE.at(model.type);
-        static void * buf = malloc(buf_size);
-
-        params = {
-            .mem_size = buf_size,
-            .mem_buffer = buf,
-        };
-    }
+    struct ggml_init_params params = {
+        .mem_size = g_buf_compute.size(),
+        .mem_buffer = g_buf_compute.data(),
+    };

     struct ggml_context * ctx0 = ggml_init(params);

@@ -1151,16 +1151,10 @@
 
     // create separate context for each layer to reduce memory usage

-    struct ggml_init_params paramsL;
-    {
-        static size_t buf_size = MEM_REQ_ENCODE_LAYER.at(model.type);
-        static void * buf = malloc(buf_size);
-
-        paramsL = {
-            .mem_size = buf_size,
-            .mem_buffer = buf,
-        };
-    }
+    struct ggml_init_params paramsL = {
+        .mem_size = g_buf_compute_layer.size(),
+        .mem_buffer = g_buf_compute_layer.data(),
+    };

     struct ggml_context * ctxL = ggml_init(paramsL);

@@ -1492,17 +1486,10 @@ bool whisper_decode(
     const int N = prompt.size();
     const int M = hparams.n_audio_ctx;

-    struct ggml_init_params params;
-
-    {
-        static size_t buf_size = MEM_REQ_DECODE.at(model.type);
-        static void * buf = malloc(buf_size);
-
-        params = {
-            .mem_size = buf_size,
-            .mem_buffer = buf,
+    struct ggml_init_params params = {
+        .mem_size = g_buf_compute.size(),
+        .mem_buffer = g_buf_compute.data(),
     };
-    }

     struct ggml_context * ctx0 = ggml_init(params);

@@ -1525,17 +1512,10 @@ bool whisper_decode(
     for (int il = 0; il < n_layer; ++il) {
         const auto & layer = model.layers_decoder[il];

-        struct ggml_init_params paramsL;
-
-        {
-            static size_t buf_size = MEM_REQ_DECODE_LAYER.at(model.type);
-            static void * buf = malloc(buf_size);
-
-            paramsL = {
-                .mem_size = buf_size,
-                .mem_buffer = buf,
-            };
-        }
+        struct ggml_init_params paramsL = {
+            .mem_size = g_buf_compute_layer.size(),
+            .mem_buffer = g_buf_compute_layer.data(),
+        };

         struct ggml_context * ctxL = ggml_init(paramsL);
         struct ggml_cgraph gf = { .n_threads = n_threads };
@@ -1849,7 +1829,7 @@ bool whisper_decode(
 // TODO: temperature
 whisper_vocab::id whisper_sample_best(
         const whisper_vocab & vocab,
-        const float * probs) {
+        const float * probs, bool need_timestamp) {
     int n_logits = vocab.id_to_token.size();

     std::vector<std::pair<double, whisper_vocab::id>> probs_id;
@@ -1859,7 +1839,7 @@ whisper_vocab::id whisper_sample_best(
         probs_id.push_back(std::make_pair(probs[i], i));
     }

-    const int top_k = 10;
+    const int top_k = 4;

     // find the top K tokens
     std::partial_sort(
@@ -1876,6 +1856,15 @@ whisper_vocab::id whisper_sample_best(
     //    printf("%d: '%s' %f, %d\n", i, vocab.id_to_token.at(probs_id[i].second).c_str(), probs_id[i].first, probs_id[i].second);
     //}

+    if (need_timestamp) {
+        // at the end of the 30-second audio segment, we start giving preference to time tokens
+        for (int i = 0; i < top_k; i++) {
+            if (probs_id[i].second > vocab.token_beg + 1300 && probs_id[i].first > probs_id[0].first*0.1) {
+                return probs_id[i].second;
+            }
+        }
+    }
+
     int res = 0;
     while ((probs_id[res].second == vocab.token_sot ||
             probs_id[res].second == vocab.token_solm ||
@@ -2136,8 +2125,8 @@ int main(int argc, char ** argv) {
         return 2;
     }

-    if (wav.channels != 1) {
-        fprintf(stderr, "%s: WAV file '%s' must be mono\n", argv[0], params.fname_inp.c_str());
+    if (wav.channels != 1 && wav.channels != 2) {
+        fprintf(stderr, "%s: WAV file '%s' must be mono or stereo\n", argv[0], params.fname_inp.c_str());
         return 3;
     }

@@ -2158,8 +2147,14 @@ int main(int argc, char ** argv) {

         // convert to float
         pcmf32.resize(pcm16.size());
-        for (size_t i = 0; i < pcm16.size(); i++) {
-            pcmf32[i] = float(pcm16[i])/32768.0f;
+        if (wav.channels == 1) {
+            for (size_t i = 0; i < pcm16.size(); i++) {
+                pcmf32[i] = float(pcm16[i])/32768.0f;
+            }
+        } else {
+            for (size_t i = 0; i < pcm16.size(); i++) {
+                pcmf32[i] = float(pcm16[i*2 + 0] + pcm16[i*2 + 1])/32768.0f/2.0f;
+            }
         }
     }

@@ -2252,6 +2247,7 @@ int main(int argc, char ** argv) {
         int seek_delta = 100*CHUNK_SIZE;
         whisper_vocab::id last_id = 0;

+        // print the prompt
         //printf("\n\n");
         //for (int i = 0; i < prompt.size(); i++) {
         //    printf("%s: prompt[%d] = %s\n", __func__, i, vocab.id_to_token[prompt[i]].c_str());
@@ -2294,7 +2290,7 @@ int main(int argc, char ** argv) {
             {
                 const int64_t t_start_sample_us = ggml_time_us();

-                id = whisper_sample_best(vocab, probs.data() + (probs.size() - n_vocab));
+                id = whisper_sample_best(vocab, probs.data() + (probs.size() - n_vocab), result_len == 0);
                 if (i > 0) {
                     tid = whisper_sample_timestamp(vocab, probs.data() + (probs.size() - n_vocab));
                 }
@@ -2313,6 +2309,8 @@ int main(int argc, char ** argv) {
                 prompt.push_back(id);
                 result_cur.push_back({ id, seek + 2*(tid - vocab.token_beg) });

+                //printf("%s: %s\n", __func__, vocab.id_to_token[id].c_str());
+
                 // end of text token
                 if (id == vocab.token_eot) {
                     break;