/*
 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


/* ====== Compiler specifics ====== */
#if defined(_MSC_VER)
#  pragma warning(disable : 4204)   /* disable: C4204: non-constant aggregate initializer */
#endif


/* ====== Constants ====== */
#define ZSTDMT_OVERLAPLOG_DEFAULT 0


/* ====== Dependencies ====== */
#include "../common/zstd_deps.h"     /* ZSTD_memcpy, ZSTD_memset, INT_MAX, UINT_MAX */
#include "../common/mem.h"           /* MEM_STATIC */
#include "../common/pool.h"          /* threadpool */
#include "../common/threading.h"     /* mutex */
#include "zstd_compress_internal.h"  /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
#include "zstd_ldm.h"
#include "zstdmt_compress.h"

/* Guards code to support resizing the SeqPool.
 * We will want to resize the SeqPool to save memory in the future.
 * Until then, comment the code out since it is unused.
 */
#define ZSTD_RESIZE_SEQPOOL 0

/* ====== Debug ====== */
#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \
    && !defined(_MSC_VER) \
    && !defined(__MINGW32__)

#  include <stdio.h>
#  include <unistd.h>
#  include <sys/times.h>

#  define DEBUG_PRINTHEX(l,p,n) {                                     \
       unsigned debug_u;                                              \
       for (debug_u=0; debug_u<(n); debug_u++)                        \
           RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]);  \
       RAWLOG(l, " \n");                                              \
   }

static unsigned long long GetCurrentClockTimeMicroseconds(void)
{
    static clock_t _ticksPerSecond = 0;
    if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK);

    {   struct tms junk;   clock_t newTicks = (clock_t) times(&junk);
        return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond);
}   }

#define MUTEX_WAIT_TIME_DLEVEL 6
#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) {                                          \
    if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) {                                   \
        unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds();  \
        ZSTD_pthread_mutex_lock(mutex);                                           \
        {   unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
            unsigned long long const elapsedTime = (afterTime-beforeTime);        \
            if (elapsedTime > 1000) {   /* or whatever threshold you like; I'm using 1 millisecond here */ \
                DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \
                    elapsedTime, #mutex);                                         \
        }   }                                                                     \
    } else {                                                                      \
        ZSTD_pthread_mutex_lock(mutex);                                           \
    }                                                                             \
}

#else

#  define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m)
#  define DEBUG_PRINTHEX(l,p,n) {}

#endif


/* ===== Buffer Pool ===== */
/* a single Buffer Pool can be invoked from multiple threads in parallel */

typedef struct buffer_s {
    void* start;
    size_t capacity;
} buffer_t;

static const buffer_t g_nullBuffer = { NULL, 0 };

typedef struct ZSTDMT_bufferPool_s {
    ZSTD_pthread_mutex_t poolMutex;
    size_t bufferSize;
    unsigned totalBuffers;
    unsigned nbBuffers;
    ZSTD_customMem cMem;
    buffer_t bTable[1];   /* variable size */
} ZSTDMT_bufferPool;

static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_customMem cMem)
{
    unsigned const maxNbBuffers = 2*nbWorkers + 3;
    ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_customCalloc(
        sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
    if (bufPool==NULL) return NULL;
    if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) {
        ZSTD_customFree(bufPool, cMem);
        return NULL;
    }
    bufPool->bufferSize = 64 KB;
    bufPool->totalBuffers = maxNbBuffers;
    bufPool->nbBuffers = 0;
    bufPool->cMem = cMem;
    return bufPool;
}
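
/* Sizing note (illustrative): the pool is one allocation whose tail array
 * `bTable` is over-allocated beyond its declared size of 1.
 * For example, with nbWorkers==4, maxNbBuffers==11, so the allocation is
 * sizeof(ZSTDMT_bufferPool) + 10 * sizeof(buffer_t), giving 11 usable slots. */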

static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
{
    unsigned u;
    DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool);
    if (!bufPool) return;   /* compatibility with free on NULL */
    for (u=0; u<bufPool->totalBuffers; u++) {
        DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start);
        ZSTD_customFree(bufPool->bTable[u].start, bufPool->cMem);
    }
    ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);
    ZSTD_customFree(bufPool, bufPool->cMem);
}

/* only works at initialization, not during compression */
static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
{
    size_t const poolSize = sizeof(*bufPool)
                          + (bufPool->totalBuffers - 1) * sizeof(buffer_t);
    unsigned u;
    size_t totalBufferSize = 0;
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    for (u=0; u<bufPool->totalBuffers; u++)
        totalBufferSize += bufPool->bTable[u].capacity;
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);

    return poolSize + totalBufferSize;
}

/* ZSTDMT_setBufferSize() :
 * all future buffers provided by this buffer pool will have _at least_ this size
 * note : it's better for all buffers to have the same size,
 * as they become freely interchangeable, reducing malloc/free usage and memory fragmentation */
static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize)
{
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    DEBUGLOG(4, "ZSTDMT_setBufferSize: bSize = %u", (U32)bSize);
    bufPool->bufferSize = bSize;
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
}


static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, U32 nbWorkers)
{
    unsigned const maxNbBuffers = 2*nbWorkers + 3;
    if (srcBufPool==NULL) return NULL;
    if (srcBufPool->totalBuffers >= maxNbBuffers)   /* good enough */
        return srcBufPool;
    /* need a larger buffer pool */
    {   ZSTD_customMem const cMem = srcBufPool->cMem;
        size_t const bSize = srcBufPool->bufferSize;   /* forward parameters */
        ZSTDMT_bufferPool* newBufPool;
        ZSTDMT_freeBufferPool(srcBufPool);
        newBufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
        if (newBufPool==NULL) return newBufPool;
        ZSTDMT_setBufferSize(newBufPool, bSize);
        return newBufPool;
    }
}

/** ZSTDMT_getBuffer() :
 *  assumption : bufPool must be valid
 * @return : a buffer, with start pointer and size
 *  note: allocation may fail, in this case, start==NULL and size==0 */
static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
{
    size_t const bSize = bufPool->bufferSize;
    DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize);
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    if (bufPool->nbBuffers) {   /* try to use an existing buffer */
        buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)];
        size_t const availBufferSize = buf.capacity;
        bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer;
        if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) {
            /* large enough, but not too much */
            DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u",
                        bufPool->nbBuffers, (U32)buf.capacity);
            ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
            return buf;
        }
        /* size conditions not respected : scratch this buffer, create a new one */
        DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing");
        ZSTD_customFree(buf.start, bufPool->cMem);
    }
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
    /* create new buffer */
    DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer");
    {   buffer_t buffer;
        void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
        buffer.start = start;   /* note : start can be NULL if malloc fails ! */
        buffer.capacity = (start==NULL) ? 0 : bSize;
        if (start==NULL) {
            DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!");
        } else {
            DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize);
        }
        return buffer;
    }
}
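
/* Typical borrow/return cycle (illustrative sketch, not part of the library):
 *
 *     buffer_t const buf = ZSTDMT_getBuffer(bufPool);
 *     if (buf.start == NULL) { .. handle allocation failure .. }
 *     .. write up to buf.capacity bytes at buf.start ..
 *     ZSTDMT_releaseBuffer(bufPool, buf);   // recycled if the pool has room
 */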

#if ZSTD_RESIZE_SEQPOOL
/** ZSTDMT_resizeBuffer() :
 *  assumption : bufPool must be valid
 * @return : a buffer that is at least the buffer pool buffer size.
 *  If a reallocation happens, the data in the input buffer is copied.
 */
static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer)
{
    size_t const bSize = bufPool->bufferSize;
    if (buffer.capacity < bSize) {
        void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
        buffer_t newBuffer;
        newBuffer.start = start;
        newBuffer.capacity = start == NULL ? 0 : bSize;
        if (start != NULL) {
            assert(newBuffer.capacity >= buffer.capacity);
            ZSTD_memcpy(newBuffer.start, buffer.start, buffer.capacity);
            DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize);
            return newBuffer;
        }
        DEBUGLOG(5, "ZSTDMT_resizeBuffer: buffer allocation failure !!");
    }
    return buffer;
}
#endif

/* store buffer for later re-use, up to pool capacity */
static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
{
    DEBUGLOG(5, "ZSTDMT_releaseBuffer");
    if (buf.start == NULL) return;   /* compatible with release on NULL */
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    if (bufPool->nbBuffers < bufPool->totalBuffers) {
        bufPool->bTable[bufPool->nbBuffers++] = buf;   /* stored for later use */
        DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u",
                    (U32)buf.capacity, (U32)(bufPool->nbBuffers-1));
        ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
        return;
    }
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
    /* Reached bufferPool capacity (should not happen) */
    DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing ");
    ZSTD_customFree(buf.start, bufPool->cMem);
}


/* ===== Seq Pool Wrapper ====== */

typedef ZSTDMT_bufferPool ZSTDMT_seqPool;

static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)
{
    return ZSTDMT_sizeof_bufferPool(seqPool);
}

static rawSeqStore_t bufferToSeq(buffer_t buffer)
{
    rawSeqStore_t seq = kNullRawSeqStore;
    seq.seq = (rawSeq*)buffer.start;
    seq.capacity = buffer.capacity / sizeof(rawSeq);
    return seq;
}

static buffer_t seqToBuffer(rawSeqStore_t seq)
{
    buffer_t buffer;
    buffer.start = seq.seq;
    buffer.capacity = seq.capacity * sizeof(rawSeq);
    return buffer;
}
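
/* Note (illustrative): bufferToSeq() and seqToBuffer() are inverse views of
 * the same allocation. Capacity is expressed in whole rawSeq units, so a
 * buffer whose byte capacity is not a multiple of sizeof(rawSeq) simply
 * drops the trailing remainder when viewed as a sequence store. */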

static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool)
{
    if (seqPool->bufferSize == 0) {
        return kNullRawSeqStore;
    }
    return bufferToSeq(ZSTDMT_getBuffer(seqPool));
}

#if ZSTD_RESIZE_SEQPOOL
static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
{
    return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq)));
}
#endif

static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
{
    ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq));
}

static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq)
{
    ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq));
}

static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem)
{
    ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
    if (seqPool == NULL) return NULL;
    ZSTDMT_setNbSeq(seqPool, 0);
    return seqPool;
}

static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool)
{
    ZSTDMT_freeBufferPool(seqPool);
}

static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers)
{
    return ZSTDMT_expandBufferPool(pool, nbWorkers);
}


/* ===== CCtx Pool ===== */
/* a single CCtx Pool can be invoked from multiple threads in parallel */

typedef struct {
    ZSTD_pthread_mutex_t poolMutex;
    int totalCCtx;
    int availCCtx;
    ZSTD_customMem cMem;
    ZSTD_CCtx* cctx[1];   /* variable size */
} ZSTDMT_CCtxPool;

/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */
static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
{
    int cid;
    for (cid=0; cid<pool->totalCCtx; cid++)
        ZSTD_freeCCtx(pool->cctx[cid]);   /* note : compatible with free on NULL */
    ZSTD_pthread_mutex_destroy(&pool->poolMutex);
    ZSTD_customFree(pool, pool->cMem);
}

/* ZSTDMT_createCCtxPool() :
 * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */
static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers,
                                              ZSTD_customMem cMem)
{
    ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_customCalloc(
        sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem);
    assert(nbWorkers > 0);
    if (!cctxPool) return NULL;
    if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
        ZSTD_customFree(cctxPool, cMem);
        return NULL;
    }
    cctxPool->cMem = cMem;
    cctxPool->totalCCtx = nbWorkers;
    cctxPool->availCCtx = 1;   /* at least one cctx for single-thread mode */
    cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem);
    if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }
    DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers);
    return cctxPool;
}

static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool,
                                              int nbWorkers)
{
    if (srcPool==NULL) return NULL;
    if (nbWorkers <= srcPool->totalCCtx) return srcPool;   /* good enough */
    /* need a larger cctx pool */
    {   ZSTD_customMem const cMem = srcPool->cMem;
        ZSTDMT_freeCCtxPool(srcPool);
        return ZSTDMT_createCCtxPool(nbWorkers, cMem);
    }
}

/* only works during initialization phase, not during compression */
static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
{
    ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
    {   unsigned const nbWorkers = cctxPool->totalCCtx;
        size_t const poolSize = sizeof(*cctxPool)
                              + (nbWorkers-1) * sizeof(ZSTD_CCtx*);
        unsigned u;
        size_t totalCCtxSize = 0;
        for (u=0; u<nbWorkers; u++) {
            totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);
        }
        ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
        assert(nbWorkers > 0);
        return poolSize + totalCCtxSize;
    }
}

static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool)
{
    DEBUGLOG(5, "ZSTDMT_getCCtx");
    ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
    if (cctxPool->availCCtx) {
        cctxPool->availCCtx--;
        {   ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx];
            ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
            return cctx;
    }   }
    ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
    DEBUGLOG(5, "create one more CCtx");
    return ZSTD_createCCtx_advanced(cctxPool->cMem);   /* note : can be NULL, when creation fails ! */
}

static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
{
    if (cctx==NULL) return;   /* compatibility with release on NULL */
    ZSTD_pthread_mutex_lock(&pool->poolMutex);
    if (pool->availCCtx < pool->totalCCtx)
        pool->cctx[pool->availCCtx++] = cctx;
    else {
        /* pool overflow : should not happen, since totalCCtx==nbWorkers */
        DEBUGLOG(4, "CCtx pool overflow : free cctx");
        ZSTD_freeCCtx(cctx);
    }
    ZSTD_pthread_mutex_unlock(&pool->poolMutex);
}
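
/* Lazy-allocation note (illustrative): the pool starts with a single CCtx
 * (availCCtx==1). ZSTDMT_getCCtx() falls back to creating a fresh CCtx when
 * the pool is empty, and ZSTDMT_releaseCCtx() re-absorbs returned contexts
 * up to totalCCtx slots, freeing any excess. Worker contexts are therefore
 * created on demand rather than all up-front. */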

/* ==== Serial State ==== */

typedef struct {
    void const* start;
    size_t size;
} range_t;

typedef struct {
    /* All variables in the struct are protected by mutex. */
    ZSTD_pthread_mutex_t mutex;
    ZSTD_pthread_cond_t cond;
    ZSTD_CCtx_params params;
    ldmState_t ldmState;
    XXH64_state_t xxhState;
    unsigned nextJobID;
    /* Protects ldmWindow.
     * Must be acquired after the main mutex when acquiring both.
     */
    ZSTD_pthread_mutex_t ldmWindowMutex;
    ZSTD_pthread_cond_t ldmWindowCond;   /* Signaled when ldmWindow is updated */
    ZSTD_window_t ldmWindow;             /* A thread-safe copy of ldmState.window */
} serialState_t;
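
/* Lock-ordering sketch (follows the rule stated above; see
 * ZSTDMT_serialState_ensureFinished() below for a real instance):
 *
 *     ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);           // main mutex first
 *     ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);  // then ldmWindow's
 *     ...
 *     ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
 *     ZSTD_pthread_mutex_unlock(&serialState->mutex);
 */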

static int
ZSTDMT_serialState_reset(serialState_t* serialState,
                         ZSTDMT_seqPool* seqPool,
                         ZSTD_CCtx_params params,
                         size_t jobSize,
                         const void* dict, size_t const dictSize,
                         ZSTD_dictContentType_e dictContentType)
{
    /* Adjust parameters */
    if (params.ldmParams.enableLdm) {
        DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10);
        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
        assert(params.ldmParams.hashRateLog < 32);
        serialState->ldmState.hashPower =
                ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
    } else {
        ZSTD_memset(&params.ldmParams, 0, sizeof(params.ldmParams));
    }
    serialState->nextJobID = 0;
    if (params.fParams.checksumFlag)
        XXH64_reset(&serialState->xxhState, 0);
    if (params.ldmParams.enableLdm) {
        ZSTD_customMem cMem = params.customMem;
        unsigned const hashLog = params.ldmParams.hashLog;
        size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
        unsigned const bucketLog =
            params.ldmParams.hashLog - params.ldmParams.bucketSizeLog;
        size_t const bucketSize = (size_t)1 << bucketLog;
        unsigned const prevBucketLog =
            serialState->params.ldmParams.hashLog -
            serialState->params.ldmParams.bucketSizeLog;
        /* Size the seq pool tables */
        ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize));
        /* Reset the window */
        ZSTD_window_init(&serialState->ldmState.window);
        /* Resize tables and output space if necessary. */
        if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) {
            ZSTD_customFree(serialState->ldmState.hashTable, cMem);
            serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc(hashSize, cMem);
        }
        if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) {
            ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
            serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_customMalloc(bucketSize, cMem);
        }
        if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets)
            return 1;
        /* Zero the tables */
        ZSTD_memset(serialState->ldmState.hashTable, 0, hashSize);
        ZSTD_memset(serialState->ldmState.bucketOffsets, 0, bucketSize);

        /* Update window state and fill hash table with dict */
        serialState->ldmState.loadedDictEnd = 0;
        if (dictSize > 0) {
            if (dictContentType == ZSTD_dct_rawContent) {
                BYTE const* const dictEnd = (const BYTE*)dict + dictSize;
                ZSTD_window_update(&serialState->ldmState.window, dict, dictSize);
                ZSTD_ldm_fillHashTable(&serialState->ldmState, (const BYTE*)dict, dictEnd, &params.ldmParams);
                serialState->ldmState.loadedDictEnd = params.forceWindow ? 0 : (U32)(dictEnd - serialState->ldmState.window.base);
            } else {
                /* don't even load anything */
            }
        }

        /* Initialize serialState's copy of ldmWindow. */
        serialState->ldmWindow = serialState->ldmState.window;
    }

    serialState->params = params;
    serialState->params.jobSize = (U32)jobSize;
    return 0;
}

static int ZSTDMT_serialState_init(serialState_t* serialState)
{
    int initError = 0;
    ZSTD_memset(serialState, 0, sizeof(*serialState));
    initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL);
    initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL);
    initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL);
    initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL);
    return initError;
}

static void ZSTDMT_serialState_free(serialState_t* serialState)
{
    ZSTD_customMem cMem = serialState->params.customMem;
    ZSTD_pthread_mutex_destroy(&serialState->mutex);
    ZSTD_pthread_cond_destroy(&serialState->cond);
    ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex);
    ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond);
    ZSTD_customFree(serialState->ldmState.hashTable, cMem);
    ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
}

static void ZSTDMT_serialState_update(serialState_t* serialState,
                                      ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore,
                                      range_t src, unsigned jobID)
{
    /* Wait for our turn */
    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
    while (serialState->nextJobID < jobID) {
        DEBUGLOG(5, "wait for serialState->cond");
        ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex);
    }
    /* A future job may error and skip our job */
    if (serialState->nextJobID == jobID) {
        /* It is now our turn, do any processing necessary */
        if (serialState->params.ldmParams.enableLdm) {
            size_t error;
            assert(seqStore.seq != NULL && seqStore.pos == 0 &&
                   seqStore.size == 0 && seqStore.capacity > 0);
            assert(src.size <= serialState->params.jobSize);
            ZSTD_window_update(&serialState->ldmState.window, src.start, src.size);
            error = ZSTD_ldm_generateSequences(
                &serialState->ldmState, &seqStore,
                &serialState->params.ldmParams, src.start, src.size);
            /* We provide a large enough buffer to never fail. */
            assert(!ZSTD_isError(error)); (void)error;
            /* Update ldmWindow to match the ldmState.window and signal the main
             * thread if it is waiting for a buffer.
             */
            ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
            serialState->ldmWindow = serialState->ldmState.window;
            ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
            ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
        }
        if (serialState->params.fParams.checksumFlag && src.size > 0)
            XXH64_update(&serialState->xxhState, src.start, src.size);
    }
    /* Now it is the next job's turn */
    serialState->nextJobID++;
    ZSTD_pthread_cond_broadcast(&serialState->cond);
    ZSTD_pthread_mutex_unlock(&serialState->mutex);

    if (seqStore.size > 0) {
        size_t const err = ZSTD_referenceExternalSequences(
            jobCCtx, seqStore.seq, seqStore.size);
        assert(serialState->params.ldmParams.enableLdm);
        assert(!ZSTD_isError(err));
        (void)err;
    }
}
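
/* Sequencing note (illustrative): each job calls ZSTDMT_serialState_update()
 * exactly once, and the nextJobID counter forces those calls to run in job
 * order. Only the genuinely order-dependent work (LDM sequence generation and
 * the frame checksum) happens inside this serial section; the expensive block
 * compression below stays fully parallel. */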

static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState,
                                              unsigned jobID, size_t cSize)
{
    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
    if (serialState->nextJobID <= jobID) {
        assert(ZSTD_isError(cSize)); (void)cSize;
        DEBUGLOG(5, "Skipping past job %u because of error", jobID);
        serialState->nextJobID = jobID + 1;
        ZSTD_pthread_cond_broadcast(&serialState->cond);

        ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
        ZSTD_window_clear(&serialState->ldmWindow);
        ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
        ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
    }
    ZSTD_pthread_mutex_unlock(&serialState->mutex);
}


/* ------------------------------------------ */
/* =====          Worker thread          ===== */
/* ------------------------------------------ */

static const range_t kNullRange = { NULL, 0 };

typedef struct {
    size_t   consumed;                 /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */
    size_t   cSize;                    /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */
    ZSTD_pthread_mutex_t job_mutex;    /* Thread-safe - used by mtctx and worker */
    ZSTD_pthread_cond_t job_cond;      /* Thread-safe - used by mtctx and worker */
    ZSTDMT_CCtxPool* cctxPool;         /* Thread-safe - used by mtctx and (all) workers */
    ZSTDMT_bufferPool* bufPool;        /* Thread-safe - used by mtctx and (all) workers */
    ZSTDMT_seqPool* seqPool;           /* Thread-safe - used by mtctx and (all) workers */
    serialState_t* serial;             /* Thread-safe - used by mtctx and (all) workers */
    buffer_t dstBuff;                  /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */
    range_t prefix;                    /* set by mtctx, then read by worker & mtctx => no barrier */
    range_t src;                       /* set by mtctx, then read by worker & mtctx => no barrier */
    unsigned jobID;                    /* set by mtctx, then read by worker => no barrier */
    unsigned firstJob;                 /* set by mtctx, then read by worker => no barrier */
    unsigned lastJob;                  /* set by mtctx, then read by worker => no barrier */
    ZSTD_CCtx_params params;           /* set by mtctx, then read by worker => no barrier */
    const ZSTD_CDict* cdict;           /* set by mtctx, then read by worker => no barrier */
    unsigned long long fullFrameSize;  /* set by mtctx, then read by worker => no barrier */
    size_t   dstFlushed;               /* used only by mtctx */
    unsigned frameChecksumNeeded;      /* used only by mtctx */
} ZSTDMT_jobDescription;

#define JOB_ERROR(e) {                          \
    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);   \
    job->cSize = e;                             \
    ZSTD_pthread_mutex_unlock(&job->job_mutex); \
    goto _endJob;                               \
}
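
/* Error-channel note (illustrative): job->cSize doubles as the error slot.
 * ZSTD error codes are encoded as out-of-range size_t values, so the owner
 * thread can read cSize under job_mutex and test it with ZSTD_isError(),
 * exactly as ZSTDMT_getFrameProgression() and ZSTDMT_toFlushNow() do below. */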

/* ZSTDMT_compressionJob() is a POOL_function type */
static void ZSTDMT_compressionJob(void* jobDescription)
{
    ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
    ZSTD_CCtx_params jobParams = job->params;   /* do not modify job->params ! copy it, modify the copy */
    ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
    rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);
    buffer_t dstBuff = job->dstBuff;
    size_t lastCBlockSize = 0;

    /* resources */
    if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));
    if (dstBuff.start == NULL) {   /* streaming job : doesn't provide a dstBuffer */
        dstBuff = ZSTDMT_getBuffer(job->bufPool);
        if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation));
        job->dstBuff = dstBuff;   /* this value can be read in ZSTDMT_flush, when it copies the whole job */
    }
    if (jobParams.ldmParams.enableLdm && rawSeqStore.seq == NULL)
        JOB_ERROR(ERROR(memory_allocation));

    /* Don't compute the checksum for chunks, since we compute it externally,
     * but write it in the header.
     */
    if (job->jobID != 0) jobParams.fParams.checksumFlag = 0;
    /* Don't run LDM for the chunks, since we handle it externally */
    jobParams.ldmParams.enableLdm = 0;


    /* init */
    if (job->cdict) {
        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize);
        assert(job->firstJob);   /* only allowed for first job */
        if (ZSTD_isError(initError)) JOB_ERROR(initError);
    } else {   /* srcStart points at reloaded section */
        U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
        {   size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
            if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
        }
        {   size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
                        job->prefix.start, job->prefix.size, ZSTD_dct_rawContent,   /* load dictionary in "content-only" mode (no header analysis) */
                        ZSTD_dtlm_fast,
                        NULL,   /*cdict*/
                        &jobParams, pledgedSrcSize);
            if (ZSTD_isError(initError)) JOB_ERROR(initError);
    }   }

    /* Perform serial step as early as possible, but after CCtx initialization */
    ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID);

    if (!job->firstJob) {   /* flush and overwrite frame header when it's not first job */
        size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0);
        if (ZSTD_isError(hSize)) JOB_ERROR(hSize);
        DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize);
        ZSTD_invalidateRepCodes(cctx);
    }

    /* compress */
    {   size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX;
        int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize);
        const BYTE* ip = (const BYTE*) job->src.start;
        BYTE* const ostart = (BYTE*)dstBuff.start;
        BYTE* op = ostart;
        BYTE* oend = op + dstBuff.capacity;
        int chunkNb;
        if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize);   /* check overflow */
        DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks);
        assert(job->cSize == 0);
        for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) {
            size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize);
            if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
            ip += chunkSize;
            op += cSize; assert(op < oend);
            /* stats */
            ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
            job->cSize += cSize;
            job->consumed = chunkSize * chunkNb;
            DEBUGLOG(5, "ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)",
                        (U32)cSize, (U32)job->cSize);
            ZSTD_pthread_cond_signal(&job->job_cond);   /* warns some more data is ready to be flushed */
            ZSTD_pthread_mutex_unlock(&job->job_mutex);
        }
        /* last block */
        assert(chunkSize > 0);
        assert((chunkSize & (chunkSize - 1)) == 0);   /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */
        if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) {
            size_t const lastBlockSize1 = job->src.size & (chunkSize-1);
            size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1;
            size_t const cSize = (job->lastJob) ?
                 ZSTD_compressEnd     (cctx, op, oend-op, ip, lastBlockSize) :
                 ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize);
            if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
            lastCBlockSize = cSize;
    }   }

_endJob:
    ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize);
    if (job->prefix.size > 0)
        DEBUGLOG(5, "Finished with prefix: %zx", (size_t)job->prefix.start);
    DEBUGLOG(5, "Finished with source: %zx", (size_t)job->src.start);
    /* release resources */
    ZSTDMT_releaseSeq(job->seqPool, rawSeqStore);
    ZSTDMT_releaseCCtx(job->cctxPool, cctx);
    /* report */
    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
    if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0);
    job->cSize += lastCBlockSize;
    job->consumed = job->src.size;   /* when job->consumed == job->src.size , compression job is presumed completed */
    ZSTD_pthread_cond_signal(&job->job_cond);
    ZSTD_pthread_mutex_unlock(&job->job_mutex);
}


/* ------------------------------------------ */
/* =====   Multi-threaded compression   ===== */
/* ------------------------------------------ */

typedef struct {
    range_t prefix;        /* read-only non-owned prefix buffer */
    buffer_t buffer;
    size_t filled;
} inBuff_t;

typedef struct {
    BYTE* buffer;     /* The round input buffer. All jobs get references
                       * to pieces of the buffer. ZSTDMT_tryGetInputRange()
                       * handles handing out job input buffers, and makes
                       * sure it doesn't overlap with any pieces still in use.
                       */
    size_t capacity;  /* The capacity of buffer. */
    size_t pos;       /* The position of the current inBuff in the round
                       * buffer. Updated past the end of the inBuff once
                       * the inBuff is sent to the worker thread.
                       * pos <= capacity.
                       */
} roundBuff_t;

static const roundBuff_t kNullRoundBuff = {NULL, 0, 0};

#define RSYNC_LENGTH 32

typedef struct {
    U64 hash;
    U64 hitMask;
    U64 primePower;
} rsyncState_t;
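
/* Rolling-hash note (illustrative): rsyncable mode keeps a rolling hash over
 * the last RSYNC_LENGTH (32) input bytes and ends a job when the low
 * rsyncBits bits of the hash all hit, i.e. on average once every 2^rsyncBits
 * bytes. For example, a 4 MB target job size gives
 * rsyncBits = highbit32(4) + 20 = 22, an average cut every 4 MiB
 * (see the rsyncable setup in ZSTDMT_initCStream_internal() below). */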

struct ZSTDMT_CCtx_s {
    POOL_ctx* factory;
    ZSTDMT_jobDescription* jobs;
    ZSTDMT_bufferPool* bufPool;
    ZSTDMT_CCtxPool* cctxPool;
    ZSTDMT_seqPool* seqPool;
    ZSTD_CCtx_params params;
    size_t targetSectionSize;
    size_t targetPrefixSize;
    int jobReady;   /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */
    inBuff_t inBuff;
    roundBuff_t roundBuff;
    serialState_t serial;
    rsyncState_t rsync;
    unsigned jobIDMask;
    unsigned doneJobID;
    unsigned nextJobID;
    unsigned frameEnded;
    unsigned allJobsCompleted;
    unsigned long long frameContentSize;
    unsigned long long consumed;
    unsigned long long produced;
    ZSTD_customMem cMem;
    ZSTD_CDict* cdictLocal;
    const ZSTD_CDict* cdict;
    unsigned providedFactory: 1;
};

static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem)
{
    U32 jobNb;
    if (jobTable == NULL) return;
    for (jobNb=0; jobNb<nbJobs; jobNb++) {
        ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex);
        ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond);
    }
    ZSTD_customFree(jobTable, cMem);
}

/* ZSTDMT_createJobsTable()
 * allocate and init a job table.
 * update *nbJobsPtr to next power of 2 value, as size of table */
static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem)
{
    U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1;
    U32 const nbJobs = 1 << nbJobsLog2;
    U32 jobNb;
    ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*)
        ZSTD_customCalloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
    int initError = 0;
    if (jobTable==NULL) return NULL;
    *nbJobsPtr = nbJobs;
    for (jobNb=0; jobNb<nbJobs; jobNb++) {
        initError |= ZSTD_pthread_mutex_init(&jobTable[jobNb].job_mutex, NULL);
        initError |= ZSTD_pthread_cond_init(&jobTable[jobNb].job_cond, NULL);
    }
    if (initError != 0) {
        ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem);
        return NULL;
    }
    return jobTable;
}
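
/* Sizing example (illustrative): requesting *nbJobsPtr == 6 yields
 * nbJobsLog2 = highbit32(6) + 1 = 3, so nbJobs = 8. Rounding the table up to
 * a power of 2 lets callers index it with a cheap mask,
 * jobs[jobID & (nbJobs-1)], which is exactly how mtctx->jobIDMask is used. */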

static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
    U32 nbJobs = nbWorkers + 2;
    if (nbJobs > mtctx->jobIDMask+1) {   /* need more job capacity */
        ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
        mtctx->jobIDMask = 0;
        mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem);
        if (mtctx->jobs==NULL) return ERROR(memory_allocation);
        assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0));   /* ensure nbJobs is a power of 2 */
        mtctx->jobIDMask = nbJobs - 1;
    }
    return 0;
}


/* ZSTDMT_CCtxParam_setNbWorkers():
 * Internal use only */
static size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
{
    return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers);
}

MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool)
{
    ZSTDMT_CCtx* mtctx;
    U32 nbJobs = nbWorkers + 2;
    int initError;
    DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbWorkers = %u)", nbWorkers);

    if (nbWorkers < 1) return NULL;
    nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX);
    if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL))
        /* invalid custom allocator */
        return NULL;

    mtctx = (ZSTDMT_CCtx*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtx), cMem);
    if (!mtctx) return NULL;
    ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
    mtctx->cMem = cMem;
    mtctx->allJobsCompleted = 1;
    if (pool != NULL) {
        mtctx->factory = pool;
        mtctx->providedFactory = 1;
    }
    else {
        mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem);
        mtctx->providedFactory = 0;
    }
    mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);
    assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0);   /* ensure nbJobs is a power of 2 */
    mtctx->jobIDMask = nbJobs - 1;
    mtctx->bufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
    mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem);
    mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem);
    initError = ZSTDMT_serialState_init(&mtctx->serial);
    mtctx->roundBuff = kNullRoundBuff;
    if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) {
        ZSTDMT_freeCCtx(mtctx);
        return NULL;
    }
    DEBUGLOG(3, "mt_cctx created, for %u threads", nbWorkers);
    return mtctx;
}

ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool)
{
#ifdef ZSTD_MULTITHREAD
    return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem, pool);
#else
    (void)nbWorkers;
    (void)cMem;
    (void)pool;
    return NULL;
#endif
}


/* ZSTDMT_releaseAllJobResources() :
 * note : ensure all workers are killed first ! */
static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
{
    unsigned jobID;
    DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
    for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
        /* Copy the mutex/cond out */
        ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex;
        ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond;

        DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);
        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);

        /* Clear the job description, but keep the mutex/cond */
        ZSTD_memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID]));
        mtctx->jobs[jobID].job_mutex = mutex;
        mtctx->jobs[jobID].job_cond = cond;
    }
    mtctx->inBuff.buffer = g_nullBuffer;
    mtctx->inBuff.filled = 0;
    mtctx->allJobsCompleted = 1;
}

static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx)
{
    DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted");
    while (mtctx->doneJobID < mtctx->nextJobID) {
        unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask;
        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
        while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
            DEBUGLOG(4, "waiting for jobCompleted signal from job %u", mtctx->doneJobID);   /* we want to block when waiting for data to flush */
            ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
        }
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
        mtctx->doneJobID++;
    }
}

size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
{
    if (mtctx==NULL) return 0;   /* compatible with free on NULL */
    if (!mtctx->providedFactory)
        POOL_free(mtctx->factory);   /* stop and free worker threads */
    ZSTDMT_releaseAllJobResources(mtctx);   /* release job resources into pools first */
    ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
    ZSTDMT_freeBufferPool(mtctx->bufPool);
    ZSTDMT_freeCCtxPool(mtctx->cctxPool);
    ZSTDMT_freeSeqPool(mtctx->seqPool);
    ZSTDMT_serialState_free(&mtctx->serial);
    ZSTD_freeCDict(mtctx->cdictLocal);
    if (mtctx->roundBuff.buffer)
        ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem);
    ZSTD_customFree(mtctx, mtctx->cMem);
    return 0;
}

size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
{
    if (mtctx == NULL) return 0;   /* supports sizeof NULL */
    return sizeof(*mtctx)
            + POOL_sizeof(mtctx->factory)
            + ZSTDMT_sizeof_bufferPool(mtctx->bufPool)
            + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)
            + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)
            + ZSTDMT_sizeof_seqPool(mtctx->seqPool)
            + ZSTD_sizeof_CDict(mtctx->cdictLocal)
            + mtctx->roundBuff.capacity;
}


/* ZSTDMT_resize() :
 * @return : error code if fails, 0 on success */
static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
{
    if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) , "");
    mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers);
    if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
    mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
    if (mtctx->cctxPool == NULL) return ERROR(memory_allocation);
    mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers);
    if (mtctx->seqPool == NULL) return ERROR(memory_allocation);
    ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
    return 0;
}


/*! ZSTDMT_updateCParams_whileCompressing() :
 *  Updates a selected set of compression parameters, remaining compatible with currently active frame.
 *  New parameters will be applied to next compression job. */
void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams)
{
    U32 const saved_wlog = mtctx->params.cParams.windowLog;   /* Do not modify windowLog while compressing */
    int const compressionLevel = cctxParams->compressionLevel;
    DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)",
                compressionLevel);
    mtctx->params.compressionLevel = compressionLevel;
    {   ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
        cParams.windowLog = saved_wlog;
        mtctx->params.cParams = cParams;
    }
}

/* ZSTDMT_getFrameProgression():
 * tells how much data has been consumed (input) and produced (output) for current frame.
 * able to count progression inside worker threads.
 * Note : mutex will be acquired during statistics collection inside workers. */
ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)
{
    ZSTD_frameProgression fps;
    DEBUGLOG(5, "ZSTDMT_getFrameProgression");
    fps.ingested = mtctx->consumed + mtctx->inBuff.filled;
    fps.consumed = mtctx->consumed;
    fps.produced = fps.flushed = mtctx->produced;
    fps.currentJobID = mtctx->nextJobID;
    fps.nbActiveWorkers = 0;
    {   unsigned jobNb;
        unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);
        DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)",
                    mtctx->doneJobID, lastJobNb, mtctx->jobReady)
        for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) {
            unsigned const wJobID = jobNb & mtctx->jobIDMask;
            ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID];
            ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
            {   size_t const cResult = jobPtr->cSize;
                size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
                size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
                assert(flushed <= produced);
                fps.ingested += jobPtr->src.size;
                fps.consumed += jobPtr->consumed;
                fps.produced += produced;
                fps.flushed += flushed;
                fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size);
            }
            ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
        }
    }
    return fps;
}


size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
{
    size_t toFlush;
    unsigned const jobID = mtctx->doneJobID;
    assert(jobID <= mtctx->nextJobID);
    if (jobID == mtctx->nextJobID) return 0;   /* no active job => nothing to flush */

    /* look into oldest non-fully-flushed job */
    {   unsigned const wJobID = jobID & mtctx->jobIDMask;
        ZSTDMT_jobDescription* const jobPtr = &mtctx->jobs[wJobID];
        ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
        {   size_t const cResult = jobPtr->cSize;
            size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
            size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
            assert(flushed <= produced);
            assert(jobPtr->consumed <= jobPtr->src.size);
            toFlush = produced - flushed;
            /* if toFlush==0, nothing is available to flush.
             * However, jobID is expected to still be active:
             * if jobID was already completed and fully flushed,
             * ZSTDMT_flushProduced() should have already moved onto next job.
             * Therefore, some input has not yet been consumed. */
            if (toFlush==0) {
                assert(jobPtr->consumed < jobPtr->src.size);
            }
        }
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
    }

    return toFlush;
}


/* ------------------------------------------ */
/* =====   Multi-threaded compression   ===== */
/* ------------------------------------------ */

static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
{
    unsigned jobLog;
    if (params->ldmParams.enableLdm) {
        /* In Long Range Mode, the windowLog is typically oversized.
         * In which case, it's preferable to determine the jobSize
         * based on cycleLog instead. */
        jobLog = MAX(21, ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy) + 3);
    } else {
        jobLog = MAX(20, params->cParams.windowLog + 2);
    }
    return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX);
}
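
/* Worked example (illustrative): at a typical level with windowLog == 23
 * (8 MB window) and LDM disabled, jobLog = MAX(20, 23+2) = 25, so the target
 * job size is 2^25 = 32 MB, subject to the ZSTDMT_JOBLOG_MAX cap. */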

static int ZSTDMT_overlapLog_default(ZSTD_strategy strat)
{
    switch(strat)
    {
        case ZSTD_btultra2:
            return 9;
        case ZSTD_btultra:
        case ZSTD_btopt:
            return 8;
        case ZSTD_btlazy2:
        case ZSTD_lazy2:
            return 7;
        case ZSTD_lazy:
        case ZSTD_greedy:
        case ZSTD_dfast:
        case ZSTD_fast:
        default:;
    }
    return 6;
}

static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat)
{
    assert(0 <= ovlog && ovlog <= 9);
    if (ovlog == 0) return ZSTDMT_overlapLog_default(strat);
    return ovlog;
}

static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params)
{
    int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy);
    int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog);
    assert(0 <= overlapRLog && overlapRLog <= 8);
    if (params->ldmParams.enableLdm) {
        /* In Long Range Mode, the windowLog is typically oversized.
         * In which case, it's preferable to determine the jobSize
         * based on chainLog instead.
         * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */
        ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
                - overlapRLog;
    }
    assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX);
    DEBUGLOG(4, "overlapLog : %i", params->overlapLog);
    DEBUGLOG(4, "overlap size : %i", 1 << ovLog);
    return (ovLog==0) ? 0 : (size_t)1 << ovLog;
}
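
/* Worked example (illustrative): with strategy ZSTD_btopt the default
 * overlapLog is 8, so overlapRLog = 9 - 8 = 1 and, LDM aside,
 * ovLog = windowLog - 1: each job reloads half a window of prior input as
 * its prefix. overlapLog == 9 would reload a full window, while
 * overlapLog == 1 yields no overlap at all. */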

/* ====================================== */
/* =======      Streaming API     ======= */
/* ====================================== */

size_t ZSTDMT_initCStream_internal(
        ZSTDMT_CCtx* mtctx,
        const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
        const ZSTD_CDict* cdict, ZSTD_CCtx_params params,
        unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)",
                (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx);

    /* params are supposed to be fully validated at this point */
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
    assert(!((dict) && (cdict)));   /* either dict or cdict, not both */

    /* init */
    if (params.nbWorkers != mtctx->params.nbWorkers)
        FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) , "");

    if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
    if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX;

    DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);

    if (mtctx->allJobsCompleted == 0) {   /* previous compression not correctly finished */
        ZSTDMT_waitForAllJobsCompleted(mtctx);
        ZSTDMT_releaseAllJobResources(mtctx);
        mtctx->allJobsCompleted = 1;
    }

    mtctx->params = params;
    mtctx->frameContentSize = pledgedSrcSize;
    if (dict) {
        ZSTD_freeCDict(mtctx->cdictLocal);
        mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
                                                      ZSTD_dlm_byCopy, dictContentType,   /* note : a loadPrefix becomes an internal CDict */
                                                      params.cParams, mtctx->cMem);
        mtctx->cdict = mtctx->cdictLocal;
        if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation);
    } else {
        ZSTD_freeCDict(mtctx->cdictLocal);
        mtctx->cdictLocal = NULL;
        mtctx->cdict = cdict;
    }

    mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&params);
    DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10));
    mtctx->targetSectionSize = params.jobSize;
    if (mtctx->targetSectionSize == 0) {
        mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(&params);
    }
    assert(mtctx->targetSectionSize <= (size_t)ZSTDMT_JOBSIZE_MAX);

    if (params.rsyncable) {
        /* Aim for the targetSectionSize as the average job size. */
        U32 const jobSizeMB = (U32)(mtctx->targetSectionSize >> 20);
        U32 const rsyncBits = ZSTD_highbit32(jobSizeMB) + 20;
        assert(jobSizeMB >= 1);
        DEBUGLOG(4, "rsyncLog = %u", rsyncBits);
        mtctx->rsync.hash = 0;
        mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1;
        mtctx->rsync.primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH);
    }
    if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize;   /* job size must be >= overlap size */
    DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), (U32)params.jobSize);
    DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10));
    ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize));
    {
        /* If ldm is enabled we need windowSize space. */
        size_t const windowSize = mtctx->params.ldmParams.enableLdm ? (1U << mtctx->params.cParams.windowLog) : 0;
        /* Two buffers of slack, plus extra space for the overlap.
         * This is the minimum slack that LDM works with. One extra because
         * flush might waste up to targetSectionSize-1 bytes. Another extra
         * for the overlap (if > 0), then one to fill which doesn't overlap
         * with the LDM window.
         */
        size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0);
        size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers;
        /* Compute the total size, and always have enough slack */
        size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1);
        size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers;
        size_t const capacity = MAX(windowSize, sectionsSize) + slackSize;
        if (mtctx->roundBuff.capacity < capacity) {
            if (mtctx->roundBuff.buffer)
                ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem);
            mtctx->roundBuff.buffer = (BYTE*)ZSTD_customMalloc(capacity, mtctx->cMem);
            if (mtctx->roundBuff.buffer == NULL) {
                mtctx->roundBuff.capacity = 0;
                return ERROR(memory_allocation);
            }
            mtctx->roundBuff.capacity = capacity;
        }
    }
    DEBUGLOG(4, "roundBuff capacity : %u KB", (U32)(mtctx->roundBuff.capacity>>10));
    mtctx->roundBuff.pos = 0;
    mtctx->inBuff.buffer = g_nullBuffer;
    mtctx->inBuff.filled = 0;
    mtctx->inBuff.prefix = kNullRange;
    mtctx->doneJobID = 0;
    mtctx->nextJobID = 0;
    mtctx->frameEnded = 0;
    mtctx->allJobsCompleted = 0;
    mtctx->consumed = 0;
    mtctx->produced = 0;
    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize,
                                 dict, dictSize, dictContentType))
        return ERROR(memory_allocation);
    return 0;
}
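
/* Capacity example (illustrative): with 4 workers, a 32 MB target section,
 * no LDM (windowSize == 0) and a non-zero overlap, the round buffer gets
 * MAX(0, 4*32 MB) + 3*32 MB = 224 MB: one section per worker plus three
 * sections of slack (two fixed, plus one because the overlap is non-zero). */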
1296
1297
1298 /* ZSTDMT_writeLastEmptyBlock()
1299 * Write a single empty block with an end-of-frame to finish a frame.
1300 * Job must be created from streaming variant.
1301 * This function is always successful if expected conditions are fulfilled.
1302 */
ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription * job)1303 static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
1304 {
1305 assert(job->lastJob == 1);
1306 assert(job->src.size == 0); /* last job is empty -> will be simplified into a last empty block */
1307 assert(job->firstJob == 0); /* cannot be first job, as it also needs to create frame header */
1308 assert(job->dstBuff.start == NULL); /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */
1309 job->dstBuff = ZSTDMT_getBuffer(job->bufPool);
1310 if (job->dstBuff.start == NULL) {
1311 job->cSize = ERROR(memory_allocation);
1312 return;
1313 }
1314 assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize); /* no buffer should ever be that small */
1315 job->src = kNullRange;
1316 job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity);
1317 assert(!ZSTD_isError(job->cSize));
1318 assert(job->consumed == 0);
1319 }

static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp)
{
    unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask;
    int const endFrame = (endOp == ZSTD_e_end);

    if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) {
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full");
        assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask));
        return 0;
    }

    if (!mtctx->jobReady) {
        BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start;
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ",
                    mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size);
        mtctx->jobs[jobID].src.start = src;
        mtctx->jobs[jobID].src.size = srcSize;
        assert(mtctx->inBuff.filled >= srcSize);
        mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix;
        mtctx->jobs[jobID].consumed = 0;
        mtctx->jobs[jobID].cSize = 0;
        mtctx->jobs[jobID].params = mtctx->params;
        mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? mtctx->cdict : NULL;
        mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize;
        mtctx->jobs[jobID].dstBuff = g_nullBuffer;
        mtctx->jobs[jobID].cctxPool = mtctx->cctxPool;
        mtctx->jobs[jobID].bufPool = mtctx->bufPool;
        mtctx->jobs[jobID].seqPool = mtctx->seqPool;
        mtctx->jobs[jobID].serial = &mtctx->serial;
        mtctx->jobs[jobID].jobID = mtctx->nextJobID;
        mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0);
        mtctx->jobs[jobID].lastJob = endFrame;
        mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0);
        mtctx->jobs[jobID].dstFlushed = 0;

        /* Update the round buffer pos, and release the input buffer so it can be reset */
        mtctx->roundBuff.pos += srcSize;
        mtctx->inBuff.buffer = g_nullBuffer;
        mtctx->inBuff.filled = 0;
        /* Set the prefix for the next job */
        if (!endFrame) {
            size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize);
            mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize;
            mtctx->inBuff.prefix.size = newPrefixSize;
        } else {   /* endFrame==1 => no need for another input buffer */
            mtctx->inBuff.prefix = kNullRange;
            mtctx->frameEnded = endFrame;
            if (mtctx->nextJobID == 0) {
                /* single job exception : checksum is already calculated directly within worker thread */
                mtctx->params.fParams.checksumFlag = 0;
        }   }

        if ( (srcSize == 0)
          && (mtctx->nextJobID>0) /* job 0 cannot be reduced to an empty block: it must also write the frame header */ ) {
            DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame");
            assert(endOp == ZSTD_e_end);   /* only possible case : need to end the frame with an empty last block */
            ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID);
            mtctx->nextJobID++;
            return 0;
        }
    }

    DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes  (end:%u, jobNb == %u (mod:%u))",
                mtctx->nextJobID,
                (U32)mtctx->jobs[jobID].src.size,
                mtctx->jobs[jobID].lastJob,
                mtctx->nextJobID,
                jobID);
    if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) {
        mtctx->nextJobID++;
        mtctx->jobReady = 0;
    } else {
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID);
        mtctx->jobReady = 1;
    }
    return 0;
}
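
/* Illustrative sketch of the job-table ring arithmetic above (hypothetical
 * values; jobIDMask is assumed to be the number of job slots minus 1, a power
 * of 2 minus 1). The table is full exactly when nextJobID is a whole ring
 * ahead of doneJobID, in which case both map to the same slot. */
#if 0
static void demo_jobRing(void)
{
    unsigned const nbJobs = 8;               /* must be a power of 2 */
    unsigned const jobIDMask = nbJobs - 1;   /* 7 */
    unsigned const doneJobID = 3, nextJobID = 11;   /* 8 jobs in flight */
    assert(nextJobID > doneJobID + jobIDMask);                   /* table full */
    assert((nextJobID & jobIDMask) == (doneJobID & jobIDMask));  /* same slot */
}
#endif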


/*! ZSTDMT_flushProduced() :
 *  Flush whatever data has been produced but not yet flushed in current job.
 *  Move to next job if current one is fully flushed.
 * `output` : `pos` will be updated with amount of data flushed.
 * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush.
 * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */
static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end)
{
    unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask;
    DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)",
                blockToFlush, mtctx->doneJobID, mtctx->nextJobID);
    assert(output->size >= output->pos);

    ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
    if (  blockToFlush
      && (mtctx->doneJobID < mtctx->nextJobID) ) {
        assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize);
        while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) {   /* nothing to flush */
            if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) {
                DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none",
                            mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size);
                break;
            }
            DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)",
                        mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
            ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex);   /* block when nothing to flush but some to come */
    }   }

    /* try to flush something */
    {   size_t cSize = mtctx->jobs[wJobID].cSize;                  /* shared */
        size_t const srcConsumed = mtctx->jobs[wJobID].consumed;   /* shared */
        size_t const srcSize = mtctx->jobs[wJobID].src.size;       /* read-only, could be done after mutex lock, but no-declaration-after-statement */
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
        if (ZSTD_isError(cSize)) {
            DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s",
                        mtctx->doneJobID, ZSTD_getErrorName(cSize));
            ZSTDMT_waitForAllJobsCompleted(mtctx);
            ZSTDMT_releaseAllJobResources(mtctx);
            return cSize;
        }
        /* add frame checksum if necessary (can only happen once) */
        assert(srcConsumed <= srcSize);
        if ( (srcConsumed == srcSize)   /* job completed -> worker no longer active */
          && mtctx->jobs[wJobID].frameChecksumNeeded ) {
            U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
            DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum);
            MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum);
            cSize += 4;
            mtctx->jobs[wJobID].cSize += 4;   /* can write this shared value, as worker is no longer active */
            mtctx->jobs[wJobID].frameChecksumNeeded = 0;
        }

        if (cSize > 0) {   /* compression is ongoing or completed */
            size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos);
            DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)",
                        (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize);
            assert(mtctx->doneJobID < mtctx->nextJobID);
            assert(cSize >= mtctx->jobs[wJobID].dstFlushed);
            assert(mtctx->jobs[wJobID].dstBuff.start != NULL);
            if (toFlush > 0) {
                ZSTD_memcpy((char*)output->dst + output->pos,
                    (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed,
                    toFlush);
            }
            output->pos += toFlush;
            mtctx->jobs[wJobID].dstFlushed += toFlush;   /* can write : this value is only used by mtctx */

            if ( (srcConsumed == srcSize)   /* job is completed */
              && (mtctx->jobs[wJobID].dstFlushed == cSize) ) {   /* output buffer fully flushed => free this job position */
                DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one",
                            mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
                ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff);
                DEBUGLOG(5, "dstBuffer released");
                mtctx->jobs[wJobID].dstBuff = g_nullBuffer;
                mtctx->jobs[wJobID].cSize = 0;   /* ensure this job slot is considered "not started" in future check */
                mtctx->consumed += srcSize;
                mtctx->produced += cSize;
                mtctx->doneJobID++;
        }   }

        /* return value : how many bytes left in buffer ; fake it to 1 when unknown but >0 */
        if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed);
        if (srcSize > srcConsumed) return 1;   /* current job not completely compressed */
    }
    if (mtctx->doneJobID < mtctx->nextJobID) return 1;   /* some more jobs ongoing */
    if (mtctx->jobReady) return 1;   /* one job is ready to push, just not yet in the list */
    if (mtctx->inBuff.filled > 0) return 1;   /* input is not empty, and still needs to be converted into a job */
    mtctx->allJobsCompleted = mtctx->frameEnded;   /* all jobs are entirely flushed => if this one is last one, frame is completed */
    if (end == ZSTD_e_end) return !mtctx->frameEnded;   /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? */
    return 0;   /* internal buffers fully flushed */
}
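
/* Illustrative sketch (hypothetical caller, not part of this file's control
 * flow): how a drain loop would interpret the return value contract of
 * ZSTDMT_flushProduced() : keep calling until it returns 0 or an error, or
 * until the output buffer is full. */
#if 0
static size_t demo_drain(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
{
    size_t remaining;
    do {
        remaining = ZSTDMT_flushProduced(mtctx, output, 1 /* blockToFlush */, ZSTD_e_flush);
        if (ZSTD_isError(remaining)) return remaining;   /* compression error */
    } while (remaining > 0 && output->pos < output->size);
    return remaining;   /* 0 : fully flushed ; >0 : caller must provide more output space */
}
#endif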

/**
 * Returns the range of data used by the earliest job that is not yet complete.
 * If the data of the first job is broken up into two segments, we cover both
 * sections.
 */
static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)
{
    unsigned const firstJobID = mtctx->doneJobID;
    unsigned const lastJobID = mtctx->nextJobID;
    unsigned jobID;

    for (jobID = firstJobID; jobID < lastJobID; ++jobID) {
        unsigned const wJobID = jobID & mtctx->jobIDMask;
        size_t consumed;

        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
        consumed = mtctx->jobs[wJobID].consumed;
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);

        if (consumed < mtctx->jobs[wJobID].src.size) {
            range_t range = mtctx->jobs[wJobID].prefix;
            if (range.size == 0) {
                /* Empty prefix */
                range = mtctx->jobs[wJobID].src;
            }
            /* Job source in multiple segments not supported yet */
            assert(range.start <= mtctx->jobs[wJobID].src.start);
            return range;
        }
    }
    return kNullRange;
}

/**
 * Returns non-zero iff buffer and range overlap.
 */
static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)
{
    BYTE const* const bufferStart = (BYTE const*)buffer.start;
    BYTE const* const bufferEnd = bufferStart + buffer.capacity;
    BYTE const* const rangeStart = (BYTE const*)range.start;
    BYTE const* const rangeEnd = range.size != 0 ? rangeStart + range.size : rangeStart;

    if (rangeStart == NULL || bufferStart == NULL)
        return 0;
    /* Empty ranges cannot overlap */
    if (bufferStart == bufferEnd || rangeStart == rangeEnd)
        return 0;

    return bufferStart < rangeEnd && rangeStart < bufferEnd;
}
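
/* Illustrative sketch (hypothetical values): the half-open interval test
 * `bufferStart < rangeEnd && rangeStart < bufferEnd` reports overlap only
 * when the two intervals share at least one byte; adjacent intervals do not
 * overlap. */
#if 0
static void demo_overlap(void)
{
    BYTE mem[100];
    buffer_t const buf = { mem, 10 };           /* [mem+0,  mem+10) */
    range_t const touching = { mem + 10, 5 };   /* [mem+10, mem+15) : adjacent, no overlap */
    range_t const crossing = { mem + 5, 10 };   /* [mem+5,  mem+15) : overlaps */
    assert(!ZSTDMT_isOverlapped(buf, touching));
    assert( ZSTDMT_isOverlapped(buf, crossing));
}
#endif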

static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)
{
    range_t extDict;
    range_t prefix;

    DEBUGLOG(5, "ZSTDMT_doesOverlapWindow");
    extDict.start = window.dictBase + window.lowLimit;
    extDict.size = window.dictLimit - window.lowLimit;

    prefix.start = window.base + window.dictLimit;
    prefix.size = window.nextSrc - (window.base + window.dictLimit);
    DEBUGLOG(5, "extDict [0x%zx, 0x%zx)",
                (size_t)extDict.start,
                (size_t)extDict.start + extDict.size);
    DEBUGLOG(5, "prefix [0x%zx, 0x%zx)",
                (size_t)prefix.start,
                (size_t)prefix.start + prefix.size);

    return ZSTDMT_isOverlapped(buffer, extDict)
        || ZSTDMT_isOverlapped(buffer, prefix);
}
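
/* Window layout assumed above (a sketch of the ZSTD_window_t conventions used
 * by the code, for orientation only):
 *
 *     dictBase                               base
 *        |------ extDict ------|                |------ prefix ------|
 *     lowLimit            dictLimit         dictLimit            nextSrc
 *
 * extDict spans [dictBase+lowLimit, dictBase+dictLimit) and prefix spans
 * [base+dictLimit, nextSrc) ; the buffer is considered in use by LDM while
 * it overlaps either range. */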

static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer)
{
    if (mtctx->params.ldmParams.enableLdm) {
        ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;
        DEBUGLOG(5, "ZSTDMT_waitForLdmComplete");
        DEBUGLOG(5, "source [0x%zx, 0x%zx)",
                    (size_t)buffer.start,
                    (size_t)buffer.start + buffer.capacity);
        ZSTD_PTHREAD_MUTEX_LOCK(mutex);
        while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) {
            DEBUGLOG(5, "Waiting for LDM to finish...");
            ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex);
        }
        DEBUGLOG(6, "Done waiting for LDM to finish");
        ZSTD_pthread_mutex_unlock(mutex);
    }
}

/**
 * Attempts to set the inBuff to the next section to fill.
 * If any part of the new section is still in use we give up.
 * Returns non-zero when it succeeds in acquiring the input range.
 */
static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
{
    range_t const inUse = ZSTDMT_getInputDataInUse(mtctx);
    size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos;
    size_t const target = mtctx->targetSectionSize;
    buffer_t buffer;

    DEBUGLOG(5, "ZSTDMT_tryGetInputRange");
    assert(mtctx->inBuff.buffer.start == NULL);
    assert(mtctx->roundBuff.capacity >= target);

    if (spaceLeft < target) {
        /* ZSTD_invalidateRepCodes() doesn't work for extDict variants.
         * Simply copy the prefix to the beginning in that case.
         */
        BYTE* const start = (BYTE*)mtctx->roundBuff.buffer;
        size_t const prefixSize = mtctx->inBuff.prefix.size;

        buffer.start = start;
        buffer.capacity = prefixSize;
        if (ZSTDMT_isOverlapped(buffer, inUse)) {
            DEBUGLOG(5, "Waiting for buffer...");
            return 0;
        }
        ZSTDMT_waitForLdmComplete(mtctx, buffer);
        ZSTD_memmove(start, mtctx->inBuff.prefix.start, prefixSize);
        mtctx->inBuff.prefix.start = start;
        mtctx->roundBuff.pos = prefixSize;
    }
    buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos;
    buffer.capacity = target;

    if (ZSTDMT_isOverlapped(buffer, inUse)) {
        DEBUGLOG(5, "Waiting for buffer...");
        return 0;
    }
    assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix));

    ZSTDMT_waitForLdmComplete(mtctx, buffer);

    DEBUGLOG(5, "Using prefix range [%zx, %zx)",
                (size_t)mtctx->inBuff.prefix.start,
                (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size);
    DEBUGLOG(5, "Using source range [%zx, %zx)",
                (size_t)buffer.start,
                (size_t)buffer.start + buffer.capacity);

    mtctx->inBuff.buffer = buffer;
    mtctx->inBuff.filled = 0;
    assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity);
    return 1;
}
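
/* Worked example of the wrap-around above (hypothetical sizes): with
 * roundBuff.capacity = 14 MB, targetSectionSize = 2 MB and
 * roundBuff.pos = 13 MB, spaceLeft = 1 MB < 2 MB, so the current prefix is
 * memmove'd back to offset 0 and the next section is carved out at
 * [prefixSize, prefixSize + 2 MB). Nothing is acquired (return 0) while any
 * in-flight job still reads from either region. */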

typedef struct {
    size_t toLoad;   /* The number of bytes to load from the input. */
    int flush;       /* Boolean indicating whether we must flush because we found a synchronization point. */
} syncPoint_t;

/**
 * Searches through the input for a synchronization point. If one is found, we
 * will instruct the caller to flush, and return the number of bytes to load.
 * Otherwise, we will load as many bytes as possible and instruct the caller
 * to continue as normal.
 */
static syncPoint_t
findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
{
    BYTE const* const istart = (BYTE const*)input.src + input.pos;
    U64 const primePower = mtctx->rsync.primePower;
    U64 const hitMask = mtctx->rsync.hitMask;

    syncPoint_t syncPoint;
    U64 hash;
    BYTE const* prev;
    size_t pos;

    syncPoint.toLoad = MIN(input.size - input.pos, mtctx->targetSectionSize - mtctx->inBuff.filled);
    syncPoint.flush = 0;
    if (!mtctx->params.rsyncable)
        /* Rsync is disabled. */
        return syncPoint;
    if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH)
        /* Not enough to compute the hash.
         * We will miss any synchronization points in this RSYNC_LENGTH byte
         * window. However, since it depends only on the internal buffers, if
         * the state is already synchronized, we will remain synchronized.
         * Additionally, the probability that we miss a synchronization point
         * is low: RSYNC_LENGTH / targetSectionSize.
         */
        return syncPoint;
    /* Initialize the loop variables. */
    if (mtctx->inBuff.filled >= RSYNC_LENGTH) {
        /* We have enough bytes buffered to initialize the hash.
         * Start scanning at the beginning of the input.
         */
        pos = 0;
        prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
        hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
        if ((hash & hitMask) == hitMask) {
            /* We're already at a sync point so don't load any more until
             * we're able to flush this sync point.
             * This likely happened because the job table was full so we
             * couldn't add our job.
             */
            syncPoint.toLoad = 0;
            syncPoint.flush = 1;
            return syncPoint;
        }
    } else {
        /* We don't have enough bytes buffered to initialize the hash, but
         * we know we have at least RSYNC_LENGTH bytes total.
         * Start scanning after the first RSYNC_LENGTH bytes less the bytes
         * already buffered.
         */
        pos = RSYNC_LENGTH - mtctx->inBuff.filled;
        prev = (BYTE const*)mtctx->inBuff.buffer.start - pos;
        hash = ZSTD_rollingHash_compute(mtctx->inBuff.buffer.start, mtctx->inBuff.filled);
        hash = ZSTD_rollingHash_append(hash, istart, pos);
    }
    /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll
     * through the input. If we hit a synchronization point, then cut the
     * job off, and tell the compressor to flush the job. Otherwise, load
     * all the bytes and continue as normal.
     * If we go too long without a synchronization point (targetSectionSize)
     * then a block will be emitted anyway, but this is okay, since if we
     * are already synchronized we will remain synchronized.
     */
    for (; pos < syncPoint.toLoad; ++pos) {
        BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH];
        /* if (pos >= RSYNC_LENGTH) assert(ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); */
        hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower);
        if ((hash & hitMask) == hitMask) {
            syncPoint.toLoad = pos + 1;
            syncPoint.flush = 1;
            break;
        }
    }
    return syncPoint;
}
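
/* Illustrative sketch of the rolling-hash idea used above (a generic
 * Rabin-Karp rotation with a hypothetical multiplier; the actual
 * ZSTD_rollingHash_* helpers may use different constants and offsets).
 * Rotating out the oldest byte and rotating in the newest one yields the
 * same hash as recomputing over the shifted window. */
#if 0
#define DEMO_PRIME 0x9E3779B185EBCA87ULL   /* hypothetical multiplier */
static U64 demo_hash(BYTE const* buf, size_t size)
{
    U64 h = 0;
    size_t i;
    for (i = 0; i < size; ++i) h = h * DEMO_PRIME + buf[i];
    return h;
}
static U64 demo_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)
{
    /* primePower == DEMO_PRIME^(windowSize-1), so this removes the oldest byte */
    return (hash - toRemove * primePower) * DEMO_PRIME + toAdd;
}
static void demo_rollingHash(void)
{
    BYTE const data[5] = { 1, 2, 3, 4, 5 };
    U64 const primePower = DEMO_PRIME * DEMO_PRIME * DEMO_PRIME;   /* window of 4 */
    U64 h = demo_hash(data, 4);                    /* hash of bytes 1..4 */
    h = demo_rotate(h, data[0], data[4], primePower);
    assert(h == demo_hash(data + 1, 4));           /* equals hash of bytes 2..5 */
}
#endif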

size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx)
{
    size_t hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled;
    if (hintInSize==0) hintInSize = mtctx->targetSectionSize;
    return hintInSize;
}

/** ZSTDMT_compressStream_generic() :
 *  internal use only - exposed to be invoked from zstd_compress.c
 *  assumption : output and input are valid (pos <= size)
 * @return : minimum amount of data remaining to flush, 0 if none */
size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
                                     ZSTD_outBuffer* output,
                                     ZSTD_inBuffer* input,
                                     ZSTD_EndDirective endOp)
{
    unsigned forwardInputProgress = 0;
    DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)",
                (U32)endOp, (U32)(input->size - input->pos));
    assert(output->pos <= output->size);
    assert(input->pos  <= input->size);

    if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
        /* current frame being ended. Only flush/end are allowed */
        return ERROR(stage_wrong);
    }

    /* fill input buffer */
    if ( (!mtctx->jobReady)
      && (input->size > input->pos) ) {   /* support NULL input */
        if (mtctx->inBuff.buffer.start == NULL) {
            assert(mtctx->inBuff.filled == 0);   /* nothing can be buffered while there is no input buffer */
            if (!ZSTDMT_tryGetInputRange(mtctx)) {
                /* It is only possible for this operation to fail if there are
                 * still compression jobs ongoing.
                 */
                DEBUGLOG(5, "ZSTDMT_tryGetInputRange failed");
                assert(mtctx->doneJobID != mtctx->nextJobID);
            } else
                DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start);
        }
        if (mtctx->inBuff.buffer.start != NULL) {
            syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input);
            if (syncPoint.flush && endOp == ZSTD_e_continue) {
                endOp = ZSTD_e_flush;
            }
            assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize);
            DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u",
                        (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);
            ZSTD_memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad);
            input->pos += syncPoint.toLoad;
            mtctx->inBuff.filled += syncPoint.toLoad;
            forwardInputProgress = syncPoint.toLoad>0;
        }
    }
    if ((input->pos < input->size) && (endOp == ZSTD_e_end)) {
        /* Can't end yet because the input is not fully consumed.
         * We are in one of these cases:
         * - mtctx->inBuff is NULL & empty: we couldn't get an input buffer so don't create a new job.
         * - We filled the input buffer: flush this job but don't end the frame.
         * - We hit a synchronization point: flush this job but don't end the frame.
         */
        assert(mtctx->inBuff.filled == 0 || mtctx->inBuff.filled == mtctx->targetSectionSize || mtctx->params.rsyncable);
        endOp = ZSTD_e_flush;
    }

    if ( (mtctx->jobReady)
      || (mtctx->inBuff.filled >= mtctx->targetSectionSize)   /* filled enough : let's compress */
      || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0))   /* something to flush : let's go */
      || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) {   /* must finish the frame with a zero-size block */
        size_t const jobSize = mtctx->inBuff.filled;
        assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) , "");
    }

    /* check for potential compressed data ready to be flushed */
    {   size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp);   /* block if there was no forward input progress */
        if (input->pos < input->size) return MAX(remainingToFlush, 1);   /* input not consumed : do not end flush yet */
        DEBUGLOG(5, "end of ZSTDMT_compressStream_generic: remainingToFlush = %u", (U32)remainingToFlush);
        return remainingToFlush;
    }
}
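
/* Illustrative sketch (hypothetical user code, using the public streaming API
 * which routes into this file when nbWorkers >= 1): a typical caller loop
 * keeps pumping with ZSTD_e_end until 0 bytes remain to flush. */
#if 0
#include <zstd.h>
static int demo_mtStream(const void* src, size_t srcSize, void* dst, size_t dstCapacity)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_inBuffer in = { src, srcSize, 0 };
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    size_t remaining;
    if (cctx == NULL) return -1;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);   /* enable the multithreaded path */
    do {
        remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
        if (ZSTD_isError(remaining)) { ZSTD_freeCCtx(cctx); return -1; }
    } while (remaining != 0);   /* 0 => frame fully written */
    ZSTD_freeCCtx(cctx);
    return 0;
}
#endif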