/*
 * ring_buffer.c: a lock-free single-producer/single-consumer ring buffer
 * over a fixed power-of-two backing store, with optional "views" onto
 * caller-provided memory and a small producer/consumer hangup protocol.
 */

void ring_buffer_init(struct ring_buffer* r) {
    r->host_version = 1;
    r->write_pos = 0;
    r->read_pos = 0;

    r->read_live_count = 0;
    r->read_yield_count = 0;
    r->read_sleep_us_count = 0;

    r->state = 0;
}

bool ring_buffer_can_write(const struct ring_buffer* r, uint32_t bytes) {
    uint32_t read_view;
    __atomic_load(&r->read_pos, &read_view, __ATOMIC_SEQ_CST);
    return get_ring_pos(read_view - r->write_pos - 1) >= bytes;
}

bool ring_buffer_can_read(const struct ring_buffer* r, uint32_t bytes) {
    uint32_t write_view;
    __atomic_load(&r->write_pos, &write_view, __ATOMIC_SEQ_CST);
    return get_ring_pos(write_view - r->read_pos) >= bytes;
}

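/*
 * Both predicates reduce free-running 32-bit positions into range with
 * get_ring_pos, which is not among the lines captured above. A minimal
 * sketch, assuming RING_BUFFER_SIZE is a power of two and RING_BUFFER_MASK
 * is RING_BUFFER_SIZE - 1. The extra "- 1" in the write check keeps one
 * byte permanently in reserve, so read_pos == write_pos always means
 * empty and never full.
 */
static uint32_t get_ring_pos(uint32_t index) {
    /* Correct even across uint32_t wraparound, because the size is a
     * power of two. */
    return index & RING_BUFFER_MASK;
}
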
long ring_buffer_write(
    struct ring_buffer* r, const void* data, uint32_t step_size, uint32_t steps) {
    const uint8_t* src = (const uint8_t*)data;
    uint32_t i;
    for (i = 0; i < steps; ++i) {
        if (!ring_buffer_can_write(r, step_size)) {
            return (long)i;  /* partial: i steps completed */
        }
        /* A step that crosses the end of the backing store is split in two. */
        uint32_t available_at_end =
            RING_BUFFER_SIZE - get_ring_pos(r->write_pos);
        if (step_size > available_at_end) {
            memcpy(
                &r->buf[get_ring_pos(r->write_pos)],
                src + i * step_size, available_at_end);
            memcpy(
                &r->buf[get_ring_pos(r->write_pos + available_at_end)],
                src + i * step_size + available_at_end,
                step_size - available_at_end);
        } else {
            memcpy(
                &r->buf[get_ring_pos(r->write_pos)],
                src + i * step_size, step_size);
        }
        __atomic_add_fetch(&r->write_pos, step_size, __ATOMIC_SEQ_CST);
    }
    return (long)steps;
}

long ring_buffer_read(
    struct ring_buffer* r, void* data, uint32_t step_size, uint32_t steps) {
    uint8_t* dst = (uint8_t*)data;
    uint32_t i;
    for (i = 0; i < steps; ++i) {
        if (!ring_buffer_can_read(r, step_size)) {
            return (long)i;  /* partial: i steps completed */
        }
        /* A step that crosses the end of the backing store is split in two. */
        uint32_t available_at_end =
            RING_BUFFER_SIZE - get_ring_pos(r->read_pos);
        if (step_size > available_at_end) {
            memcpy(
                dst + i * step_size,
                &r->buf[get_ring_pos(r->read_pos)], available_at_end);
            memcpy(
                dst + i * step_size + available_at_end,
                &r->buf[get_ring_pos(r->read_pos + available_at_end)],
                step_size - available_at_end);
        } else {
            memcpy(
                dst + i * step_size,
                &r->buf[get_ring_pos(r->read_pos)], step_size);
        }
        __atomic_add_fetch(&r->read_pos, step_size, __ATOMIC_SEQ_CST);
    }
    return (long)steps;
}

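/*
 * A minimal round-trip through the copying pair above. The header path
 * and the names my_ring/roundtrip_example are illustrative only.
 */
#include "ring_buffer.h"  /* assumed header for these functions */

static struct ring_buffer my_ring;

void roundtrip_example(void) {
    ring_buffer_init(&my_ring);
    const char msg[] = "hello";
    char out[sizeof(msg)];
    /* One step of sizeof(msg) bytes; the long return counts completed steps. */
    if (ring_buffer_write(&my_ring, msg, sizeof(msg), 1) == 1) {
        ring_buffer_read(&my_ring, out, sizeof(out), 1);
    }
}
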
long ring_buffer_advance_write(
    struct ring_buffer* r, uint32_t step_size, uint32_t steps) {
    uint32_t i;
    for (i = 0; i < steps; ++i) {
        if (!ring_buffer_can_write(r, step_size)) {
            return (long)i;
        }
        __atomic_add_fetch(&r->write_pos, step_size, __ATOMIC_SEQ_CST);
    }
    return (long)steps;
}

long ring_buffer_advance_read(
    struct ring_buffer* r, uint32_t step_size, uint32_t steps) {
    uint32_t i;
    for (i = 0; i < steps; ++i) {
        if (!ring_buffer_can_read(r, step_size)) {
            return (long)i;
        }
        __atomic_add_fetch(&r->read_pos, step_size, __ATOMIC_SEQ_CST);
    }
    return (long)steps;
}

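/*
 * The advance variants move a position without copying any payload, for
 * callers that fill or drain r->buf by other means. An illustrative
 * sketch (skip_record is not part of the file) that discards one pending
 * record without copying it out:
 */
static void skip_record(struct ring_buffer* r, uint32_t rec_size) {
    if (ring_buffer_can_read(r, rec_size)) {
        ring_buffer_advance_read(r, rec_size, 1);
    }
}
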
void ring_buffer_view_init(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    uint8_t* buf,
    uint32_t size) {
    ring_buffer_init(r);
    /* size is assumed to be a power of two so masking can wrap positions */
    v->buf = buf;
    v->size = size;
    v->mask = size - 1;
}

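/*
 * A view points the same control structure at caller-provided storage,
 * such as a shared-memory mapping. A sketch: the mapping source and the
 * 64 KiB size are illustrative, and the size must be a power of two for
 * the masking to work.
 */
static struct ring_buffer ctl;
static struct ring_buffer_view view;

void attach_view(uint8_t* shared_mem) {  /* e.g. obtained via mmap */
    ring_buffer_view_init(&ctl, &view, shared_mem, 64 * 1024);
}
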
bool ring_buffer_view_can_write(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v,
    uint32_t bytes) {
    uint32_t read_view;
    __atomic_load(&r->read_pos, &read_view, __ATOMIC_SEQ_CST);
    return ring_buffer_view_get_ring_pos(
        v, read_view - r->write_pos - 1) >= bytes;
}

bool ring_buffer_view_can_read(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v,
    uint32_t bytes) {
    uint32_t write_view;
    __atomic_load(&r->write_pos, &write_view, __ATOMIC_SEQ_CST);
    return ring_buffer_view_get_ring_pos(
        v, write_view - r->read_pos) >= bytes;
}

uint32_t ring_buffer_available_read(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v) {
    uint32_t write_view;
    __atomic_load(&r->write_pos, &write_view, __ATOMIC_SEQ_CST);
    if (v) {
        return ring_buffer_view_get_ring_pos(
            v, write_view - r->read_pos);
    }
    return get_ring_pos(write_view - r->read_pos);
}

uint32_t ring_buffer_available_write(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v) {
    uint32_t read_view;
    __atomic_load(&r->read_pos, &read_view, __ATOMIC_SEQ_CST);
    if (v) {
        return ring_buffer_view_get_ring_pos(
            v, read_view - r->write_pos - 1);
    }
    return get_ring_pos(read_view - r->write_pos - 1);
}

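/*
 * These helpers make the one-byte reserve visible. A worked example as a
 * sketch: on an illustrative 8-byte ring, writing 7 bytes from empty
 * exhausts the writable space, because (0 - 7 - 1) masked to 3 bits is 0.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t pos8(uint32_t i) { return i & 7; }  /* get_ring_pos for size 8 */

void reserve_byte_demo(void) {
    uint32_t write_pos = 7, read_pos = 0;         /* 7 bytes written, none read */
    assert(pos8(read_pos - write_pos - 1) == 0);  /* full: no writable space */
    assert(pos8(write_pos - read_pos) == 7);      /* 7 bytes readable */
}
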
int ring_buffer_copy_contents(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v,
    uint32_t wanted_bytes,
    uint8_t* res) {
    uint32_t total_available =
        ring_buffer_available_read(r, v);
    /* read_pos, not write_pos: we are measuring the unread region. */
    uint32_t available_at_end = v
        ? v->size - ring_buffer_view_get_ring_pos(v, r->read_pos)
        : RING_BUFFER_SIZE - get_ring_pos(r->read_pos);
    if (total_available < wanted_bytes) {
        return -1;
    }
    if (wanted_bytes > available_at_end) {
        uint32_t remaining = wanted_bytes - available_at_end;
        if (v) {
            memcpy(res,
                   &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos)],
                   available_at_end);
            memcpy(res + available_at_end,
                   &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos + available_at_end)],
                   remaining);
        } else {
            memcpy(res,
                   &r->buf[get_ring_pos(r->read_pos)],
                   available_at_end);
            memcpy(res + available_at_end,
                   &r->buf[get_ring_pos(r->read_pos + available_at_end)],
                   remaining);
        }
    } else if (v) {
        memcpy(res,
               &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos)],
               wanted_bytes);
    } else {
        memcpy(res,
               &r->buf[get_ring_pos(r->read_pos)],
               wanted_bytes);
    }
    return 0;
}

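/*
 * Unlike ring_buffer_read, this call peeks: read_pos is left untouched,
 * which suits snapshotting or debugging a live ring. A hedged usage
 * sketch; peek_pending and the 256-byte buffer are illustrative only.
 */
void peek_pending(const struct ring_buffer* r, const struct ring_buffer_view* v) {
    uint8_t snapshot[256];
    uint32_t avail = ring_buffer_available_read(r, v);
    uint32_t want =
        avail < (uint32_t)sizeof(snapshot) ? avail : (uint32_t)sizeof(snapshot);
    if (ring_buffer_copy_contents(r, v, want, snapshot) == 0) {
        /* snapshot[0..want) now mirrors the unread region */
    }
}
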
long ring_buffer_view_write(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    const void* data, uint32_t step_size, uint32_t steps) {
    const uint8_t* src = (const uint8_t*)data;
    uint32_t i;
    for (i = 0; i < steps; ++i) {
        if (!ring_buffer_view_can_write(r, v, step_size)) {
            return (long)i;
        }
        uint32_t available_at_end =
            v->size - ring_buffer_view_get_ring_pos(v, r->write_pos);
        if (step_size > available_at_end) {
            memcpy(
                &v->buf[ring_buffer_view_get_ring_pos(v, r->write_pos)],
                src + i * step_size, available_at_end);
            memcpy(
                &v->buf[ring_buffer_view_get_ring_pos(v, r->write_pos + available_at_end)],
                src + i * step_size + available_at_end,
                step_size - available_at_end);
        } else {
            memcpy(
                &v->buf[ring_buffer_view_get_ring_pos(v, r->write_pos)],
                src + i * step_size, step_size);
        }
        __atomic_add_fetch(&r->write_pos, step_size, __ATOMIC_SEQ_CST);
    }
    return (long)steps;
}

long ring_buffer_view_read(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    void* data, uint32_t step_size, uint32_t steps) {
    uint8_t* dst = (uint8_t*)data;
    uint32_t i;
    for (i = 0; i < steps; ++i) {
        if (!ring_buffer_view_can_read(r, v, step_size)) {
            return (long)i;
        }
        uint32_t available_at_end =
            v->size - ring_buffer_view_get_ring_pos(v, r->read_pos);
        if (step_size > available_at_end) {
            memcpy(
                dst + i * step_size,
                &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos)],
                available_at_end);
            memcpy(
                dst + i * step_size + available_at_end,
                &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos + available_at_end)],
                step_size - available_at_end);
        } else {
            memcpy(
                dst + i * step_size,
                &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos)],
                step_size);
        }
        __atomic_add_fetch(&r->read_pos, step_size, __ATOMIC_SEQ_CST);
    }
    return (long)steps;
}

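/*
 * With a view attached (see the sketch after ring_buffer_view_init),
 * reads and writes keep the same step semantics but go through v->buf.
 * Illustrative; push_packet is not part of the file:
 */
long push_packet(struct ring_buffer* r, struct ring_buffer_view* v,
                 const uint8_t packet[16]) {
    return ring_buffer_view_write(r, v, packet, 16, 1);  /* 1 on success, 0 if full */
}
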
void ring_buffer_wait_write(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v,
    uint32_t bytes,
    uint64_t timeout_us) {
    bool can_write =
        v ? ring_buffer_view_can_write(r, v, bytes) :
            ring_buffer_can_write(r, bytes);
    while (!can_write) {
        /* ring_buffer_yield() is assumed: a static backoff helper in this
         * file; timeout_us handling is not among the matched lines. */
        ring_buffer_yield();
        can_write =
            v ? ring_buffer_view_can_write(r, v, bytes) :
                ring_buffer_can_write(r, bytes);
    }
}

void ring_buffer_wait_read(
    const struct ring_buffer* r,
    const struct ring_buffer_view* v,
    uint32_t bytes,
    uint64_t timeout_us) {
    bool can_read =
        v ? ring_buffer_view_can_read(r, v, bytes) :
            ring_buffer_can_read(r, bytes);
    while (!can_read) {
        /* Cheap backoff first: yield and account for it. */
        ring_buffer_yield();
        ((struct ring_buffer*)r)->read_yield_count++;
        /* After yielding alone has not helped, sleep in 2 ms slices and
         * account for those too. */
        if (should_sleep_now /* placeholder: the condition is not among
                                the matched lines */) {
            ring_buffer_sleep();
            ((struct ring_buffer*)r)->read_sleep_us_count += 2000;
        }
        can_read =
            v ? ring_buffer_view_can_read(r, v, bytes) :
                ring_buffer_can_read(r, bytes);
    }
    /* The casts discard const solely to update diagnostic counters. */
    ((struct ring_buffer*)r)->read_live_count++;
}

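/*
 * A sketch of surfacing the wait diagnostics; report_read_stats is not
 * part of the file, and the counter fields are assumed to be uint32_t as
 * initialized in ring_buffer_init above.
 */
#include <stdio.h>

void report_read_stats(const struct ring_buffer* r) {
    printf("waits satisfied:       %u\n", r->read_live_count);
    printf("yields while waiting:  %u\n", r->read_yield_count);
    printf("slept while waiting:   %u us\n", r->read_sleep_us_count);
}
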
static uint32_t get_step_size(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    uint32_t bytes) {
    /* Chunk large transfers: never claim more than half the ring at once. */
    uint32_t available = v ? (v->size >> 1) : (RING_BUFFER_SIZE >> 1);
    return available < bytes ? available : bytes;
}

void ring_buffer_write_fully(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    const void* data,
    uint32_t bytes) {
    ring_buffer_write_fully_with_abort(r, v, data, bytes, 0, 0);
}

void ring_buffer_read_fully(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    void* data,
    uint32_t bytes) {
    ring_buffer_read_fully_with_abort(r, v, data, bytes, 0, 0);
}

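/*
 * The _fully wrappers block until the whole payload has moved, chunking
 * it via get_step_size so a transfer larger than the ring still
 * completes. An illustrative blocking pair (no view); send_blob and
 * recv_blob are not part of the file:
 */
void send_blob(struct ring_buffer* r, const void* blob, uint32_t bytes) {
    ring_buffer_write_fully(r, NULL /* no view */, blob, bytes);
}

void recv_blob(struct ring_buffer* r, void* blob, uint32_t bytes) {
    ring_buffer_read_fully(r, NULL /* no view */, blob, bytes);
}
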
uint32_t ring_buffer_write_fully_with_abort(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    const void* data,
    uint32_t bytes,
    uint32_t abort_value,
    const volatile uint32_t* abort_ptr) {
    uint32_t candidate_step = get_step_size(r, v, bytes);
    uint32_t processed = 0;
    const uint8_t* src = (const uint8_t*)data;
    while (processed < bytes) {
        if (bytes - processed < candidate_step) {
            candidate_step = bytes - processed;
        }
        long processed_here = 0;
        ring_buffer_wait_write(r, v, candidate_step, (uint64_t)(-1));
        if (v) {
            processed_here = ring_buffer_view_write(
                r, v, src + processed, candidate_step, 1);
        } else {
            processed_here = ring_buffer_write(
                r, src + processed, candidate_step, 1);
        }
        processed += processed_here * candidate_step;
        if (abort_ptr && (abort_value == *abort_ptr)) {
            return processed;
        }
    }
    return processed;
}

uint32_t ring_buffer_read_fully_with_abort(
    struct ring_buffer* r,
    struct ring_buffer_view* v,
    void* data,
    uint32_t bytes,
    uint32_t abort_value,
    const volatile uint32_t* abort_ptr) {
    uint32_t candidate_step = get_step_size(r, v, bytes);
    uint32_t processed = 0;
    uint8_t* dst = (uint8_t*)data;
    while (processed < bytes) {
        if (bytes - processed < candidate_step) {
            candidate_step = bytes - processed;
        }
        long processed_here = 0;
        ring_buffer_wait_read(r, v, candidate_step, (uint64_t)(-1));
        if (v) {
            processed_here = ring_buffer_view_read(
                r, v, dst + processed, candidate_step, 1);
        } else {
            processed_here = ring_buffer_read(
                r, dst + processed, candidate_step, 1);
        }
        processed += processed_here * candidate_step;
        if (abort_ptr && (abort_value == *abort_ptr)) {
            return processed;
        }
    }
    return processed;
}

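/*
 * The abort variants check *abort_ptr between chunks and return early,
 * with the byte count moved so far, once it equals abort_value. A sketch
 * of a cancellable read; cancel_flag and cancellable_read are
 * illustrative, with the flag set from another thread:
 */
static volatile uint32_t cancel_flag = 0;  /* set to 1 elsewhere to cancel */

uint32_t cancellable_read(struct ring_buffer* r, void* buf, uint32_t bytes) {
    /* May return fewer than `bytes` if cancel_flag became 1 mid-transfer. */
    return ring_buffer_read_fully_with_abort(
        r, NULL, buf, bytes, 1 /* abort_value */, &cancel_flag);
}
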
void ring_buffer_sync_init(struct ring_buffer* r) {
    __atomic_store_n(&r->state, RING_BUFFER_SYNC_PRODUCER_IDLE, __ATOMIC_SEQ_CST);
}

bool ring_buffer_producer_acquire(struct ring_buffer* r) {
    uint32_t expected_idle = RING_BUFFER_SYNC_PRODUCER_IDLE;
    return __atomic_compare_exchange_n(
        &r->state,
        &expected_idle,
        RING_BUFFER_SYNC_PRODUCER_ACTIVE,
        false /* strong CAS */,
        __ATOMIC_SEQ_CST,
        __ATOMIC_SEQ_CST);
}

bool ring_buffer_producer_acquire_from_hangup(struct ring_buffer* r) {
    uint32_t expected_hangup = RING_BUFFER_SYNC_CONSUMER_HUNG_UP;
    return __atomic_compare_exchange_n(
        &r->state,
        &expected_hangup,
        RING_BUFFER_SYNC_PRODUCER_ACTIVE,
        false /* strong CAS */,
        __ATOMIC_SEQ_CST,
        __ATOMIC_SEQ_CST);
}

void ring_buffer_producer_wait_hangup(struct ring_buffer* r) {
    while (__atomic_load_n(&r->state, __ATOMIC_SEQ_CST) !=
           RING_BUFFER_SYNC_CONSUMER_HUNG_UP) {
        ring_buffer_yield();
    }
}

void ring_buffer_producer_idle(struct ring_buffer* r) {
    __atomic_store_n(&r->state, RING_BUFFER_SYNC_PRODUCER_IDLE, __ATOMIC_SEQ_CST);
}

bool ring_buffer_consumer_hangup(struct ring_buffer* r) {
    uint32_t expected_idle = RING_BUFFER_SYNC_PRODUCER_IDLE;
    return __atomic_compare_exchange_n(
        &r->state,
        &expected_idle,
        RING_BUFFER_SYNC_CONSUMER_HANGING_UP,
        false /* strong CAS */,
        __ATOMIC_SEQ_CST,
        __ATOMIC_SEQ_CST);
}

void ring_buffer_consumer_wait_producer_idle(struct ring_buffer* r) {
    while (__atomic_load_n(&r->state, __ATOMIC_SEQ_CST) !=
           RING_BUFFER_SYNC_PRODUCER_IDLE) {
        ring_buffer_yield();
    }
}

void ring_buffer_consumer_hung_up(struct ring_buffer* r) {
    __atomic_store_n(&r->state, RING_BUFFER_SYNC_CONSUMER_HUNG_UP, __ATOMIC_SEQ_CST);
}
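
/*
 * Taken together, state forms a small hangup handshake: the producer
 * toggles IDLE/ACTIVE around its writes, and the consumer tears the
 * channel down by moving an idle producer through HANGING_UP to HUNG_UP,
 * from which the producer may later reacquire. A sketch of both sides
 * under that reading; producer_step and consumer_shutdown are
 * illustrative only:
 */
void producer_step(struct ring_buffer* r, const void* data, uint32_t bytes) {
    if (ring_buffer_producer_acquire(r)) {   /* IDLE -> ACTIVE */
        ring_buffer_write_fully(r, NULL, data, bytes);
        ring_buffer_producer_idle(r);        /* ACTIVE -> IDLE */
    }
}

void consumer_shutdown(struct ring_buffer* r) {
    while (!ring_buffer_consumer_hangup(r)) {      /* needs PRODUCER_IDLE */
        ring_buffer_consumer_wait_producer_idle(r);
    }
    ring_buffer_consumer_hung_up(r);               /* -> CONSUMER_HUNG_UP */
}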