CLEANUP: dynbuf: remove b_alloc_margin()
It's not used anymore; let's remove it completely before anyone accidentally starts using it again.
This commit is contained in:
parent
d68d4f1002
commit
f44ca97fcb
@ -572,18 +572,6 @@ __b_free | buffer *buf | releases <buf> which must be allocated
|
||||
b_free | buffer *buf | releases <buf> only if it is allocated
|
||||
| ret: void | and marks it empty
|
||||
--------------------+------------------+---------------------------------------
|
||||
b_alloc_margin | buffer *buf | ensures that <buf> is allocated. If an
|
||||
| int margin | allocation is needed, it ensures that
|
||||
| ret: buffer * | there are still at least <margin>
|
||||
| | buffers available in the pool after
|
||||
| | this allocation so that we don't leave
|
||||
| | the pool in a condition where a
|
||||
| | session or a response buffer could not
|
||||
| | be allocated anymore, resulting in a
|
||||
| | deadlock. This means that we sometimes
|
||||
| | need to try to allocate extra entries
|
||||
| | even if only one buffer is needed
|
||||
--------------------+------------------+---------------------------------------
|
||||
offer_buffers() | void *from | offer a buffer currently belonging to
|
||||
| uint threshold | target <from> to whoever needs
|
||||
| ret: void | one. Any pointer is valid for <from>,
|
||||
|
@ -122,72 +122,6 @@ static inline void b_free(struct buffer *buf)
|
||||
__b_free(buf);
|
||||
}
|
||||
|
||||
/* Ensures that <buf> is allocated. If an allocation is needed, it ensures that
 * there are still at least <margin> buffers available in the pool after this
 * allocation so that we don't leave the pool in a condition where a session or
 * a response buffer could not be allocated anymore, resulting in a deadlock.
 * This means that we sometimes need to try to allocate extra entries even if
 * only one buffer is needed.
 *
 * We need to lock the pool here to be sure to have <margin> buffers available
 * after the allocation, regardless how many threads that doing it in the same
 * time. So, we use internal and lockless memory functions (prefixed with '__').
 *
 * Returns <buf> on success (with buf->area and buf->size filled in), or NULL
 * when no memory could be obtained while keeping <margin> spare buffers.
 */
static inline struct buffer *b_alloc_margin(struct buffer *buf, int margin)
{
	char *area;
	ssize_t idx __maybe_unused;	/* only used under CONFIG_HAP_LOCAL_POOLS */
	unsigned int cached;

	/* already allocated: nothing to do */
	if (buf->size)
		return buf;

	cached = 0;
#ifdef CONFIG_HAP_LOCAL_POOLS
	/* first try the thread-local cache; on a hit we skip the shared pool
	 * entirely (and the <margin> check, since the cache entry was already
	 * accounted for).
	 */
	if (likely(area = __pool_get_from_cache(pool_head_buffer)))
		goto done;

	/* count this thread's cached entries so the fast-path margin test
	 * below can credit them as available.
	 */
	idx = pool_get_index(pool_head_buffer);
	if (idx >= 0)
		cached = pool_cache[tid][idx].count;
#endif

	/* mark the buffer as being waited for while we try to allocate */
	*buf = BUF_WANTED;

#ifndef CONFIG_HAP_LOCKLESS_POOLS
	/* locked pools: hold the pool lock across the availability check and
	 * the grab so the <margin> guarantee holds under concurrency.
	 */
	HA_SPIN_LOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif

	/* fast path */
	if ((pool_head_buffer->allocated - pool_head_buffer->used + cached) > margin) {
		area = __pool_get_first(pool_head_buffer);
		if (likely(area)) {
#ifndef CONFIG_HAP_LOCKLESS_POOLS
			HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif
			goto done;
		}
	}

	/* slow path, uses malloc() */
	area = __pool_refill_alloc(pool_head_buffer, margin);

#ifndef CONFIG_HAP_LOCKLESS_POOLS
	HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif

	if (unlikely(!area)) {
		/* allocation failed: record the wait for activity reporting
		 * and report failure; <buf> stays in the BUF_WANTED state.
		 */
		activity[tid].buf_wait++;
		return NULL;
	}

 done:
	buf->area = area;
	buf->size = pool_head_buffer->size;
	return buf;
}
|
||||
|
||||
|
||||
/* Offer one or multiple buffer currently belonging to target <from> to whoever
|
||||
* needs one. Any pointer is valid for <from>, including NULL. Its purpose is
|
||||
* to avoid passing a buffer to oneself in case of failed allocations (e.g.
|
||||
|
Loading…
x
Reference in New Issue
Block a user