MINOR: event_hdl: add event_hdl_async_equeue_size() function

Use event_hdl_async_equeue_size() in advanced async task handler to
get the near real-time event queue size.

By near real-time, you should understand that the queue size is not
updated during element insertion/removal, but shortly before insertion
and shortly after removal, so the size reflects the approximate
queue size at a given time but must not be relied upon as the
sole source of truth.

If 68e692da0 ("MINOR: event_hdl: add event handler base api")
is being backported, then this commit should be backported with it.
This commit is contained in:
Aurelien DARRAGON 2023-03-01 15:02:04 +01:00 committed by Christopher Faulet
parent 9e98a27d6a
commit b4b7320a6a
3 changed files with 27 additions and 7 deletions

View File

@ -79,8 +79,14 @@ struct event_hdl_sub_list_head {
/* event_hdl_sub_list is an alias (please use this for portability) */
typedef struct event_hdl_sub_list_head event_hdl_sub_list;
/* head of an async event queue: the mt_list carries the actual events,
 * <size> is a near-realtime element count maintained with atomic ops.
 * It is updated shortly before insertion / shortly after removal, so it
 * may briefly lag the real list length: treat it as a hint only.
 */
struct event_hdl_async_equeue_head {
	struct mt_list head;
	uint32_t size; /* near realtime size, not fully synced with head (to be used as a hint) */
};

/* event_hdl_async_equeue is an alias (please use this for portability) */
typedef struct event_hdl_async_equeue_head event_hdl_async_equeue;
/* subscription mgmt from event */
struct event_hdl_sub_mgmt

View File

@ -416,19 +416,31 @@ void event_hdl_async_free_event(struct event_hdl_async_event *e);
/* use this for advanced async mode to initialize event queue:
 * initializes the underlying mt_list and resets the size hint to 0.
 * Must be called before the queue is shared with other threads.
 */
static inline void event_hdl_async_equeue_init(event_hdl_async_equeue *queue)
{
	MT_LIST_INIT(&queue->head);
	queue->size = 0;
}
/* use this for advanced async mode to pop an event from event queue.
 * Returns the oldest queued event, or NULL if the queue is empty.
 * The size hint is decremented after a successful pop, so it may
 * momentarily over-report the queue length (hint semantics).
 */
static inline struct event_hdl_async_event *event_hdl_async_equeue_pop(event_hdl_async_equeue *queue)
{
	struct event_hdl_async_event *event;

	event = MT_LIST_POP(&queue->head, struct event_hdl_async_event *, mt_list);
	if (event)
		HA_ATOMIC_DEC(&queue->size);
	return event;
}
/* use this for advanced async mode to check if the event queue is empty.
 * Returns non-zero if the queue holds no events. This checks the list
 * itself (not the size hint), so it is the authoritative emptiness test.
 */
static inline int event_hdl_async_equeue_isempty(event_hdl_async_equeue *queue)
{
	return MT_LIST_ISEMPTY(&queue->head);
}
/* use this for advanced async mode to retrieve the near real-time event
 * queue size: the counter is maintained shortly before insertion and
 * shortly after removal, so treat the returned value as a hint, not as
 * an exact element count.
 */
static inline uint32_t event_hdl_async_equeue_size(event_hdl_async_equeue *queue)
{
	return HA_ATOMIC_LOAD(&queue->size);
}
/* use this to initialize <sub_list> event subscription list */

View File

@ -317,7 +317,8 @@ static inline void _event_hdl_unsubscribe(struct event_hdl_sub *del_sub)
* consumed the END event before the wakeup, and some tasks
* kill themselves (ie: normal async mode) when they receive such event
*/
lock = MT_LIST_APPEND_LOCKED(del_sub->hdl.async_equeue, &del_sub->async_end->mt_list);
HA_ATOMIC_INC(&del_sub->hdl.async_equeue->size);
lock = MT_LIST_APPEND_LOCKED(&del_sub->hdl.async_equeue->head, &del_sub->async_end->mt_list);
/* wake up the task */
event_hdl_task_wakeup(del_sub->hdl.async_task);
@ -462,7 +463,7 @@ struct event_hdl_sub *event_hdl_subscribe_ptr(event_hdl_sub_list *sub_list,
/* memory error */
goto memory_error;
}
MT_LIST_INIT(&task_ctx->e_queue);
event_hdl_async_equeue_init(&task_ctx->e_queue);
task_ctx->func = new_sub->hdl.async_ptr;
new_sub->hdl.async_equeue = &task_ctx->e_queue;
@ -785,7 +786,8 @@ static int _event_hdl_publish(event_hdl_sub_list *sub_list, struct event_hdl_sub
/* appending new event to event hdl queue */
MT_LIST_INIT(&new_event->mt_list);
MT_LIST_APPEND(cur_sub->hdl.async_equeue, &new_event->mt_list);
HA_ATOMIC_INC(&cur_sub->hdl.async_equeue->size);
MT_LIST_APPEND(&cur_sub->hdl.async_equeue->head, &new_event->mt_list);
/* wake up the task */
event_hdl_task_wakeup(cur_sub->hdl.async_task);