MINOR: mt_list: Implement mt_list_try_lock_prev().

Implement mt_list_try_lock_prev(), which does the same thing as
mt_list_lock_prev(), except that if the link is already locked, it
returns { NULL, NULL } instead of waiting.
Olivier Houchard 2025-03-25 16:17:56 +00:00 committed by Olivier Houchard
parent fdcb97614c
commit 17059098e7
2 changed files with 25 additions and 0 deletions


@@ -376,6 +376,9 @@ mt_list_lock_prev(elt)
        Return
        value:          A <===> elt
mt_list_try_lock_prev(elt)
    Does the same thing as mt_list_lock_prev(), except that if the link is
    already locked, it returns { NULL, NULL } instead of waiting.
mt_list_lock_elem(elt)
    Locks the element only. Both of its pointers are replaced by two locked

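As an illustration of the non-blocking behaviour documented above, here is a
minimal caller-side sketch (not part of the commit): the helper name and list
layout are made up, and mt_list_unlock_link() is assumed to be the matching
unlock primitive for the lock_prev() family in current mt_list versions.

#include "mt_list.h"    /* single-header mt_list library (assumed include path) */

/* Try to work on the link in front of <head> without spinning. Returns 1
 * if the link could be locked (and is released again here), 0 if another
 * thread already held it.
 */
static int try_touch_tail(struct mt_list *head)
{
        struct mt_list ends = mt_list_try_lock_prev(head);

        if (ends.prev == NULL) {
                /* { NULL, NULL }: the link was already locked; back off
                 * instead of waiting (retry later, take a slow path, ...).
                 */
                return 0;
        }

        /* the link between ends.prev and <head> is locked here; the tail
         * of the list could be inspected or an element spliced in.
         */

        /* assumed matching unlock primitive; adjust if the API in use differs */
        mt_list_unlock_link(ends);
        return 1;
}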

@@ -780,6 +780,28 @@ static MT_INLINE struct mt_list mt_list_lock_prev(struct mt_list *lh)
        return el;
}

/*
 * Same as mt_list_lock_prev(), except it doesn't wait if the prev
 * is locked already, and just returns { NULL, NULL }
 */
static MT_INLINE struct mt_list mt_list_try_lock_prev(struct mt_list *lh)
{
        struct mt_list el;
        struct mt_list missed = { NULL, NULL };

        el.prev = __atomic_exchange_n(&lh->prev, MT_LIST_BUSY, __ATOMIC_RELAXED);
        if (el.prev == MT_LIST_BUSY)
                return missed;
        el.next = __atomic_exchange_n(&el.prev->next, MT_LIST_BUSY, __ATOMIC_RELAXED);
        if (el.next == MT_LIST_BUSY) {
                lh->prev = el.prev;
                __atomic_thread_fence(__ATOMIC_RELEASE);
                return missed;
        }
        return el;
}
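One possible way to combine the two primitives, purely as a sketch and not part
of the commit: spin a bounded number of times on the non-blocking variant, then
fall back to the waiting mt_list_lock_prev(). The helper name and the retry
budget are made up; only the two primitives shown here are used.

/* Hypothetical helper: try the non-blocking lock up to <budget> times,
 * then fall back to the blocking variant.
 */
static MT_INLINE struct mt_list lock_prev_with_budget(struct mt_list *lh, int budget)
{
        struct mt_list ends;

        while (budget-- > 0) {
                ends = mt_list_try_lock_prev(lh);
                if (ends.prev != NULL)
                        return ends;
                /* link busy: a brief cpu pause could go here before retrying */
        }
        /* out of budget: wait for the link like mt_list_lock_prev() callers do */
        return mt_list_lock_prev(lh);
}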
/* Element <el> is locked on both sides, but the list around it isn't touched.
 * A copy of the previous element is returned, and may be used to pass to