MINOR: lb/map: use seek lock and read locks where appropriate
- map_get_server_hash() doesn't need a write lock since it only
  reads the array, so let's only use a read lock here (see the
  mode compatibility note below).
- map_get_server_rr() only needs exclusivity to adjust the rr_idx
  while looking for its entry. Since this index is not used by
  map_get_server_hash(), let's turn this lock into a seek lock
  that doesn't block reads (see the sketch after this list).
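For illustration, the round robin path now follows this pattern
(a simplified sketch as it would sit in src/lb_map.c: rr_pick()
is a hypothetical name and the scan is reduced to a single step,
whereas the real map_get_server_rr() also skips srvtoavoid while
walking the map):

	struct server *rr_pick(struct proxy *px)
	{
		struct server *srv = NULL;

		/* seek lock: mutually exclusive with writers and
		 * other seekers, but concurrent readers keep going
		 */
		HA_RWLOCK_SKLOCK(LBPRM_LOCK, &px->lbprm.lock);
		if (px->lbprm.tot_weight) {
			srv = px->lbprm.map.srv[px->lbprm.map.rr_idx % px->lbprm.tot_weight];
			/* rr_idx is only ever touched under the seek
			 * lock, so this store needs no write upgrade
			 */
			px->lbprm.map.rr_idx++;
		}
		HA_RWLOCK_SKUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
		return srv;
	}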
With 8 threads, no significant performance difference was noticed,
which is expected given that lookups are usually instant with this
LB algorithm, so lock contention is rare.
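As a rough guide, this is my understanding of how the three modes
behind the HA_RWLOCK_* macros combine (not something this patch
spells out):

	          | R (read) | S (seek) | W (write)
	R (read)  |   yes    |   yes    |    no
	S (seek)  |   yes    |    no    |    no
	W (write) |    no    |    no    |    no

so a hash lookup taking R can proceed while a round robin pick
holds S, and only concurrent picks serialize among themselves.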
diff --git a/src/lb_map.c b/src/lb_map.c
index 1432913..b735678 100644
--- a/src/lb_map.c
+++ b/src/lb_map.c
@@ -216,7 +216,7 @@
 	int newidx, avoididx;
 	struct server *srv, *avoided;
 
-	HA_RWLOCK_WRLOCK(LBPRM_LOCK, &px->lbprm.lock);
+	HA_RWLOCK_SKLOCK(LBPRM_LOCK, &px->lbprm.lock);
 	if (px->lbprm.tot_weight == 0) {
 		avoided = NULL;
 		goto out;
@@ -248,7 +248,7 @@
 		px->lbprm.map.rr_idx = avoididx;
 
  out:
-	HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
+	HA_RWLOCK_SKUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
 	/* return NULL or srvtoavoid if found */
 	return avoided;
 }
@@ -265,10 +265,10 @@
 {
 	struct server *srv = NULL;
 
-	HA_RWLOCK_WRLOCK(LBPRM_LOCK, &px->lbprm.lock);
+	HA_RWLOCK_RDLOCK(LBPRM_LOCK, &px->lbprm.lock);
 	if (px->lbprm.tot_weight)
 		srv = px->lbprm.map.srv[hash % px->lbprm.tot_weight];
-	HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
+	HA_RWLOCK_RDUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
 	return srv;
 }
 