MAJOR: counters: dispatch counters over thread groups

Most fe and be counters are good candidates for being shared between
processes. They are now grouped inside "shared" struct sub member under
be_counters and fe_counters.

Now that they are properly identified, they would greatly benefit from
being shared over thread groups to reduce the cost of atomic operations
when updating them. For this, we take the current tgid into account so
each thread group only updates its own counters. For this to work, it is
mandatory that the "shared" member from {fe,be}_counters is initialized
AFTER global.nbtgroups is known, because each shared counter causes the
stat to be allocated global.nbtgroups times. When updating a counter
without concurrency, the first counter from the array may be updated.

To consult the shared counters (which requires aggregation of per-tgid
individual counters), some helper functions were added to counter.h to
ease code maintenance and avoid computing errors.
This commit is contained in:
Aurelien DARRAGON 2025-05-15 19:55:11 +02:00
parent 12c3ffbb48
commit 16eb0fab31
29 changed files with 522 additions and 383 deletions

View File

@ -86,7 +86,7 @@ static inline int be_usable_srv(struct proxy *be)
/* set the time of last session on the backend */
static inline void be_set_sess_last(struct proxy *be)
{
HA_ATOMIC_STORE(&be->be_counters.shared->last_sess, ns_to_sec(now_ns));
HA_ATOMIC_STORE(&be->be_counters.shared->tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
}
/* This function returns non-zero if the designated server will be

View File

@ -32,6 +32,9 @@
#define COUNTERS_SHARED \
struct { \
uint16_t flags; /* COUNTERS_SHARED_F flags */\
};
#define COUNTERS_SHARED_TG \
struct { \
unsigned long last_change; /* last time, when the state was changed */\
long long srv_aborts; /* aborted responses during DATA phase caused by the server */\
long long cli_aborts; /* aborted responses during DATA phase caused by the client */\
@ -52,11 +55,14 @@
// for convenience (generic pointer)
struct counters_shared {
COUNTERS_SHARED;
struct {
COUNTERS_SHARED_TG;
} *tg[MAX_TGROUPS];
};
/* counters used by listeners and frontends */
struct fe_counters_shared {
COUNTERS_SHARED;
struct fe_counters_shared_tg {
COUNTERS_SHARED_TG;
long long denied_sess; /* denied session requests (tcp-req-sess rules) */
long long denied_conn; /* denied connection requests (tcp-req-conn rules) */
@ -74,14 +80,17 @@ struct fe_counters_shared {
long long cache_lookups;/* cache lookups */
long long comp_rsp; /* number of compressed responses */
long long rsp[6]; /* http response codes */
} http;
} p; /* protocol-specific stats */
long long failed_req; /* failed requests (eg: invalid or timeout) */
};
struct fe_counters_shared {
COUNTERS_SHARED;
struct fe_counters_shared_tg *tg[MAX_TGROUPS];
};
struct fe_counters {
struct fe_counters_shared *shared; /* shared counters */
unsigned int conn_max; /* max # of active sessions */
@ -99,8 +108,8 @@ struct fe_counters {
} p; /* protocol-specific stats */
};
struct be_counters_shared {
COUNTERS_SHARED;
struct be_counters_shared_tg {
COUNTERS_SHARED_TG;
long long cum_lbconn; /* cumulated number of sessions processed by load balancing (BE only) */
@ -129,6 +138,11 @@ struct be_counters_shared {
long long failed_conns; /* failed connect() attempts (BE only) */
};
struct be_counters_shared {
COUNTERS_SHARED;
struct be_counters_shared_tg *tg[MAX_TGROUPS];
};
/* counters used by servers and backends */
struct be_counters {
struct be_counters_shared *shared; /* shared counters */

View File

@ -22,6 +22,8 @@
#ifndef _HAPROXY_COUNTERS_H
# define _HAPROXY_COUNTERS_H
#include <stddef.h>
#include <haproxy/counters-t.h>
#include <haproxy/guid-t.h>
@ -31,4 +33,70 @@ struct be_counters_shared *counters_be_shared_get(const struct guid_node *guid);
void counters_fe_shared_drop(struct fe_counters_shared *counters);
void counters_be_shared_drop(struct be_counters_shared *counters);
/* time oriented helper: get last time (relative to current time) on a given
 * <scounters> array, for <elem> member (one member per thread group) which is
* assumed to be unsigned long type.
*
* wrapping is handled by taking the lowest diff between now and last counter.
* But since wrapping is expected once every ~136 years (starting 01/01/1970),
* perhaps it's not worth the extra CPU cost.. let's see.
*/
#define COUNTERS_SHARED_LAST_OFFSET(scounters, type, offset) \
({ \
unsigned long last = HA_ATOMIC_LOAD((type *)((char *)scounters[0] + offset));\
unsigned long now_seconds = ns_to_sec(now_ns); \
int it; \
\
for (it = 1; it < global.nbtgroups; it++) { \
unsigned long cur = HA_ATOMIC_LOAD((type *)((char *)scounters[it] + offset));\
if ((now_seconds - cur) < (now_seconds - last)) \
last = cur; \
} \
last; \
})
#define COUNTERS_SHARED_LAST(scounters, elem) \
({ \
int offset = offsetof(typeof(**scounters), elem); \
unsigned long last = COUNTERS_SHARED_LAST_OFFSET(scounters, typeof(scounters[0]->elem), offset); \
\
last; \
})
/* generic unsigned integer addition for all <elem> members from
* <scounters> array (one member per thread group)
* <rfunc> is function taking pointer as parameter to read from the memory
* location pointed to scounters[it].elem
*/
#define COUNTERS_SHARED_TOTAL_OFFSET(scounters, type, offset, rfunc) \
({ \
uint64_t __ret = 0; \
int it; \
\
for (it = 0; it < global.nbtgroups; it++) \
__ret += rfunc((type *)((char *)scounters[it] + offset)); \
__ret; \
})
#define COUNTERS_SHARED_TOTAL(scounters, elem, rfunc) \
({ \
int offset = offsetof(typeof(**scounters), elem); \
uint64_t __ret = COUNTERS_SHARED_TOTAL_OFFSET(scounters, typeof(scounters[0]->elem), offset, rfunc);\
\
__ret; \
})
/* same as COUNTERS_SHARED_TOTAL but with <rfunc> taking 2 extras arguments:
* <arg1> and <arg2>
*/
#define COUNTERS_SHARED_TOTAL_ARG2(scounters, elem, rfunc, arg1, arg2) \
({ \
uint64_t __ret = 0; \
int it; \
\
for (it = 0; it < global.nbtgroups; it++) \
__ret += rfunc(&scounters[it]->elem, arg1, arg2); \
__ret; \
})
#endif /* _HAPROXY_COUNTERS_H */

View File

@ -136,10 +136,10 @@ static inline void proxy_reset_timeouts(struct proxy *proxy)
/* increase the number of cumulated connections received on the designated frontend */
static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
{
_HA_ATOMIC_INC(&fe->fe_counters.shared->cum_conn);
_HA_ATOMIC_INC(&fe->fe_counters.shared->tg[tgid - 1]->cum_conn);
if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared->cum_conn);
update_freq_ctr(&fe->fe_counters.shared->conn_per_sec, 1);
_HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->cum_conn);
update_freq_ctr(&fe->fe_counters.shared->tg[tgid - 1]->conn_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.cps_max,
update_freq_ctr(&fe->fe_counters._conn_per_sec, 1));
}
@ -148,10 +148,10 @@ static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
static inline void proxy_inc_fe_sess_ctr(struct listener *l, struct proxy *fe)
{
_HA_ATOMIC_INC(&fe->fe_counters.shared->cum_sess);
_HA_ATOMIC_INC(&fe->fe_counters.shared->tg[tgid - 1]->cum_sess);
if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared->cum_sess);
update_freq_ctr(&fe->fe_counters.shared->sess_per_sec, 1);
_HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->cum_sess);
update_freq_ctr(&fe->fe_counters.shared->tg[tgid - 1]->sess_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.sps_max,
update_freq_ctr(&fe->fe_counters._sess_per_sec, 1));
}
@ -163,19 +163,19 @@ static inline void proxy_inc_fe_cum_sess_ver_ctr(struct listener *l, struct prox
unsigned int http_ver)
{
if (http_ver == 0 ||
http_ver > sizeof(fe->fe_counters.shared->cum_sess_ver) / sizeof(*fe->fe_counters.shared->cum_sess_ver))
http_ver > sizeof(fe->fe_counters.shared->tg[tgid - 1]->cum_sess_ver) / sizeof(*fe->fe_counters.shared->tg[tgid - 1]->cum_sess_ver))
return;
_HA_ATOMIC_INC(&fe->fe_counters.shared->cum_sess_ver[http_ver - 1]);
_HA_ATOMIC_INC(&fe->fe_counters.shared->tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared->cum_sess_ver[http_ver - 1]);
_HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
}
/* increase the number of cumulated streams on the designated backend */
static inline void proxy_inc_be_ctr(struct proxy *be)
{
_HA_ATOMIC_INC(&be->be_counters.shared->cum_sess);
update_freq_ctr(&be->be_counters.shared->sess_per_sec, 1);
_HA_ATOMIC_INC(&be->be_counters.shared->tg[tgid - 1]->cum_sess);
update_freq_ctr(&be->be_counters.shared->tg[tgid - 1]->sess_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&be->be_counters.sps_max,
update_freq_ctr(&be->be_counters._sess_per_sec, 1));
}
@ -187,13 +187,13 @@ static inline void proxy_inc_be_ctr(struct proxy *be)
static inline void proxy_inc_fe_req_ctr(struct listener *l, struct proxy *fe,
unsigned int http_ver)
{
if (http_ver >= sizeof(fe->fe_counters.shared->p.http.cum_req) / sizeof(*fe->fe_counters.shared->p.http.cum_req))
if (http_ver >= sizeof(fe->fe_counters.shared->tg[tgid - 1]->p.http.cum_req) / sizeof(*fe->fe_counters.shared->tg[tgid - 1]->p.http.cum_req))
return;
_HA_ATOMIC_INC(&fe->fe_counters.shared->p.http.cum_req[http_ver]);
_HA_ATOMIC_INC(&fe->fe_counters.shared->tg[tgid - 1]->p.http.cum_req[http_ver]);
if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared->p.http.cum_req[http_ver]);
update_freq_ctr(&fe->fe_counters.shared->req_per_sec, 1);
_HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->p.http.cum_req[http_ver]);
update_freq_ctr(&fe->fe_counters.shared->tg[tgid - 1]->req_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.p.http.rps_max,
update_freq_ctr(&fe->fe_counters.p.http._req_per_sec, 1));
}

View File

@ -181,8 +181,8 @@ const struct mux_ops *srv_get_ws_proto(struct server *srv);
/* increase the number of cumulated streams on the designated server */
static inline void srv_inc_sess_ctr(struct server *s)
{
_HA_ATOMIC_INC(&s->counters.shared->cum_sess);
update_freq_ctr(&s->counters.shared->sess_per_sec, 1);
_HA_ATOMIC_INC(&s->counters.shared->tg[tgid - 1]->cum_sess);
update_freq_ctr(&s->counters.shared->tg[tgid - 1]->sess_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&s->counters.sps_max,
update_freq_ctr(&s->counters._sess_per_sec, 1));
}
@ -190,7 +190,7 @@ static inline void srv_inc_sess_ctr(struct server *s)
/* set the time of last session on the designated server */
static inline void srv_set_sess_last(struct server *s)
{
HA_ATOMIC_STORE(&s->counters.shared->last_sess, ns_to_sec(now_ns));
HA_ATOMIC_STORE(&s->counters.shared->tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
}
/* returns the current server throttle rate between 0 and 100% */

View File

@ -362,8 +362,8 @@ static inline void stream_choose_redispatch(struct stream *s)
s->scb->state = SC_ST_REQ;
} else {
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->retries);
_HA_ATOMIC_INC(&s->be->be_counters.shared->retries);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->retries);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->retries);
s->scb->state = SC_ST_ASS;
}

View File

@ -27,6 +27,7 @@
#include <haproxy/backend.h>
#include <haproxy/channel.h>
#include <haproxy/check.h>
#include <haproxy/counters.h>
#include <haproxy/frontend.h>
#include <haproxy/global.h>
#include <haproxy/hash.h>
@ -824,8 +825,8 @@ int assign_server(struct stream *s)
goto out;
}
else if (srv != prev_srv) {
_HA_ATOMIC_INC(&s->be->be_counters.shared->cum_lbconn);
_HA_ATOMIC_INC(&srv->counters.shared->cum_lbconn);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cum_lbconn);
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->cum_lbconn);
}
s->target = &srv->obj_type;
}
@ -999,11 +1000,11 @@ int assign_server_and_queue(struct stream *s)
s->txn->flags |= TX_CK_DOWN;
}
s->flags |= SF_REDISP;
_HA_ATOMIC_INC(&prev_srv->counters.shared->redispatches);
_HA_ATOMIC_INC(&s->be->be_counters.shared->redispatches);
_HA_ATOMIC_INC(&prev_srv->counters.shared->tg[tgid - 1]->redispatches);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->redispatches);
} else {
_HA_ATOMIC_INC(&prev_srv->counters.shared->retries);
_HA_ATOMIC_INC(&s->be->be_counters.shared->retries);
_HA_ATOMIC_INC(&prev_srv->counters.shared->tg[tgid - 1]->retries);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->retries);
}
}
}
@ -2084,13 +2085,13 @@ int connect_server(struct stream *s)
s->scb->flags |= SC_FL_NOLINGER;
if (s->flags & SF_SRV_REUSED) {
_HA_ATOMIC_INC(&s->be->be_counters.shared->reuse);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->reuse);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->reuse);
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->reuse);
} else {
_HA_ATOMIC_INC(&s->be->be_counters.shared->connect);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->connect);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->connect);
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->connect);
}
err = do_connect_server(s, srv_conn);
@ -2279,8 +2280,8 @@ int srv_redispatch_connect(struct stream *s)
s->conn_err_type = STRM_ET_QUEUE_ERR;
}
_HA_ATOMIC_INC(&srv->counters.shared->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_conns);
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_conns);
return 1;
case SRV_STATUS_NOSRV:
@ -2289,7 +2290,7 @@ int srv_redispatch_connect(struct stream *s)
s->conn_err_type = STRM_ET_CONN_ERR;
}
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_conns);
return 1;
case SRV_STATUS_QUEUED:
@ -2318,8 +2319,8 @@ int srv_redispatch_connect(struct stream *s)
if (srv)
srv_set_sess_last(srv);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_conns);
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_conns);
/* release other streams waiting for this server */
if (may_dequeue_tasks(srv, s->be))
@ -2393,8 +2394,8 @@ void back_try_conn_req(struct stream *s)
if (srv)
srv_set_sess_last(srv);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_conns);
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_conns);
/* release other streams waiting for this server */
sess_change_server(s, NULL);
@ -2460,8 +2461,8 @@ void back_try_conn_req(struct stream *s)
pendconn_cond_unlink(s->pend_pos);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_conns);
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_conns);
sc_abort(sc);
sc_shutdown(sc);
req->flags |= CF_WRITE_TIMEOUT;
@ -2716,8 +2717,8 @@ void back_handle_st_cer(struct stream *s)
}
if (objt_server(s->target))
_HA_ATOMIC_INC(&objt_server(s->target)->counters.shared->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_conns);
_HA_ATOMIC_INC(&objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_conns);
sess_change_server(s, NULL);
if (may_dequeue_tasks(objt_server(s->target), s->be))
process_srv_queue(objt_server(s->target));
@ -2749,8 +2750,8 @@ void back_handle_st_cer(struct stream *s)
s->conn_err_type = STRM_ET_CONN_OTHER;
if (objt_server(s->target))
_HA_ATOMIC_INC(&objt_server(s->target)->counters.shared->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->internal_errors);
_HA_ATOMIC_INC(&objt_server(s->target)->counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
sess_change_server(s, NULL);
if (may_dequeue_tasks(objt_server(s->target), s->be))
process_srv_queue(objt_server(s->target));
@ -2887,8 +2888,8 @@ void back_handle_st_rdy(struct stream *s)
*/
void set_backend_down(struct proxy *be)
{
HA_ATOMIC_STORE(&be->be_counters.shared->last_change, ns_to_sec(now_ns));
_HA_ATOMIC_INC(&be->be_counters.shared->down_trans);
HA_ATOMIC_STORE(&be->be_counters.shared->tg[tgid - 1]->last_change, ns_to_sec(now_ns));
_HA_ATOMIC_INC(&be->be_counters.shared->tg[tgid - 1]->down_trans);
if (!(global.mode & MODE_STARTING)) {
ha_alert("%s '%s' has no server available!\n", proxy_type_str(be), be->id);
@ -2960,7 +2961,7 @@ no_cookie:
}
int be_downtime(struct proxy *px) {
unsigned long last_change = HA_ATOMIC_LOAD(&px->be_counters.shared->last_change);
unsigned long last_change = COUNTERS_SHARED_LAST(px->be_counters.shared->tg, last_change);
if (px->lbprm.tot_weight && last_change < ns_to_sec(now_ns)) // ignore negative time
return px->down_time;
@ -3403,7 +3404,7 @@ smp_fetch_be_sess_rate(const struct arg *args, struct sample *smp, const char *k
smp->flags = SMP_F_VOL_TEST;
smp->data.type = SMP_T_SINT;
smp->data.u.sint = read_freq_ctr(&px->be_counters.shared->sess_per_sec);
smp->data.u.sint = COUNTERS_SHARED_TOTAL(px->be_counters.shared->tg, sess_per_sec, read_freq_ctr);
return 1;
}
@ -3586,7 +3587,7 @@ smp_fetch_srv_sess_rate(const struct arg *args, struct sample *smp, const char *
{
smp->flags = SMP_F_VOL_TEST;
smp->data.type = SMP_T_SINT;
smp->data.u.sint = read_freq_ctr(&args->data.srv->counters.shared->sess_per_sec);
smp->data.u.sint = COUNTERS_SHARED_TOTAL(args->data.srv->counters.shared->tg, sess_per_sec, read_freq_ctr);
return 1;
}

View File

@ -2133,9 +2133,9 @@ enum act_return http_action_req_cache_use(struct act_rule *rule, struct proxy *p
return ACT_RET_CONT;
if (px == strm_fe(s))
_HA_ATOMIC_INC(&px->fe_counters.shared->p.http.cache_lookups);
_HA_ATOMIC_INC(&px->fe_counters.shared->tg[tgid - 1]->p.http.cache_lookups);
else
_HA_ATOMIC_INC(&px->be_counters.shared->p.http.cache_lookups);
_HA_ATOMIC_INC(&px->be_counters.shared->tg[tgid - 1]->p.http.cache_lookups);
cache_tree = get_cache_tree_from_hash(cache, read_u32(s->txn->cache_hash));
@ -2222,9 +2222,9 @@ enum act_return http_action_req_cache_use(struct act_rule *rule, struct proxy *p
should_send_notmodified_response(cache, htxbuf(&s->req.buf), res);
if (px == strm_fe(s))
_HA_ATOMIC_INC(&px->fe_counters.shared->p.http.cache_hits);
_HA_ATOMIC_INC(&px->fe_counters.shared->tg[tgid - 1]->p.http.cache_hits);
else
_HA_ATOMIC_INC(&px->be_counters.shared->p.http.cache_hits);
_HA_ATOMIC_INC(&px->be_counters.shared->tg[tgid - 1]->p.http.cache_hits);
return ACT_RET_CONT;
} else {
s->target = NULL;

View File

@ -34,6 +34,7 @@
#include <haproxy/cfgparse.h>
#include <haproxy/check.h>
#include <haproxy/chunk.h>
#include <haproxy/counters.h>
#include <haproxy/dgram.h>
#include <haproxy/dynbuf.h>
#include <haproxy/extcheck.h>
@ -515,7 +516,7 @@ void set_server_check_status(struct check *check, short status, const char *desc
if ((!(check->state & CHK_ST_AGENT) ||
(check->status >= HCHK_STATUS_L57DATA)) &&
(check->health > 0)) {
_HA_ATOMIC_INC(&s->counters.shared->failed_checks);
_HA_ATOMIC_INC(&s->counters.shared->tg[tgid - 1]->failed_checks);
report = 1;
check->health--;
if (check->health < check->rise)
@ -743,7 +744,7 @@ void __health_adjust(struct server *s, short status)
HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
HA_ATOMIC_STORE(&s->consecutive_errors, 0);
_HA_ATOMIC_INC(&s->counters.shared->failed_hana);
_HA_ATOMIC_INC(&s->counters.shared->tg[tgid - 1]->failed_hana);
if (s->check.fastinter) {
/* timer might need to be advanced, it might also already be
@ -995,7 +996,7 @@ int httpchk_build_status_header(struct server *s, struct buffer *buf)
"UP %d/%d", "UP",
"NOLB %d/%d", "NOLB",
"no check" };
unsigned long last_change = HA_ATOMIC_LOAD(&s->counters.shared->last_change);
unsigned long last_change = COUNTERS_SHARED_LAST(s->counters.shared->tg, last_change);
if (!(s->check.state & CHK_ST_ENABLED))
sv_state = 6;

View File

@ -22,46 +22,22 @@
#include <haproxy/atomic.h>
#include <haproxy/clock.h>
#include <haproxy/counters.h>
#include <haproxy/global.h>
#include <haproxy/time.h>
/* retrieve shared counters pointer for a given <guid> object
* <size> hint is expected to reflect the actual type size (fe/be)
* if <guid> is not set, then sharing is disabled
* Returns the pointer on success or NULL on failure
*/
static void*_counters_shared_get(const struct guid_node *guid, size_t size)
{
struct counters_shared *shared;
uint last_change;
/* no shared memory for now, simply allocate a memory block
* for the counters (zero-initialized), ignore guid
*/
shared = calloc(1, size);
if (!shared)
return NULL;
if (!guid->node.key)
shared->flags |= COUNTERS_SHARED_F_LOCAL;
last_change = ns_to_sec(now_ns);
HA_ATOMIC_STORE(&shared->last_change, last_change);
return shared;
}
/* retrieve shared fe counters pointer for a given <guid> object */
struct fe_counters_shared *counters_fe_shared_get(const struct guid_node *guid)
{
return _counters_shared_get(guid, sizeof(struct fe_counters_shared));
}
/* retrieve shared be counters pointer for a given <guid> object */
struct be_counters_shared *counters_be_shared_get(const struct guid_node *guid)
{
return _counters_shared_get(guid, sizeof(struct be_counters_shared));
}
/* Release a shared counters block obtained from _counters_shared_get():
 * frees each per-thread-group sub-block, then the enclosing structure.
 * NULL-safe: a NULL <counters> is a no-op. Also safe on a partially
 * initialized block since free(NULL) is harmless for unset tg[] slots.
 */
static void _counters_shared_drop(void *counters)
{
	struct counters_shared *shared = counters;
	int it = 0;
	if (!shared)
		return;
	/* each tg[] entry was allocated individually (one per thread group),
	 * release them before the parent structure itself
	 */
	while (it < global.nbtgroups) {
		free(shared->tg[it]);
		it += 1;
	}
	free(counters);
}
@ -76,3 +52,49 @@ void counters_be_shared_drop(struct be_counters_shared *counters)
{
_counters_shared_drop(counters);
}
/* retrieve shared counters pointer for a given <guid> object
 * <size> hint is expected to reflect the actual tg member size (fe/be)
 * if <guid> is not set, then sharing is disabled
 * Returns the pointer on success or NULL on failure
 */
static void *_counters_shared_get(const struct guid_node *guid, size_t size)
{
	struct counters_shared *shared;
	uint now_sec;
	int grp;

	/* no shared memory for now, simply allocate a zero-initialized
	 * memory block for the counters, ignore guid
	 */
	shared = calloc(1, sizeof(*shared));
	if (!shared)
		return NULL;

	if (!guid->node.key)
		shared->flags |= COUNTERS_SHARED_F_LOCAL;

	/* one per-tgroup counter block, released via _counters_shared_drop()
	 * on partial failure
	 */
	for (grp = 0; grp < global.nbtgroups; grp++) {
		shared->tg[grp] = calloc(1, size);
		if (!shared->tg[grp]) {
			_counters_shared_drop(shared);
			return NULL;
		}
	}

	now_sec = ns_to_sec(now_ns);
	/* only set one group, only latest value is considered */
	HA_ATOMIC_STORE(&shared->tg[0]->last_change, now_sec);

	return shared;
}
/* retrieve shared fe counters pointer for a given <guid> object.
 * Thin wrapper around _counters_shared_get() passing the size of the
 * per-thread-group fe counters struct. Returns NULL on failure.
 */
struct fe_counters_shared *counters_fe_shared_get(const struct guid_node *guid)
{
	return _counters_shared_get(guid, sizeof(struct fe_counters_shared_tg));
}
/* retrieve shared be counters pointer for a given <guid> object.
 * Thin wrapper around _counters_shared_get() passing the size of the
 * per-thread-group be counters struct. Returns NULL on failure.
 */
struct be_counters_shared *counters_be_shared_get(const struct guid_node *guid)
{
	return _counters_shared_get(guid, sizeof(struct be_counters_shared_tg));
}

View File

@ -446,12 +446,12 @@ static int fcgi_flt_http_headers(struct stream *s, struct filter *filter, struct
goto end;
rewrite_err:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->failed_rewrites);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
hdr_rule_err:
node = ebpt_first(&hdr_rules);
while (node) {

View File

@ -393,14 +393,14 @@ comp_http_payload(struct stream *s, struct filter *filter, struct http_msg *msg,
if (st->comp_ctx[dir] && st->comp_ctx[dir]->cur_lvl > 0) {
update_freq_ctr(&global.comp_bps_in, consumed);
_HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.shared->comp_in[dir], consumed);
_HA_ATOMIC_ADD(&s->be->be_counters.shared->comp_in[dir], consumed);
_HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.shared->tg[tgid - 1]->comp_in[dir], consumed);
_HA_ATOMIC_ADD(&s->be->be_counters.shared->tg[tgid - 1]->comp_in[dir], consumed);
update_freq_ctr(&global.comp_bps_out, to_forward);
_HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.shared->comp_out[dir], to_forward);
_HA_ATOMIC_ADD(&s->be->be_counters.shared->comp_out[dir], to_forward);
_HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.shared->tg[tgid - 1]->comp_out[dir], to_forward);
_HA_ATOMIC_ADD(&s->be->be_counters.shared->tg[tgid - 1]->comp_out[dir], to_forward);
} else {
_HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.shared->comp_byp[dir], consumed);
_HA_ATOMIC_ADD(&s->be->be_counters.shared->comp_byp[dir], consumed);
_HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.shared->tg[tgid - 1]->comp_byp[dir], consumed);
_HA_ATOMIC_ADD(&s->be->be_counters.shared->tg[tgid - 1]->comp_byp[dir], consumed);
}
return to_forward;
@ -419,9 +419,9 @@ comp_http_end(struct stream *s, struct filter *filter,
goto end;
if (strm_fe(s)->mode == PR_MODE_HTTP)
_HA_ATOMIC_INC(&strm_fe(s)->fe_counters.shared->p.http.comp_rsp);
_HA_ATOMIC_INC(&strm_fe(s)->fe_counters.shared->tg[tgid - 1]->p.http.comp_rsp);
if ((s->flags & SF_BE_ASSIGNED) && (s->be->mode == PR_MODE_HTTP))
_HA_ATOMIC_INC(&s->be->be_counters.shared->p.http.comp_rsp);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->p.http.comp_rsp);
end:
return 1;
}

View File

@ -26,6 +26,7 @@
#include <haproxy/arg.h>
#include <haproxy/chunk.h>
#include <haproxy/connection.h>
#include <haproxy/counters.h>
#include <haproxy/fd.h>
#include <haproxy/frontend.h>
#include <haproxy/global.h>
@ -260,7 +261,7 @@ smp_fetch_fe_req_rate(const struct arg *args, struct sample *smp, const char *kw
smp->flags = SMP_F_VOL_TEST;
smp->data.type = SMP_T_SINT;
smp->data.u.sint = read_freq_ctr(&px->fe_counters.shared->req_per_sec);
smp->data.u.sint = COUNTERS_SHARED_TOTAL(px->fe_counters.shared->tg, req_per_sec, read_freq_ctr);
return 1;
}
@ -280,7 +281,7 @@ smp_fetch_fe_sess_rate(const struct arg *args, struct sample *smp, const char *k
smp->flags = SMP_F_VOL_TEST;
smp->data.type = SMP_T_SINT;
smp->data.u.sint = read_freq_ctr(&px->fe_counters.shared->sess_per_sec);
smp->data.u.sint = COUNTERS_SHARED_TOTAL(px->fe_counters.shared->tg, sess_per_sec, read_freq_ctr);
return 1;
}

View File

@ -75,6 +75,7 @@
#include <haproxy/cli.h>
#include <haproxy/clock.h>
#include <haproxy/connection.h>
#include <haproxy/counters.h>
#ifdef USE_CPU_AFFINITY
#include <haproxy/cpuset.h>
#include <haproxy/cpu_topo.h>
@ -815,10 +816,10 @@ static void sig_dump_state(struct sig_handler *sh)
send_log(p, LOG_NOTICE, "SIGHUP received, dumping servers states for proxy %s.\n", p->id);
while (s) {
chunk_printf(&trash,
"SIGHUP: Server %s/%s is %s. Conn: %d act, %d pend, %lld tot.",
"SIGHUP: Server %s/%s is %s. Conn: %d act, %d pend, %llu tot.",
p->id, s->id,
(s->cur_state != SRV_ST_STOPPED) ? "UP" : "DOWN",
s->cur_sess, s->queueslength, HA_ATOMIC_LOAD(&s->counters.shared->cum_sess));
s->cur_sess, s->queueslength, (ullong)COUNTERS_SHARED_TOTAL(s->counters.shared->tg, cum_sess, HA_ATOMIC_LOAD));
ha_warning("%s\n", trash.area);
send_log(p, LOG_NOTICE, "%s\n", trash.area);
s = s->next;
@ -827,21 +828,21 @@ static void sig_dump_state(struct sig_handler *sh)
/* FIXME: those info are a bit outdated. We should be able to distinguish between FE and BE. */
if (!p->srv) {
chunk_printf(&trash,
"SIGHUP: Proxy %s has no servers. Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %lld+%lld.",
"SIGHUP: Proxy %s has no servers. Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %llu+%llu.",
p->id,
p->feconn, p->beconn, p->totpend, p->queueslength, HA_ATOMIC_LOAD(&p->fe_counters.shared->cum_conn), HA_ATOMIC_LOAD(&p->be_counters.shared->cum_sess));
p->feconn, p->beconn, p->totpend, p->queueslength, (ullong)COUNTERS_SHARED_TOTAL(p->fe_counters.shared->tg, cum_conn, HA_ATOMIC_LOAD), (ullong)COUNTERS_SHARED_TOTAL(p->be_counters.shared->tg, cum_sess, HA_ATOMIC_LOAD));
} else if (p->srv_act == 0) {
chunk_printf(&trash,
"SIGHUP: Proxy %s %s ! Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %lld+%lld.",
"SIGHUP: Proxy %s %s ! Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %llu+%llu.",
p->id,
(p->srv_bck) ? "is running on backup servers" : "has no server available",
p->feconn, p->beconn, p->totpend, p->queueslength, HA_ATOMIC_LOAD(&p->fe_counters.shared->cum_conn), HA_ATOMIC_LOAD(&p->be_counters.shared->cum_sess));
p->feconn, p->beconn, p->totpend, p->queueslength, (ullong)COUNTERS_SHARED_TOTAL(p->fe_counters.shared->tg, cum_conn, HA_ATOMIC_LOAD), (ullong)COUNTERS_SHARED_TOTAL(p->be_counters.shared->tg, cum_sess, HA_ATOMIC_LOAD));
} else {
chunk_printf(&trash,
"SIGHUP: Proxy %s has %d active servers and %d backup servers available."
" Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %lld+%lld.",
" Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %llu+%llu.",
p->id, p->srv_act, p->srv_bck,
p->feconn, p->beconn, p->totpend, p->queueslength, HA_ATOMIC_LOAD(&p->fe_counters.shared->cum_conn), HA_ATOMIC_LOAD(&p->be_counters.shared->cum_sess));
p->feconn, p->beconn, p->totpend, p->queueslength, (ullong)COUNTERS_SHARED_TOTAL(p->fe_counters.shared->tg, cum_conn, HA_ATOMIC_LOAD), (ullong)COUNTERS_SHARED_TOTAL(p->be_counters.shared->tg, cum_sess, HA_ATOMIC_LOAD));
}
ha_warning("%s\n", trash.area);
send_log(p, LOG_NOTICE, "%s\n", trash.area);

View File

@ -9070,7 +9070,7 @@ __LJMP static int hlua_txn_done(lua_State *L)
/* let's log the request time */
s->logs.request_ts = now_ns;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->intercepted_req);
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
}
done:

View File

@ -116,13 +116,13 @@ static enum act_return http_action_set_req_line(struct act_rule *rule, struct pr
goto leave;
fail_rewrite:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->failed_rewrites);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
ret = ACT_RET_ERR;
@ -386,13 +386,13 @@ static enum act_return http_action_normalize_uri(struct act_rule *rule, struct p
goto leave;
fail_rewrite:
_HA_ATOMIC_ADD(&sess->fe->fe_counters.shared->failed_rewrites, 1);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites, 1);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_ADD(&s->be->be_counters.shared->failed_rewrites, 1);
_HA_ATOMIC_ADD(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites, 1);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->shared->failed_rewrites, 1);
_HA_ATOMIC_ADD(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites, 1);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.shared->failed_rewrites, 1);
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites, 1);
if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
ret = ACT_RET_ERR;
@ -562,13 +562,13 @@ static enum act_return http_action_replace_uri(struct act_rule *rule, struct pro
goto leave;
fail_rewrite:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->failed_rewrites);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
ret = ACT_RET_ERR;
@ -642,13 +642,13 @@ static enum act_return action_http_set_status(struct act_rule *rule, struct prox
struct session *sess, struct stream *s, int flags)
{
if (http_res_set_status(rule->arg.http.i, rule->arg.http.str, s) == -1) {
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->failed_rewrites);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
if (!(s->flags & SF_ERR_MASK))
@ -717,10 +717,10 @@ static enum act_return http_action_reject(struct act_rule *rule, struct proxy *p
s->req.analysers &= AN_REQ_FLT_END;
s->res.analysers &= AN_RES_FLT_END;
_HA_ATOMIC_INC(&s->be->be_counters.shared->denied_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->denied_req);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->denied_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->denied_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_req);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_PRXCOND;
@ -1281,7 +1281,7 @@ static enum act_return http_action_auth(struct act_rule *rule, struct proxy *px,
req->analysers &= AN_REQ_FLT_END;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->intercepted_req);
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_LOCAL;
@ -1449,13 +1449,13 @@ static enum act_return http_action_set_header(struct act_rule *rule, struct prox
goto leave;
fail_rewrite:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->failed_rewrites);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
if (!(msg->flags & HTTP_MSGF_SOFT_RW)) {
ret = ACT_RET_ERR;
@ -1581,13 +1581,13 @@ static enum act_return http_action_replace_header(struct act_rule *rule, struct
goto leave;
fail_rewrite:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->failed_rewrites);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
if (!(msg->flags & HTTP_MSGF_SOFT_RW)) {
ret = ACT_RET_ERR;
@ -2315,7 +2315,7 @@ static enum act_return http_action_return(struct act_rule *rule, struct proxy *p
req->analysers &= AN_REQ_FLT_END;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->intercepted_req);
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
}
return ACT_RET_ABRT;

View File

@ -233,7 +233,7 @@ int http_wait_for_request(struct stream *s, struct channel *req, int an_bit)
struct acl_cond *cond;
s->flags |= SF_MONITOR;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->intercepted_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
/* Check if we want to fail this monitor request or not */
list_for_each_entry(cond, &sess->fe->mon_fail_cond, list) {
@ -342,17 +342,17 @@ int http_wait_for_request(struct stream *s, struct channel *req, int an_bit)
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
stream_report_term_evt(s->scb, strm_tevt_type_internal_err);
goto return_prx_cond;
return_bad_req:
txn->status = 400;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->failed_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
stream_report_term_evt(s->scb, strm_tevt_type_proto_err);
/* fall through */
@ -486,7 +486,7 @@ int http_process_req_common(struct stream *s, struct channel *req, int an_bit, s
/* Proceed with the applets now. */
if (unlikely(objt_applet(s->target))) {
if (sess->fe == s->be) /* report it if the request was intercepted by the frontend */
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->intercepted_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
if (http_handle_expect_hdr(s, htx, msg) == -1)
goto return_int_err;
@ -562,11 +562,11 @@ int http_process_req_common(struct stream *s, struct channel *req, int an_bit, s
if (!req->analyse_exp)
req->analyse_exp = tick_add(now_ms, 0);
stream_inc_http_err_ctr(s);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->denied_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_req);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->denied_req);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->denied_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->denied_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_req);
stream_report_term_evt(s->scf, strm_tevt_type_intercepted);
goto done_without_exp;
@ -579,43 +579,43 @@ int http_process_req_common(struct stream *s, struct channel *req, int an_bit, s
s->logs.request_ts = now_ns;
stream_inc_http_err_ctr(s);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->denied_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_req);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->denied_req);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->denied_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->denied_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_req);
stream_report_term_evt(s->scf, strm_tevt_type_intercepted);
goto return_prx_err;
return_fail_rewrite:
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_PRXCOND;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->failed_rewrites);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
/* fall through */
return_int_err:
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
stream_report_term_evt(s->scf, strm_tevt_type_internal_err);
goto return_prx_err;
return_bad_req:
txn->status = 400;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->failed_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
stream_report_term_evt(s->scf, strm_tevt_type_proto_err);
/* fall through */
@ -748,24 +748,24 @@ int http_process_request(struct stream *s, struct channel *req, int an_bit)
return_fail_rewrite:
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_PRXCOND;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->failed_rewrites);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
/* fall through */
return_int_err:
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
stream_report_term_evt(s->scf, strm_tevt_type_internal_err);
http_set_term_flags(s);
@ -871,19 +871,19 @@ int http_wait_for_request_body(struct stream *s, struct channel *req, int an_bit
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
stream_report_term_evt(s->scf, strm_tevt_type_internal_err);
goto return_prx_err;
return_bad_req: /* let's centralize all bad requests */
txn->status = 400;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->failed_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
stream_report_term_evt(s->scf, strm_tevt_type_proto_err);
/* fall through */
@ -1100,24 +1100,24 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
return 0;
return_cli_abort:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->cli_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->cli_aborts);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->cli_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= ((req->flags & CF_READ_TIMEOUT) ? SF_ERR_CLITO : SF_ERR_CLICL);
status = 400;
goto return_prx_cond;
return_srv_abort:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->srv_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->srv_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->srv_aborts);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->srv_aborts);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->srv_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= ((req->flags & CF_WRITE_TIMEOUT) ? SF_ERR_SRVTO : SF_ERR_SRVCL);
status = 502;
@ -1126,20 +1126,20 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
return_int_err:
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->internal_errors);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->internal_errors);
stream_report_term_evt(s->scf, strm_tevt_type_internal_err);
status = 500;
goto return_prx_cond;
return_bad_req:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->failed_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
stream_report_term_evt(s->scf, strm_tevt_type_proto_err);
status = 400;
/* fall through */
@ -1173,9 +1173,9 @@ static __inline int do_l7_retry(struct stream *s, struct stconn *sc)
s->flags &= ~SF_CURR_SESS;
_HA_ATOMIC_DEC(&__objt_server(s->target)->cur_sess);
}
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->retries);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->retries);
}
_HA_ATOMIC_INC(&s->be->be_counters.shared->retries);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->retries);
req = &s->req;
res = &s->res;
@ -1292,9 +1292,9 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
if (s->flags & SF_SRV_REUSED)
goto abort_keep_alive;
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
/* if the server refused the early data, just send a 425 */
if (conn && conn->err_code == CO_ER_SSL_EARLY_FAILED)
@ -1329,9 +1329,9 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
return 0;
}
}
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
txn->status = 504;
stream_inc_http_fail_ctr(s);
@ -1350,12 +1350,12 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
/* 3: client abort with an abortonclose */
else if ((s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && (s->scb->flags & SC_FL_SHUT_DONE) &&
(s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))) {
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->cli_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->cli_aborts);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->cli_aborts);
txn->status = 400;
@ -1388,9 +1388,9 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
if (s->flags & SF_SRV_REUSED)
goto abort_keep_alive;
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
txn->status = 502;
stream_inc_http_fail_ctr(s);
@ -1411,9 +1411,9 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
if (s->flags & SF_SRV_REUSED)
goto abort_keep_alive;
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
rep->analysers &= AN_RES_FLT_END;
if (!(s->flags & SF_ERR_MASK))
@ -1517,8 +1517,8 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
if (n < 1 || n > 5)
n = 0;
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->p.http.rsp[n]);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->p.http.cum_req);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->p.http.rsp[n]);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->p.http.cum_req);
}
/*
@ -1662,12 +1662,12 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
return 1;
return_int_err:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->internal_errors);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->internal_errors);
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
@ -1683,9 +1683,9 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
return 0;
}
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
txn->status = 502;
stream_inc_http_fail_ctr(s);
@ -1982,36 +1982,36 @@ int http_process_res_common(struct stream *s, struct channel *rep, int an_bit, s
return 1;
deny:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->denied_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared->denied_resp);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->denied_resp);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->denied_resp);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_resp);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->denied_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->denied_resp);
stream_report_term_evt(s->scb, strm_tevt_type_intercepted);
goto return_prx_err;
return_fail_rewrite:
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_PRXCOND;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->failed_rewrites);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_rewrites);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
/* fall through */
return_int_err:
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->internal_errors);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->internal_errors);
stream_report_term_evt(s->scb, strm_tevt_type_internal_err);
goto return_prx_err;
@ -2019,9 +2019,9 @@ int http_process_res_common(struct stream *s, struct channel *rep, int an_bit, s
s->logs.t_data = -1; /* was not a valid response */
txn->status = 502;
stream_inc_http_fail_ctr(s);
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
if (objt_server(s->target)) {
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_RSP);
}
stream_report_term_evt(s->scb, strm_tevt_type_proto_err);
@ -2251,44 +2251,44 @@ int http_response_forward_body(struct stream *s, struct channel *res, int an_bit
return 0;
return_srv_abort:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->srv_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->srv_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->srv_aborts);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->srv_aborts);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->srv_aborts);
stream_inc_http_fail_ctr(s);
if (!(s->flags & SF_ERR_MASK))
s->flags |= ((res->flags & CF_READ_TIMEOUT) ? SF_ERR_SRVTO : SF_ERR_SRVCL);
goto return_error;
return_cli_abort:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->cli_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->cli_aborts);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->cli_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= ((res->flags & CF_WRITE_TIMEOUT) ? SF_ERR_CLITO : SF_ERR_CLICL);
goto return_error;
return_int_err:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->internal_errors);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->internal_errors);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
stream_report_term_evt(s->scb, strm_tevt_type_internal_err);
goto return_error;
return_bad_res:
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
if (objt_server(s->target)) {
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_RSP);
}
stream_inc_http_fail_ctr(s);
@ -2571,7 +2571,7 @@ int http_apply_redirect_rule(struct redirect_rule *rule, struct stream *s, struc
req->analysers &= AN_REQ_FLT_END;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->intercepted_req);
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
}
out:
@ -4282,9 +4282,9 @@ enum rule_result http_wait_for_msg_body(struct stream *s, struct channel *chn,
txn->status = 408;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_CLITO;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->failed_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
goto abort;
abort_res:

View File

@ -22,6 +22,7 @@
#include <haproxy/cfgparse.h>
#include <haproxy/cli-t.h>
#include <haproxy/connection.h>
#include <haproxy/counters.h>
#include <haproxy/errors.h>
#include <haproxy/fd.h>
#include <haproxy/freq_ctr.h>
@ -1089,11 +1090,22 @@ void listener_accept(struct listener *l)
}
#endif
if (p && p->fe_sps_lim) {
int max = freq_ctr_remain(&p->fe_counters.shared->sess_per_sec, p->fe_sps_lim, 0);
int max = 0;
for (int it = 0; it < global.nbtgroups; it++)
max += freq_ctr_remain(&p->fe_counters.shared->tg[it]->sess_per_sec, p->fe_sps_lim, 0);
if (unlikely(!max)) {
unsigned int min_wait = 0;
for (int it = 0; it < global.nbtgroups; it++) {
unsigned int cur_wait = next_event_delay(&p->fe_counters.shared->tg[it]->sess_per_sec, p->fe_sps_lim, 0);
if (!it || cur_wait < min_wait)
min_wait = cur_wait;
}
/* frontend accept rate limit was reached */
expire = tick_add(now_ms, next_event_delay(&p->fe_counters.shared->sess_per_sec, p->fe_sps_lim, 0));
expire = tick_add(now_ms, min_wait);
goto limit_proxy;
}
@ -1573,7 +1585,7 @@ void listener_accept(struct listener *l)
dequeue_all_listeners();
if (p && !MT_LIST_ISEMPTY(&p->listener_queue) &&
(!p->fe_sps_lim || freq_ctr_remain(&p->fe_counters.shared->sess_per_sec, p->fe_sps_lim, 0) > 0))
(!p->fe_sps_lim || COUNTERS_SHARED_TOTAL_ARG2(p->fe_counters.shared->tg, sess_per_sec, freq_ctr_remain, p->fe_sps_lim, 0) > 0))
dequeue_proxy_listeners(p, 0);
}
return;
@ -1632,14 +1644,14 @@ void listener_release(struct listener *l)
dequeue_all_listeners();
if (fe && !MT_LIST_ISEMPTY(&fe->listener_queue) &&
(!fe->fe_sps_lim || freq_ctr_remain(&fe->fe_counters.shared->sess_per_sec, fe->fe_sps_lim, 0) > 0))
(!fe->fe_sps_lim || COUNTERS_SHARED_TOTAL_ARG2(fe->fe_counters.shared->tg, sess_per_sec, freq_ctr_remain, fe->fe_sps_lim, 0) > 0))
dequeue_proxy_listeners(fe, 0);
else if (fe) {
unsigned int wait;
int expire = TICK_ETERNITY;
if (fe->task && fe->fe_sps_lim &&
(wait = next_event_delay(&fe->fe_counters.shared->sess_per_sec,fe->fe_sps_lim, 0))) {
(wait = COUNTERS_SHARED_TOTAL_ARG2(fe->fe_counters.shared->tg, sess_per_sec, next_event_delay, fe->fe_sps_lim, 0))) {
/* we're blocking because a limit was reached on the number of
* requests/s on the frontend. We want to re-check ASAP, which
* means in 1 ms before estimated expiration date, because the

View File

@ -5946,15 +5946,15 @@ missing_budget:
parse_error:
if (l->counters)
_HA_ATOMIC_INC(&l->counters->shared->failed_req);
_HA_ATOMIC_INC(&frontend->fe_counters.shared->failed_req);
_HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&frontend->fe_counters.shared->tg[tgid - 1]->failed_req);
goto error;
cli_abort:
if (l->counters)
_HA_ATOMIC_INC(&l->counters->shared->cli_aborts);
_HA_ATOMIC_INC(&frontend->fe_counters.shared->cli_aborts);
_HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&frontend->fe_counters.shared->tg[tgid - 1]->cli_aborts);
error:
se_fl_set(appctx->sedesc, SE_FL_ERROR);

View File

@ -3729,10 +3729,10 @@ static int h1_handle_internal_err(struct h1c *h1c)
}
session_inc_http_req_ctr(sess);
proxy_inc_fe_req_ctr(sess->listener, sess->fe, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->p.http.rsp[5]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->p.http.rsp[5]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
h1c->errcode = 500;
ret = h1_send_error(h1c);
@ -3765,10 +3765,10 @@ static int h1_handle_parsing_error(struct h1c *h1c)
session_inc_http_req_ctr(sess);
session_inc_http_err_ctr(sess);
proxy_inc_fe_req_ctr(sess->listener, sess->fe, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->p.http.rsp[4]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->p.http.rsp[4]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->failed_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
if (!h1c->errcode)
h1c->errcode = 400;
@ -3802,10 +3802,10 @@ static int h1_handle_not_impl_err(struct h1c *h1c)
session_inc_http_req_ctr(sess);
proxy_inc_fe_req_ctr(sess->listener, sess->fe, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->p.http.rsp[4]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->p.http.rsp[4]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->failed_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
h1c->errcode = 501;
ret = h1_send_error(h1c);
@ -3837,10 +3837,10 @@ static int h1_handle_req_tout(struct h1c *h1c)
session_inc_http_req_ctr(sess);
proxy_inc_fe_req_ctr(sess->listener, sess->fe, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->p.http.rsp[4]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->p.http.rsp[4]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->failed_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
h1c->errcode = 408;
ret = h1_send_error(h1c);

View File

@ -2130,9 +2130,9 @@ void proxy_cond_disable(struct proxy *p)
* the data plane but on the control plane.
*/
if (p->cap & PR_CAP_FE)
cum_conn = HA_ATOMIC_LOAD(&p->fe_counters.shared->cum_conn);
cum_conn = COUNTERS_SHARED_TOTAL(p->fe_counters.shared->tg, cum_conn, HA_ATOMIC_LOAD);
if (p->cap & PR_CAP_BE)
cum_sess = HA_ATOMIC_LOAD(&p->be_counters.shared->cum_sess);
cum_sess = COUNTERS_SHARED_TOTAL(p->be_counters.shared->tg, cum_sess, HA_ATOMIC_LOAD);
if ((p->mode == PR_MODE_TCP || p->mode == PR_MODE_HTTP || p->mode == PR_MODE_SYSLOG || p->mode == PR_MODE_SPOP) && !(p->cap & PR_CAP_INT))
ha_warning("Proxy %s stopped (cumulated conns: FE: %lld, BE: %lld).\n",
@ -2227,7 +2227,8 @@ struct task *manage_proxy(struct task *t, void *context, unsigned int state)
goto out;
if (p->fe_sps_lim &&
(wait = next_event_delay(&p->fe_counters.shared->sess_per_sec, p->fe_sps_lim, 0))) {
(wait = COUNTERS_SHARED_TOTAL_ARG2(p->fe_counters.shared->tg, sess_per_sec, next_event_delay, p->fe_sps_lim, 0))) {
/* we're blocking because a limit was reached on the number of
* requests/s on the frontend. We want to re-check ASAP, which
* means in 1 ms before estimated expiration date, because the
@ -2971,7 +2972,7 @@ static int dump_servers_state(struct appctx *appctx)
dump_server_addr(&srv->check.addr, srv_check_addr);
dump_server_addr(&srv->agent.addr, srv_agent_addr);
srv_time_since_last_change = ns_to_sec(now_ns) - HA_ATOMIC_LOAD(&srv->counters.shared->last_change);
srv_time_since_last_change = ns_to_sec(now_ns) - COUNTERS_SHARED_LAST(srv->counters.shared->tg, last_change);
bk_f_forced_id = px->options & PR_O_FORCED_ID ? 1 : 0;
srv_f_forced_id = srv->flags & SRV_F_FORCED_ID ? 1 : 0;

View File

@ -72,6 +72,7 @@ s * queue's lock.
#include <import/eb32tree.h>
#include <haproxy/api.h>
#include <haproxy/backend.h>
#include <haproxy/counters.h>
#include <haproxy/http_rules.h>
#include <haproxy/pool.h>
#include <haproxy/queue.h>
@ -114,7 +115,7 @@ unsigned int srv_dynamic_maxconn(const struct server *s)
else max = MAX(s->minconn,
s->proxy->beconn * s->maxconn / s->proxy->fullconn);
last_change = HA_ATOMIC_LOAD(&s->counters.shared->last_change);
last_change = COUNTERS_SHARED_LAST(s->counters.shared->tg, last_change);
if ((s->cur_state == SRV_ST_STARTING) &&
ns_to_sec(now_ns) < last_change + s->slowstart &&

View File

@ -143,7 +143,7 @@ const char *srv_op_st_chg_cause(enum srv_op_st_chg_cause cause)
int srv_downtime(const struct server *s)
{
unsigned long last_change = HA_ATOMIC_LOAD(&s->counters.shared->last_change);
unsigned long last_change = COUNTERS_SHARED_LAST(s->proxy->be_counters.shared->tg, last_change);
if ((s->cur_state != SRV_ST_STOPPED) || last_change >= ns_to_sec(now_ns)) // ignore negative time
return s->down_time;
@ -2459,7 +2459,7 @@ INITCALL1(STG_REGISTER, srv_register_keywords, &srv_kws);
*/
void server_recalc_eweight(struct server *sv, int must_update)
{
unsigned long last_change = HA_ATOMIC_LOAD(&sv->counters.shared->last_change);
unsigned long last_change = COUNTERS_SHARED_LAST(sv->proxy->be_counters.shared->tg, last_change);
struct proxy *px = sv->proxy;
unsigned w;
@ -5803,7 +5803,7 @@ static int init_srv_slowstart(struct server *srv)
if (srv->next_state == SRV_ST_STARTING) {
task_schedule(srv->warmup,
tick_add(now_ms,
MS_TO_TICKS(MAX(1000, (ns_to_sec(now_ns) - HA_ATOMIC_LOAD(&srv->counters.shared->last_change))) / 20)));
MS_TO_TICKS(MAX(1000, (ns_to_sec(now_ns) - COUNTERS_SHARED_LAST(srv->proxy->be_counters.shared->tg, last_change))) / 20)));
}
}
@ -7006,7 +7006,7 @@ static void srv_update_status(struct server *s, int type, int cause)
/* check if server stats must be updated due the the server state change */
if (srv_prev_state != s->cur_state) {
if (srv_prev_state == SRV_ST_STOPPED) {
unsigned long last_change = HA_ATOMIC_LOAD(&s->counters.shared->last_change);
unsigned long last_change = COUNTERS_SHARED_LAST(s->proxy->be_counters.shared->tg, last_change);
/* server was down and no longer is */
if (last_change < ns_to_sec(now_ns)) // ignore negative times
@ -7015,7 +7015,7 @@ static void srv_update_status(struct server *s, int type, int cause)
}
else if (s->cur_state == SRV_ST_STOPPED) {
/* server was up and is currently down */
HA_ATOMIC_INC(&s->counters.shared->down_trans);
HA_ATOMIC_INC(&s->counters.shared->tg[tgid - 1]->down_trans);
_srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_DOWN, cb_data.common, s);
}
@ -7026,7 +7026,7 @@ static void srv_update_status(struct server *s, int type, int cause)
if (s->cur_state != SRV_ST_RUNNING && s->proxy->ready_srv == s)
HA_ATOMIC_STORE(&s->proxy->ready_srv, NULL);
HA_ATOMIC_STORE(&s->counters.shared->last_change, ns_to_sec(now_ns));
HA_ATOMIC_STORE(&s->counters.shared->tg[tgid - 1]->last_change, ns_to_sec(now_ns));
/* publish the state change */
_srv_event_hdl_prepare_state(&cb_data.state,
@ -7038,14 +7038,14 @@ static void srv_update_status(struct server *s, int type, int cause)
if (prev_srv_count && s->proxy->srv_bck == 0 && s->proxy->srv_act == 0)
set_backend_down(s->proxy); /* backend going down */
else if (!prev_srv_count && (s->proxy->srv_bck || s->proxy->srv_act)) {
unsigned long last_change = HA_ATOMIC_LOAD(&s->proxy->be_counters.shared->last_change);
unsigned long last_change = COUNTERS_SHARED_LAST(s->proxy->be_counters.shared->tg, last_change);
/* backend was down and is back up again:
* no helper function, updating last_change and backend downtime stats
*/
if (last_change < ns_to_sec(now_ns)) // ignore negative times
s->proxy->down_time += ns_to_sec(now_ns) - last_change;
HA_ATOMIC_STORE(&s->proxy->be_counters.shared->last_change, ns_to_sec(now_ns));
HA_ATOMIC_STORE(&s->proxy->be_counters.shared->tg[tgid - 1]->last_change, ns_to_sec(now_ns));
}
}

View File

@ -321,7 +321,7 @@ static void srv_state_srv_update(struct server *srv, int version, char **params)
srv_adm_set_drain(srv);
}
HA_ATOMIC_STORE(&srv->counters.shared->last_change, ns_to_sec(now_ns) - srv_last_time_change);
HA_ATOMIC_STORE(&srv->counters.shared->tg[0]->last_change, ns_to_sec(now_ns) - srv_last_time_change);
srv->check.status = srv_check_status;
srv->check.result = srv_check_result;

View File

@ -281,7 +281,7 @@ static int parse_stat_line(struct ist line,
if (!(px->cap & PR_CAP_FE))
return 0; /* silently ignored fe/be mismatch */
base_off_shared = (char *)px->fe_counters.shared;
base_off_shared = (char *)px->fe_counters.shared->tg[0];
base_off = (char *)&px->fe_counters;
off = 0;
@ -290,7 +290,7 @@ static int parse_stat_line(struct ist line,
if (!(px->cap & PR_CAP_BE))
return 0; /* silently ignored fe/be mismatch */
base_off_shared = (char *)px->be_counters.shared;
base_off_shared = (char *)px->be_counters.shared->tg[0];
base_off = (char *)&px->be_counters;
off = 1;
@ -310,7 +310,7 @@ static int parse_stat_line(struct ist line,
if (!li->counters)
return 0;
base_off_shared = (char *)li->counters->shared;
base_off_shared = (char *)li->counters->shared->tg[0];
base_off = (char *)li->counters;
off = 0;
@ -321,7 +321,7 @@ static int parse_stat_line(struct ist line,
goto err;
srv = __objt_server(node->obj_type);
base_off_shared = (char *)srv->counters.shared;
base_off_shared = (char *)srv->counters.shared->tg[0];
base_off = (char *)&srv->counters;
off = 1;

View File

@ -7,6 +7,7 @@
#include <haproxy/backend.h>
#include <haproxy/check.h>
#include <haproxy/chunk.h>
#include <haproxy/counters.h>
#include <haproxy/freq_ctr.h>
#include <haproxy/list.h>
#include <haproxy/listener.h>
@ -39,8 +40,8 @@
{ \
ME_NEW_COMMON(name_f, alt_n, nature, format, offset_f, cap_f, desc_f) \
.flags = STAT_COL_FL_GENERIC | STAT_COL_FL_SHARED, \
.metric.offset[0] = offsetof(struct fe_counters_shared, offset_f), \
.metric.offset[1] = offsetof(struct be_counters_shared, offset_f), \
.metric.offset[0] = offsetof(struct fe_counters_shared_tg, offset_f), \
.metric.offset[1] = offsetof(struct be_counters_shared_tg, offset_f), \
}
/* Define a new generic metric for frontend side only. */
@ -56,7 +57,7 @@
{ \
ME_NEW_COMMON(name_f, alt_n, nature, format, offset_f, cap_f, desc_f) \
.flags = STAT_COL_FL_GENERIC | STAT_COL_FL_SHARED, \
.metric.offset[0] = offsetof(struct fe_counters_shared, offset_f), \
.metric.offset[0] = offsetof(struct fe_counters_shared_tg, offset_f), \
}
/* Define a new generic metric for backend side only. */
@ -72,7 +73,7 @@
{ \
ME_NEW_COMMON(name_f, alt_n, nature, format, offset_f, cap_f, desc_f) \
.flags = STAT_COL_FL_GENERIC | STAT_COL_FL_SHARED, \
.metric.offset[1] = offsetof(struct be_counters_shared, offset_f), \
.metric.offset[1] = offsetof(struct be_counters_shared_tg, offset_f), \
}
const struct stat_col stat_cols_px[ST_I_PX_MAX] = {
@ -245,9 +246,9 @@ static int stcol_hide(enum stat_idx_px idx, enum obj_type *objt)
case ST_I_PX_LASTSESS:
if (srv)
return !HA_ATOMIC_LOAD(&srv->counters.shared->last_sess);
return !COUNTERS_SHARED_LAST(srv->counters.shared->tg, last_sess);
else if (px)
return !HA_ATOMIC_LOAD(&px->be_counters.shared->last_sess);
return !COUNTERS_SHARED_LAST(px->be_counters.shared->tg, last_sess);
else
return 0;
@ -271,6 +272,7 @@ static struct field me_generate_field(const struct stat_col *col,
enum field_nature fn;
struct field value;
void *counter = NULL;
int offset = 0;
int wrong_side = 0;
/* Only generic stat column must be used as input. */
@ -281,8 +283,10 @@ static struct field me_generate_field(const struct stat_col *col,
switch (cap) {
case STATS_PX_CAP_FE:
case STATS_PX_CAP_LI:
if (col->flags & STAT_COL_FL_SHARED)
counter = (char *)((struct fe_counters *)counters)->shared + col->metric.offset[0];
if (col->flags & STAT_COL_FL_SHARED) {
counter = (char *)&((struct fe_counters *)counters)->shared->tg;
offset = col->metric.offset[0];
}
else
counter = (char *)counters + col->metric.offset[0];
wrong_side = !(col->cap & (STATS_PX_CAP_FE|STATS_PX_CAP_LI));
@ -290,8 +294,10 @@ static struct field me_generate_field(const struct stat_col *col,
case STATS_PX_CAP_BE:
case STATS_PX_CAP_SRV:
if (col->flags & STAT_COL_FL_SHARED)
counter = (char *)((struct be_counters *)counters)->shared + col->metric.offset[1];
if (col->flags & STAT_COL_FL_SHARED) {
counter = (char *)&((struct be_counters *)counters)->shared->tg;
offset = col->metric.offset[1];
}
else
counter = (char *)counters + col->metric.offset[1];
wrong_side = !(col->cap & (STATS_PX_CAP_BE|STATS_PX_CAP_SRV));
@ -309,13 +315,13 @@ static struct field me_generate_field(const struct stat_col *col,
if (idx == ST_I_PX_REQ_TOT && cap == STATS_PX_CAP_FE && !stat_file) {
struct proxy *px = __objt_proxy(objt);
const size_t nb_reqs =
sizeof(px->fe_counters.shared->p.http.cum_req) /
sizeof(*px->fe_counters.shared->p.http.cum_req);
sizeof(px->fe_counters.shared->tg[0]->p.http.cum_req) /
sizeof(*px->fe_counters.shared->tg[0]->p.http.cum_req);
uint64_t total_req = 0;
int i;
for (i = 0; i < nb_reqs; i++)
total_req += HA_ATOMIC_LOAD(&px->fe_counters.shared->p.http.cum_req[i]);
total_req += COUNTERS_SHARED_TOTAL(px->fe_counters.shared->tg, p.http.cum_req[i], HA_ATOMIC_LOAD);
return mkf_u64(FN_COUNTER, total_req);
}
@ -342,9 +348,12 @@ static struct field me_generate_field(const struct stat_col *col,
if (fn == FN_COUNTER) {
switch (stcol_format(col)) {
case FF_U64:
if (col->flags & STAT_COL_FL_SHARED)
value = mkf_u64(FN_COUNTER, HA_ATOMIC_LOAD((uint64_t *)counter));
if (col->flags & STAT_COL_FL_SHARED) {
uint64_t total;
total = COUNTERS_SHARED_TOTAL_OFFSET(((char **)counter), uint64_t, offset, HA_ATOMIC_LOAD);
value = mkf_u64(FN_COUNTER, total);
}
else
value = mkf_u64(FN_COUNTER, *(uint64_t *)counter);
break;
@ -356,13 +365,20 @@ static struct field me_generate_field(const struct stat_col *col,
else if (fn == FN_RATE) {
/* freq-ctr always uses FF_U32 */
BUG_ON(stcol_format(col) != FF_U32);
value = mkf_u32(FN_RATE, read_freq_ctr(counter));
if (col->flags & STAT_COL_FL_SHARED) {
uint64_t total;
total = COUNTERS_SHARED_TOTAL_OFFSET(((char **)counter), struct freq_ctr, offset, read_freq_ctr);
value = mkf_u32(FN_RATE, total);
}
else
value = mkf_u32(FN_RATE, read_freq_ctr(counter));
}
else if (fn == FN_AGE) {
unsigned long age;
if (col->flags & STAT_COL_FL_SHARED)
age = HA_ATOMIC_LOAD((unsigned long *)counter);
age = COUNTERS_SHARED_LAST_OFFSET(((char **)counter), unsigned long, offset);
else
age = *(unsigned long *)counter;
@ -472,11 +488,12 @@ int stats_fill_fe_line(struct proxy *px, int flags, struct field *line, int len,
int i;
uint64_t total_sess;
size_t nb_sess =
sizeof(px->fe_counters.shared->cum_sess_ver) / sizeof(*px->fe_counters.shared->cum_sess_ver);
sizeof(px->fe_counters.shared->tg[0]->cum_sess_ver) / sizeof(*px->fe_counters.shared->tg[0]->cum_sess_ver);
total_sess = HA_ATOMIC_LOAD(&px->fe_counters.shared->cum_sess);
total_sess = COUNTERS_SHARED_TOTAL(px->fe_counters.shared->tg, cum_sess, HA_ATOMIC_LOAD);
for (i = 0; i < nb_sess; i++)
total_sess -= HA_ATOMIC_LOAD(&px->fe_counters.shared->cum_sess_ver[i]);
total_sess -= COUNTERS_SHARED_TOTAL(px->fe_counters.shared->tg, cum_sess_ver[i], HA_ATOMIC_LOAD);
total_sess = (int64_t)total_sess < 0 ? 0 : total_sess;
field = mkf_u64(FN_COUNTER, total_sess);
break;
@ -811,7 +828,7 @@ int stats_fill_sv_line(struct proxy *px, struct server *sv, int flags,
if (index == NULL || *index == ST_I_PX_QTIME ||
*index == ST_I_PX_CTIME || *index == ST_I_PX_RTIME ||
*index == ST_I_PX_TTIME) {
srv_samples_counter = (px->mode == PR_MODE_HTTP) ? HA_ATOMIC_LOAD(&sv->counters.shared->p.http.cum_req) : HA_ATOMIC_LOAD(&sv->counters.shared->cum_lbconn);
srv_samples_counter = (px->mode == PR_MODE_HTTP) ? COUNTERS_SHARED_TOTAL(sv->counters.shared->tg, p.http.cum_req, HA_ATOMIC_LOAD) : COUNTERS_SHARED_TOTAL(sv->counters.shared->tg, cum_lbconn, HA_ATOMIC_LOAD);
if (srv_samples_counter < TIME_STATS_SAMPLES && srv_samples_counter > 0)
srv_samples_window = srv_samples_counter;
}
@ -1190,7 +1207,7 @@ int stats_fill_be_line(struct proxy *px, int flags, struct field *line, int len,
if (!index || *index == ST_I_PX_QTIME ||
*index == ST_I_PX_CTIME || *index == ST_I_PX_RTIME ||
*index == ST_I_PX_TTIME) {
be_samples_counter = (px->mode == PR_MODE_HTTP) ? HA_ATOMIC_LOAD(&px->be_counters.shared->p.http.cum_req) : HA_ATOMIC_LOAD(&px->be_counters.shared->cum_lbconn);
be_samples_counter = (px->mode == PR_MODE_HTTP) ? COUNTERS_SHARED_TOTAL(px->be_counters.shared->tg, p.http.cum_req, HA_ATOMIC_LOAD) : COUNTERS_SHARED_TOTAL(px->be_counters.shared->tg, cum_lbconn, HA_ATOMIC_LOAD);
if (be_samples_counter < TIME_STATS_SAMPLES && be_samples_counter > 0)
be_samples_window = be_samples_counter;
}

View File

@ -823,14 +823,14 @@ void stream_process_counters(struct stream *s)
bytes = s->req.total - s->logs.bytes_in;
s->logs.bytes_in = s->req.total;
if (bytes) {
_HA_ATOMIC_ADD(&sess->fe->fe_counters.shared->bytes_in, bytes);
_HA_ATOMIC_ADD(&s->be->be_counters.shared->bytes_in, bytes);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.shared->tg[tgid - 1]->bytes_in, bytes);
_HA_ATOMIC_ADD(&s->be->be_counters.shared->tg[tgid - 1]->bytes_in, bytes);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.shared->bytes_in, bytes);
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->bytes_in, bytes);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->shared->bytes_in, bytes);
_HA_ATOMIC_ADD(&sess->listener->counters->shared->tg[tgid - 1]->bytes_in, bytes);
for (i = 0; i < global.tune.nb_stk_ctr; i++) {
if (!stkctr_inc_bytes_in_ctr(&s->stkctr[i], bytes))
@ -841,14 +841,14 @@ void stream_process_counters(struct stream *s)
bytes = s->res.total - s->logs.bytes_out;
s->logs.bytes_out = s->res.total;
if (bytes) {
_HA_ATOMIC_ADD(&sess->fe->fe_counters.shared->bytes_out, bytes);
_HA_ATOMIC_ADD(&s->be->be_counters.shared->bytes_out, bytes);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.shared->tg[tgid - 1]->bytes_out, bytes);
_HA_ATOMIC_ADD(&s->be->be_counters.shared->tg[tgid - 1]->bytes_out, bytes);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.shared->bytes_out, bytes);
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->bytes_out, bytes);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->shared->bytes_out, bytes);
_HA_ATOMIC_ADD(&sess->listener->counters->shared->tg[tgid - 1]->bytes_out, bytes);
for (i = 0; i < global.tune.nb_stk_ctr; i++) {
if (!stkctr_inc_bytes_out_ctr(&s->stkctr[i], bytes))
@ -1012,9 +1012,9 @@ void sess_set_term_flags(struct stream *s)
if (!(s->flags & SF_FINST_MASK)) {
if (s->scb->state == SC_ST_INI) {
/* anything before REQ in fact */
_HA_ATOMIC_INC(&strm_fe(s)->fe_counters.shared->failed_req);
_HA_ATOMIC_INC(&strm_fe(s)->fe_counters.shared->tg[tgid - 1]->failed_req);
if (strm_li(s) && strm_li(s)->counters)
_HA_ATOMIC_INC(&strm_li(s)->counters->shared->failed_req);
_HA_ATOMIC_INC(&strm_li(s)->counters->shared->tg[tgid - 1]->failed_req);
s->flags |= SF_FINST_R;
}
@ -1061,7 +1061,7 @@ enum act_return process_use_service(struct act_rule *rule, struct proxy *px,
if (rule->from != ACT_F_HTTP_REQ) {
if (sess->fe == s->be) /* report it if the request was intercepted by the frontend */
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->intercepted_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
/* The flag SF_ASSIGNED prevent from server assignment. */
s->flags |= SF_ASSIGNED;
@ -1856,12 +1856,12 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
sc_shutdown(scf);
if (!(req->analysers) && !(res->analysers)) {
COUNT_IF(1, "Report a client abort (no analysers)");
_HA_ATOMIC_INC(&s->be->be_counters.shared->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->cli_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->cli_aborts);
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->cli_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_CLICL;
if (!(s->flags & SF_FINST_MASK))
@ -1874,17 +1874,17 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
if (sc_state_in(scb->state, SC_SB_EST|SC_SB_DIS)) {
sc_abort(scb);
sc_shutdown(scb);
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->failed_resp);
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->failed_resp);
if (!(req->analysers) && !(res->analysers)) {
COUNT_IF(1, "Report a client abort (no analysers)");
_HA_ATOMIC_INC(&s->be->be_counters.shared->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->srv_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->srv_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->srv_aborts);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->srv_aborts);
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->srv_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_SRVCL;
if (!(s->flags & SF_FINST_MASK))
@ -2188,32 +2188,32 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
req->analysers &= AN_REQ_FLT_END;
channel_auto_close(req);
if (scf->flags & SC_FL_ERROR) {
_HA_ATOMIC_INC(&s->be->be_counters.shared->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->cli_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->cli_aborts);
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->cli_aborts);
s->flags |= SF_ERR_CLICL;
COUNT_IF(1, "Report unhandled client error");
}
else if (req->flags & CF_READ_TIMEOUT) {
_HA_ATOMIC_INC(&s->be->be_counters.shared->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->cli_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->cli_aborts);
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->cli_aborts);
s->flags |= SF_ERR_CLITO;
COUNT_IF(1, "Report unhandled client timeout (RD)");
}
else {
_HA_ATOMIC_INC(&s->be->be_counters.shared->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->srv_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->srv_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->srv_aborts);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->srv_aborts);
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->srv_aborts);
s->flags |= SF_ERR_SRVTO;
COUNT_IF(1, "Report unhandled server timeout (WR)");
}
@ -2237,32 +2237,32 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
res->analysers &= AN_RES_FLT_END;
channel_auto_close(res);
if (scb->flags & SC_FL_ERROR) {
_HA_ATOMIC_INC(&s->be->be_counters.shared->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->srv_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->srv_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->srv_aborts);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->srv_aborts);
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->srv_aborts);
s->flags |= SF_ERR_SRVCL;
COUNT_IF(1, "Report unhandled server error");
}
else if (res->flags & CF_READ_TIMEOUT) {
_HA_ATOMIC_INC(&s->be->be_counters.shared->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->srv_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->srv_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->srv_aborts);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->srv_aborts);
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->srv_aborts);
s->flags |= SF_ERR_SRVTO;
COUNT_IF(1, "Report unhandled server timeout (RD)");
}
else {
_HA_ATOMIC_INC(&s->be->be_counters.shared->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->cli_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->cli_aborts);
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->cli_aborts);
s->flags |= SF_ERR_CLITO;
COUNT_IF(1, "Report unhandled client timeout (WR)");
}
@ -2633,12 +2633,12 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
n = 0;
if (sess->fe->mode == PR_MODE_HTTP) {
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->p.http.rsp[n]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->p.http.rsp[n]);
}
if ((s->flags & SF_BE_ASSIGNED) &&
(s->be->mode == PR_MODE_HTTP)) {
_HA_ATOMIC_INC(&s->be->be_counters.shared->p.http.rsp[n]);
_HA_ATOMIC_INC(&s->be->be_counters.shared->p.http.cum_req);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->p.http.rsp[n]);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->p.http.cum_req);
}
}
@ -2703,7 +2703,7 @@ void stream_update_time_stats(struct stream *s)
srv = objt_server(s->target);
if (srv) {
samples_window = (((s->be->mode == PR_MODE_HTTP) ?
HA_ATOMIC_LOAD(&srv->counters.shared->p.http.cum_req) : HA_ATOMIC_LOAD(&srv->counters.shared->cum_lbconn)) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
HA_ATOMIC_LOAD(&srv->counters.shared->tg[tgid - 1]->p.http.cum_req) : HA_ATOMIC_LOAD(&srv->counters.shared->tg[tgid - 1]->cum_lbconn)) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
swrate_add_dynamic(&srv->counters.q_time, samples_window, t_queue);
swrate_add_dynamic(&srv->counters.c_time, samples_window, t_connect);
swrate_add_dynamic(&srv->counters.d_time, samples_window, t_data);
@ -2714,7 +2714,7 @@ void stream_update_time_stats(struct stream *s)
HA_ATOMIC_UPDATE_MAX(&srv->counters.ttime_max, t_close);
}
samples_window = (((s->be->mode == PR_MODE_HTTP) ?
HA_ATOMIC_LOAD(&s->be->be_counters.shared->p.http.cum_req) : HA_ATOMIC_LOAD(&s->be->be_counters.shared->cum_lbconn)) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
HA_ATOMIC_LOAD(&s->be->be_counters.shared->tg[tgid - 1]->p.http.cum_req) : HA_ATOMIC_LOAD(&s->be->be_counters.shared->tg[tgid - 1]->cum_lbconn)) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
swrate_add_dynamic(&s->be->be_counters.q_time, samples_window, t_queue);
swrate_add_dynamic(&s->be->be_counters.c_time, samples_window, t_connect);
swrate_add_dynamic(&s->be->be_counters.d_time, samples_window, t_data);

View File

@ -397,16 +397,16 @@ static enum act_return tcp_exec_action_silent_drop(struct act_rule *rule, struct
strm->req.analysers &= AN_REQ_FLT_END;
strm->res.analysers &= AN_RES_FLT_END;
if (strm->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&strm->be->be_counters.shared->denied_req);
_HA_ATOMIC_INC(&strm->be->be_counters.shared->tg[tgid - 1]->denied_req);
if (!(strm->flags & SF_ERR_MASK))
strm->flags |= SF_ERR_PRXCOND;
if (!(strm->flags & SF_FINST_MASK))
strm->flags |= SF_FINST_R;
}
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->denied_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->denied_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_req);
return ACT_RET_ABRT;
}

View File

@ -264,25 +264,25 @@ resume_execution:
return 0;
deny:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->denied_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->denied_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_req);
stream_report_term_evt(s->scf, strm_tevt_type_intercepted);
goto reject;
internal:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
stream_report_term_evt(s->scf, strm_tevt_type_internal_err);
goto reject;
invalid:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->failed_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->failed_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
stream_report_term_evt(s->scf, strm_tevt_type_proto_err);
reject:
@ -486,31 +486,31 @@ resume_execution:
return 0;
deny:
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->denied_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared->denied_resp);
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->tg[tgid - 1]->denied_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->denied_resp);
if (s->sess->listener && s->sess->listener->counters)
_HA_ATOMIC_INC(&s->sess->listener->counters->shared->denied_resp);
_HA_ATOMIC_INC(&s->sess->listener->counters->shared->tg[tgid - 1]->denied_resp);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->denied_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->denied_resp);
stream_report_term_evt(s->scb, strm_tevt_type_intercepted);
goto reject;
internal:
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->internal_errors);
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
if (s->sess->listener && s->sess->listener->counters)
_HA_ATOMIC_INC(&s->sess->listener->counters->shared->internal_errors);
_HA_ATOMIC_INC(&s->sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->internal_errors);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->internal_errors);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
stream_report_term_evt(s->scf, strm_tevt_type_internal_err);
goto reject;
invalid:
_HA_ATOMIC_INC(&s->be->be_counters.shared->failed_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->failed_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
stream_report_term_evt(s->scf, strm_tevt_type_proto_err);
reject:
@ -585,9 +585,9 @@ int tcp_exec_l4_rules(struct session *sess)
goto end;
}
else if (rule->action == ACT_ACTION_DENY) {
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->denied_conn);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_conn);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->denied_conn);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_conn);
result = 0;
goto end;
@ -673,9 +673,9 @@ int tcp_exec_l5_rules(struct session *sess)
goto end;
}
else if (rule->action == ACT_ACTION_DENY) {
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->denied_sess);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_sess);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->denied_sess);
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_sess);
result = 0;
goto end;