MINOR: quic: take out xprt snd_buf operation
Rename quic_conn_from_buf to qc_snd_buf and remove it from the xprt ops.
This is done to reflect the true usage of this function, which is only a
wrapper around sendto and cannot be called by the upper layer.
qc_snd_buf is moved to quic-sock to mark its link with
quic_sock_fd_iocb, which is the recvfrom counterpart.
diff --git a/include/haproxy/quic_sock.h b/include/haproxy/quic_sock.h
index 1f2628f..6976283 100644
--- a/include/haproxy/quic_sock.h
+++ b/include/haproxy/quic_sock.h
@@ -38,6 +38,8 @@
int quic_sock_accepting_conn(const struct receiver *rx);
struct connection *quic_sock_accept_conn(struct listener *l, int *status);
void quic_sock_fd_iocb(int fd);
+size_t qc_snd_buf(struct quic_conn *qc, const struct buffer *buf, size_t count,
+ int flags);
void quic_accept_push_qc(struct quic_conn *qc);
diff --git a/src/quic_sock.c b/src/quic_sock.c
index 9a1e5b0..ddfd865 100644
--- a/src/quic_sock.c
+++ b/src/quic_sock.c
@@ -252,6 +252,61 @@
MT_LIST_APPEND(&l->rx.rxbuf_list, &rxbuf->mt_list);
}
+/* Send up to <count> bytes from <buf> to the peer of <qc> through the fd of
+ * the connection's listener; returns the number of bytes sent. TODO: turn
+ * this into a generic UDP sendto wrapper by replacing <qc> with address/port.
+ */
+size_t qc_snd_buf(struct quic_conn *qc, const struct buffer *buf, size_t count,
+                  int flags)
+{
+	ssize_t ret;
+	size_t try, done;
+	int send_flag;
+
+	done = 0;
+	/* Send the largest possible contiguous block: only one sendto() call
+	 * is made, unless the buffer wraps and the first hunk is filled
+	 * exactly, in which case a second call is accepted.
+	 */
+	while (count) {
+		try = b_contig_data(buf, done);
+		if (try > count)
+			try = count;
+
+		send_flag = MSG_DONTWAIT | MSG_NOSIGNAL;
+		if (try < count || flags & CO_SFL_MSG_MORE)
+			send_flag |= MSG_MORE;
+
+		ret = sendto(qc->li->rx.fd, b_peek(buf, done), try, send_flag,
+		             (struct sockaddr *)&qc->peer_addr, get_addr_len(&qc->peer_addr));
+		if (ret > 0) {
+			/* TODO: partial sends should not happen over UDP; drop this */
+			count -= ret;
+			done += ret;
+
+			if (ret < try)
+				break;
+		}
+		else if (ret == 0 || errno == EAGAIN || errno == ENOTCONN || errno == EINPROGRESS) {
+			/* TODO: handle this properly. Is aborting justified for UDP? */
+			ABORT_NOW();
+		}
+		else if (errno != EINTR) {
+			/* TODO: handle this properly. Is aborting justified for UDP? */
+			ABORT_NOW();
+		}
+	}
+
+	if (done > 0) {
+		/* we count the total bytes sent, and the send rate for 32-byte
+		 * blocks. The reason for the latter is that freq_ctr are
+		 * limited to 4GB and that it's not enough per second.
+		 */
+		_HA_ATOMIC_ADD(&global.out_bytes, done);
+		update_freq_ctr(&global.out_32bps, (done + 16) / 32);
+	}
+	return done;
+}
+
/*********************** QUIC accept queue management ***********************/
/* per-thread accept queues */
diff --git a/src/xprt_quic.c b/src/xprt_quic.c
index 7ad4382..361a643 100644
--- a/src/xprt_quic.c
+++ b/src/xprt_quic.c
@@ -2751,8 +2751,7 @@
TRACE_PROTO("to send", QUIC_EV_CONN_SPPKTS, qc);
for (pkt = first_pkt; pkt; pkt = pkt->next)
quic_tx_packet_refinc(pkt);
- if (ctx->xprt->snd_buf(NULL, qc->xprt_ctx,
- &tmpbuf, tmpbuf.data, 0) <= 0) {
+	if (qc_snd_buf(qc, &tmpbuf, tmpbuf.data, 0) <= 0) {
for (pkt = first_pkt; pkt; pkt = pkt->next)
quic_tx_packet_refdec(pkt);
break;
@@ -5144,66 +5143,6 @@
}
-/* Send up to <count> pending bytes from buffer <buf> to connection <conn>'s
- * socket. <flags> may contain some CO_SFL_* flags to hint the system about
- * other pending data for example, but this flag is ignored at the moment.
- * Only one call to send() is performed, unless the buffer wraps, in which case
- * a second call may be performed. The connection's flags are updated with
- * whatever special event is detected (error, empty). The caller is responsible
- * for taking care of those events and avoiding the call if inappropriate. The
- * function does not call the connection's polling update function, so the caller
- * is responsible for this. It's up to the caller to update the buffer's contents
- * based on the return value.
- */
-static size_t quic_conn_from_buf(struct connection *conn, void *xprt_ctx, const struct buffer *buf, size_t count, int flags)
-{
- ssize_t ret;
- size_t try, done;
- int send_flag;
- struct quic_conn *qc = ((struct ssl_sock_ctx *)xprt_ctx)->qc;
-
- done = 0;
- /* send the largest possible block. For this we perform only one call
- * to send() unless the buffer wraps and we exactly fill the first hunk,
- * in which case we accept to do it once again.
- */
- while (count) {
- try = b_contig_data(buf, done);
- if (try > count)
- try = count;
-
- send_flag = MSG_DONTWAIT | MSG_NOSIGNAL;
- if (try < count || flags & CO_SFL_MSG_MORE)
- send_flag |= MSG_MORE;
-
- ret = sendto(qc->li->rx.fd, b_peek(buf, done), try, send_flag,
- (struct sockaddr *)&qc->peer_addr, get_addr_len(&qc->peer_addr));
- if (ret > 0) {
- count -= ret;
- done += ret;
-
- if (ret < try)
- break;
- }
- else if (ret == 0 || errno == EAGAIN || errno == ENOTCONN || errno == EINPROGRESS) {
- ABORT_NOW();
- }
- else if (errno != EINTR) {
- ABORT_NOW();
- }
- }
-
- if (done > 0) {
- /* we count the total bytes sent, and the send rate for 32-byte
- * blocks. The reason for the latter is that freq_ctr are
- * limited to 4GB and that it's not enough per second.
- */
- _HA_ATOMIC_ADD(&global.out_bytes, done);
- update_freq_ctr(&global.out_32bps, (done + 16) / 32);
- }
- return done;
-}
-
/* Called from the upper layer, to subscribe <es> to events <event_type>. The
* event subscriber <es> is not allowed to change from a previous call as long
* as at least one event is still subscribed. The <event_type> must only be a
@@ -5281,7 +5220,6 @@
/* transport-layer operations for QUIC connections. */
static struct xprt_ops ssl_quic = {
.close = quic_close,
- .snd_buf = quic_conn_from_buf,
.subscribe = quic_conn_subscribe,
.unsubscribe = quic_conn_unsubscribe,
.init = qc_conn_init,