L2 tap queue entries are currently initialized at system start and reused with
preset headers throughout their whole lifetime. The only fields we need to
update per message are things like payload size and checksums.

If we want to reuse these entries between IPv4 and IPv6 messages, we need to
set the pointer to the right IP header on the fly for each message, since the
header type may differ between entries in the same queue. The same applies to
the Ethernet header.

We make those changes here.

Signed-off-by: Jon Maloy <jmaloy(a)redhat.com>

---
v2: - Set pointers to pre-initialized IP and MAC headers instead of
      copying them in on the fly
---
 tcp_buf.c | 42 ++++++++++++++++++++++++++----------------
 1 file changed, 26 insertions(+), 16 deletions(-)

diff --git a/tcp_buf.c b/tcp_buf.c
index c31e9f3..af80cc5 100644
--- a/tcp_buf.c
+++ b/tcp_buf.c
@@ -159,8 +159,7 @@ void tcp_sock4_iov_init(const struct ctx *c)
 		iov = tcp4_l2_iov[i];
 
 		iov[TCP_IOV_TAP] = tap_hdr_iov(c, &tcp4_payload_tap_hdr[i]);
-		iov[TCP_IOV_ETH] = IOV_OF_LVALUE(tcp4_eth_src);
-		iov[TCP_IOV_IP] = IOV_OF_LVALUE(tcp4_payload_ip[i]);
+		iov[TCP_IOV_ETH].iov_len = sizeof(struct ethhdr);
 		iov[TCP_IOV_PAYLOAD].iov_base = &tcp4_payload[i];
 	}
 
@@ -203,8 +202,7 @@ void tcp_sock6_iov_init(const struct ctx *c)
 		iov = tcp6_l2_iov[i];
 
 		iov[TCP_IOV_TAP] = tap_hdr_iov(c, &tcp6_payload_tap_hdr[i]);
-		iov[TCP_IOV_ETH] = IOV_OF_LVALUE(tcp6_eth_src);
-		iov[TCP_IOV_IP] = IOV_OF_LVALUE(tcp6_payload_ip[i]);
+		iov[TCP_IOV_ETH].iov_len = sizeof(struct ethhdr);
 		iov[TCP_IOV_PAYLOAD].iov_base = &tcp6_payload[i];
 	}
 
@@ -303,11 +301,15 @@ int tcp_buf_send_flag(struct ctx *c, struct tcp_tap_conn *conn, int flags)
 	uint32_t seq;
 	int ret;
 
-	if (CONN_V4(conn))
-		iov = tcp4_l2_flags_iov[tcp4_flags_used++];
-	else
-		iov = tcp6_l2_flags_iov[tcp6_flags_used++];
-
+	if (CONN_V4(conn)) {
+		iov = tcp4_l2_flags_iov[tcp4_flags_used];
+		iov[TCP_IOV_IP] = IOV_OF_LVALUE(tcp4_flags_ip[tcp4_flags_used++]);
+		iov[TCP_IOV_ETH].iov_base = &tcp4_eth_src;
+	} else {
+		iov = tcp6_l2_flags_iov[tcp6_flags_used];
+		iov[TCP_IOV_IP] = IOV_OF_LVALUE(tcp6_flags_ip[tcp6_flags_used++]);
+		iov[TCP_IOV_ETH].iov_base = &tcp6_eth_src;
+	}
 	payload = iov[TCP_IOV_PAYLOAD].iov_base;
 
 	seq = conn->seq_to_tap;
@@ -328,11 +330,15 @@ int tcp_buf_send_flag(struct ctx *c, struct tcp_tap_conn *conn, int flags)
 		struct iovec *dup_iov;
 		int i;
 
-		if (CONN_V4(conn))
-			dup_iov = tcp4_l2_flags_iov[tcp4_flags_used++];
-		else
-			dup_iov = tcp6_l2_flags_iov[tcp6_flags_used++];
-
+		if (CONN_V4(conn)) {
+			dup_iov = tcp4_l2_flags_iov[tcp4_flags_used];
+			dup_iov[TCP_IOV_IP] = IOV_OF_LVALUE(tcp4_flags_ip[tcp4_flags_used++]);
+			dup_iov[TCP_IOV_ETH].iov_base = &tcp4_eth_src;
+		} else {
+			dup_iov = tcp6_l2_flags_iov[tcp6_flags_used];
+			dup_iov[TCP_IOV_IP] = IOV_OF_LVALUE(tcp6_flags_ip[tcp6_flags_used++]);
+			dup_iov[TCP_IOV_ETH].iov_base = &tcp6_eth_src;
+		}
 		for (i = 0; i < TCP_NUM_IOVS; i++)
 			memcpy(dup_iov[i].iov_base, iov[i].iov_base,
 			       iov[i].iov_len);
@@ -377,7 +383,9 @@ static void tcp_data_to_tap(struct ctx *c, struct tcp_tap_conn *conn,
 
 		tcp4_frame_conns[tcp4_payload_used] = conn;
 
-		iov = tcp4_l2_iov[tcp4_payload_used++];
+		iov = tcp4_l2_iov[tcp4_payload_used];
+		iov[TCP_IOV_IP] = IOV_OF_LVALUE(tcp4_payload_ip[tcp4_payload_used++]);
+		iov[TCP_IOV_ETH].iov_base = &tcp4_eth_src;
 		l4len = tcp_l2_buf_fill_headers(conn, iov, dlen, check, seq);
 		iov[TCP_IOV_PAYLOAD].iov_len = l4len;
 		if (tcp4_payload_used > TCP_FRAMES_MEM - 1)
@@ -385,7 +393,9 @@
 	} else if (CONN_V6(conn)) {
 		tcp6_frame_conns[tcp6_payload_used] = conn;
 
-		iov = tcp6_l2_iov[tcp6_payload_used++];
+		iov = tcp6_l2_iov[tcp6_payload_used];
+		iov[TCP_IOV_IP] = IOV_OF_LVALUE(tcp6_payload_ip[tcp6_payload_used++]);
+		iov[TCP_IOV_ETH].iov_base = &tcp6_eth_src;
 		l4len = tcp_l2_buf_fill_headers(conn, iov, dlen, NULL, seq);
 		iov[TCP_IOV_PAYLOAD].iov_len = l4len;
 		if (tcp6_payload_used > TCP_FRAMES_MEM - 1)
-- 
2.45.2
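
For reference, below is a minimal stand-alone sketch of the pattern the patch
relies on. It is not passt code and all names in it are made up for
illustration: header lengths are fixed once at init time, while the iov_base
pointers of the Ethernet and IP entries are set per message, so the same queue
entry can carry either an IPv4 or an IPv6 frame.

/* Stand-alone sketch (hypothetical names, not the passt API): one iovec
 * array per frame; the ETH and IP entries are re-pointed per message so the
 * same entries serve both IPv4 and IPv6 frames. */
#include <stdio.h>
#include <sys/uio.h>
#include <linux/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

enum { IOV_ETH, IOV_IP, IOV_PAYLOAD, IOV_NUM };

/* Headers pre-initialized once at start, then reused for every message */
static struct ethhdr eth4_src, eth6_src;
static struct iphdr ip4_hdr;
static struct ip6_hdr ip6_hdr_buf;
static char payload[64];

static struct iovec frame[IOV_NUM];

/* Called once: only the parts that never change are set here */
static void iov_init(void)
{
	frame[IOV_ETH].iov_len = sizeof(struct ethhdr);
	frame[IOV_PAYLOAD].iov_base = payload;
}

/* Called per message: point the ETH/IP entries at the right headers */
static void iov_prepare(int v6, size_t dlen)
{
	if (v6) {
		frame[IOV_ETH].iov_base = &eth6_src;
		frame[IOV_IP].iov_base = &ip6_hdr_buf;
		frame[IOV_IP].iov_len = sizeof(ip6_hdr_buf);
	} else {
		frame[IOV_ETH].iov_base = &eth4_src;
		frame[IOV_IP].iov_base = &ip4_hdr;
		frame[IOV_IP].iov_len = sizeof(ip4_hdr);
	}
	/* Payload size (and, in real code, checksums) updated per message */
	frame[IOV_PAYLOAD].iov_len = dlen;
}

int main(void)
{
	iov_init();
	iov_prepare(0, 32);	/* IPv4 frame */
	printf("IP header length: %zu\n", frame[IOV_IP].iov_len);
	iov_prepare(1, 32);	/* IPv6 frame */
	printf("IP header length: %zu\n", frame[IOV_IP].iov_len);
	return 0;
}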