#ifdef GRPC_LINUX_ERRQUEUE
#include <netinet/in.h>
#include <string.h>
#include <time.h>

#include <grpc/support/log.h>

namespace grpc_core {
namespace {
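// Converts the kernel-reported struct timespec attached to a timestamping
// control message into a gpr_timespec.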
void fill_gpr_from_timestamp(gpr_timespec* gts, const struct timespec* ts) {
  gts->tv_sec = ts->tv_sec;
  gts->tv_nsec = static_cast<int32_t>(ts->tv_nsec);
  gts->clock_type = GPR_CLOCK_REALTIME;
}
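// Placeholder installed until the transport registers a real callback via
// grpc_tcp_set_write_timestamps_callback(); it only logs, so timestamps that
// arrive before registration are dropped.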
void default_timestamps_callback(void* /*arg*/, Timestamps* /*ts*/,
                                 grpc_error* /*shutdown_err*/) {
  gpr_log(GPR_DEBUG, "Timestamps callback has not been registered");
}
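// The saved callback that is invoked once all the timestamps for a
// TracedBuffer have been collected (or on shutdown).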
void (*timestamps_callback)(void*, Timestamps*,
                            grpc_error* /*shutdown_err*/) =
    default_timestamps_callback;
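// Reads a T through memcpy: the nlattr payloads inside the cmsg are not
// guaranteed to be suitably aligned for a direct typed load.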
template <typename T>
T read_unaligned(const void* ptr) {
  T val;
  memcpy(&val, ptr, sizeof(val));
  return val;
}
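// Copies opt stats from the tcp_info struct |info| into |metrics|. Each group
// of fields is gated on |info->length| so a shorter struct returned by an
// older kernel is handled safely.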
void extract_opt_stats_from_tcp_info(ConnectionMetrics* metrics,
                                     const tcp_info* info) {
  if (info == nullptr) {
    return;
  }
  if (info->length > offsetof(tcp_info, tcpi_sndbuf_limited)) {
    metrics->recurring_retrans.emplace(info->tcpi_retransmits);
    metrics->is_delivery_rate_app_limited.emplace(
        info->tcpi_delivery_rate_app_limited);
    metrics->congestion_window.emplace(info->tcpi_snd_cwnd);
    metrics->reordering.emplace(info->tcpi_reordering);
    metrics->packet_retx.emplace(info->tcpi_total_retrans);
    metrics->pacing_rate.emplace(info->tcpi_pacing_rate);
    metrics->data_notsent.emplace(info->tcpi_notsent_bytes);
    // tcpi_min_rtt reads as UINT32_MAX when the kernel has no RTT sample yet.
    if (info->tcpi_min_rtt != UINT32_MAX) {
      metrics->min_rtt.emplace(info->tcpi_min_rtt);
    }
    metrics->packet_sent.emplace(info->tcpi_data_segs_out);
    metrics->delivery_rate.emplace(info->tcpi_delivery_rate);
    metrics->busy_usec.emplace(info->tcpi_busy_time);
    metrics->rwnd_limited_usec.emplace(info->tcpi_rwnd_limited);
    metrics->sndbuf_limited_usec.emplace(info->tcpi_sndbuf_limited);
  }
  if (info->length > offsetof(tcp_info, tcpi_dsack_dups)) {
    metrics->data_sent.emplace(info->tcpi_bytes_sent);
    metrics->data_retx.emplace(info->tcpi_bytes_retrans);
    metrics->packet_spurious_retx.emplace(info->tcpi_dsack_dups);
  }
}
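// Extracts opt stats from the OPT_STATS control message |opt_stats| into
// |metrics| by walking its netlink (TCP_NLA_*) attributes.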
void extract_opt_stats_from_cmsg(ConnectionMetrics* metrics,
                                 const cmsghdr* opt_stats) {
  if (opt_stats == nullptr) {
    return;
  }
  const auto* data = CMSG_DATA(opt_stats);
  constexpr int64_t cmsg_hdr_len = CMSG_ALIGN(sizeof(struct cmsghdr));
  const int64_t len = opt_stats->cmsg_len - cmsg_hdr_len;
  int64_t offset = 0;

  while (offset < len) {
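    // Each stat is a netlink attribute: an nlattr header (length and type)
    // followed NLA_HDRLEN bytes later by the value, with the next attribute
    // starting at the NLA_ALIGNed boundary.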
    const auto* attr = reinterpret_cast<const nlattr*>(data + offset);
    const void* val = data + offset + NLA_HDRLEN;
    switch (attr->nla_type) {
      case TCP_NLA_BUSY: {
        metrics->busy_usec.emplace(read_unaligned<uint64_t>(val));
        break;
      }
      case TCP_NLA_RWND_LIMITED: {
        metrics->rwnd_limited_usec.emplace(read_unaligned<uint64_t>(val));
        break;
      }
      case TCP_NLA_SNDBUF_LIMITED: {
        metrics->sndbuf_limited_usec.emplace(read_unaligned<uint64_t>(val));
        break;
      }
      case TCP_NLA_PACING_RATE: {
        metrics->pacing_rate.emplace(read_unaligned<uint64_t>(val));
        break;
      }
      case TCP_NLA_DELIVERY_RATE: {
        metrics->delivery_rate.emplace(read_unaligned<uint64_t>(val));
        break;
      }
      case TCP_NLA_DELIVERY_RATE_APP_LMT: {
        metrics->is_delivery_rate_app_limited.emplace(
            read_unaligned<uint8_t>(val));
        break;
      }
      case TCP_NLA_SND_CWND: {
        metrics->congestion_window.emplace(read_unaligned<uint32_t>(val));
        break;
      }
      case TCP_NLA_MIN_RTT: {
        metrics->min_rtt.emplace(read_unaligned<uint32_t>(val));
        break;
      }
      case TCP_NLA_SRTT: {
        metrics->srtt.emplace(read_unaligned<uint32_t>(val));
        break;
      }
      case TCP_NLA_RECUR_RETRANS: {
        metrics->recurring_retrans.emplace(read_unaligned<uint8_t>(val));
        break;
      }
      case TCP_NLA_BYTES_SENT: {
        metrics->data_sent.emplace(read_unaligned<uint64_t>(val));
        break;
      }
      case TCP_NLA_DATA_SEGS_OUT: {
        metrics->packet_sent.emplace(read_unaligned<uint64_t>(val));
        break;
      }
      case TCP_NLA_TOTAL_RETRANS: {
        metrics->packet_retx.emplace(read_unaligned<uint64_t>(val));
        break;
      }
      case TCP_NLA_DELIVERED: {
        metrics->packet_delivered.emplace(read_unaligned<uint32_t>(val));
        break;
      }
      case TCP_NLA_DELIVERED_CE: {
        metrics->packet_delivered_ce.emplace(read_unaligned<uint32_t>(val));
        break;
      }
      case TCP_NLA_BYTES_RETRANS: {
        metrics->data_retx.emplace(read_unaligned<uint64_t>(val));
        break;
      }
      case TCP_NLA_DSACK_DUPS: {
        metrics->packet_spurious_retx.emplace(read_unaligned<uint32_t>(val));
        break;
      }
      case TCP_NLA_REORDERING: {
        metrics->reordering.emplace(read_unaligned<uint32_t>(val));
        break;
      }
      case TCP_NLA_SND_SSTHRESH: {
        metrics->snd_ssthresh.emplace(read_unaligned<uint32_t>(val));
        break;
      }
    }
    offset += NLA_ALIGN(attr->nla_len);
  }
}
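// Fetches TCP_INFO for |fd|. |info->length| doubles as getsockopt's optlen
// argument: it is initialized to the usable size of the struct, and the
// kernel writes back the number of bytes it actually filled.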
int get_socket_tcp_info(tcp_info* info, int fd) {
  memset(info, 0, sizeof(*info));
  info->length = offsetof(tcp_info, length);
  return getsockopt(fd, IPPROTO_TCP, TCP_INFO, info, &(info->length));
}
}  // namespace
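// Appends a new TracedBuffer for the write ending at sequence number |seq_no|
// to the list headed by |*head|, recording the sendmsg time and a snapshot of
// the socket's TCP_INFO stats.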
void TracedBuffer::AddNewEntry(TracedBuffer** head, uint32_t seq_no, int fd,
                               void* arg) {
  GPR_DEBUG_ASSERT(head != nullptr);
  TracedBuffer* new_elem = new TracedBuffer(seq_no, arg);
  // Store the current time as the sendmsg time; the kernel-reported
  // timestamps are filled in later as error-queue messages arrive.
  new_elem->ts_.sendmsg_time.time = gpr_now(GPR_CLOCK_REALTIME);
  new_elem->ts_.scheduled_time.time = gpr_inf_past(GPR_CLOCK_REALTIME);
  new_elem->ts_.sent_time.time = gpr_inf_past(GPR_CLOCK_REALTIME);
  new_elem->ts_.acked_time.time = gpr_inf_past(GPR_CLOCK_REALTIME);
  if (get_socket_tcp_info(&new_elem->ts_.info, fd) == 0) {
    extract_opt_stats_from_tcp_info(&new_elem->ts_.sendmsg_time.metrics,
                                    &new_elem->ts_.info);
  }
  if (*head == nullptr) {
    *head = new_elem;
    return;
  }
  // Append at the end of the list.
  TracedBuffer* ptr = *head;
  while (ptr->next_ != nullptr) {
    ptr = ptr->next_;
  }
  ptr->next_ = new_elem;
}
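// Processes one timestamp from the error queue. Every entry whose last byte
// is covered by |serr->ee_data| records the SCHED/SND/ACK time and opt stats;
// an ACK completes the entry, fires the callback, and frees it.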
void TracedBuffer::ProcessTimestamp(TracedBuffer** head,
                                    struct sock_extended_err* serr,
                                    struct cmsghdr* opt_stats,
                                    struct scm_timestamping* tss) {
  GPR_DEBUG_ASSERT(head != nullptr);
  TracedBuffer* elem = *head;
  TracedBuffer* next = nullptr;
  while (elem != nullptr) {
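    // serr->ee_data holds the sequence number of the last byte this timestamp
    // covers, so it applies to every entry with seq_no_ <= ee_data.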
    if (serr->ee_data >= elem->seq_no_) {
      switch (serr->ee_info) {
        case SCM_TSTAMP_SCHED:
          fill_gpr_from_timestamp(&(elem->ts_.scheduled_time.time),
                                  &(tss->ts[0]));
          extract_opt_stats_from_cmsg(&(elem->ts_.scheduled_time.metrics),
                                      opt_stats);
          elem = elem->next_;
          break;
        case SCM_TSTAMP_SND:
          fill_gpr_from_timestamp(&(elem->ts_.sent_time.time), &(tss->ts[0]));
          extract_opt_stats_from_cmsg(&(elem->ts_.sent_time.metrics),
                                      opt_stats);
          elem = elem->next_;
          break;
        case SCM_TSTAMP_ACK:
          fill_gpr_from_timestamp(&(elem->ts_.acked_time.time), &(tss->ts[0]));
          extract_opt_stats_from_cmsg(&(elem->ts_.acked_time.metrics),
                                      opt_stats);
          // The ACK timestamp is the last one for this buffer: report the
          // completed Timestamps and free the entry.
          timestamps_callback(elem->arg_, &(elem->ts_), GRPC_ERROR_NONE);
          next = elem->next_;
          delete static_cast<TracedBuffer*>(elem);
          *head = elem = next;
          break;
        default:
          abort();
      }
    } else {
      break;
    }
  }
}
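// Called on socket shutdown: reports every outstanding TracedBuffer through
// the callback with |shutdown_err| and frees the list.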
void TracedBuffer::Shutdown(TracedBuffer** head, void* remaining,
                            grpc_error* shutdown_err) {
  GPR_DEBUG_ASSERT(head != nullptr);
  TracedBuffer* elem = *head;
  while (elem != nullptr) {
    timestamps_callback(elem->arg_, &(elem->ts_), shutdown_err);
    auto* next = elem->next_;
    delete elem;
    elem = next;
  }
  *head = nullptr;
  if (remaining != nullptr) {
    timestamps_callback(remaining, nullptr, shutdown_err);
  }
  GRPC_ERROR_UNREF(shutdown_err);
}
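// Registers the callback that receives completed Timestamps, replacing the
// logging-only default above.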
void grpc_tcp_set_write_timestamps_callback(void (*fn)(void*, Timestamps*,
                                                       grpc_error*)) {
  timestamps_callback = fn;
}

}  // namespace grpc_core

#endif /* GRPC_LINUX_ERRQUEUE */