Properly enforce the far end max datagram when constructing UDPTL packets
parent 6cb3be7d8b
commit 063d59922b
@@ -349,6 +349,8 @@ int udptl_build_packet(udptl_state_t *s, uint8_t buf[], const uint8_t msg[], int
     int len;
     int limit;
     int high_tide;
+    int len_before_entries;
+    int previous_len;
 
     /* UDPTL cannot cope with zero length messages, and our buffering for redundancy limits their
        maximum length. */
@@ -392,15 +394,26 @@ int udptl_build_packet(udptl_state_t *s, uint8_t buf[], const uint8_t msg[], int
             entries = s->error_correction_entries;
         else
             entries = s->tx_seq_no;
+        len_before_entries = len;
         /* The number of entries will always be small, so it is pointless allowing
            for the fragmented case here. */
         if (encode_length(buf, &len, entries) < 0)
             return -1;
         /* Encode the elements */
-        for (i = 0; i < entries; i++) {
-            j = (entry - i - 1) & UDPTL_BUF_MASK;
+        for (m = 0; m < entries; m++) {
+            previous_len = len;
+            j = (entry - m - 1) & UDPTL_BUF_MASK;
             if (encode_open_type(buf, &len, s->tx[j].buf, s->tx[j].buf_len) < 0)
                 return -1;
+
+            /* If we have exceeded the far end's max datagram size, don't include this last chunk,
+               and stop trying to add more. */
+            if (len > s->far_max_datagram_size) {
+                len = previous_len;
+                if (encode_length(buf, &len_before_entries, m) < 0)
+                    return -1;
+                break;
+            }
         }
         break;
     case UDPTL_ERROR_CORRECTION_FEC:
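Both error correction branches of this change follow the same pattern: record where the entry count was written (len_before_entries), snapshot the packet length before encoding each entry (previous_len), and if an entry pushes the packet past the far end's limit, roll the length back and rewrite the count to the number of entries that actually fit. A minimal standalone sketch of that pattern, assuming a hypothetical one-byte count and a fixed MAX_DATAGRAM in place of the PER-encoded count and s->far_max_datagram_size (not the real udptl.c code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_DATAGRAM 32     /* stand-in for s->far_max_datagram_size */

/* Append one length-prefixed entry; returns the new buffer length. */
static int encode_entry(uint8_t buf[], int len, const uint8_t *data, int data_len)
{
    buf[len++] = (uint8_t) data_len;
    memcpy(&buf[len], data, (size_t) data_len);
    return len + data_len;
}

int main(void)
{
    uint8_t buf[64];
    const uint8_t chunk[] = "ABCDEFGHIJ";   /* 10 payload bytes per entry */
    int entries = 5;
    int len = 0;
    int m;

    int len_before_entries = len;           /* where the count byte lives */
    buf[len++] = (uint8_t) entries;         /* provisional entry count */

    for (m = 0; m < entries; m++) {
        int previous_len = len;             /* snapshot before this entry */

        len = encode_entry(buf, len, chunk, 10);
        /* Exceeded the far end's limit: drop this entry, patch the count
           to the number of entries that actually fit, and stop. */
        if (len > MAX_DATAGRAM) {
            len = previous_len;
            buf[len_before_entries] = (uint8_t) m;
            break;
        }
    }
    printf("kept %d of %d entries, %d bytes\n", buf[len_before_entries], entries, len);
    return 0;
}

Running this prints "kept 2 of 5 entries, 23 bytes": the third 11-byte entry would overflow the 32-byte limit, so it is rolled back and the count byte is rewritten from 5 to 2.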
@@ -417,11 +430,13 @@ int udptl_build_packet(udptl_state_t *s, uint8_t buf[], const uint8_t msg[], int
         /* Span is defined as an inconstrained integer, which it dumb. It will only
            ever be a small value. Treat it as such. */
         buf[len++] = 1;
-        buf[len++] = (uint8_t)span;
+        buf[len++] = (uint8_t) span;
+        len_before_entries = len;
         /* The number of entries is defined as a length, but will only ever be a small
            value. Treat it as such. */
-        buf[len++] = (uint8_t)entries;
+        buf[len++] = (uint8_t) entries;
         for (m = 0; m < entries; m++) {
+            previous_len = len;
             /* Make an XOR'ed entry the maximum length */
             limit = (entry + m) & UDPTL_BUF_MASK;
             high_tide = 0;
@@ -439,6 +454,14 @@ int udptl_build_packet(udptl_state_t *s, uint8_t buf[], const uint8_t msg[], int
             }
             if (encode_open_type(buf, &len, fec, high_tide) < 0)
                 return -1;
+
+            /* If we have exceeded the far end's max datagram size, don't include this last chunk,
+               and stop trying to add more. */
+            if (len > s->far_max_datagram_size) {
+                len = previous_len;
+                buf[len_before_entries] = (uint8_t) m;
+                break;
+            }
         }
         break;
     }
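A note on the design choice: the two branches patch the entry count differently. The redundancy case re-runs encode_length(buf, &len_before_entries, m), because its count was written by the PER length encoder; for the small values involved here (both entries and m stay well below 0x80) that encoding is a single byte, so re-encoding at the saved offset overwrites the provisional count in place without shifting the rest of the packet. The FEC case wrote its count as one raw byte (buf[len++] = (uint8_t) entries), so buf[len_before_entries] = (uint8_t) m can patch it directly.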