/*
 * Copyright (C) 2016 Martine Lenders <mlenders@inf.fu-berlin.de>
 *
 * This file is subject to the terms and conditions of the GNU Lesser
 * General Public License v2.1. See the file LICENSE in the top level
 * directory for more details.
 */

/**
 * @{
 *
 * @file
 * @author  Martine Lenders <mlenders@inf.fu-berlin.de>
 */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>

#include "log.h"
#include "net/af.h"
#include "net/ipv6/hdr.h"
#include "net/gnrc/ipv6.h"
#include "net/gnrc/ipv6/hdr.h"
#include "net/gnrc/netreg.h"
#include "net/gnrc/tx_sync.h"
#include "net/udp.h"
#include "utlist.h"
#if IS_USED(MODULE_ZTIMER_USEC)
#include "ztimer.h"
#endif
#if IS_USED(MODULE_XTIMER)
#include "xtimer.h"
#endif

#include "sock_types.h"
#include "gnrc_sock_internal.h"

#ifdef MODULE_FUZZING
extern gnrc_pktsnip_t *gnrc_pktbuf_fuzzptr;
gnrc_pktsnip_t *gnrc_sock_prevpkt = NULL;
#endif

#if IS_USED(MODULE_XTIMER) || IS_USED(MODULE_ZTIMER_USEC)
#define _TIMEOUT_MAGIC      (0xF38A0B63U)
#define _TIMEOUT_MSG_TYPE   (0x8474)

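/* timer callback: wake up a sock blocked in gnrc_sock_recv() by posting a
 * timeout message to its mbox */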
static void _callback_put(void *arg)
{
    msg_t timeout_msg = { .sender_pid = KERNEL_PID_UNDEF,
                          .type = _TIMEOUT_MSG_TYPE,
                          .content = { .value = _TIMEOUT_MAGIC } };
    gnrc_sock_reg_t *reg = arg;

    /* should be safe: if the mbox is already full, a message is pending to
     * wake the receiver anyway, so losing the timeout message is harmless */
    mbox_try_put(&reg->mbox, &timeout_msg);
}
#endif

#ifdef SOCK_HAS_ASYNC
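/* netreg callback (asynchronous sock API): queue the received packet in the
 * sock's mbox and notify the registered async callback, if any */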
static void _netapi_cb(uint16_t cmd, gnrc_pktsnip_t *pkt, void *ctx)
{
    if (cmd == GNRC_NETAPI_MSG_TYPE_RCV) {
        msg_t msg = { .type = GNRC_NETAPI_MSG_TYPE_RCV,
                      .content = { .ptr = pkt } };
        gnrc_sock_reg_t *reg = ctx;

        if (mbox_try_put(&reg->mbox, &msg) < 1) {
            LOG_WARNING("gnrc_sock: dropped message to %p (was full)\n",
                        (void *)&reg->mbox);
            /* packet could not be delivered so it should be dropped */
            gnrc_pktbuf_release(pkt);
            return;
        }
        if (reg->async_cb.generic) {
            reg->async_cb.generic(reg, SOCK_ASYNC_MSG_RECV, reg->async_cb_arg);
        }
    }
}
#endif /* SOCK_HAS_ASYNC */

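/* create a sock registration: set up its mbox and register it with GNRC
 * netreg for the given nettype and demux context */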
void gnrc_sock_create(gnrc_sock_reg_t *reg, gnrc_nettype_t type, uint32_t demux_ctx)
{
    mbox_init(&reg->mbox, reg->mbox_queue, GNRC_SOCK_MBOX_SIZE);
#ifdef SOCK_HAS_ASYNC
    reg->async_cb.generic = NULL;
    reg->netreg_cb.cb = _netapi_cb;
    reg->netreg_cb.ctx = reg;
    gnrc_netreg_entry_init_cb(&reg->entry, demux_ctx, &reg->netreg_cb);
#else /* SOCK_HAS_ASYNC */
    gnrc_netreg_entry_init_mbox(&reg->entry, demux_ctx, &reg->mbox);
#endif /* SOCK_HAS_ASYNC */
    gnrc_netreg_register(type, &reg->entry);
}

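/* blocking receive for a sock: wait up to `timeout` microseconds
 * (0: non-blocking, SOCK_NO_TIMEOUT: wait forever) for an incoming packet
 * and fill `remote` (and, if enabled, `aux`) from it */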
ssize_t gnrc_sock_recv(gnrc_sock_reg_t *reg, gnrc_pktsnip_t **pkt_out,
                       uint32_t timeout, sock_ip_ep_t *remote,
                       gnrc_sock_recv_aux_t *aux)
{
    /* only used when some sock_aux_% module is used */
    (void)aux;
    gnrc_pktsnip_t *pkt, *netif;
    msg_t msg;

    /* The fuzzing module is only enabled when building a fuzzing
     * application from the fuzzing/ subdirectory. When using gnrc_sock
     * the fuzzer assumes that gnrc_sock_recv is called in a loop. If it
     * is called again and the previous return value was the specially
     * crafted fuzzing packet, the fuzzing application terminates.
     *
     * sock_async_event has its own fuzzing termination condition. */
#if defined(MODULE_FUZZING) && !defined(MODULE_SOCK_ASYNC_EVENT)
    if (gnrc_sock_prevpkt && gnrc_sock_prevpkt == gnrc_pktbuf_fuzzptr) {
        exit(EXIT_SUCCESS);
    }
#endif

    if (mbox_size(&reg->mbox) != GNRC_SOCK_MBOX_SIZE) {
        return -EINVAL;
    }
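    /* arm a one-shot timer that posts a timeout message to the mbox once
     * `timeout` expires */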
#if IS_USED(MODULE_ZTIMER_USEC)
    ztimer_t timeout_timer = { .base = { .next = NULL } };
    if ((timeout != SOCK_NO_TIMEOUT) && (timeout != 0)) {
        timeout_timer.callback = _callback_put;
        timeout_timer.arg = reg;
        ztimer_set(ZTIMER_USEC, &timeout_timer, timeout);
    }
#elif IS_USED(MODULE_XTIMER)
    xtimer_t timeout_timer = { .callback = NULL };

    /* xtimer_spin would make this never receive anything.
     * Avoid that by setting the minimal non-spinning timeout. */
    if (timeout < XTIMER_BACKOFF && timeout > 0) {
        timeout = XTIMER_BACKOFF;
    }

    if ((timeout != SOCK_NO_TIMEOUT) && (timeout != 0)) {
        timeout_timer.callback = _callback_put;
        timeout_timer.arg = reg;
        xtimer_set(&timeout_timer, timeout);
    }
#endif
    if (timeout != 0) {
#if defined(DEVELHELP) && IS_ACTIVE(SOCK_HAS_ASYNC)
        if (reg->async_cb.generic) {
            /* this warning is a false positive when sock_*_recv() was not
             * called from the asynchronous handler */
            LOG_WARNING("gnrc_sock: timeout != 0 within the asynchronous callback leads "
                        "to unexpected delays within the asynchronous handler.\n");
        }
#endif
        mbox_get(&reg->mbox, &msg);
    }
    else {
        if (!mbox_try_get(&reg->mbox, &msg)) {
            return -EAGAIN;
        }
    }
#if IS_USED(MODULE_ZTIMER_USEC)
    ztimer_remove(ZTIMER_USEC, &timeout_timer);
#elif IS_USED(MODULE_XTIMER)
    xtimer_remove(&timeout_timer);
#endif
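    /* a message arrived: either a received packet from the stack or the
     * timeout notification from _callback_put() */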
    switch (msg.type) {
        case GNRC_NETAPI_MSG_TYPE_RCV:
            pkt = msg.content.ptr;
            break;
#if IS_USED(MODULE_XTIMER) || IS_USED(MODULE_ZTIMER_USEC)
        case _TIMEOUT_MSG_TYPE:
            if (msg.content.value == _TIMEOUT_MAGIC) {
                return -ETIMEDOUT;
            }
#endif
            /* Falls Through. */
        default:
            return -EINVAL;
    }
    /* TODO: discern NETTYPE from remote->family (set in caller), once IPv4
     * is implemented */
    ipv6_hdr_t *ipv6_hdr = gnrc_ipv6_get_header(pkt);
    assert(ipv6_hdr != NULL);
    memcpy(&remote->addr, &ipv6_hdr->src, sizeof(ipv6_addr_t));
    remote->family = AF_INET6;
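    /* fill in the auxiliary data requested by the caller, depending on which
     * sock_aux_* modules are compiled in */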
#if IS_USED(MODULE_SOCK_AUX_LOCAL)
    if (aux->local != NULL) {
        memcpy(&aux->local->addr, &ipv6_hdr->dst, sizeof(ipv6_addr_t));
        aux->local->family = AF_INET6;
    }
#endif /* MODULE_SOCK_AUX_LOCAL */
    netif = gnrc_pktsnip_search_type(pkt, GNRC_NETTYPE_NETIF);
    if (netif == NULL) {
        remote->netif = SOCK_ADDR_ANY_NETIF;
    }
    else {
        gnrc_netif_hdr_t *netif_hdr = netif->data;
        /* TODO: use API in #5511 */
        remote->netif = (uint16_t)netif_hdr->if_pid;
#if IS_USED(MODULE_SOCK_AUX_TIMESTAMP)
        if (aux->timestamp != NULL) {
            if (gnrc_netif_hdr_get_timestamp(netif_hdr, aux->timestamp) == 0) {
                aux->flags |= GNRC_SOCK_RECV_AUX_FLAG_TIMESTAMP;
            }
        }
#endif /* MODULE_SOCK_AUX_TIMESTAMP */
#if IS_USED(MODULE_SOCK_AUX_RSSI)
        if ((aux->rssi) && (netif_hdr->rssi != GNRC_NETIF_HDR_NO_RSSI)) {
            aux->flags |= GNRC_SOCK_RECV_AUX_FLAG_RSSI;
            *aux->rssi = netif_hdr->rssi;
        }
#endif /* MODULE_SOCK_AUX_RSSI */
    }
    *pkt_out = pkt; /* set out parameter */

#if IS_ACTIVE(SOCK_HAS_ASYNC)
    if (reg->async_cb.generic && mbox_avail(&reg->mbox)) {
        reg->async_cb.generic(reg, SOCK_ASYNC_MSG_RECV, reg->async_cb_arg);
    }
#endif
#ifdef MODULE_FUZZING
    gnrc_sock_prevpkt = pkt;
#endif

    return 0;
}

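/* send `payload` via the GNRC network stack: build an IPv6 header around it,
 * optionally pin it to an interface, dispatch it, and return the payload
 * length on success */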
ssize_t gnrc_sock_send(gnrc_pktsnip_t *payload, sock_ip_ep_t *local,
                       const sock_ip_ep_t *remote, uint8_t nh)
{
    gnrc_pktsnip_t *pkt;
    kernel_pid_t iface = KERNEL_PID_UNDEF;
    gnrc_nettype_t type;
    size_t payload_len = gnrc_pkt_len(payload);
#ifdef MODULE_GNRC_NETERR
    unsigned status_subs = 0;
#endif
#if IS_USED(MODULE_GNRC_TX_SYNC)
    gnrc_tx_sync_t tx_sync;
#endif

    if (local->family != remote->family) {
        gnrc_pktbuf_release(payload);
        return -EAFNOSUPPORT;
    }

#if IS_USED(MODULE_GNRC_TX_SYNC)
    if (gnrc_tx_sync_append(payload, &tx_sync)) {
        gnrc_pktbuf_release(payload);
        return -ENOMEM;
    }
#endif

    switch (local->family) {
#ifdef SOCK_HAS_IPV6
        case AF_INET6: {
            ipv6_hdr_t *hdr;
            pkt = gnrc_ipv6_hdr_build(payload, (ipv6_addr_t *)&local->addr.ipv6,
                                      (ipv6_addr_t *)&remote->addr.ipv6);
            if (pkt == NULL) {
                return -ENOMEM;
            }
            if (payload->type == GNRC_NETTYPE_UNDEF) {
                payload->type = GNRC_NETTYPE_IPV6;
                type = GNRC_NETTYPE_IPV6;
            }
            else {
                type = payload->type;
            }
            hdr = pkt->data;
            hdr->nh = nh;
            break;
        }
#endif
        default:
            (void)nh;
            gnrc_pktbuf_release(payload);
            return -EAFNOSUPPORT;
    }
    if (local->netif != SOCK_ADDR_ANY_NETIF) {
        /* TODO: use API in #5511 */
        iface = (kernel_pid_t)local->netif;
    }
    else if (remote->netif != SOCK_ADDR_ANY_NETIF) {
        /* TODO: use API in #5511 */
        iface = (kernel_pid_t)remote->netif;
    }
    if (iface != KERNEL_PID_UNDEF) {
        gnrc_pktsnip_t *netif = gnrc_netif_hdr_build(NULL, 0, NULL, 0);
        gnrc_netif_hdr_t *netif_hdr;

        if (netif == NULL) {
            gnrc_pktbuf_release(pkt);
            return -ENOMEM;
        }
        netif_hdr = netif->data;
        netif_hdr->if_pid = iface;
        pkt = gnrc_pkt_prepend(pkt, netif);
    }
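    /* subscribe to gnrc_neterr status reports for every snip of the outgoing
     * packet, so errors from lower layers can be reported back */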
#ifdef MODULE_GNRC_NETERR
    /* cppcheck-suppress uninitvar
     * (reason: pkt is initialized in AF_INET6 case above, otherwise function
     * will return early) */
    for (gnrc_pktsnip_t *ptr = pkt; ptr != NULL; ptr = ptr->next) {
        /* no error should occur since pkt was created here */
        gnrc_neterr_reg(ptr);
        status_subs++;
    }
#endif
    if (!gnrc_netapi_dispatch_send(type, GNRC_NETREG_DEMUX_CTX_ALL, pkt)) {
        /* this should not happen, but just in case */
        gnrc_pktbuf_release(pkt);
        return -EBADMSG;
    }

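    /* block until the network stack has processed (or released) the outgoing
     * packet */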
#if IS_USED(MODULE_GNRC_TX_SYNC)
    gnrc_tx_sync(&tx_sync);
#endif

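    /* collect one gnrc_neterr status report per subscribed snip; bail out
     * with the first status that deviates from the previous ones */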
#ifdef MODULE_GNRC_NETERR
    uint32_t last_status = GNRC_NETERR_SUCCESS;

    while (status_subs--) {
        msg_t err_report;
        err_report.type = 0;

        while (err_report.type != GNRC_NETERR_MSG_TYPE) {
            msg_try_receive(&err_report);
            if (err_report.type != GNRC_NETERR_MSG_TYPE) {
                msg_try_send(&err_report, thread_getpid());
            }
        }
        if (err_report.content.value != last_status) {
            int res = (int)(-err_report.content.value);

            for (unsigned i = 0; i < status_subs; i++) {
                err_report.type = 0;
                /* remove remaining status reports from queue */
                while (err_report.type != GNRC_NETERR_MSG_TYPE) {
                    msg_try_receive(&err_report);
                    if (err_report.type != GNRC_NETERR_MSG_TYPE) {
                        msg_try_send(&err_report, thread_getpid());
                    }
                }
            }
            return res;
        }
        last_status = err_report.content.value;
    }
#endif
    return payload_len;
}

/** @} */