
sys/net/gnrc/netif: provide support for confirm_send()

This adds support for netdevs implementing the new API that provides
`netdev_driver_t::confirm_send()`. This allows implementing netdevs
in an event-based, non-blocking fashion, making the life of driver
developers a bit easier. In addition, `gnrc_tx_sync` will now throttle
users of `sock_udp_send()` so that they can only send datagrams as
fast as the network stack and hardware are able to send them out.
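
To illustrate the intended driver side, a minimal sketch of a netdev
implementing the new API could look roughly like this (all `foo_*` names are
hypothetical, and the exact return value conventions of `send()` and
`confirm_send()` are assumptions for illustration, not taken from this
commit):

    #include <errno.h>
    #include <stdbool.h>

    #include "iolist.h"
    #include "net/netdev.h"

    typedef struct {
        netdev_t netdev;    /* extends the generic netdev */
        int tx_result;      /* outcome of the last transmission */
        bool tx_busy;       /* a frame is currently being transmitted */
    } foo_dev_t;

    static int foo_send(netdev_t *dev, const iolist_t *iolist)
    {
        foo_dev_t *foo = (foo_dev_t *)dev;
        if (foo->tx_busy) {
            return -EBUSY;  /* previous TX not confirmed yet */
        }
        foo->tx_busy = true;
        (void)iolist;       /* ... hand the frame to the hardware ... */
        return 0;           /* non-blocking: TX completes asynchronously */
    }

    /* radio IRQ: the transmission has finished */
    static void foo_tx_irq(foo_dev_t *foo, int result)
    {
        foo->tx_result = result;
        /* defer further processing to the gnrc_netif thread */
        netdev_trigger_event_isr(&foo->netdev);
    }

    /* called by gnrc_netif in thread context after the ISR event */
    static void foo_isr(netdev_t *dev)
    {
        dev->event_callback(dev, NETDEV_EVENT_TX_COMPLETE);
    }

    static int foo_confirm_send(netdev_t *dev, void *info)
    {
        foo_dev_t *foo = (foo_dev_t *)dev;
        (void)info;         /* reserved, e.g. for TX statistics */
        foo->tx_busy = false;
        return foo->tx_result;
    }

    static const netdev_driver_t foo_driver = {
        .send         = foo_send,
        .confirm_send = foo_confirm_send,  /* only with module netdev_new_api */
        .isr          = foo_isr,
        /* .init, .recv, .get and .set omitted for brevity */
    };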

Finally, this lays the groundwork to fetch TX statistics (such as
TX timestamps, reception of layer 2 ACKs/NACKs, etc.) from the network
devices.
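
The `info` out parameter of `confirm_send()` (passed as `NULL` by gnrc_netif
in this commit) is the natural hook for such data. Purely as a hypothetical
sketch of where this could go later, with a driver-defined report structure
that does not exist anywhere yet:

    #include <stdbool.h>
    #include <stdint.h>

    #include "net/netdev.h"

    /* hypothetical driver-defined TX report; not part of this commit */
    typedef struct {
        uint32_t tx_timestamp;    /* when the frame left the transceiver */
        uint8_t retransmissions;  /* retries the hardware needed */
        bool ack_received;        /* a layer 2 ACK came back */
    } foo_tx_info_t;

    /* fetch TX outcome plus statistics once TX_COMPLETE was signalled */
    static int foo_fetch_tx_report(netdev_t *dev, foo_tx_info_t *report)
    {
        return dev->driver->confirm_send(dev, report);
    }
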
Marian Buschsieweke 2022-08-09 15:07:23 +02:00
parent 777e148bc2
commit 1cb6f9e11a
3 changed files with 156 additions and 54 deletions


@@ -118,7 +118,7 @@ typedef struct {
const gnrc_netif_ops_t *ops; /**< Operations of the network interface */
netdev_t *dev; /**< Network device of the network interface */
rmutex_t mutex; /**< Mutex of the interface */
#ifdef MODULE_NETSTATS_L2
#if IS_USED(MODULE_NETSTATS_L2) || defined(DOXYGEN)
netstats_t stats; /**< transceiver's statistics */
#endif
#if IS_USED(MODULE_GNRC_NETIF_LORAWAN) || defined(DOXYGEN)
@@ -147,6 +147,22 @@ typedef struct {
* @brief ISR event for the network device
*/
event_t event_isr;
#if IS_USED(MODULE_NETDEV_NEW_API) || defined(DOXYGEN)
/**
* @brief TX done event for the network device
*
* @details Only provided with module `netdev_new_api`
*/
event_t event_tx_done;
/**
* @brief Outgoing frame that is currently transmitted
*
* @details Only provided with module `netdev_new_api`
*
* This needs to be freed by gnrc_netif once TX is done
*/
gnrc_pktsnip_t *tx_pkt;
#endif
#if (GNRC_NETIF_L2ADDR_MAXLEN > 0) || DOXYGEN
/**
* @brief The link-layer address currently used as the source address


@@ -90,6 +90,12 @@ enum {
*/
#define GNRC_NETIF_FLAGS_IPV6_ADV_O_FLAG (0x00000080U)
/**
* @brief Used when module gnrc_netif_pktq is used to indicate that
* @ref gnrc_netif_t::tx_pkt is from the packet queue.
*/
#define GNRC_NETIF_FLAGS_TX_FROM_PKTQUEUE (0x00000100U)
/**
* @brief This interface uses 6Lo header compression
*


@@ -1728,53 +1728,14 @@ static void _send_queued_pkt(gnrc_netif_t *netif)
#endif /* IS_USED(MODULE_GNRC_NETIF_PKTQ) */
}
static void _send(gnrc_netif_t *netif, gnrc_pktsnip_t *pkt, bool push_back)
static void _tx_done(gnrc_netif_t *netif, gnrc_pktsnip_t *pkt,
gnrc_pktsnip_t *tx_sync, int res, bool push_back)
{
(void)push_back; /* only used with IS_USED(MODULE_GNRC_NETIF_PKTQ) */
int res;
#if IS_USED(MODULE_GNRC_NETIF_PKTQ)
/* send queued packets first to keep order */
if (!push_back && !gnrc_netif_pktq_empty(netif)) {
int put_res;
uint32_t err = (res < 0) ? -res : GNRC_NETERR_SUCCESS;
put_res = gnrc_netif_pktq_put(netif, pkt);
if (put_res == 0) {
DEBUG("gnrc_netif: (re-)queued pkt %p\n", (void *)pkt);
_send_queued_pkt(netif);
return;
}
else {
LOG_WARNING("gnrc_netif: can't queue packet for sending\n");
/* try to send anyway */
}
}
/* hold the packet in case the device was busy, so that not *all* the link
* layer implementations need rewriting when `gnrc_netif_pktq` is included */
gnrc_pktbuf_hold(pkt, 1);
#endif /* IS_USED(MODULE_GNRC_NETIF_PKTQ) */
/* Record send in neighbor statistics if destination is unicast */
if (IS_USED(MODULE_NETSTATS_NEIGHBOR)) {
gnrc_netif_hdr_t *netif_hdr = pkt->data;
if (netif_hdr->flags &
(GNRC_NETIF_HDR_FLAGS_BROADCAST | GNRC_NETIF_HDR_FLAGS_MULTICAST)) {
DEBUG("l2 stats: Destination is multicast or unicast, NULL recorded\n");
netstats_nb_record(&netif->netif, NULL, 0);
} else {
DEBUG("l2 stats: recording transmission\n");
netstats_nb_record(&netif->netif,
gnrc_netif_hdr_get_dst_addr(netif_hdr),
netif_hdr->dst_l2addr_len);
}
}
/* Split off the TX sync snip */
gnrc_pktsnip_t *tx_sync = IS_USED(MODULE_GNRC_TX_SYNC)
? gnrc_tx_sync_split(pkt) : NULL;
res = netif->ops->send(netif, pkt);
if (tx_sync != NULL) {
uint32_t err = (res < 0) ? -res : GNRC_NETERR_SUCCESS;
gnrc_pktbuf_release_error(tx_sync, err);
}
@@ -1826,6 +1787,114 @@ static void _send(gnrc_netif_t *netif, gnrc_pktsnip_t *pkt, bool push_back)
gnrc_pktbuf_release(pkt);
}
#endif /* IS_USED(MODULE_GNRC_NETIF_PKTQ) */
if (gnrc_netif_netdev_new_api(netif)) {
/* with new netdev (with confirm_send), the netif remains owner of the
* pkt and is in charge of releasing it once TX is completed */
gnrc_pktbuf_release_error(pkt, err);
}
}
#if IS_USED(MODULE_NETDEV_NEW_API)
/**
* @brief Call the confirm_send handler from an event
*
* @param[in] evp pointer to the event
*/
static void _event_handler_tx_done(event_t *evp)
{
gnrc_netif_t *netif = container_of(evp, gnrc_netif_t, event_tx_done);
int res = netif->dev->driver->confirm_send(netif->dev, NULL);
/* after confirm_send() is called, the device is ready to send the next
* frame. So clear netif->tx_pkt to signal readiness */
gnrc_pktsnip_t *pkt = netif->tx_pkt;
netif->tx_pkt = NULL;
bool push_back = netif->flags & GNRC_NETIF_FLAGS_TX_FROM_PKTQUEUE;
netif->flags &= ~GNRC_NETIF_FLAGS_TX_FROM_PKTQUEUE;
_tx_done(netif, pkt, NULL, res, push_back);
}
#endif
static void _send(gnrc_netif_t *netif, gnrc_pktsnip_t *pkt, bool push_back)
{
#if IS_USED(MODULE_NETDEV_NEW_API)
if (netif->tx_pkt != NULL) {
/* The upper layer is handing out frames faster than the hardware can
* transmit. Not only does it make no sense to bother the driver while it
* is still busy, overwriting netif->tx_pkt would also leak the memory of
* the frame currently being sent.
*
* Also note: If gnrc_netif_pktq is used, it will (try to) queue the
* outgoing frame and send it later on.
*/
_tx_done(netif, pkt, NULL, -EBUSY, push_back);
return;
}
#endif
#if IS_USED(MODULE_GNRC_NETIF_PKTQ)
/* send queued packets first to keep order */
if (!push_back && !gnrc_netif_pktq_empty(netif)) {
int put_res;
/* try to send pkt from queue first. At least with the legacy blocking
* API, this may make room in the pktqueue */
_send_queued_pkt(netif);
put_res = gnrc_netif_pktq_put(netif, pkt);
if (put_res == 0) {
DEBUG("gnrc_netif: (re-)queued pkt %p\n", (void *)pkt);
return;
}
else {
LOG_WARNING("gnrc_netif: can't queue packet for sending\n");
/* try to send anyway */
}
}
/* hold the packet in case the device was busy, so that not *all* the link
* layer implementations need rewriting when `gnrc_netif_pktq` is included */
gnrc_pktbuf_hold(pkt, 1);
#endif /* IS_USED(MODULE_GNRC_NETIF_PKTQ) */
/* Record send in neighbor statistics if destination is unicast */
if (IS_USED(MODULE_NETSTATS_NEIGHBOR)) {
gnrc_netif_hdr_t *netif_hdr = pkt->data;
if (netif_hdr->flags &
(GNRC_NETIF_HDR_FLAGS_BROADCAST | GNRC_NETIF_HDR_FLAGS_MULTICAST)) {
DEBUG("l2 stats: Destination is multicast or unicast, NULL recorded\n");
netstats_nb_record(&netif->netif, NULL, 0);
} else {
DEBUG("l2 stats: recording transmission\n");
netstats_nb_record(&netif->netif,
gnrc_netif_hdr_get_dst_addr(netif_hdr),
netif_hdr->dst_l2addr_len);
}
}
/* Split off the TX sync snip */
gnrc_pktsnip_t *tx_sync = IS_USED(MODULE_GNRC_TX_SYNC)
? gnrc_tx_sync_split(pkt) : NULL;
int res = netif->ops->send(netif, pkt);
/* For legacy netdevs (no confirm_send) TX is blocking, thus it is always
* completed. For new netdevs (with confirm_send), TX is async. It is only
* done if TX failed right away (res < 0).
*/
if (gnrc_netif_netdev_legacy_api(netif) || (res < 0)) {
_tx_done(netif, pkt, tx_sync, res, push_back);
}
#if IS_USED(MODULE_NETDEV_NEW_API)
else {
/* new API *and* send() was a success --> block netif and memorize
* frame to free memory later */
netif->tx_pkt = pkt;
}
gnrc_pkt_append(pkt, tx_sync);
if (IS_USED(MODULE_GNRC_NETIF_PKTQ) && push_back) {
netif->flags |= GNRC_NETIF_FLAGS_TX_FROM_PKTQUEUE;
}
#endif
}
static void *_gnrc_netif_thread(void *args)
@@ -1842,7 +1911,10 @@ static void *_gnrc_netif_thread(void *args)
gnrc_netif_acquire(netif);
netif->pid = thread_getpid();
netif->event_isr.handler = _event_handler_isr,
netif->event_isr.handler = _event_handler_isr;
#if IS_USED(MODULE_NETDEV_NEW_API)
netif->event_tx_done.handler = _event_handler_tx_done;
#endif
/* set up the event queue */
event_queue_init(&netif->evq);
@@ -1954,11 +2026,17 @@ static void _pass_on_packet(gnrc_pktsnip_t *pkt)
static void _event_cb(netdev_t *dev, netdev_event_t event)
{
gnrc_netif_t *netif = (gnrc_netif_t *) dev->context;
gnrc_netif_t *netif = (gnrc_netif_t *)dev->context;
if (event == NETDEV_EVENT_ISR) {
event_post(&netif->evq, &netif->event_isr);
}
#if IS_USED(MODULE_NETDEV_NEW_API)
else if (gnrc_netif_netdev_new_api(netif)
&& (event == NETDEV_EVENT_TX_COMPLETE)) {
event_post(&netif->evq, &netif->event_tx_done);
}
#endif
else {
DEBUG("gnrc_netif: event triggered -> %i\n", event);
gnrc_pktsnip_t *pkt = NULL;
@@ -1974,7 +2052,8 @@ static void _event_cb(netdev_t *dev, netdev_event_t event)
_pass_on_packet(pkt);
}
break;
#if IS_USED(MODULE_NETSTATS_L2) || IS_USED(MODULE_GNRC_NETIF_PKTQ)
#if IS_USED(MODULE_NETDEV_LEGACY_API)
# if IS_USED(MODULE_NETSTATS_L2) || IS_USED(MODULE_GNRC_NETIF_PKTQ)
case NETDEV_EVENT_TX_COMPLETE:
case NETDEV_EVENT_TX_COMPLETE_DATA_PENDING:
/* send packet previously queued within netif due to the lower
@@ -1982,20 +2061,20 @@ static void _event_cb(netdev_t *dev, netdev_event_t event)
* Further packets will be sent on later TX_COMPLETE or
* TX_MEDIUM_BUSY */
_send_queued_pkt(netif);
#if IS_USED(MODULE_NETSTATS_L2)
# if IS_USED(MODULE_NETSTATS_L2)
/* we are the only ones supposed to touch this variable,
* so no acquire necessary */
netif->stats.tx_success++;
#endif /* IS_USED(MODULE_NETSTATS_L2) */
# endif /* IS_USED(MODULE_NETSTATS_L2) */
if (IS_USED(MODULE_NETSTATS_NEIGHBOR)) {
int8_t retries = -1;
dev->driver->get(dev, NETOPT_TX_RETRIES_NEEDED, &retries, sizeof(retries));
netstats_nb_update_tx(&netif->netif, NETSTATS_NB_SUCCESS, retries + 1);
}
break;
#endif /* IS_USED(MODULE_NETSTATS_L2) || IS_USED(MODULE_GNRC_NETIF_PKTQ) */
#if IS_USED(MODULE_NETSTATS_L2) || IS_USED(MODULE_GNRC_NETIF_PKTQ) || \
IS_USED(MODULE_NETSTATS_NEIGHBOR)
# endif /* IS_USED(MODULE_NETSTATS_L2) || IS_USED(MODULE_GNRC_NETIF_PKTQ) */
# if IS_USED(MODULE_NETSTATS_L2) || IS_USED(MODULE_GNRC_NETIF_PKTQ) || \
IS_USED(MODULE_NETSTATS_NEIGHBOR)
case NETDEV_EVENT_TX_MEDIUM_BUSY:
case NETDEV_EVENT_TX_NOACK:
/* update neighbor statistics */
@@ -2015,13 +2094,14 @@ static void _event_cb(netdev_t *dev, netdev_event_t event)
* Further packets will be sent on later TX_COMPLETE or
* TX_MEDIUM_BUSY */
_send_queued_pkt(netif);
#if IS_USED(MODULE_NETSTATS_L2)
# if IS_USED(MODULE_NETSTATS_L2)
/* we are the only ones supposed to touch this variable,
* so no acquire necessary */
netif->stats.tx_failed++;
#endif /* IS_USED(MODULE_NETSTATS_L2) */
# endif /* IS_USED(MODULE_NETSTATS_L2) */
break;
#endif /* IS_USED(MODULE_NETSTATS_L2) || IS_USED(MODULE_GNRC_NETIF_PKTQ) */
# endif /* IS_USED(MODULE_NETSTATS_L2) || IS_USED(MODULE_GNRC_NETIF_PKTQ) */
#endif /* IS_USED(MODULE_NETDEV_LEGACY_API) */
default:
DEBUG("gnrc_netif: warning: unhandled event %u.\n", event);
}