1
0
mirror of https://github.com/RIOT-OS/RIOT.git synced 2024-12-29 04:50:03 +01:00

Merge pull request #14594 from maribu/stm32-eth-cleanup

cpu/stm32: Clean up / fix periph_eth
This commit is contained in:
benpicco 2020-08-17 21:16:27 +02:00 committed by GitHub
commit 22d3bf7c51
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 408 additions and 360 deletions

View File

@ -274,12 +274,6 @@ static const eth_conf_t eth_config = {
}
};
#define ETH_RX_BUFFER_COUNT (4)
#define ETH_TX_BUFFER_COUNT (4)
#define ETH_RX_BUFFER_SIZE (1524)
#define ETH_TX_BUFFER_SIZE (1524)
#define ETH_DMA_ISR isr_dma2_stream0
/** @} */

View File

@ -178,12 +178,6 @@ static const eth_conf_t eth_config = {
}
};
#define ETH_RX_BUFFER_COUNT (4)
#define ETH_TX_BUFFER_COUNT (4)
#define ETH_RX_BUFFER_SIZE (1524)
#define ETH_TX_BUFFER_SIZE (1524)
#define ETH_DMA_ISR isr_dma2_stream0
/** @} */

View File

@ -16,6 +16,13 @@ ifneq (,$(filter periph_uart_nonblocking,$(USEMODULE)))
USEMODULE += tsrb
endif
ifneq (,$(filter stm32_eth,$(USEMODULE)))
FEATURES_REQUIRED += periph_eth
USEMODULE += netdev_eth
USEMODULE += iolist
USEMODULE += luid
endif
ifneq (STM32F030x4, $(CPU_LINE))
# Retrieve vendor CMSIS headers from ST cmsis_device_<family> repositories on
# GitHub, https://github.com/STMicroelectronics

View File

@ -1022,6 +1022,103 @@ typedef struct {
supported by all modes. */
} eth_conf_t;
/**
 * @brief Layout of enhanced RX/TX DMA descriptor
 *
 * @note Don't confuse this with the normal RX/TX descriptor format.
 * @warning The content of the status and control bits is different for RX and
 * TX DMA descriptors
 */
typedef struct eth_dma_desc {
volatile uint32_t status; /**< Mostly status bits, some control bits */
volatile uint32_t control; /**< Control bits */
char * volatile buffer_addr; /**< RX/TX buffer */
struct eth_dma_desc * volatile desc_next; /**< Address of next DMA descriptor */
volatile uint32_t reserved1_ext; /**< RX: Extended status, TX: reserved */
volatile uint32_t reserved2; /**< Reserved for future use */
/**
 * @brief Sub-second part of PTP timestamp of transmitted / received frame
 *
 * For TX: If PTP timestamping is enabled and the TTSE bit in the
 * transmit descriptor word 0 (struct eth_dma_desc::status) is set, the
 * MAC will store the PTP timestamp of when the Start of Frame Delimiter
 * was sent. The TTSS bit is set by the hardware if the timestamp was
 * correctly stored.
 *
 * For RX: If PTP timestamping is enabled, the timestamp of all received
 * frames is captured.
 */
volatile uint32_t ts_low;
volatile uint32_t ts_high; /**< Seconds part of PTP timestamp */
} edma_desc_t;
/**
* @name Flags in the status word of the Ethernet enhanced RX DMA descriptor
* @{
*/
#define RX_DESC_STAT_LS (BIT8) /**< If set, descriptor is the last of a frame */
#define RX_DESC_STAT_FS (BIT9) /**< If set, descriptor is the first of a frame */
/**
 * @brief Frame length
 *
 * The length of the frame in host memory order including CRC. Only valid if
 * @ref RX_DESC_STAT_LS is set and @ref RX_DESC_STAT_DE is not set.
 */
#define RX_DESC_STAT_FL (0x3FFF0000) /* bits 16-29 */
#define RX_DESC_STAT_DE (BIT14) /**< If set, a frame too large to fit buffers given by descriptors was received */
/* Fixed: Error Summary is bit 15 of RDES0 (see RM0090); bit 14 is the
 * Descriptor Error flag defined above, so ES must not alias DE */
#define RX_DESC_STAT_ES (BIT15) /**< If set, an error occurred during RX */
#define RX_DESC_STAT_OWN (BIT31) /**< If set, descriptor is owned by DMA, otherwise by CPU */
/** @} */
/**
* @name Flags in the control word of the Ethernet enhanced RX DMA descriptor
* @{
*/
/**
 * @brief Indicates if RDES3 points to the next DMA descriptor (1), or to a second buffer (0)
 *
 * If the bit is set, RDES3 (@ref edma_desc_t::desc_next) will point to the
 * next DMA descriptor rather than to a second frame-segment buffer. This is
 * always set by the driver, i.e. the RX descriptors form a chained list.
 */
#define RX_DESC_CTRL_RCH (BIT14)
/** @} */
/**
* @name Flags in the status word of the Ethernet enhanced TX DMA descriptor
* @{
*/
#define TX_DESC_STAT_UF (BIT1) /**< If set, an underflow occurred while sending */
#define TX_DESC_STAT_EC (BIT8) /**< If set, TX was aborted due to excessive collisions (half-duplex only) */
#define TX_DESC_STAT_NC (BIT10) /**< If set, no carrier was detected (TX aborted) */
#define TX_DESC_STAT_ES (BIT15) /**< If set, one or more errors occurred */
#define TX_DESC_STAT_TTSS (BIT17) /**< If set, the descriptor contains a valid PTP timestamp */
/**
 * @brief Indicates if TDES3 points to the next DMA descriptor (1), or to a second buffer (0)
 *
 * If the bit is set, TDES3 (@ref edma_desc_t::desc_next) will point to the
 * next DMA descriptor rather than to a second frame-segment buffer. This is
 * always set by the driver
 */
#define TX_DESC_STAT_TCH (BIT20)
#define TX_DESC_STAT_TER (BIT21) /**< If set, DMA will return to first descriptor in ring afterwards */
/**
 * @brief Checksum insertion control
 *
 * | Value | Meaning |
 * |:------ |:----------------------------------------------------------------------------- |
 * | `0b00` | Checksum insertion disabled |
 * | `0b01` | Calculate and insert checksum in IPv4 header |
 * | `0b10` | Calculate and insert IPv4 checksum, insert pre-calculated payload checksum |
 * | `0b11` | Calculate and insert both IPv4 and payload checksum |
 */
#define TX_DESC_STAT_CIC (BIT22 | BIT23)
#define TX_DESC_STAT_TTSE (BIT25) /**< If set, a PTP timestamp is added to the descriptor after TX completed */
#define TX_DESC_STAT_FS (BIT28) /**< If set, buffer contains first segment of frame to transmit */
#define TX_DESC_STAT_LS (BIT29) /**< If set, buffer contains last segment of frame to transmit */
#define TX_DESC_STAT_IC (BIT30) /**< If set, trigger IRQ on completion */
#define TX_DESC_STAT_OWN (BIT31) /**< If set, descriptor is owned by DMA, otherwise by CPU */
/** @} */
/**
* @name Ethernet PHY Common Registers
* @{
@ -1136,29 +1233,6 @@ typedef struct {
#define ANER_LP_AN_ABLE (0x0001)
/** @} */
#ifdef MODULE_STM32_ETH
/**
* @brief Read a PHY register
*
* @param[in] addr address of the PHY to read
* @param[in] reg register to be read
*
* @return value in the register, or <=0 on error
*/
int32_t stm32_eth_phy_read(uint16_t addr, uint8_t reg);
/**
* @brief Write a PHY register
*
* @param[in] addr address of the PHY to write
* @param[in] reg register to be written
* @param[in] value value to write into the register
*
* @return 0 in case of success or <=0 on error
*/
int32_t stm32_eth_phy_write(uint16_t addr, uint8_t reg, uint16_t value);
#endif /* MODULE_STM32_ETH */
#ifdef __cplusplus
}
#endif

View File

@ -1,5 +1,6 @@
/*
* Copyright (C) 2016 TriaGnoSys GmbH
* 2020 Otto-von-Guericke-Universität Magdeburg
*
* This file is subject to the terms and conditions of the GNU Lesser General
* Public License v2.1. See the file LICENSE in the top level directory for more
@ -14,23 +15,24 @@
* @brief Low-level ETH driver implementation
*
* @author Víctor Ariño <victor.arino@triagnosys.com>
* @author Marian Buschsieweke <marian.buschsieweke@ovgu.de>
*
* @}
*/
#include <errno.h>
#include <string.h>
#include "mutex.h"
#include "luid.h"
#include "bitarithm.h"
#include "iolist.h"
#include "luid.h"
#include "mutex.h"
#include "net/ethernet.h"
#include "net/netdev/eth.h"
#include "periph/gpio.h"
#define ENABLE_DEBUG (0)
#include "debug.h"
/* Set the value of the divider with the clock configured */
#if !defined(CLOCK_CORECLOCK) || CLOCK_CORECLOCK < (20000000U)
#error This peripheral requires a CORECLOCK of at least 20MHz
@ -46,40 +48,43 @@
#define CLOCK_RANGE ETH_MACMIIAR_CR_Div102
#endif /* CLOCK_CORECLOCK < (20000000U) */
/* Internal flags for the DMA descriptors */
#define DESC_OWN (0x80000000)
#define RX_DESC_FL (0x3FFF0000)
#define RX_DESC_FS (0x00000200)
#define RX_DESC_LS (0x00000100)
#define RX_DESC_RCH (0x00004000)
#define TX_DESC_TCH (0x00100000)
#define TX_DESC_IC (0x40000000)
#define TX_DESC_CIC (0x00C00000)
#define TX_DESC_LS (0x20000000)
#define TX_DESC_FS (0x10000000)
/* Default DMA buffer setup */
#ifndef ETH_RX_DESCRIPTOR_COUNT
#define ETH_RX_DESCRIPTOR_COUNT (6U)
#endif
#ifndef ETH_TX_DESCRIPTOR_COUNT
#define ETH_TX_DESCRIPTOR_COUNT (8U)
#endif
#ifndef ETH_RX_BUFFER_SIZE
#define ETH_RX_BUFFER_SIZE (256U)
#endif
struct eth_dma_desc {
uint32_t status;
uint32_t control;
char *buffer_addr;
struct eth_dma_desc *desc_next;
uint32_t reserved1_ext;
uint32_t reserved2;
uint32_t ts_low;
uint32_t ts_high;
} __attribute__((packed));
#if (ETH_RX_BUFFER_SIZE % 16) != 0
/* For compatibility with 128bit memory interfaces, the buffer size needs to
* be a multiple of 16 Byte. For 64 bit memory interfaces need the size to be
* a multiple of 8 Byte, for 32 bit a multiple of 4 byte is sufficient. */
#warning "ETH_RX_BUFFER_SIZE is not a multiple of 16. (See comment above.)"
#endif
typedef struct eth_dma_desc edma_desc_t;
#if ETH_RX_DESCRIPTOR_COUNT * ETH_RX_BUFFER_SIZE < 1524U
#warning "Total RX buffers lower than MTU, you won't receive huge frames!"
#endif
#define MIN(a, b) (((a) <= (b)) ? (a) : (b))
/* Synchronization between IRQ and thread context */
mutex_t stm32_eth_tx_completed = MUTEX_INIT_LOCKED;
/* Descriptors */
static edma_desc_t rx_desc[ETH_RX_BUFFER_COUNT];
static edma_desc_t tx_desc[ETH_TX_BUFFER_COUNT];
static edma_desc_t rx_desc[ETH_RX_DESCRIPTOR_COUNT];
static edma_desc_t tx_desc[ETH_TX_DESCRIPTOR_COUNT];
static edma_desc_t *rx_curr;
static edma_desc_t *tx_curr;
/* Buffers */
static char rx_buffer[ETH_RX_BUFFER_COUNT][ETH_RX_BUFFER_SIZE];
static char tx_buffer[ETH_TX_BUFFER_COUNT][ETH_TX_BUFFER_SIZE];
/* RX Buffers */
static char rx_buffer[ETH_RX_DESCRIPTOR_COUNT][ETH_RX_BUFFER_SIZE];
/* Netdev used in RIOT's API to upper layer */
netdev_t *_netdev;
/** Read or write a phy register, to write the register ETH_MACMIIAR_MW is to
* be passed as the higher nibble of the value */
@ -88,7 +93,7 @@ static unsigned _rw_phy(unsigned addr, unsigned reg, unsigned value)
unsigned tmp;
while (ETH->MACMIIAR & ETH_MACMIIAR_MB) {}
DEBUG("stm32_eth: rw_phy %x (%x): %x\n", addr, reg, value);
DEBUG("[stm32_eth] rw_phy %x (%x): %x\n", addr, reg, value);
tmp = (ETH->MACMIIAR & ETH_MACMIIAR_CR) | ETH_MACMIIAR_MB;
tmp |= (((addr & 0x1f) << 11) | ((reg & 0x1f) << 6));
@ -98,22 +103,21 @@ static unsigned _rw_phy(unsigned addr, unsigned reg, unsigned value)
ETH->MACMIIAR = tmp;
while (ETH->MACMIIAR & ETH_MACMIIAR_MB) {}
DEBUG("stm32_eth: %lx\n", ETH->MACMIIDR);
DEBUG("[stm32_eth] %lx\n", ETH->MACMIIDR);
return (ETH->MACMIIDR & 0x0000ffff);
}
int32_t stm32_eth_phy_read(uint16_t addr, uint8_t reg)
static inline int32_t _phy_read(uint16_t addr, uint8_t reg)
{
return _rw_phy(addr, reg, 0);
}
int32_t stm32_eth_phy_write(uint16_t addr, uint8_t reg, uint16_t value)
static inline void _phy_write(uint16_t addr, uint8_t reg, uint16_t value)
{
_rw_phy(addr, reg, (value & 0xffff) | (ETH_MACMIIAR_MW << 16));
return 0;
}
void stm32_eth_get_mac(char *out)
static void stm32_eth_get_mac(char *out)
{
unsigned t;
@ -130,7 +134,7 @@ void stm32_eth_get_mac(char *out)
/** Set the mac address. The peripheral supports up to 4 MACs but only one is
* implemented */
void stm32_eth_set_mac(const char *mac)
static void stm32_eth_set_mac(const char *mac)
{
ETH->MACA0HR &= 0xffff0000;
ETH->MACA0HR |= ((mac[5] << 8) | mac[4]);
@ -140,36 +144,31 @@ void stm32_eth_set_mac(const char *mac)
/** Initialization of the DMA descriptors to be used */
static void _init_buffer(void)
{
int i;
for (i = 0; i < ETH_RX_BUFFER_COUNT; i++) {
rx_desc[i].status = DESC_OWN;
rx_desc[i].control = RX_DESC_RCH | (ETH_RX_BUFFER_SIZE & 0x0fff);
size_t i;
for (i = 0; i < ETH_RX_DESCRIPTOR_COUNT; i++) {
rx_desc[i].status = RX_DESC_STAT_OWN;
rx_desc[i].control = RX_DESC_CTRL_RCH | (ETH_RX_BUFFER_SIZE & 0x0fff);
rx_desc[i].buffer_addr = &rx_buffer[i][0];
if((i+1) < ETH_RX_BUFFER_COUNT) {
if ((i + 1) < ETH_RX_DESCRIPTOR_COUNT) {
rx_desc[i].desc_next = &rx_desc[i + 1];
}
}
rx_desc[i - 1].desc_next = &rx_desc[0];
for (i = 0; i < ETH_TX_BUFFER_COUNT; i++) {
tx_desc[i].status = TX_DESC_TCH | TX_DESC_CIC;
tx_desc[i].buffer_addr = &tx_buffer[i][0];
if ((i + 1) < ETH_RX_BUFFER_COUNT) {
tx_desc[i].desc_next = &tx_desc[i + 1];
}
for (i = 0; i < ETH_TX_DESCRIPTOR_COUNT - 1; i++) {
tx_desc[i].desc_next = &tx_desc[i + 1];
}
tx_desc[i - 1].desc_next = &tx_desc[0];
tx_desc[ETH_RX_DESCRIPTOR_COUNT - 1].desc_next = &tx_desc[0];
rx_curr = &rx_desc[0];
tx_curr = &tx_desc[0];
ETH->DMARDLAR = (uint32_t)rx_curr;
ETH->DMATDLAR = (uint32_t)tx_curr;
ETH->DMARDLAR = (uintptr_t)rx_curr;
ETH->DMATDLAR = (uintptr_t)&tx_desc[0];
}
int stm32_eth_init(void)
static int stm32_eth_init(netdev_t *netdev)
{
(void)netdev;
/* enable APB2 clock */
RCC->APB2ENR |= RCC_APB2ENR_SYSCFGEN;
@ -202,7 +201,7 @@ int stm32_eth_init(void)
/* configure the PHY (standard for all PHY's) */
/* if there's no PHY, this has no effect */
stm32_eth_phy_write(eth_config.phy_addr, PHY_BMCR, BMCR_RESET);
_phy_write(eth_config.phy_addr, PHY_BMCR, BMCR_RESET);
/* speed from conf */
ETH->MACCR |= (ETH_MACCR_ROD | ETH_MACCR_IPCO | ETH_MACCR_APCS |
@ -235,126 +234,281 @@ int stm32_eth_init(void)
NVIC_EnableIRQ(ETH_IRQn);
ETH->DMAIER |= ETH_DMAIER_NISE | ETH_DMAIER_TIE | ETH_DMAIER_RIE;
/* enable */
ETH->MACCR |= ETH_MACCR_TE;
/* enable transmitter and receiver */
ETH->MACCR |= ETH_MACCR_TE | ETH_MACCR_RE;
/* flush transmit FIFO */
ETH->DMAOMR |= ETH_DMAOMR_FTF;
ETH->MACCR |= ETH_MACCR_RE;
/* wait for FIFO flushing to complete */
while (ETH->DMAOMR & ETH_DMAOMR_FTF) { }
ETH->DMAOMR |= ETH_DMAOMR_ST;
ETH->DMAOMR |= ETH_DMAOMR_SR;
/* enable DMA TX and RX */
ETH->DMAOMR |= ETH_DMAOMR_ST | ETH_DMAOMR_SR;
/* configure speed, do it at the end so the PHY had time to
* reset */
stm32_eth_phy_write(eth_config.phy_addr, PHY_BMCR, eth_config.speed);
_phy_write(eth_config.phy_addr, PHY_BMCR, eth_config.speed);
return 0;
}
int stm32_eth_send(const struct iolist *iolist)
static int stm32_eth_send(netdev_t *netdev, const struct iolist *iolist)
{
unsigned len = iolist_size(iolist);
int ret = 0;
(void)netdev;
netdev->event_callback(netdev, NETDEV_EVENT_TX_STARTED);
unsigned bytes_to_send = iolist_size(iolist);
/* Input must not be bigger than maximum allowed frame length */
assert(bytes_to_send <= ETHERNET_FRAME_LEN);
/* This API is not thread safe, check that no other thread is sending */
assert(!(tx_desc[0].status & TX_DESC_STAT_OWN));
/* We cannot send more chunks than allocated descriptors */
assert(iolist_count(iolist) <= ETH_TX_DESCRIPTOR_COUNT);
/* safety check */
if (len > ETH_TX_BUFFER_SIZE) {
DEBUG("stm32_eth: Error iolist_size > ETH_TX_BUFFER_SIZE\n");
return -1;
for (unsigned i = 0; iolist; iolist = iolist->iol_next, i++) {
tx_desc[i].control = iolist->iol_len;
tx_desc[i].buffer_addr = iolist->iol_base;
uint32_t status = TX_DESC_STAT_IC | TX_DESC_STAT_TCH | TX_DESC_STAT_CIC
| TX_DESC_STAT_OWN;
if (!i) {
/* first chunk */
status |= TX_DESC_STAT_FS;
}
if (!iolist->iol_next) {
/* last chunk */
status |= TX_DESC_STAT_LS | TX_DESC_STAT_TER;
}
tx_desc[i].status = status;
}
/* block until there's an available descriptor */
while (tx_curr->status & DESC_OWN) {
DEBUG("stm32_eth: not avail\n");
}
/* clear status field */
tx_curr->status &= 0x0fffffff;
dma_acquire(eth_config.dma);
for (; iolist; iolist = iolist->iol_next) {
ret += dma_transfer(eth_config.dma, eth_config.dma_chan, iolist->iol_base,
tx_curr->buffer_addr+ret, iolist->iol_len, DMA_MEM_TO_MEM, DMA_INC_BOTH_ADDR);
}
dma_release(eth_config.dma);
if (ret < 0) {
DEBUG("stm32_eth: Failure in dma_transfer\n");
return ret;
}
tx_curr->control = (len & 0x1fff);
/* set flags for first and last frames */
tx_curr->status |= TX_DESC_FS;
tx_curr->status |= TX_DESC_LS | TX_DESC_IC;
/* give the descriptors to the DMA */
tx_curr->status |= DESC_OWN;
tx_curr = tx_curr->desc_next;
/* start tx */
/* start TX */
ETH->DMATPDR = 0;
return ret;
/* await completion */
DEBUG("[stm32_eth] Started to send %u B via DMA\n", bytes_to_send);
mutex_lock(&stm32_eth_tx_completed);
DEBUG("[stm32_eth] TX completed\n");
/* Error check */
unsigned i = 0;
while (1) {
uint32_t status = tx_desc[i].status;
DEBUG("TX desc %u status: ES=%c, UF=%c, EC=%c, NC=%c, FS=%c, LS=%c\n",
i,
(status & TX_DESC_STAT_ES) ? '1' : '0',
(status & TX_DESC_STAT_UF) ? '1' : '0',
(status & TX_DESC_STAT_EC) ? '1' : '0',
(status & TX_DESC_STAT_NC) ? '1' : '0',
(status & TX_DESC_STAT_FS) ? '1' : '0',
(status & TX_DESC_STAT_LS) ? '1' : '0');
/* The Error Summary (ES) bit is set, if any error during TX occurred */
if (status & TX_DESC_STAT_ES) {
/* TODO: Report better event to reflect error */
netdev->event_callback(netdev, NETDEV_EVENT_TX_COMPLETE);
return -1;
}
if (status & TX_DESC_STAT_LS) {
break;
}
i++;
}
netdev->event_callback(netdev, NETDEV_EVENT_TX_COMPLETE);
return (int)bytes_to_send;
}
static int _try_receive(char *data, int max_len, int block)
static int get_rx_frame_size(void)
{
int copy, len = 0;
int copied = 0;
int drop = (data || max_len > 0);
edma_desc_t *p = rx_curr;
for (int i = 0; i < ETH_RX_BUFFER_COUNT && len == 0; i++) {
/* try receiving, if the block is set, simply wait for the rest of
* the packet to complete, otherwise just break */
while (p->status & DESC_OWN) {
if (!block) {
break;
}
edma_desc_t *i = rx_curr;
uint32_t status;
while (1) {
/* Wait until DMA gave up control over descriptor */
if ((status = i->status) & RX_DESC_STAT_OWN) {
DEBUG("[stm32_eth] RX not completed (spurious interrupt?)\n");
return -EAGAIN;
}
/* amount of data to copy */
copy = ETH_RX_BUFFER_SIZE;
if (p->status & (RX_DESC_LS | RX_DESC_FL)) {
len = ((p->status >> 16) & 0x3FFF) - 4;
copy = len - copied;
DEBUG("[stm32_eth] get_rx_frame_size(): FS=%c, LS=%c, ES=%c, DE=%c, FL=%lu\n",
(status & RX_DESC_STAT_FS) ? '1' : '0',
(status & RX_DESC_STAT_LS) ? '1' : '0',
(status & RX_DESC_STAT_ES) ? '1' : '0',
(status & RX_DESC_STAT_DE) ? '1' : '0',
((status >> 16) & 0x3fff) - ETHERNET_FCS_LEN);
if (status & RX_DESC_STAT_DE) {
DEBUG("[stm32_eth] Overflow during RX\n");
return -EOVERFLOW;
}
if (drop) {
/* copy the data if possible */
if (data && max_len >= copy) {
memcpy(data, p->buffer_addr, copy);
max_len -= copy;
}
else if (max_len < copy) {
len = -1;
}
p->status = DESC_OWN;
if (status & RX_DESC_STAT_ES) {
DEBUG("[stm32_eth] Error during RX\n");
return -EIO;
}
p = p->desc_next;
if (status & RX_DESC_STAT_LS) {
break;
}
i = i->desc_next;
}
if (drop) {
rx_curr = p;
/* bits 16-29 contain the frame length including 4 B frame check sequence */
return ((status >> 16) & 0x3fff) - ETHERNET_FCS_LEN;
}
/* Discard the frame starting at rx_curr: walk its descriptors, hand each
 * one back to the DMA, and advance rx_curr past the frame (or past the
 * erroneous descriptor). */
static void drop_frame_and_update_rx_curr(void)
{
while (1) {
uint32_t old_status = rx_curr->status;
/* hand over old descriptor to DMA */
rx_curr->status = RX_DESC_STAT_OWN;
rx_curr = rx_curr->desc_next;
if (old_status & (RX_DESC_STAT_LS | RX_DESC_STAT_ES)) {
/* reached either last DMA descriptor of frame or error ==> done */
return;
}
}
}
/* Scan the RX descriptor ring starting at rx_curr for an already completed
 * frame whose RX IRQ may have been missed; if one is found, re-trigger the
 * netdev ISR event so the upper layer fetches it. */
static void handle_lost_rx_irqs(void)
{
edma_desc_t *iter = rx_curr;
while (1) {
uint32_t status = iter->status;
if (status & RX_DESC_STAT_OWN) {
/* descriptor still owned by DMA ==> no further completed frame */
break;
}
if (status & RX_DESC_STAT_LS) {
DEBUG("[stm32_eth] Lost RX IRQ, sending event to upper layer now\n");
/* we use the ISR event for this, as the upper layer calls recv()
 * right away on an NETDEV_EVENT_RX_COMPLETE. Because there could be
 * potentially quite a lot of received frames in the queue, we might
 * risk a stack overflow if we would send an NETDEV_EVENT_RX_COMPLETE
 */
netdev_trigger_event_isr(_netdev);
break;
}
iter = iter->desc_next;
}
}
static int stm32_eth_recv(netdev_t *netdev, void *buf, size_t max_len,
void *info)
{
(void)info;
(void)netdev;
char *data = buf;
/* Determine the size of received frame. The frame might span multiple
* DMA buffers */
int size = get_rx_frame_size();
if (size < 0) {
if (size != -EAGAIN) {
DEBUG("[stm32_eth] Dropping frame due to error\n");
drop_frame_and_update_rx_curr();
}
return size;
}
return len;
}
if (!buf) {
if (max_len) {
DEBUG("[stm32_eth] Dropping frame as requested by upper layer\n");
drop_frame_and_update_rx_curr();
}
return size;
}
int stm32_eth_try_receive(char *data, unsigned max_len)
{
return _try_receive(data, max_len, 0);
}
if (max_len < (size_t)size) {
DEBUG("[stm32_eth] Buffer provided by upper layer is too small\n");
drop_frame_and_update_rx_curr();
return -ENOBUFS;
}
int stm32_eth_receive_blocking(char *data, unsigned max_len)
{
return _try_receive(data, max_len, 1);
}
size_t remain = size;
while (remain) {
size_t chunk = MIN(remain, ETH_RX_BUFFER_SIZE);
memcpy(data, rx_curr->buffer_addr, chunk);
data += chunk;
remain -= chunk;
/* Hand over descriptor to DMA */
rx_curr->status = RX_DESC_STAT_OWN;
rx_curr = rx_curr->desc_next;
}
int stm32_eth_get_rx_status_owned(void)
{
return (!(rx_curr->status & DESC_OWN));
handle_lost_rx_irqs();
return size;
}
/* Ethernet wake-up ISR: no driver action required, only finalize the ISR
 * context for the Cortex-M port */
void stm32_eth_isr_eth_wkup(void)
{
cortexm_isr_end();
}
/* NETDEV_EVENT_ISR handler: forward a RX-complete event to the upper layer */
static void stm32_eth_isr(netdev_t *netdev)
{
    netdev->event_callback(netdev, NETDEV_EVENT_RX_COMPLETE);
}
/* Shared Ethernet IRQ handler: handles DMA TX and RX completion */
void isr_eth(void)
{
unsigned tmp = ETH->DMASR;
if ((tmp & ETH_DMASR_TS)) {
/* acknowledge the TX completion (and normal summary) flag */
ETH->DMASR = ETH_DMASR_NIS | ETH_DMASR_TS;
DEBUG("isr_eth: TX completed\n");
/* wake the thread blocked in the send path */
mutex_unlock(&stm32_eth_tx_completed);
}
if ((tmp & ETH_DMASR_RS)) {
/* acknowledge the RX completion (and normal summary) flag */
ETH->DMASR = ETH_DMASR_NIS | ETH_DMASR_RS;
DEBUG("isr_eth: RX completed\n");
if (_netdev) {
/* defer frame handling to thread context via the netdev ISR event */
netdev_trigger_event_isr(_netdev);
}
}
cortexm_isr_end();
}
/* netdev set: handle NETOPT_ADDRESS (set MAC) locally, delegate everything
 * else to the generic Ethernet implementation */
static int stm32_eth_set(netdev_t *dev, netopt_t opt,
                         const void *value, size_t max_len)
{
    if (opt == NETOPT_ADDRESS) {
        assert(max_len >= ETHERNET_ADDR_LEN);
        stm32_eth_set_mac((char *)value);
        return ETHERNET_ADDR_LEN;
    }

    return netdev_eth_set(dev, opt, value, max_len);
}
/* netdev get: handle NETOPT_ADDRESS (read MAC) locally, delegate everything
 * else to the generic Ethernet implementation */
static int stm32_eth_get(netdev_t *dev, netopt_t opt,
                         void *value, size_t max_len)
{
    if (opt == NETOPT_ADDRESS) {
        assert(max_len >= ETHERNET_ADDR_LEN);
        stm32_eth_get_mac((char *)value);
        return ETHERNET_ADDR_LEN;
    }

    return netdev_eth_get(dev, opt, value, max_len);
}
/* netdev driver vtable mapping the generic API onto this driver */
static const netdev_driver_t netdev_driver_stm32f4eth = {
.send = stm32_eth_send,
.recv = stm32_eth_recv,
.init = stm32_eth_init,
.isr = stm32_eth_isr,
.get = stm32_eth_get,
.set = stm32_eth_set,
};
/* Attach this driver to the given netdev and remember it for IRQ handling */
void stm32_eth_netdev_setup(netdev_t *netdev)
{
_netdev = netdev;
netdev->driver = &netdev_driver_stm32f4eth;
}

View File

@ -1 +0,0 @@
include $(RIOTBASE)/Makefile.base

View File

@ -1,5 +0,0 @@
FEATURES_REQUIRED += periph_eth
FEATURES_REQUIRED += periph_dma
USEMODULE += netdev_eth
USEMODULE += iolist
USEMODULE += luid

View File

@ -1,13 +0,0 @@
/*
*
* This file is subject to the terms and conditions of the GNU Lesser
* General Public License v2.1. See the file LICENSE in the top level
* directory for more details.
*/
/**
@defgroup drivers_stm32_common Driver for stm32 ethernet
@ingroup drivers_netdev
@brief Device Driver for STM32 Ethernet
*/

View File

@ -1,157 +0,0 @@
/*
* Copyright (C) 2016 TriaGnoSys GmbH
*
* This file is subject to the terms and conditions of the GNU Lesser General
* Public License v2.1. See the file LICENSE in the top level directory for more
* details.
*/
/**
* @ingroup drivers_stm32_common
* @{
*
* @file
* @brief Netdev wrapper for stm32 ethernet
*
* @author Víctor Ariño <victor.arino@triagnosys.com>
*
* @}
*/
#include "periph_conf.h"
#include "mutex.h"
#include "net/netdev/eth.h"
#include "net/ethernet.h"
#include "iolist.h"
#define ENABLE_DEBUG (0)
#include "debug.h"
#include <string.h>
static mutex_t _tx = MUTEX_INIT;
static mutex_t _rx = MUTEX_INIT;
netdev_t *_netdev;
void stm32_eth_set_mac(const char *mac);
void stm32_eth_get_mac(char *out);
int stm32_eth_init(void);
int stm32_eth_receive_blocking(char *data, unsigned max_len);
int stm32_eth_send(const struct iolist *iolist);
int stm32_eth_get_rx_status_owned(void);
/* NETDEV_EVENT_ISR handler: only signal RX completion if the current RX
 * descriptor has been handed back to the CPU, i.e. a frame is pending */
static void _isr(netdev_t *netdev) {
if(stm32_eth_get_rx_status_owned()) {
netdev->event_callback(netdev, NETDEV_EVENT_RX_COMPLETE);
}
}
/* Shared Ethernet IRQ handler: acknowledges DMA TX/RX completion flags and
 * unlocks the corresponding waiter mutexes */
void isr_eth(void)
{
volatile unsigned tmp = ETH->DMASR;
if ((tmp & ETH_DMASR_TS)) {
/* acknowledge TX completion and wake the sender */
ETH->DMASR = ETH_DMASR_TS | ETH_DMASR_NIS;
mutex_unlock(&_tx);
}
if ((tmp & ETH_DMASR_RS)) {
/* acknowledge RX completion, wake the receiver and notify upper layer */
ETH->DMASR = ETH_DMASR_RS | ETH_DMASR_NIS;
mutex_unlock(&_rx);
if (_netdev) {
netdev_trigger_event_isr(_netdev);
}
}
/* printf("r:%x\n\n", tmp); */
cortexm_isr_end();
}
/* netdev recv: wait for a frame (blocking on the RX mutex if the DMA still
 * owns the current descriptor) and copy it into buf */
static int _recv(netdev_t *netdev, void *buf, size_t len, void *info)
{
(void)info;
(void)netdev;
/* if DMA still owns the descriptor, the RX IRQ will unlock us */
if(!stm32_eth_get_rx_status_owned()){
mutex_lock(&_rx);
}
int ret = stm32_eth_receive_blocking((char *)buf, len);
DEBUG("stm32_eth_netdev: _recev: %d\n", ret);
return ret;
}
/* netdev send: forward the iolist to the low-level driver and emit the
 * matching TX events to the upper layer */
static int _send(netdev_t *netdev, const struct iolist *iolist)
{
(void)netdev;
int ret = 0;
/* NOTE(review): this gates the TX mutex on *RX* descriptor ownership,
 * which looks inconsistent -- confirm intended behavior */
if(stm32_eth_get_rx_status_owned()) {
mutex_lock(&_tx);
}
netdev->event_callback(netdev, NETDEV_EVENT_TX_STARTED);
ret = stm32_eth_send(iolist);
DEBUG("stm32_eth_netdev: _send: %d %d\n", ret, iolist_size(iolist));
if (ret < 0)
{
/* report send failure as a busy medium */
netdev->event_callback(netdev, NETDEV_EVENT_TX_MEDIUM_BUSY);
return ret;
}
netdev->event_callback(netdev, NETDEV_EVENT_TX_COMPLETE);
return ret;
}
/* netdev set: NETOPT_ADDRESS sets the MAC, everything else is delegated to
 * the generic Ethernet implementation */
static int _set(netdev_t *dev, netopt_t opt, const void *value, size_t max_len)
{
int res = -1;
switch (opt) {
case NETOPT_ADDRESS:
assert(max_len >= ETHERNET_ADDR_LEN);
stm32_eth_set_mac((char *)value);
res = ETHERNET_ADDR_LEN;
break;
default:
res = netdev_eth_set(dev, opt, value, max_len);
break;
}
return res;
}
/* netdev get: NETOPT_ADDRESS reads the MAC, everything else is delegated to
 * the generic Ethernet implementation */
static int _get(netdev_t *dev, netopt_t opt, void *value, size_t max_len)
{
int res = -1;
switch (opt) {
case NETOPT_ADDRESS:
assert(max_len >= ETHERNET_ADDR_LEN);
stm32_eth_get_mac((char *)value);
res = ETHERNET_ADDR_LEN;
break;
default:
res = netdev_eth_get(dev, opt, value, max_len);
break;
}
return res;
}
/* netdev init: delegate to the low-level hardware initialization */
static int _init(netdev_t *netdev)
{
(void)netdev;
return stm32_eth_init();
}
/* netdev driver vtable mapping the generic API onto the wrapper functions */
static const netdev_driver_t netdev_driver_stm32f4eth = {
.send = _send,
.recv = _recv,
.init = _init,
.isr = _isr,
.get = _get,
.set = _set,
};
/* Attach this driver to the given netdev and remember it for IRQ handling */
void stm32_eth_netdev_setup(netdev_t *netdev)
{
_netdev = netdev;
netdev->driver = &netdev_driver_stm32f4eth;
}

View File

@ -108,9 +108,10 @@ PSEUDOMODULES += sock_tcp
PSEUDOMODULES += sock_udp
PSEUDOMODULES += soft_uart_modecfg
PSEUDOMODULES += stdin
PSEUDOMODULES += stdio_ethos
PSEUDOMODULES += stdio_cdc_acm
PSEUDOMODULES += stdio_ethos
PSEUDOMODULES += stdio_uart_rx
PSEUDOMODULES += stm32_eth
PSEUDOMODULES += suit_transport_%
PSEUDOMODULES += wakaama_objects_%
PSEUDOMODULES += wifi_enterprise