dpdk l2fwd (2)

/* Allocate the RX queue control block array */
diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
if (diag != 0) {
	PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
			port_id, diag);
	return diag;
}

/* Allocate the TX queue control block array */
diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
if (diag != 0) {
	PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
			port_id, diag);
	rte_eth_dev_rx_queue_config(dev, 0);
	return diag;
}

/* Driver-specific configure hook; for the em PMD this is
 * eth_em_configure(), which sets
 * intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; */
diag = (*dev->dev_ops->dev_configure)(dev);
if (diag != 0) {
	PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
			port_id, diag);
	rte_eth_dev_rx_queue_config(dev, 0);
	rte_eth_dev_tx_queue_config(dev, 0);
	return diag;
}

return 0;
}
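All of the above runs inside a single rte_eth_dev_configure() call. For orientation, a minimal sketch of the caller's side as l2fwd does it, with one RX and one TX queue per port (port_conf is an assumed struct rte_eth_conf; l2fwd fills in its .rxmode/.txmode fields explicitly):

	static struct rte_eth_conf port_conf; /* l2fwd sets .rxmode/.txmode here */
	uint8_t portid = 0;
	int ret;

	/* triggers the RX/TX queue config plus the PMD's dev_configure hook */
	ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			 "Cannot configure device: err=%d, port=%u\n",
			 ret, (unsigned) portid);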

RX queue setup

int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	struct rte_eth_dev *dev;
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct rte_eth_dev_info dev_info;

	/* This function is only safe when called from the primary process
	 * in a multi-process setup */
	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);

	if (port_id >= nb_ports) {
		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
		return (-EINVAL);
	}
	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return (-EINVAL);
	}

	if (dev->data->dev_started) {
		PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n",
		    port_id);
		return -EBUSY;
	}

	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
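Both guard macros expand to an early-return check. As defined in rte_ethdev.c of this era they look approximately like this (a sketch, not a verbatim copy):

	/* Restrict the call to the primary process of a multi-process setup */
	#define PROC_PRIMARY_OR_ERR_RET(retval) do {                        \
		if (rte_eal_process_type() != RTE_PROC_PRIMARY) {           \
			PMD_DEBUG_TRACE("Cannot run in secondary processes\n"); \
			return (retval);                                    \
		}                                                           \
	} while (0)

	/* Fail if the PMD left this dev_ops callback unimplemented */
	#define FUNC_PTR_OR_ERR_RET(func, retval) do {                      \
		if ((func) == NULL) {                                       \
			PMD_DEBUG_TRACE("Function not supported\n");        \
			return (retval);                                    \
		}                                                           \
	} while (0)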

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory
	 * pool. First check that the memory pool has valid private data.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return (-ENOSPC);
	}

	/* The mbuf data room (e.g. 2048 bytes) minus the headroom must be
	 * at least the device minimum (e.g. 256 bytes for em). */
	mbp_priv = rte_mempool_get_priv(mp);
	if ((uint32_t) (mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) <
	    dev_info.min_rx_bufsize) {
		PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + "
				"min_rx_bufsize(dev)=%d)\n",
				mp->name,
				(int)mbp_priv->mbuf_data_room_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return (-EINVAL);
	}
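The data room size read by this check is recorded in the pool's private area when the application creates its mbuf pool: rte_pktmbuf_pool_init() fills in mbuf_data_room_size. A sketch of the l2fwd-style pool creation that satisfies the check (NB_MBUF/MBUF_SIZE follow the example app's naming; the exact values are the application's choice):

	/* 2048-byte data room, plus headroom and the mbuf header itself */
	#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
	#define NB_MBUF   8192

	struct rte_mempool *l2fwd_pktmbuf_pool =
		rte_mempool_create("mbuf_pool", NB_MBUF, MBUF_SIZE,
				   32,  /* per-lcore cache size */
				   sizeof(struct rte_pktmbuf_pool_private),
				   rte_pktmbuf_pool_init, NULL, /* sets mbuf_data_room_size */
				   rte_pktmbuf_init, NULL,
				   rte_socket_id(), 0);

With these numbers, mbuf_data_room_size - RTE_PKTMBUF_HEADROOM = 2048, comfortably above the 256-byte minimum mentioned in the original comment.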

	/* Driver-specific setup hook; for the em PMD this is
	 * eth_em_rx_queue_setup(), which initializes the RX descriptors. */
	return (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					       socket_id, rx_conf, mp);
}
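On the application side, l2fwd reaches this function once per port. A minimal sketch, assuming the pool created above and the example app's default of 128 RX descriptors (l2fwd actually sets explicit RX_PTHRESH/HTHRESH/WTHRESH values in rx_conf; they are left zeroed here for brevity):

	uint16_t nb_rxd = 128;
	static struct rte_eth_rxconf rx_conf; /* thresholds zeroed for brevity */

	ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, rte_socket_id(),
				     &rx_conf, l2fwd_pktmbuf_pool);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			 "rte_eth_rx_queue_setup: err=%d, port=%u\n",
			 ret, (unsigned) portid);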

int
eth_em_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t queue_idx,
		uint16_t nb_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	const struct rte_memzone *rz;
	struct em_rx_queue *rxq;
	struct e1000_hw *hw;
	uint32_t rsize;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Validate number of receive descriptors.
	 * It must not exceed the hardware maximum, and the ring size
	 * must be a multiple of EM_ALIGN.
	 */
	if (((nb_desc * sizeof(rxq->rx_ring[0])) % EM_ALIGN) != 0 ||
			(nb_desc > EM_MAX_RING_DESC) ||
			(nb_desc < EM_MIN_RING_DESC)) {
		return (-EINVAL);
	}
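For a concrete instance of this check: a legacy e1000 receive descriptor is 16 bytes and EM_ALIGN in this driver is 128 bytes, so the ring size in bytes is a multiple of EM_ALIGN whenever nb_desc is a multiple of 8; the common default of 128 descriptors (128 * 16 = 2048 bytes) passes. (Values quoted from the em driver headers of this era; treat them as illustrative.)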

	/*
	 * EM devices don't support the drop_en functionality.
	 */
	if (rx_conf->rx_drop_en) {
		RTE_LOG(ERR, PMD,
			"drop_en functionality not supported by device\n");
		return (-EINVAL);
	}

	/* If this queue was set up before, free its resources prior to
	 * re-allocation. */
	if (dev->data->rx_queues[queue_idx] != NULL) {
		em_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* Reserve a memzone (named e.g. rte_em_pmd_rx_ring_0_1) to hold the
	 * RX ring, sized for the max possible number of hardware
	 * descriptors. */
	rsize = sizeof(rxq->rx_ring[0]) * EM_MAX_RING_DESC;
	if ((rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
			socket_id)) == NULL)
		return (-ENOMEM);
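ring_dma_zone_reserve() is a small static helper in em_rxtx.c. Approximately (a sketch, not a verbatim copy), it formats the memzone name from the driver name, ring name, port id and queue id, which is where a name like rte_em_pmd_rx_ring_0_1 comes from, and reuses the zone if one already exists:

	static const struct rte_memzone *
	ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
			uint16_t queue_id, uint32_t ring_size, int socket_id)
	{
		const struct rte_memzone *mz;
		char z_name[RTE_MEMZONE_NAMESIZE];

		/* e.g. "rte_em_pmd_rx_ring_0_1": port 0, RX queue 1 */
		rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
			     dev->driver->pci_drv.name, ring_name,
			     dev->data->port_id, queue_id);

		/* an existing zone (e.g. after an application restart) is reused */
		if ((mz = rte_memzone_lookup(z_name)) != NULL)
			return mz;

		return rte_memzone_reserve(z_name, ring_size, socket_id, 0);
	}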