Merge: octeontx2: update driver to upstream v6.13

MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/6271

# Merge Request Required Information

## Summary of Changes
This patch set brings the RVU AF/PF/VF net driver and the CPT driver in line with upstream kernel v6.13.
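
A large part of the ethtool churn in this set replaces open-coded strscpy()/snprintf() string filling with the ethtool_puts()/ethtool_sprintf() helpers, which advance the output cursor themselves. The snippet below is only a minimal userspace sketch of that pattern: ethtool_puts_sketch() and ethtool_sprintf_sketch() are local stand-ins for the kernel helpers (assumed behaviour for illustration, not the kernel implementation), and the stat names are loosely borrowed from the hunks below.

```c
#include <stdarg.h>
#include <stdio.h>

#define ETH_GSTRING_LEN 32	/* matches the kernel's uapi value */

/* Stand-in for ethtool_puts(): copy one name into a fixed-width slot
 * and advance the cursor to the next slot.
 */
static void ethtool_puts_sketch(char **data, const char *str)
{
	snprintf(*data, ETH_GSTRING_LEN, "%s", str);
	*data += ETH_GSTRING_LEN;
}

/* Stand-in for ethtool_sprintf(): format one name (e.g. with a queue
 * index) into a fixed-width slot and advance the cursor.
 */
static void ethtool_sprintf_sketch(char **data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(*data, ETH_GSTRING_LEN, fmt, args);
	va_end(args);
	*data += ETH_GSTRING_LEN;
}

int main(void)
{
	static const char * const global_stats[] = { "rx_packets", "rx_err_pkts" };
	char buf[4 * ETH_GSTRING_LEN] = { 0 };
	char *data = buf;
	unsigned int i, q;

	/* Flat counters: one slot per name, no manual "data += ETH_GSTRING_LEN". */
	for (i = 0; i < sizeof(global_stats) / sizeof(global_stats[0]); i++)
		ethtool_puts_sketch(&data, global_stats[i]);

	/* Per-queue counters: the queue index is folded into the name. */
	for (q = 0; q < 2; q++)
		ethtool_sprintf_sketch(&data, "tx_busy[Q-%u]", q);

	for (i = 0; i < 4; i++)
		printf("slot %u: %s\n", i, buf + i * ETH_GSTRING_LEN);

	return 0;
}
```

The same cursor-advancing idiom appears in the mvpp2 and octep get_strings() hunks further down in this diff.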

## Approved Development Ticket(s)
JIRA: https://issues.redhat.com/browse/RHEL-23617

RHEL-23617
Signed-off-by: Sai Krishna <sgajula@redhat.com>

Approved-by: Kamal Heib <kheib@redhat.com>
Approved-by: José Ignacio Tornos Martínez <jtornosm@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>

Merged-by: Patrick Talbert <ptalbert@redhat.com>
Commit f6a0ac3264, committed by Patrick Talbert, 2025-02-06 08:14:18 -05:00
69 changed files with 13819 additions and 1796 deletions


@ -11634,6 +11634,7 @@ M: Sunil Goutham <sgoutham@marvell.com>
M: Geetha sowjanya <gakula@marvell.com>
M: Subbaraya Sundeep <sbhatta@marvell.com>
M: hariprasad <hkelam@marvell.com>
M: Bharat Bhushan <bbhushan2@marvell.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/marvell/octeontx2/nic/


@ -1947,45 +1947,32 @@ static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
u8 *data)
{
struct mvpp2_port *port = netdev_priv(netdev);
const char *str;
int i, q;
if (sset != ETH_SS_STATS)
return;
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
strscpy(data, mvpp2_ethtool_mib_regs[i].string,
ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
ethtool_puts(&data, mvpp2_ethtool_mib_regs[i].string);
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
strscpy(data, mvpp2_ethtool_port_regs[i].string,
ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
ethtool_puts(&data, mvpp2_ethtool_port_regs[i].string);
for (q = 0; q < port->ntxqs; q++) {
for (q = 0; q < port->ntxqs; q++)
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
snprintf(data, ETH_GSTRING_LEN,
mvpp2_ethtool_txq_regs[i].string, q);
data += ETH_GSTRING_LEN;
str = mvpp2_ethtool_txq_regs[i].string;
ethtool_sprintf(&data, str, q);
}
}
for (q = 0; q < port->nrxqs; q++) {
for (q = 0; q < port->nrxqs; q++)
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
snprintf(data, ETH_GSTRING_LEN,
mvpp2_ethtool_rxq_regs[i].string,
q);
data += ETH_GSTRING_LEN;
str = mvpp2_ethtool_rxq_regs[i].string;
ethtool_sprintf(&data, str, q);
}
}
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) {
strscpy(data, mvpp2_ethtool_xdp[i].string,
ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++)
ethtool_puts(&data, mvpp2_ethtool_xdp[i].string);
}
static void


@ -47,7 +47,7 @@ static const char octep_gstrings_global_stats[][ETH_GSTRING_LEN] = {
"rx_err_pkts",
};
#define OCTEP_GLOBAL_STATS_CNT (sizeof(octep_gstrings_global_stats) / ETH_GSTRING_LEN)
#define OCTEP_GLOBAL_STATS_CNT ARRAY_SIZE(octep_gstrings_global_stats)
static const char octep_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
"tx_packets_posted[Q-%u]",
@ -56,7 +56,7 @@ static const char octep_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
"tx_busy[Q-%u]",
};
#define OCTEP_TX_Q_STATS_CNT (sizeof(octep_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
#define OCTEP_TX_Q_STATS_CNT ARRAY_SIZE(octep_gstrings_tx_q_stats)
static const char octep_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
"rx_packets[Q-%u]",
@ -64,7 +64,7 @@ static const char octep_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
"rx_alloc_errors[Q-%u]",
};
#define OCTEP_RX_Q_STATS_CNT (sizeof(octep_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
#define OCTEP_RX_Q_STATS_CNT ARRAY_SIZE(octep_gstrings_rx_q_stats)
static void octep_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
@ -80,32 +80,25 @@ static void octep_get_strings(struct net_device *netdev,
{
struct octep_device *oct = netdev_priv(netdev);
u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
char *strings = (char *)data;
const char *str;
int i, j;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < OCTEP_GLOBAL_STATS_CNT; i++) {
snprintf(strings, ETH_GSTRING_LEN,
octep_gstrings_global_stats[i]);
strings += ETH_GSTRING_LEN;
}
for (i = 0; i < OCTEP_GLOBAL_STATS_CNT; i++)
ethtool_puts(&data, octep_gstrings_global_stats[i]);
for (i = 0; i < num_queues; i++) {
for (i = 0; i < num_queues; i++)
for (j = 0; j < OCTEP_TX_Q_STATS_CNT; j++) {
snprintf(strings, ETH_GSTRING_LEN,
octep_gstrings_tx_q_stats[j], i);
strings += ETH_GSTRING_LEN;
str = octep_gstrings_tx_q_stats[j];
ethtool_sprintf(&data, str, i);
}
}
for (i = 0; i < num_queues; i++) {
for (i = 0; i < num_queues; i++)
for (j = 0; j < OCTEP_RX_Q_STATS_CNT; j++) {
snprintf(strings, ETH_GSTRING_LEN,
octep_gstrings_rx_q_stats[j], i);
strings += ETH_GSTRING_LEN;
str = octep_gstrings_rx_q_stats[j];
ethtool_sprintf(&data, str, i);
}
}
break;
default:
break;


@ -25,7 +25,7 @@ static const char octep_vf_gstrings_global_stats[][ETH_GSTRING_LEN] = {
"rx_dropped_bytes_fifo_full",
};
#define OCTEP_VF_GLOBAL_STATS_CNT (sizeof(octep_vf_gstrings_global_stats) / ETH_GSTRING_LEN)
#define OCTEP_VF_GLOBAL_STATS_CNT ARRAY_SIZE(octep_vf_gstrings_global_stats)
static const char octep_vf_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
"tx_packets_posted[Q-%u]",
@ -34,7 +34,7 @@ static const char octep_vf_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
"tx_busy[Q-%u]",
};
#define OCTEP_VF_TX_Q_STATS_CNT (sizeof(octep_vf_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
#define OCTEP_VF_TX_Q_STATS_CNT ARRAY_SIZE(octep_vf_gstrings_tx_q_stats)
static const char octep_vf_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
"rx_packets[Q-%u]",
@ -42,7 +42,7 @@ static const char octep_vf_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
"rx_alloc_errors[Q-%u]",
};
#define OCTEP_VF_RX_Q_STATS_CNT (sizeof(octep_vf_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
#define OCTEP_VF_RX_Q_STATS_CNT ARRAY_SIZE(octep_vf_gstrings_rx_q_stats)
static void octep_vf_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
@ -58,32 +58,25 @@ static void octep_vf_get_strings(struct net_device *netdev,
{
struct octep_vf_device *oct = netdev_priv(netdev);
u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
char *strings = (char *)data;
const char *str;
int i, j;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < OCTEP_VF_GLOBAL_STATS_CNT; i++) {
snprintf(strings, ETH_GSTRING_LEN,
octep_vf_gstrings_global_stats[i]);
strings += ETH_GSTRING_LEN;
}
for (i = 0; i < OCTEP_VF_GLOBAL_STATS_CNT; i++)
ethtool_puts(&data, octep_vf_gstrings_global_stats[i]);
for (i = 0; i < num_queues; i++) {
for (i = 0; i < num_queues; i++)
for (j = 0; j < OCTEP_VF_TX_Q_STATS_CNT; j++) {
snprintf(strings, ETH_GSTRING_LEN,
octep_vf_gstrings_tx_q_stats[j], i);
strings += ETH_GSTRING_LEN;
str = octep_vf_gstrings_tx_q_stats[j];
ethtool_sprintf(&data, str, i);
}
}
for (i = 0; i < num_queues; i++) {
for (i = 0; i < num_queues; i++)
for (j = 0; j < OCTEP_VF_RX_Q_STATS_CNT; j++) {
snprintf(strings, ETH_GSTRING_LEN,
octep_vf_gstrings_rx_q_stats[j], i);
strings += ETH_GSTRING_LEN;
str = octep_vf_gstrings_rx_q_stats[j];
ethtool_sprintf(&data, str, i);
}
}
break;
default:
break;


@ -36,6 +36,7 @@ config OCTEONTX2_PF
select DIMLIB
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
depends on MACSEC || !MACSEC
help
This driver supports Marvell's OcteonTX2 NIC physical function.
@ -44,3 +45,11 @@ config OCTEONTX2_VF
depends on OCTEONTX2_PF
help
This driver supports Marvell's OcteonTX2 NIC virtual function.
config RVU_ESWITCH
tristate "Marvell RVU E-Switch support"
depends on OCTEONTX2_PF
default m
help
This driver supports Marvell's RVU E-Switch that
provides internal SRIOV packet steering and switching.


@ -11,4 +11,5 @@ rvu_mbox-y := mbox.o rvu_trace.o
rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o
rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o \
rvu_rep.o


@ -24,6 +24,8 @@
#define DRV_NAME "Marvell-CGX/RPM"
#define DRV_STRING "Marvell CGX/RPM Driver"
#define CGX_RX_STAT_GLOBAL_INDEX 9
static LIST_HEAD(cgx_list);
/* Convert firmware speed encoding to user format(Mbps) */
@ -55,6 +57,7 @@ static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
[LMAC_MODE_50G_R] = "50G_R",
[LMAC_MODE_100G_R] = "100G_R",
[LMAC_MODE_USXGMII] = "USXGMII",
[LMAC_MODE_USGMII] = "USGMII",
};
/* CGX PHY management internal APIs */
@ -109,6 +112,11 @@ struct mac_ops *get_mac_ops(void *cgxd)
return ((struct cgx *)cgxd)->mac_ops;
}
u32 cgx_get_fifo_len(void *cgxd)
{
return ((struct cgx *)cgxd)->fifo_len;
}
void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
@ -169,6 +177,9 @@ void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
/* Software must not access disabled LMAC registers */
if (!is_lmac_valid(cgx_dev, lmac_id))
return;
cgx_write(cgx_dev, lmac_id, offset, val);
}
@ -176,6 +187,10 @@ u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
/* Software must not access disabled LMAC registers */
if (!is_lmac_valid(cgx_dev, lmac_id))
return 0;
return cgx_read(cgx_dev, lmac_id, offset);
}
@ -199,6 +214,24 @@ u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
}
static u8 cgx_get_nix_resetbit(struct cgx *cgx)
{
int first_lmac;
u8 p2x;
/* non 98XX silicons supports only NIX0 block */
if (cgx->pdev->subsystem_device != PCI_SUBSYS_DEVID_98XX)
return CGX_NIX0_RESET;
first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
p2x = cgx_lmac_get_p2x(cgx->cgx_id, first_lmac);
if (p2x == CMR_P2X_SEL_NIX1)
return CGX_NIX1_RESET;
else
return CGX_NIX0_RESET;
}
/* Ensure the required lock for event queue(where asynchronous events are
* posted) is acquired before calling this API. Else an asynchronous event(with
* latest link status) can reach the destination before this function returns
@ -216,24 +249,6 @@ int cgx_get_link_info(void *cgxd, int lmac_id,
return 0;
}
static u64 mac2u64 (u8 *mac_addr)
{
u64 mac = 0;
int index;
for (index = ETH_ALEN - 1; index >= 0; index--)
mac |= ((u64)*mac_addr++) << (8 * index);
return mac;
}
static void cfg2mac(u64 cfg, u8 *mac_addr)
{
int i, index = 0;
for (i = ETH_ALEN - 1; i >= 0; i--, index++)
mac_addr[i] = (cfg >> (8 * index)) & 0xFF;
}
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
@ -242,13 +257,16 @@ int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
int index, id;
u64 cfg;
if (!lmac)
return -ENODEV;
/* access mac_ops to know csr_offset */
mac_ops = cgx_dev->mac_ops;
/* copy 6bytes from macaddr */
/* memcpy(&cfg, mac_addr, 6); */
cfg = mac2u64 (mac_addr);
cfg = ether_addr_to_u64(mac_addr);
id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
@ -315,7 +333,7 @@ int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
index = id * lmac->mac_to_index_bmap.max + idx;
cfg = mac2u64 (mac_addr);
cfg = ether_addr_to_u64(mac_addr);
cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
cfg |= ((u64)lmac_id << 49);
cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
@ -398,7 +416,7 @@ int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
cfg &= ~CGX_RX_DMAC_ADR_MASK;
cfg |= mac2u64 (mac_addr);
cfg |= ether_addr_to_u64(mac_addr);
cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
return 0;
@ -434,7 +452,7 @@ int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
/* Read MAC address to check whether it is ucast or mcast */
cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
cfg2mac(cfg, mac);
u64_to_ether_addr(cfg, mac);
if (is_multicast_ether_addr(mac))
lmac->mcast_filters_count--;
@ -506,7 +524,7 @@ static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
u8 num_lmacs;
u32 fifo_len;
fifo_len = cgx->mac_ops->fifo_len;
fifo_len = cgx->fifo_len;
num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx);
switch (num_lmacs) {
@ -530,14 +548,15 @@ static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
u8 lmac_type;
struct lmac *lmac;
u64 cfg;
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac_id);
if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
lmac = lmac_pdata(lmac_id, cgx);
if (lmac->lmac_type == LMAC_MODE_SGMII ||
lmac->lmac_type == LMAC_MODE_QSGMII) {
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
if (enable)
cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
@ -559,15 +578,16 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
struct cgx *cgx = cgx_get_pdata(cgx_id);
struct lmac *lmac = lmac_pdata(lmac_id, cgx);
u16 max_dmac = lmac->mac_to_index_bmap.max;
struct mac_ops *mac_ops;
u16 max_dmac;
int index, i;
u64 cfg = 0;
int id;
if (!cgx)
if (!cgx || !lmac)
return;
max_dmac = lmac->mac_to_index_bmap.max;
id = get_sequence_id_of_lmac(cgx, lmac_id);
mac_ops = cgx->mac_ops;
@ -706,6 +726,30 @@ u64 cgx_features_get(void *cgxd)
return ((struct cgx *)cgxd)->hw_features;
}
int cgx_stats_reset(void *cgxd, int lmac_id)
{
struct cgx *cgx = cgxd;
int stat_id;
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
for (stat_id = 0 ; stat_id < CGX_RX_STATS_COUNT; stat_id++) {
if (stat_id >= CGX_RX_STAT_GLOBAL_INDEX)
/* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
cgx_write(cgx, 0,
(CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0);
else
cgx_write(cgx, lmac_id,
(CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0);
}
for (stat_id = 0 ; stat_id < CGX_TX_STATS_COUNT; stat_id++)
cgx_write(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (stat_id * 8), 0);
return 0;
}
static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
{
if (!linfo->fec)
@ -740,7 +784,7 @@ int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
int corr_reg, uncorr_reg;
struct cgx *cgx = cgxd;
if (!cgx || lmac_id >= cgx->lmac_count)
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
@ -813,6 +857,11 @@ static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
cfg |= rx_pause ? CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK : 0x0;
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
@ -1223,8 +1272,6 @@ static inline void link_status_user_format(u64 lstat,
struct cgx_link_user_info *linfo,
struct cgx *cgx, u8 lmac_id)
{
const char *lmac_string;
linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
@ -1235,12 +1282,12 @@ static inline void link_status_user_format(u64 lstat,
if (linfo->lmac_type_id >= LMAC_MODE_MAX) {
dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d",
linfo->lmac_type_id, cgx->cgx_id, lmac_id);
strncpy(linfo->lmac_type, "Unknown", LMACTYPE_STR_LEN - 1);
strscpy(linfo->lmac_type, "Unknown", sizeof(linfo->lmac_type));
return;
}
lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
strscpy(linfo->lmac_type, cgx_lmactype_string[linfo->lmac_type_id],
sizeof(linfo->lmac_type));
}
/* Hardware event handlers */
@ -1345,7 +1392,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
/* Release thread waiting for completion */
lmac->cmd_pend = false;
wake_up_interruptible(&lmac->wq_cmd_cmplt);
wake_up(&lmac->wq_cmd_cmplt);
break;
case CGX_EVT_ASYNC:
if (cgx_event_is_linkevent(event))
@ -1556,6 +1603,23 @@ int cgx_lmac_linkup_start(void *cgxd)
return 0;
}
int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr)
{
struct cgx *cgx = cgxd;
u64 cfg;
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
/* Resetting PFC related CSRs */
cfg = 0xff;
cgx_write(cgxd, lmac_id, CGXX_CMRX_RX_LOGL_XON, cfg);
if (pf_req_flr)
cgx_lmac_internal_loopback(cgxd, lmac_id, false);
return 0;
}
static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
int cnt, bool req_free)
{
@ -1675,8 +1739,11 @@ static int cgx_lmac_init(struct cgx *cgx)
cgx->lmac_idmap[lmac->lmac_id] = lmac;
set_bit(lmac->lmac_id, &cgx->lmac_bmap);
cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id);
}
/* Start X2P reset on given MAC block */
cgx->mac_ops->mac_x2p_reset(cgx, true);
return cgx_lmac_verify_fwi_version(cgx);
err_bitmap_free:
@ -1722,7 +1789,7 @@ static void cgx_populate_features(struct cgx *cgx)
u64 cfg;
cfg = cgx_read(cgx, 0, CGX_CONST);
cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
cgx->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);
if (is_dev_rpm(cgx))
@ -1742,6 +1809,45 @@ static u8 cgx_get_rxid_mapoffset(struct cgx *cgx)
return 0x60;
}
static void cgx_x2p_reset(void *cgxd, bool enable)
{
struct cgx *cgx = cgxd;
int lmac_id;
u64 cfg;
if (enable) {
for_each_set_bit(lmac_id, &cgx->lmac_bmap, cgx->max_lmac_per_mac)
cgx->mac_ops->mac_enadis_rx(cgx, lmac_id, false);
usleep_range(1000, 2000);
cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG);
cfg |= cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP;
cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg);
} else {
cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG);
cfg &= ~(cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP);
cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg);
}
}
static int cgx_enadis_rx(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
u64 cfg;
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
if (enable)
cfg |= DATA_PKT_RX_EN;
else
cfg &= ~DATA_PKT_RX_EN;
cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
return 0;
}
static struct mac_ops cgx_mac_ops = {
.name = "cgx",
.csr_offset = 0,
@ -1771,6 +1877,10 @@ static struct mac_ops cgx_mac_ops = {
.mac_tx_enable = cgx_lmac_tx_enable,
.pfc_config = cgx_lmac_pfc_config,
.mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg,
.mac_reset = cgx_lmac_reset,
.mac_stats_reset = cgx_stats_reset,
.mac_x2p_reset = cgx_x2p_reset,
.mac_enadis_rx = cgx_enadis_rx,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)


@ -32,9 +32,14 @@
#define CGX_LMAC_TYPE_MASK 0xF
#define CGXX_CMRX_INT 0x040
#define FW_CGX_INT BIT_ULL(1)
#define CGXX_CMR_GLOBAL_CONFIG 0x08
#define CGX_NIX0_RESET BIT_ULL(2)
#define CGX_NIX1_RESET BIT_ULL(3)
#define CGX_NSCI_DROP BIT_ULL(9)
#define CGXX_CMRX_INT_ENA_W1S 0x058
#define CGXX_CMRX_RX_ID_MAP 0x060
#define CGXX_CMRX_RX_STAT0 0x070
#define CGXX_CMRX_RX_LOGL_XON 0x100
#define CGXX_CMRX_RX_LMACS 0x128
#define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset)
#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
@ -109,6 +114,7 @@ enum LMAC_TYPE {
LMAC_MODE_50G_R = 8,
LMAC_MODE_100G_R = 9,
LMAC_MODE_USXGMII = 10,
LMAC_MODE_USGMII = 11,
LMAC_MODE_MAX,
};
@ -139,6 +145,7 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
int cgx_stats_reset(void *cgxd, int lmac_id);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
@ -181,4 +188,6 @@ int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
u8 *rx_pause);
int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
int pfvf_idx);
int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr);
u32 cgx_get_fifo_len(void *cgxd);
#endif /* CGX_H */


@ -142,15 +142,24 @@ enum nix_scheduler {
#define TXSCH_RR_QTM_MAX ((1 << 24) - 1)
#define TXSCH_TL1_DFLT_RR_QTM TXSCH_RR_QTM_MAX
#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
#define TXSCH_TL1_DFLT_RR_PRIO (0x7ull)
#define CN10K_MAX_DWRR_WEIGHT 16384 /* Weight is 14bit on CN10K */
/* Don't change the order as on CN10K (except CN10KB)
* SMQX_CFG[SDP] value should be 1 for SDP flows.
*/
#define SMQ_LINK_TYPE_RPM 0
#define SMQ_LINK_TYPE_SDP 1
#define SMQ_LINK_TYPE_LBK 2
/* Min/Max packet sizes, excluding FCS */
#define NIC_HW_MIN_FRS 40
#define NIC_HW_MAX_FRS 9212
#define SDP_HW_MAX_FRS 65535
#define SDP_HW_MIN_FRS 16
#define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */
#define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */
#define SDP_LINK_CREDIT 0x320202
/* NIX RX action operation*/
#define NIX_RX_ACTIONOP_DROP (0x0ull)


@ -24,6 +24,7 @@
* @cgx: parent cgx port
* @mcast_filters_count: Number of multicast filters installed
* @lmac_id: lmac port id
* @lmac_type: lmac type like SGMII/XAUI
* @cmd_pend: flag set before new command is started
* flag cleared after command response is received
* @name: lmac port name
@ -43,6 +44,7 @@ struct lmac {
struct cgx *cgx;
u8 mcast_filters_count;
u8 lmac_id;
u8 lmac_type;
bool cmd_pend;
char *name;
};
@ -70,7 +72,6 @@ struct mac_ops {
u8 irq_offset;
u8 int_ena_bit;
u8 lmac_fwi;
u32 fifo_len;
bool non_contiguous_serdes_lane;
/* RPM & CGX differs in number of Receive/transmit stats */
u8 rx_stats_cnt;
@ -125,10 +126,14 @@ struct mac_ops {
int (*mac_get_pfc_frm_cfg)(void *cgxd, int lmac_id,
u8 *tx_pause, u8 *rx_pause);
int (*mac_reset)(void *cgxd, int lmac_id, u8 pf_req_flr);
/* FEC stats */
int (*get_fec_stats)(void *cgxd, int lmac_id,
struct cgx_fec_stats_rsp *rsp);
int (*mac_stats_reset)(void *cgxd, int lmac_id);
void (*mac_x2p_reset)(void *cgxd, bool enable);
int (*mac_enadis_rx)(void *cgxd, int lmac_id, bool enable);
};
struct cgx {
@ -138,6 +143,10 @@ struct cgx {
u8 lmac_count;
/* number of LMACs per MAC could be 4 or 8 */
u8 max_lmac_per_mac;
/* length of fifo varies depending on the number
* of LMACS
*/
u32 fifo_len;
#define MAX_LMAC_COUNT 8
struct lmac *lmac_idmap[MAX_LMAC_COUNT];
struct work_struct cgx_cmd_work;


@ -214,11 +214,12 @@ int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
}
EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
{
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
void *hw_mbase = mdev->hwbase;
u64 intr_val;
tx_hdr = hw_mbase + mbox->tx_start;
rx_hdr = hw_mbase + mbox->rx_start;
@ -254,14 +255,52 @@ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
spin_unlock(&mdev->mbox_lock);
/* Check if interrupt pending */
intr_val = readq((void __iomem *)mbox->reg_base +
(mbox->trigger | (devid << mbox->tr_shift)));
intr_val |= data;
/* The interrupt should be fired after num_msgs is written
* to the shared memory
*/
writeq(1, (void __iomem *)mbox->reg_base +
writeq(intr_val, (void __iomem *)mbox->reg_base +
(mbox->trigger | (devid << mbox->tr_shift)));
}
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
otx2_mbox_msg_send_data(mbox, devid, MBOX_DOWN_MSG);
}
EXPORT_SYMBOL(otx2_mbox_msg_send);
void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid)
{
otx2_mbox_msg_send_data(mbox, devid, MBOX_UP_MSG);
}
EXPORT_SYMBOL(otx2_mbox_msg_send_up);
bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid)
{
u64 data;
data = readq((void __iomem *)mbox->reg_base +
(mbox->trigger | (devid << mbox->tr_shift)));
/* If data is non-zero wait for ~1ms and return to caller
* whether data has changed to zero or not after the wait.
*/
if (!data)
return true;
usleep_range(950, 1000);
data = readq((void __iomem *)mbox->reg_base +
(mbox->trigger | (devid << mbox->tr_shift)));
return data == 0;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_zero);
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
int size, int size_rsp)
{
@ -413,4 +452,5 @@ const char *otx2_mbox_id2name(u16 id)
EXPORT_SYMBOL(otx2_mbox_id2name);
MODULE_AUTHOR("Marvell.");
MODULE_DESCRIPTION("Marvell RVU NIC Mbox helpers");
MODULE_LICENSE("GPL v2");


@ -16,6 +16,9 @@
#define MBOX_SIZE SZ_64K
#define MBOX_DOWN_MSG 1
#define MBOX_UP_MSG 2
/* AF/PF: PF initiated, PF/VF VF initiated */
#define MBOX_DOWN_RX_START 0
#define MBOX_DOWN_RX_SIZE (46 * SZ_1K)
@ -101,6 +104,7 @@ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
struct pci_dev *pdev, void __force *reg_base,
int direction, int ndevs, unsigned long *bmap);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
@ -118,6 +122,8 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
}
bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid);
/* Mailbox message types */
#define MBOX_MSG_MASK 0xFFFF
#define MBOX_MSG_INVALID 0xFFFE
@ -133,9 +139,14 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
M(NDC_SYNC_OP, 0x009, ndc_sync_op, ndc_sync_op, msg_rsp) \
M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
msg_rsp) \
M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
M(PTP_GET_CAP, 0x00c, ptp_get_cap, msg_req, ptp_get_cap_rsp) \
M(GET_REP_CNT, 0x00d, get_rep_cnt, msg_req, get_rep_cnt_rsp) \
M(ESW_CFG, 0x00e, esw_cfg, esw_cfg_req, msg_rsp) \
M(REP_EVENT_NOTIFY, 0x00f, rep_event_notify, rep_event, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
@ -167,6 +178,7 @@ M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\
cgx_set_link_mode_rsp) \
M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp) \
M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \
cgx_features_info_msg) \
M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
@ -197,6 +209,8 @@ M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
msg_rsp) \
M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \
M(CPT_LF_RESET, 0xA08, cpt_lf_reset, cpt_lf_rst_req, msg_rsp) \
M(CPT_FLT_ENG_INFO, 0xA09, cpt_flt_eng_info, cpt_flt_eng_info_req, \
cpt_flt_eng_info_rsp) \
/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
@ -233,7 +247,7 @@ M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \
M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, \
npc_install_flow_req, npc_install_flow_rsp) \
M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
npc_delete_flow_req, msg_rsp) \
npc_delete_flow_req, npc_delete_flow_rsp) \
M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
npc_mcam_read_entry_req, \
npc_mcam_read_entry_rsp) \
@ -299,6 +313,20 @@ M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
msg_rsp) \
M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
nix_bandprof_get_hwinfo_rsp) \
M(NIX_CPT_BP_ENABLE, 0x8020, nix_cpt_bp_enable, nix_bp_cfg_req, \
nix_bp_cfg_rsp) \
M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req, \
msg_rsp) \
M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \
msg_req, nix_inline_ipsec_cfg) \
M(NIX_MCAST_GRP_CREATE, 0x802b, nix_mcast_grp_create, nix_mcast_grp_create_req, \
nix_mcast_grp_create_rsp) \
M(NIX_MCAST_GRP_DESTROY, 0x802c, nix_mcast_grp_destroy, nix_mcast_grp_destroy_req, \
msg_rsp) \
M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update, \
nix_mcast_grp_update_req, \
nix_mcast_grp_update_rsp) \
M(NIX_LF_STATS, 0x802e, nix_lf_stats, nix_stats_req, nix_stats_rsp) \
/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
mcs_alloc_rsrc_rsp) \
@ -360,12 +388,16 @@ M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
#define MBOX_UP_MCS_MESSAGES \
M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp)
#define MBOX_UP_REP_MESSAGES \
M(REP_EVENT_UP_NOTIFY, 0xEF0, rep_event_up_notify, rep_event, msg_rsp) \
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
MBOX_UP_CPT_MESSAGES
MBOX_UP_MCS_MESSAGES
MBOX_UP_REP_MESSAGES
#undef M
};
@ -825,6 +857,11 @@ enum nix_af_status {
NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
NIX_AF_ERR_LINK_CREDITS = -431,
NIX_AF_ERR_INVALID_BPID = -434,
NIX_AF_ERR_INVALID_BPID_REQ = -435,
NIX_AF_ERR_INVALID_MCAST_GRP = -436,
NIX_AF_ERR_INVALID_MCAST_DEL_REQ = -437,
NIX_AF_ERR_NON_CONTIG_MCE_LIST = -438,
};
/* For NIX RX vtag action */
@ -1076,6 +1113,8 @@ struct nix_vtag_config_rsp {
*/
};
#define NIX_FLOW_KEY_TYPE_L3_L4_MASK (~(0xf << 28))
struct nix_rss_flowkey_cfg {
struct mbox_msghdr hdr;
int mcam_index; /* MCAM entry index to modify */
@ -1097,10 +1136,15 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
#define NIX_FLOW_KEY_TYPE_CUSTOM0 BIT(19)
#define NIX_FLOW_KEY_TYPE_VLAN BIT(20)
#define NIX_FLOW_KEY_TYPE_IPV4_PROTO BIT(21)
#define NIX_FLOW_KEY_TYPE_AH BIT(22)
#define NIX_FLOW_KEY_TYPE_ESP BIT(23)
#define NIX_FLOW_KEY_TYPE_L4_DST_ONLY BIT(28)
#define NIX_FLOW_KEY_TYPE_L4_SRC_ONLY BIT(29)
#define NIX_FLOW_KEY_TYPE_L3_DST_ONLY BIT(30)
#define NIX_FLOW_KEY_TYPE_L3_SRC_ONLY BIT(31)
u32 flowkey_cfg; /* Flowkey types selected */
u8 group; /* RSS context or group */
};
@ -1147,6 +1191,7 @@ struct nix_rx_cfg {
struct mbox_msghdr hdr;
#define NIX_RX_OL3_VERIFY BIT(0)
#define NIX_RX_OL4_VERIFY BIT(1)
#define NIX_RX_DROP_RE BIT(2)
u8 len_verify; /* Outer L3/L4 len check */
#define NIX_RX_CSUM_OL4_VERIFY BIT(0)
u8 csum_verify; /* Outer L4 checksum verification */
@ -1182,23 +1227,83 @@ struct nix_bp_cfg_req {
/* bpid_per_chan = 1 assigns separate bp id for each channel */
};
/* PF can be mapped to either CGX or LBK interface,
* so maximum 64 channels are possible.
*/
#define NIX_MAX_BPID_CHAN 64
/* Maximum channels any single NIX interface can have */
#define NIX_MAX_BPID_CHAN 256
struct nix_bp_cfg_rsp {
struct mbox_msghdr hdr;
u16 chan_bpid[NIX_MAX_BPID_CHAN]; /* Channel and bpid mapping */
u8 chan_cnt; /* Number of channel for which bpids are assigned */
};
struct nix_mcast_grp_create_req {
struct mbox_msghdr hdr;
#define NIX_MCAST_INGRESS 0
#define NIX_MCAST_EGRESS 1
u8 dir;
u8 reserved[11];
/* Reserving few bytes for future requirement */
};
struct nix_mcast_grp_create_rsp {
struct mbox_msghdr hdr;
/* This mcast_grp_idx should be passed during MCAM
* write entry for multicast. AF will identify the
* corresponding multicast table index associated
* with the group id and program the same to MCAM entry.
* This group id is also needed during group delete
* and update request.
*/
u32 mcast_grp_idx;
};
struct nix_mcast_grp_destroy_req {
struct mbox_msghdr hdr;
/* Group id returned by nix_mcast_grp_create_rsp */
u32 mcast_grp_idx;
/* If AF is requesting for destroy, then set
* it to '1'. Otherwise keep it to '0'
*/
u8 is_af;
};
struct nix_mcast_grp_update_req {
struct mbox_msghdr hdr;
/* Group id returned by nix_mcast_grp_create_rsp */
u32 mcast_grp_idx;
/* Number of multicast/mirror entries requested */
u32 num_mce_entry;
#define NIX_MCE_ENTRY_MAX 64
#define NIX_RX_RQ 0
#define NIX_RX_RSS 1
/* Receive queue or RSS index within pf_func */
u32 rq_rss_index[NIX_MCE_ENTRY_MAX];
/* pcifunc is required for both ingress and egress multicast */
u16 pcifunc[NIX_MCE_ENTRY_MAX];
/* channel is required for egress multicast */
u16 channel[NIX_MCE_ENTRY_MAX];
#define NIX_MCAST_OP_ADD_ENTRY 0
#define NIX_MCAST_OP_DEL_ENTRY 1
/* Destination type. 0:Receive queue, 1:RSS*/
u8 dest_type[NIX_MCE_ENTRY_MAX];
u8 op;
/* If AF is requesting for update, then set
* it to '1'. Otherwise keep it to '0'
*/
u8 is_af;
};
struct nix_mcast_grp_update_rsp {
struct mbox_msghdr hdr;
u32 mce_start_index;
};
/* Global NIX inline IPSec configuration */
struct nix_inline_ipsec_cfg {
struct mbox_msghdr hdr;
u32 cpt_credit;
struct {
u8 egrp;
u8 opcode;
u16 opcode;
u16 param1;
u16 param2;
} gen_cfg;
@ -1207,6 +1312,8 @@ struct nix_inline_ipsec_cfg {
u8 cpt_slot;
} inst_qsel;
u8 enable;
u16 bpid;
u32 credit_th;
};
/* Per NIX LF inline IPSec configuration */
@ -1233,7 +1340,9 @@ struct nix_hw_info {
u16 min_mtu;
u32 rpm_dwrr_mtu;
u32 sdp_dwrr_mtu;
u64 rsvd[16]; /* Add reserved fields for future expansion */
u32 lbk_dwrr_mtu;
u32 rsvd32[1];
u64 rsvd[15]; /* Add reserved fields for future expansion */
};
struct nix_bandprof_alloc_req {
@ -1267,6 +1376,37 @@ struct nix_bandprof_get_hwinfo_rsp {
u32 policer_timeunit;
};
struct nix_stats_req {
struct mbox_msghdr hdr;
u8 reset;
u16 pcifunc;
u64 rsvd;
};
struct nix_stats_rsp {
struct mbox_msghdr hdr;
u16 pcifunc;
struct {
u64 octs;
u64 ucast;
u64 bcast;
u64 mcast;
u64 drop;
u64 drop_octs;
u64 drop_mcast;
u64 drop_bcast;
u64 err;
u64 rsvd[5];
} rx;
struct {
u64 ucast;
u64 bcast;
u64 mcast;
u64 drop;
u64 octs;
} tx;
};
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
@ -1422,6 +1562,47 @@ struct npc_get_kex_cfg_rsp {
u8 mkex_pfl_name[MKEX_NAME_LEN];
};
struct ptp_get_cap_rsp {
struct mbox_msghdr hdr;
#define PTP_CAP_HW_ATOMIC_UPDATE BIT_ULL(0)
u64 cap;
};
struct get_rep_cnt_rsp {
struct mbox_msghdr hdr;
u16 rep_cnt;
u16 rep_pf_map[64];
u64 rsvd;
};
struct esw_cfg_req {
struct mbox_msghdr hdr;
u8 ena;
u64 rsvd;
};
struct rep_evt_data {
u8 port_state;
u8 vf_state;
u16 rx_mode;
u16 rx_flags;
u16 mtu;
u8 mac[ETH_ALEN];
u64 rsvd[5];
};
struct rep_event {
struct mbox_msghdr hdr;
u16 pcifunc;
#define RVU_EVENT_PORT_STATE BIT_ULL(0)
#define RVU_EVENT_PFVF_STATE BIT_ULL(1)
#define RVU_EVENT_MTU_CHANGE BIT_ULL(2)
#define RVU_EVENT_RX_MODE_CHANGE BIT_ULL(3)
#define RVU_EVENT_MAC_ADDR_CHANGE BIT_ULL(4)
u16 event;
struct rep_evt_data evt_data;
};
struct flow_msg {
unsigned char dmac[6];
unsigned char smac[6];
@ -1436,6 +1617,10 @@ struct flow_msg {
__be32 ip4dst;
__be32 ip6dst[4];
};
union {
__be32 spi;
};
u8 tos;
u8 ip_ver;
u8 ip_proto;
@ -1446,6 +1631,17 @@ struct flow_msg {
u8 ip_flag;
u8 next_header;
};
__be16 vlan_itci;
#define OTX2_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
#define OTX2_FLOWER_MASK_MPLS_TC GENMASK(11, 9)
#define OTX2_FLOWER_MASK_MPLS_BOS BIT(8)
#define OTX2_FLOWER_MASK_MPLS_TTL GENMASK(7, 0)
#define OTX2_FLOWER_MASK_MPLS_NON_TTL GENMASK(31, 8)
u32 mpls_lse[4];
u8 icmp_type;
u8 icmp_code;
__be16 tcp_flags;
u16 sq_id;
};
struct npc_install_flow_req {
@ -1476,6 +1672,8 @@ struct npc_install_flow_req {
u8 vtag0_op;
u16 vtag1_def;
u8 vtag1_op;
/* old counter value */
u16 cntr_val;
};
struct npc_install_flow_rsp {
@ -1491,6 +1689,11 @@ struct npc_delete_flow_req {
u8 all; /* PF + VFs */
};
struct npc_delete_flow_rsp {
struct mbox_msghdr hdr;
u16 cntr_val;
};
struct npc_mcam_read_entry_req {
struct mbox_msghdr hdr;
u16 entry; /* MCAM entry to read */
@ -1540,7 +1743,9 @@ enum ptp_op {
PTP_OP_GET_CLOCK = 1,
PTP_OP_GET_TSTMP = 2,
PTP_OP_SET_THRESH = 3,
PTP_OP_EXTTS_ON = 4,
PTP_OP_PPS_ON = 4,
PTP_OP_ADJTIME = 5,
PTP_OP_SET_CLOCK = 6,
};
struct ptp_req {
@ -1548,12 +1753,16 @@ struct ptp_req {
u8 op;
s64 scaled_ppm;
u64 thresh;
int extts_on;
u64 period;
int pps_on;
s64 delta;
u64 clk;
};
struct ptp_rsp {
struct mbox_msghdr hdr;
u64 clk;
u64 tsc;
};
struct npc_get_field_status_req {
@ -1587,6 +1796,13 @@ struct lmtst_tbl_setup_req {
u64 rsvd[4];
};
struct ndc_sync_op {
struct mbox_msghdr hdr;
u8 nix_lf_tx_sync;
u8 nix_lf_rx_sync;
u8 npa_lf_sync;
};
/* CPT mailbox error codes
* Range 901 - 1000.
*/
@ -1616,7 +1832,9 @@ struct cpt_lf_alloc_req_msg {
u16 nix_pf_func;
u16 sso_pf_func;
u16 eng_grpmsk;
int blkaddr;
u8 blkaddr;
u8 ctx_ilen_valid : 1;
u8 ctx_ilen : 7;
};
#define CPT_INLINE_INBOUND 0
@ -1707,6 +1925,22 @@ struct cpt_lf_rst_req {
u32 rsvd;
};
/* Mailbox message format to request for CPT faulted engines */
struct cpt_flt_eng_info_req {
struct mbox_msghdr hdr;
int blkaddr;
bool reset;
u32 rsvd;
};
struct cpt_flt_eng_info_rsp {
struct mbox_msghdr hdr;
#define CPT_AF_MAX_FLT_INT_VECS 3
u64 flt_eng_map[CPT_AF_MAX_FLT_INT_VECS];
u64 rcvrd_eng_map[CPT_AF_MAX_FLT_INT_VECS];
u64 rsvd;
};
struct sdp_node_info {
/* Node to which this PF belons to */
u8 node_id;
@ -1882,7 +2116,7 @@ struct mcs_hw_info {
u8 tcam_entries; /* RX/TX Tcam entries per mcs block */
u8 secy_entries; /* RX/TX SECY entries per mcs block */
u8 sc_entries; /* RX/TX SC CAM entries per mcs block */
u8 sa_entries; /* PN table entries = SA entries */
u16 sa_entries; /* PN table entries = SA entries */
u64 rsvd[16];
};


@ -117,7 +117,7 @@ void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id
reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);
reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id);
reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(id);
stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);
reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
@ -215,7 +215,7 @@ void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id);
reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(id);
stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);
if (mcs->hw->mcs_blks > 1) {
@ -1219,6 +1219,17 @@ struct mcs *mcs_get_pdata(int mcs_id)
return NULL;
}
bool is_mcs_bypass(int mcs_id)
{
struct mcs *mcs_dev;
list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
if (mcs_dev->mcs_id == mcs_id)
return mcs_dev->bypass;
}
return true;
}
void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
{
u64 val = 0;
@ -1436,7 +1447,7 @@ static int mcs_x2p_calibration(struct mcs *mcs)
return err;
}
static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
static void mcs_set_external_bypass(struct mcs *mcs, bool bypass)
{
u64 val;
@ -1447,6 +1458,7 @@ static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
else
val &= ~BIT_ULL(6);
mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
mcs->bypass = bypass;
}
static void mcs_global_cfg(struct mcs *mcs)


@ -149,6 +149,7 @@ struct mcs {
u16 num_vec;
void *rvu;
u16 *tx_sa_active;
bool bypass;
};
struct mcs_ops {
@ -206,6 +207,7 @@ void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *
int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc);
int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req);
int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req);
bool is_mcs_bypass(int mcs_id);
/* CN10K-B APIs */
void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs);


@ -810,14 +810,37 @@
offset = 0x9d8ull; \
offset; })
#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDX(a) ({ \
u64 offset; \
\
offset = 0xee80ull; \
if (mcs->hw->mcs_blks > 1) \
offset = 0xe818ull; \
offset += (a) * 0x8ull; \
offset; })
#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) ({ \
u64 offset; \
\
offset = 0xa680ull; \
if (mcs->hw->mcs_blks > 1) \
offset = 0xd018ull; \
offset += (a) * 0x8ull; \
offset; })
#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) ({ \
u64 offset; \
\
offset = 0xf680ull; \
if (mcs->hw->mcs_blks > 1) \
offset = 0xe018ull; \
offset += (a) * 0x8ull; \
offset; })
#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(a) (0xe680ull + (a) * 0x8ull)
#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(a) (0xde80ull + (a) * 0x8ull)
#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(a) (0xa680ull + (a) * 0x8ull)
#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(a) (0xd218 + (a) * 0x8ull)
#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) (0xd018ull + (a) * 0x8ull)
#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(a) (0xee80ull + (a) * 0x8ull)
#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(a) (0xb680ull + (a) * 0x8ull)
#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) (0xf680ull + (a) * 0x8ull)
#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(a) (0x12680ull + (a) * 0x8ull)
#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(a) (0x15680ull + (a) * 0x8ull)
#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(a) (0x13680ull + (a) * 0x8ull)


@ -121,13 +121,17 @@ int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
{
struct mcs_intr_info *req;
int err, pf;
int pf;
pf = rvu_get_pf(event->pcifunc);
mutex_lock(&rvu->mbox_lock);
req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
if (!req)
if (!req) {
mutex_unlock(&rvu->mbox_lock);
return -ENOMEM;
}
req->mcs_id = event->mcs_id;
req->intr_mask = event->intr_mask;
@ -135,10 +139,11 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
req->hdr.pcifunc = event->pcifunc;
req->lmac_id = event->lmac_id;
otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
if (err)
dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);
otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
mutex_unlock(&rvu->mbox_lock);
return 0;
}


@ -63,8 +63,13 @@ enum npc_kpu_lb_ltype {
NPC_LT_LB_CUSTOM1 = 0xF,
};
/* Don't modify ltypes up to IP6_EXT, otherwise length and checksum of IP
* headers may not be checked correctly. IPv4 ltypes and IPv6 ltypes must
* differ only at bit 0 so mask 0xE can be used to detect extended headers.
*/
enum npc_kpu_lc_ltype {
NPC_LT_LC_IP = 1,
NPC_LT_LC_PTP = 1,
NPC_LT_LC_IP,
NPC_LT_LC_IP_OPT,
NPC_LT_LC_IP6,
NPC_LT_LC_IP6_EXT,
@ -72,7 +77,6 @@ enum npc_kpu_lc_ltype {
NPC_LT_LC_RARP,
NPC_LT_LC_MPLS,
NPC_LT_LC_NSH,
NPC_LT_LC_PTP,
NPC_LT_LC_FCOE,
NPC_LT_LC_NGIO,
NPC_LT_LC_CUSTOM0 = 0xE,
@ -85,8 +89,7 @@ enum npc_kpu_lc_ltype {
enum npc_kpu_ld_ltype {
NPC_LT_LD_TCP = 1,
NPC_LT_LD_UDP,
NPC_LT_LD_ICMP,
NPC_LT_LD_SCTP,
NPC_LT_LD_SCTP = 4,
NPC_LT_LD_ICMP6,
NPC_LT_LD_CUSTOM0,
NPC_LT_LD_CUSTOM1,
@ -97,6 +100,7 @@ enum npc_kpu_ld_ltype {
NPC_LT_LD_NSH,
NPC_LT_LD_TU_MPLS_IN_NSH,
NPC_LT_LD_TU_MPLS_IN_IP,
NPC_LT_LD_ICMP,
};
enum npc_kpu_le_ltype {
@ -140,14 +144,14 @@ enum npc_kpu_lg_ltype {
enum npc_kpu_lh_ltype {
NPC_LT_LH_TU_TCP = 1,
NPC_LT_LH_TU_UDP,
NPC_LT_LH_TU_ICMP,
NPC_LT_LH_TU_SCTP,
NPC_LT_LH_TU_SCTP = 4,
NPC_LT_LH_TU_ICMP6,
NPC_LT_LH_CUSTOM0,
NPC_LT_LH_CUSTOM1,
NPC_LT_LH_TU_IGMP = 8,
NPC_LT_LH_TU_ESP,
NPC_LT_LH_TU_AH,
NPC_LT_LH_CUSTOM0 = 0xE,
NPC_LT_LH_CUSTOM1 = 0xF,
NPC_LT_LH_TU_ICMP = 0xF,
};
/* NPC port kind defines how the incoming or outgoing packets
@ -155,10 +159,11 @@ enum npc_kpu_lh_ltype {
* Software assigns pkind for each incoming port such as CGX
* Ethernet interfaces, LBK interfaces, etc.
*/
#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND
#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CPT_HDR_PTP_PKIND
enum npc_pkind_type {
NPC_RX_LBK_PKIND = 0ULL,
NPC_RX_CPT_HDR_PTP_PKIND = 54ULL,
NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL,
NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
NPC_RX_CHLEN24B_PKIND = 57ULL,
@ -184,6 +189,7 @@ enum key_fields {
NPC_VLAN_ETYPE_CTAG, /* 0x8100 */
NPC_VLAN_ETYPE_STAG, /* 0x88A8 */
NPC_OUTER_VID,
NPC_INNER_VID,
NPC_TOS,
NPC_IPFRAG_IPV4,
NPC_SIP_IPV4,
@ -204,6 +210,18 @@ enum key_fields {
NPC_DPORT_UDP,
NPC_SPORT_SCTP,
NPC_DPORT_SCTP,
NPC_IPSEC_SPI,
NPC_MPLS1_LBTCBOS,
NPC_MPLS1_TTL,
NPC_MPLS2_LBTCBOS,
NPC_MPLS2_TTL,
NPC_MPLS3_LBTCBOS,
NPC_MPLS3_TTL,
NPC_MPLS4_LBTCBOS,
NPC_MPLS4_TTL,
NPC_TYPE_ICMP,
NPC_CODE_ICMP,
NPC_TCP_FLAGS,
NPC_HEADER_FIELDS_MAX,
NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
NPC_PF_FUNC, /* Valid when Tx */
@ -229,6 +247,8 @@ enum key_fields {
NPC_VLAN_TAG1,
/* outer vlan tci for double tagged frame */
NPC_VLAN_TAG2,
/* inner vlan tci for double tagged frame */
NPC_VLAN_TAG3,
/* other header fields programmed to extract but not of our interest */
NPC_UNKNOWN,
NPC_KEY_FIELDS_MAX,
@ -516,7 +536,7 @@ struct npc_lt_def {
u8 ltype_mask;
u8 ltype_match;
u8 lid;
};
} __packed;
struct npc_lt_def_ipsec {
u8 ltype_mask;
@ -524,7 +544,7 @@ struct npc_lt_def_ipsec {
u8 lid;
u8 spi_offset;
u8 spi_nz;
};
} __packed;
struct npc_lt_def_apad {
u8 ltype_mask;

File diff suppressed because it is too large.


@ -12,8 +12,8 @@
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include "ptp.h"
#include "mbox.h"
#include "ptp.h"
#include "rvu.h"
#define DRV_NAME "Marvell PTP Driver"
@ -40,11 +40,13 @@
#define PTP_CLOCK_CFG_TSTMP_EDGE BIT_ULL(9)
#define PTP_CLOCK_CFG_TSTMP_EN BIT_ULL(8)
#define PTP_CLOCK_CFG_TSTMP_IN_MASK GENMASK_ULL(15, 10)
#define PTP_CLOCK_CFG_ATOMIC_OP_MASK GENMASK_ULL(28, 26)
#define PTP_CLOCK_CFG_PPS_EN BIT_ULL(30)
#define PTP_CLOCK_CFG_PPS_INV BIT_ULL(31)
#define PTP_PPS_HI_INCR 0xF60ULL
#define PTP_PPS_LO_INCR 0xF68ULL
#define PTP_PPS_THRESH_LO 0xF50ULL
#define PTP_PPS_THRESH_HI 0xF58ULL
#define PTP_CLOCK_LO 0xF08ULL
@ -53,36 +55,62 @@
#define PTP_TIMESTAMP 0xF20ULL
#define PTP_CLOCK_SEC 0xFD0ULL
#define PTP_SEC_ROLLOVER 0xFD8ULL
/* Atomic update related CSRs */
#define PTP_FRNS_TIMESTAMP 0xFE0ULL
#define PTP_NXT_ROLLOVER_SET 0xFE8ULL
#define PTP_CURR_ROLLOVER_SET 0xFF0ULL
#define PTP_NANO_TIMESTAMP 0xFF8ULL
#define PTP_SEC_TIMESTAMP 0x1000ULL
#define CYCLE_MULT 1000
#define is_rev_A0(ptp) (((ptp)->pdev->revision & 0x0F) == 0x0)
#define is_rev_A1(ptp) (((ptp)->pdev->revision & 0x0F) == 0x1)
/* PTP atomic update operation type */
enum atomic_opcode {
ATOMIC_SET = 1,
ATOMIC_INC = 3,
ATOMIC_DEC = 4
};
static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];
static bool is_ptp_dev_cnf10kb(struct ptp *ptp)
static bool is_ptp_dev_cnf10ka(struct ptp *ptp)
{
return (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_B_PTP) ? true : false;
return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP;
}
static bool is_ptp_dev_cn10k(struct ptp *ptp)
static bool is_ptp_dev_cn10ka(struct ptp *ptp)
{
return (ptp->pdev->device == PCI_DEVID_CN10K_PTP) ? true : false;
return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP;
}
static bool cn10k_ptp_errata(struct ptp *ptp)
{
if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
if ((is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) &&
(is_rev_A0(ptp) || is_rev_A1(ptp)))
return true;
return false;
}
static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp)
static bool is_tstmp_atomic_update_supported(struct rvu *rvu)
{
if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
return true;
return false;
struct ptp *ptp = rvu->ptp;
if (is_rvu_otx2(rvu))
return false;
/* On older silicon variants of CN10K, atomic update feature
* is not available.
*/
if ((is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) &&
(is_rev_A0(ptp) || is_rev_A1(ptp)))
return false;
return true;
}
static enum hrtimer_restart ptp_reset_thresh(struct hrtimer *hrtimer)
@ -208,7 +236,7 @@ struct ptp *ptp_get(void)
/* Check driver is bound to PTP block */
if (!ptp)
ptp = ERR_PTR(-EPROBE_DEFER);
else
else if (!IS_ERR(ptp))
pci_dev_get(ptp->pdev);
return ptp;
@ -222,6 +250,65 @@ void ptp_put(struct ptp *ptp)
pci_dev_put(ptp->pdev);
}
static void ptp_atomic_update(struct ptp *ptp, u64 timestamp)
{
u64 regval, curr_rollover_set, nxt_rollover_set;
/* First setup NSECs and SECs */
writeq(timestamp, ptp->reg_base + PTP_NANO_TIMESTAMP);
writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP);
writeq(timestamp / NSEC_PER_SEC,
ptp->reg_base + PTP_SEC_TIMESTAMP);
nxt_rollover_set = roundup(timestamp, NSEC_PER_SEC);
curr_rollover_set = nxt_rollover_set - NSEC_PER_SEC;
writeq(nxt_rollover_set, ptp->reg_base + PTP_NXT_ROLLOVER_SET);
writeq(curr_rollover_set, ptp->reg_base + PTP_CURR_ROLLOVER_SET);
/* Now, initiate atomic update */
regval = readq(ptp->reg_base + PTP_CLOCK_CFG);
regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
regval |= (ATOMIC_SET << 26);
writeq(regval, ptp->reg_base + PTP_CLOCK_CFG);
}
static void ptp_atomic_adjtime(struct ptp *ptp, s64 delta)
{
bool neg_adj = false, atomic_inc_dec = false;
u64 regval, ptp_clock_hi;
if (delta < 0) {
delta = -delta;
neg_adj = true;
}
/* use atomic inc/dec when delta < 1 second */
if (delta < NSEC_PER_SEC)
atomic_inc_dec = true;
if (!atomic_inc_dec) {
ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
if (neg_adj) {
if (ptp_clock_hi > delta)
ptp_clock_hi -= delta;
else
ptp_clock_hi = delta - ptp_clock_hi;
} else {
ptp_clock_hi += delta;
}
ptp_atomic_update(ptp, ptp_clock_hi);
} else {
writeq(delta, ptp->reg_base + PTP_NANO_TIMESTAMP);
writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP);
/* initiate atomic inc/dec */
regval = readq(ptp->reg_base + PTP_CLOCK_CFG);
regval &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
regval |= neg_adj ? (ATOMIC_DEC << 26) : (ATOMIC_INC << 26);
writeq(regval, ptp->reg_base + PTP_CLOCK_CFG);
}
}
static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
{
bool neg_adj = false;
@ -277,8 +364,9 @@ static int ptp_get_clock(struct ptp *ptp, u64 *clk)
return 0;
}
void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts)
{
struct ptp *ptp = rvu->ptp;
struct pci_dev *pdev;
u64 clock_comp;
u64 clock_cfg;
@ -297,8 +385,14 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
ptp->clock_rate = sclk * 1000000;
/* Program the seconds rollover value to 1 second */
if (is_ptp_dev_cnf10kb(ptp))
if (is_tstmp_atomic_update_supported(rvu)) {
writeq(0, ptp->reg_base + PTP_NANO_TIMESTAMP);
writeq(0, ptp->reg_base + PTP_FRNS_TIMESTAMP);
writeq(0, ptp->reg_base + PTP_SEC_TIMESTAMP);
writeq(0, ptp->reg_base + PTP_CURR_ROLLOVER_SET);
writeq(0x3b9aca00, ptp->reg_base + PTP_NXT_ROLLOVER_SET);
writeq(0x3b9aca00, ptp->reg_base + PTP_SEC_ROLLOVER);
}
/* Enable PTP clock */
clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
@ -318,24 +412,11 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
}
clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
/* Set 50% duty cycle for 1Hz output */
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
if (cn10k_ptp_errata(ptp)) {
/* The ptp_clock_hi rollsover to zero once clock cycle before it
* reaches one second boundary. so, program the pps_lo_incr in
* such a way that the pps threshold value comparison at one
* second boundary will succeed and pps edge changes. After each
* one second boundary, the hrtimer handler will be invoked and
* reprograms the pps threshold value.
*/
ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
ptp->reg_base + PTP_PPS_LO_INCR);
}
clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
clock_cfg &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
clock_cfg |= (ATOMIC_SET << 26);
writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
if (cn10k_ptp_errata(ptp))
clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
@ -350,7 +431,7 @@ static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
{
u64 timestamp;
if (is_ptp_dev_cn10k(ptp)) {
if (is_ptp_dev_cn10ka(ptp) || is_ptp_dev_cnf10ka(ptp)) {
timestamp = readq(ptp->reg_base + PTP_TIMESTAMP);
*clk = (timestamp >> 32) * NSEC_PER_SEC + (timestamp & 0xFFFFFFFF);
} else {
@ -368,31 +449,78 @@ static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
return 0;
}
static int ptp_extts_on(struct ptp *ptp, int on)
static int ptp_config_hrtimer(struct ptp *ptp, int on)
{
u64 ptp_clock_hi;
if (cn10k_ptp_errata(ptp)) {
if (on) {
ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
} else {
if (hrtimer_active(&ptp->hrtimer))
hrtimer_cancel(&ptp->hrtimer);
}
if (on) {
ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
} else {
if (hrtimer_active(&ptp->hrtimer))
hrtimer_cancel(&ptp->hrtimer);
}
return 0;
}
static int ptp_pps_on(struct ptp *ptp, int on, u64 period)
{
u64 clock_cfg;
clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
if (on) {
if (cn10k_ptp_errata(ptp) && period != NSEC_PER_SEC) {
dev_err(&ptp->pdev->dev, "Supports max period value as 1 second\n");
return -EINVAL;
}
if (period > (8 * NSEC_PER_SEC)) {
dev_err(&ptp->pdev->dev, "Supports max period as 8 seconds\n");
return -EINVAL;
}
clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
writeq(0, ptp->reg_base + PTP_PPS_THRESH_HI);
writeq(0, ptp->reg_base + PTP_PPS_THRESH_LO);
/* Configure high/low phase time */
period = period / 2;
writeq(((u64)period << 32), ptp->reg_base + PTP_PPS_HI_INCR);
writeq(((u64)period << 32), ptp->reg_base + PTP_PPS_LO_INCR);
} else {
clock_cfg &= ~(PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV);
writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
}
if (on && cn10k_ptp_errata(ptp)) {
/* The ptp_clock_hi rollsover to zero once clock cycle before it
* reaches one second boundary. so, program the pps_lo_incr in
* such a way that the pps threshold value comparison at one
* second boundary will succeed and pps edge changes. After each
* one second boundary, the hrtimer handler will be invoked and
* reprograms the pps threshold value.
*/
ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
ptp->reg_base + PTP_PPS_LO_INCR);
}
if (cn10k_ptp_errata(ptp))
ptp_config_hrtimer(ptp, on);
return 0;
}
static int ptp_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct ptp *ptp;
int err;
ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
if (!ptp) {
err = -ENOMEM;
goto error;
@ -415,33 +543,30 @@ static int ptp_probe(struct pci_dev *pdev,
first_ptp_block = ptp;
spin_lock_init(&ptp->ptp_lock);
if (is_ptp_tsfmt_sec_nsec(ptp))
ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
else
ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
if (cn10k_ptp_errata(ptp)) {
ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ptp->hrtimer.function = ptp_reset_thresh;
} else {
ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
}
return 0;
error_free:
devm_kfree(dev, ptp);
kfree(ptp);
error:
/* For `ptp_get()` we need to differentiate between the case
* when the core has not tried to probe this device and the case when
* the probe failed. In the later case we pretend that the
* initialization was successful and keep the error in
* the probe failed. In the later case we keep the error in
* `dev->driver_data`.
*/
pci_set_drvdata(pdev, ERR_PTR(err));
if (!first_ptp_block)
first_ptp_block = ERR_PTR(err);
return 0;
return err;
}
static void ptp_remove(struct pci_dev *pdev)
@ -449,16 +574,17 @@ static void ptp_remove(struct pci_dev *pdev)
struct ptp *ptp = pci_get_drvdata(pdev);
u64 clock_cfg;
if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
hrtimer_cancel(&ptp->hrtimer);
if (IS_ERR_OR_NULL(ptp))
return;
if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
hrtimer_cancel(&ptp->hrtimer);
/* Disable PTP clock */
clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
kfree(ptp);
}
static const struct pci_device_id ptp_id_table[] = {
@ -519,8 +645,14 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
case PTP_OP_SET_THRESH:
err = ptp_set_thresh(rvu->ptp, req->thresh);
break;
case PTP_OP_EXTTS_ON:
err = ptp_extts_on(rvu->ptp, req->extts_on);
case PTP_OP_PPS_ON:
err = ptp_pps_on(rvu->ptp, req->pps_on, req->period);
break;
case PTP_OP_ADJTIME:
ptp_atomic_adjtime(rvu->ptp, req->delta);
break;
case PTP_OP_SET_CLOCK:
ptp_atomic_update(rvu->ptp, (u64)req->clk);
break;
default:
err = -EINVAL;
@ -529,3 +661,17 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
return err;
}
int rvu_mbox_handler_ptp_get_cap(struct rvu *rvu, struct msg_req *req,
struct ptp_get_cap_rsp *rsp)
{
if (!rvu->ptp)
return -ENODEV;
if (is_tstmp_atomic_update_supported(rvu))
rsp->cap |= PTP_CAP_HW_ATOMIC_UPDATE;
else
rsp->cap &= ~BIT_ULL_MASK(0);
return 0;
}
@ -23,9 +23,10 @@ struct ptp {
u32 clock_period;
};
struct rvu;
struct ptp *ptp_get(void);
void ptp_put(struct ptp *ptp);
void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts);
void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts);
extern struct pci_driver ptp_driver;
@ -37,6 +37,10 @@ static struct mac_ops rpm_mac_ops = {
.mac_tx_enable = rpm_lmac_tx_enable,
.pfc_config = rpm_lmac_pfc_config,
.mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
.mac_reset = rpm_lmac_reset,
.mac_stats_reset = rpm_stats_reset,
.mac_x2p_reset = rpm_x2p_reset,
.mac_enadis_rx = rpm_enadis_rx,
};
static struct mac_ops rpm2_mac_ops = {
@ -47,7 +51,7 @@ static struct mac_ops rpm2_mac_ops = {
.int_set_reg = RPM2_CMRX_SW_INT_ENA_W1S,
.irq_offset = 1,
.int_ena_bit = BIT_ULL(0),
.lmac_fwi = RPM_LMAC_FWI,
.lmac_fwi = RPM2_LMAC_FWI,
.non_contiguous_serdes_lane = true,
.rx_stats_cnt = 43,
.tx_stats_cnt = 34,
@ -68,6 +72,10 @@ static struct mac_ops rpm2_mac_ops = {
.mac_tx_enable = rpm_lmac_tx_enable,
.pfc_config = rpm_lmac_pfc_config,
.mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
.mac_reset = rpm_lmac_reset,
.mac_stats_reset = rpm_stats_reset,
.mac_x2p_reset = rpm_x2p_reset,
.mac_enadis_rx = rpm_enadis_rx,
};
bool is_dev_rpm2(void *rpmd)
@ -353,8 +361,8 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
{
u64 cfg, pfc_class_mask_cfg;
rpm_t *rpm = rpmd;
u64 cfg;
/* ALL pause frames received are completely ignored */
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
@ -371,6 +379,11 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
/* Disable forward pause to driver */
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
/* Enable channel mask for all LMACS */
if (is_dev_rpm2(rpm))
rpm_write(rpm, lmac_id, RPM2_CMR_CHAN_MSK_OR, 0xffff);
@ -378,9 +391,11 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
/* Disable all PFC classes */
cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
RPMX_CMRX_PRT_CBFC_CTL;
cfg = rpm_read(rpm, lmac_id, pfc_class_mask_cfg);
cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg);
rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
rpm_write(rpm, lmac_id, pfc_class_mask_cfg, cfg);
}
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
@ -434,6 +449,21 @@ int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat)
return 0;
}
int rpm_stats_reset(void *rpmd, int lmac_id)
{
rpm_t *rpm = rpmd;
u64 cfg;
if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL);
cfg |= RPMX_CMD_CLEAR_TX | RPMX_CMD_CLEAR_RX | BIT_ULL(lmac_id);
rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg);
return 0;
}
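A small sketch of how the stats-clear command is composed, assuming the bit positions defined in the rpm.h hunk further down (RPMX_CMD_CLEAR_TX bit 31, RPMX_CMD_CLEAR_RX bit 30, with the target LMAC selected by its own bit):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(n)        (1ULL << (n))
    #define RPMX_CMD_CLEAR_RX BIT_ULL(30)
    #define RPMX_CMD_CLEAR_TX BIT_ULL(31)

    int main(void)
    {
        uint64_t cfg = 0;   /* stands in for the value read from STATN_CONTROL */
        int lmac_id = 2;    /* hypothetical LMAC */

        cfg |= RPMX_CMD_CLEAR_TX | RPMX_CMD_CLEAR_RX | BIT_ULL(lmac_id);
        printf("STATN_CONTROL write value: 0x%llx\n", (unsigned long long)cfg);
        return 0;
    }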
u8 rpm_get_lmac_type(void *rpmd, int lmac_id)
{
rpm_t *rpm = rpmd;
@ -441,7 +471,7 @@ u8 rpm_get_lmac_type(void *rpmd, int lmac_id)
int err;
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_LINK_STS, req);
err = cgx_fwi_cmd_generic(req, &resp, rpm, 0);
err = cgx_fwi_cmd_generic(req, &resp, rpm, lmac_id);
if (!err)
return FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, resp);
return err;
@ -454,7 +484,7 @@ u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id)
u8 num_lmacs;
u32 fifo_len;
fifo_len = rpm->mac_ops->fifo_len;
fifo_len = rpm->fifo_len;
num_lmacs = rpm->mac_ops->get_nr_lmacs(rpm);
switch (num_lmacs) {
@ -497,6 +527,7 @@ u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
rpm_t *rpm = rpmd;
u8 num_lmacs;
u32 fifo_len;
u16 max_lmac;
lmac_info = rpm_read(rpm, 0, RPM2_CMRX_RX_LMACS);
/* LMACs are divided into two groups and each group
@ -504,7 +535,11 @@ u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
* Group0 lmac_id range {0..3}
* Group1 lmac_id range {4..7}
*/
fifo_len = rpm->mac_ops->fifo_len / 2;
max_lmac = (rpm_read(rpm, 0, CGX_CONST) >> 24) & 0xFF;
if (max_lmac > 4)
fifo_len = rpm->fifo_len / 2;
else
fifo_len = rpm->fifo_len;
if (lmac_id < 4) {
num_lmacs = hweight8(lmac_info & 0xF);
@ -537,14 +572,15 @@ u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
u8 lmac_type;
struct lmac *lmac;
u64 cfg;
if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id);
if (lmac_type == LMAC_MODE_QSGMII || lmac_type == LMAC_MODE_SGMII) {
lmac = lmac_pdata(lmac_id, rpm);
if (lmac->lmac_type == LMAC_MODE_QSGMII ||
lmac->lmac_type == LMAC_MODE_SGMII) {
dev_err(&rpm->pdev->dev, "loopback not supported for LPC mode\n");
return 0;
}
@ -602,18 +638,19 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
RPMX_CMRX_PRT_CBFC_CTL;
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
class_en = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
class_en = rpm_read(rpm, lmac_id, pfc_class_mask_cfg);
pfc_en |= FIELD_GET(RPM_PFC_CLASS_MASK, class_en);
if (rx_pause) {
cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
} else {
cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
}
if (tx_pause) {
@ -632,10 +669,6 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
RPMX_CMRX_PRT_CBFC_CTL;
rpm_write(rpm, lmac_id, pfc_class_mask_cfg, class_en);
return 0;
@ -670,46 +703,110 @@ int rpm_get_fec_stats(void *rpmd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
return 0;
/* latched registers FCFECX_CW_HI/RSFEC_STAT_FAST_DATA_HI_CDC are common
* for all counters. Acquire lock to ensure serialized reads
*/
mutex_lock(&rpm->lock);
if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_CCW_LO);
val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
val_lo = rpm_read(rpm, 0, RPMX_MTI_FCFECX_VL0_CCW_LO(lmac_id));
val_hi = rpm_read(rpm, 0, RPMX_MTI_FCFECX_CW_HI(lmac_id));
rsp->fec_corr_blks = (val_hi << 16 | val_lo);
val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_NCCW_LO);
val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
val_lo = rpm_read(rpm, 0, RPMX_MTI_FCFECX_VL0_NCCW_LO(lmac_id));
val_hi = rpm_read(rpm, 0, RPMX_MTI_FCFECX_CW_HI(lmac_id));
rsp->fec_uncorr_blks = (val_hi << 16 | val_lo);
/* 50G uses 2 Physical serdes lines */
if (rpm->lmac_idmap[lmac_id]->link_info.lmac_type_id ==
LMAC_MODE_50G_R) {
val_lo = rpm_read(rpm, lmac_id,
RPMX_MTI_FCFECX_VL1_CCW_LO);
val_hi = rpm_read(rpm, lmac_id,
RPMX_MTI_FCFECX_CW_HI);
val_lo = rpm_read(rpm, 0,
RPMX_MTI_FCFECX_VL1_CCW_LO(lmac_id));
val_hi = rpm_read(rpm, 0,
RPMX_MTI_FCFECX_CW_HI(lmac_id));
rsp->fec_corr_blks += (val_hi << 16 | val_lo);
val_lo = rpm_read(rpm, lmac_id,
RPMX_MTI_FCFECX_VL1_NCCW_LO);
val_hi = rpm_read(rpm, lmac_id,
RPMX_MTI_FCFECX_CW_HI);
val_lo = rpm_read(rpm, 0,
RPMX_MTI_FCFECX_VL1_NCCW_LO(lmac_id));
val_hi = rpm_read(rpm, 0,
RPMX_MTI_FCFECX_CW_HI(lmac_id));
rsp->fec_uncorr_blks += (val_hi << 16 | val_lo);
}
} else {
/* enable RS-FEC capture */
cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL);
cfg = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_STATN_CONTROL);
cfg |= RPMX_RSFEC_RX_CAPTURE | BIT(lmac_id);
rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg);
rpm_write(rpm, 0, RPMX_MTI_RSFEC_STAT_STATN_CONTROL, cfg);
val_lo = rpm_read(rpm, 0,
RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2);
val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
val_hi = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC);
rsp->fec_corr_blks = (val_hi << 32 | val_lo);
val_lo = rpm_read(rpm, 0,
RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3);
val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
val_hi = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC);
rsp->fec_uncorr_blks = (val_hi << 32 | val_lo);
}
mutex_unlock(&rpm->lock);
return 0;
}
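The latched FEC counters are read as separate low and high words and stitched together: BASE-R counters carry 16 extra bits in CW_HI, while RS-FEC counters carry a full upper 32 bits from the fast-data CDC register. A stand-alone sketch of the combination (the register values below are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical latched reads */
        uint64_t fcfec_lo = 0x1234, fcfec_hi = 0x2;   /* BASE-R: lo + 16-bit hi */
        uint64_t rsfec_lo = 0xabcd, rsfec_hi = 0x1;   /* RS-FEC: lo + 32-bit hi */

        uint64_t baser_blocks = (fcfec_hi << 16) | fcfec_lo;
        uint64_t rsfec_blocks = (rsfec_hi << 32) | rsfec_lo;

        printf("BASE-R corrected blocks: 0x%llx\n", (unsigned long long)baser_blocks);
        printf("RS-FEC corrected blocks: 0x%llx\n", (unsigned long long)rsfec_blocks);
        return 0;
    }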
int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr)
{
u64 rx_logl_xon, cfg;
rpm_t *rpm = rpmd;
if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
/* Resetting PFC related CSRs */
rx_logl_xon = is_dev_rpm2(rpm) ? RPM2_CMRX_RX_LOGL_XON :
RPMX_CMRX_RX_LOGL_XON;
cfg = 0xff;
rpm_write(rpm, lmac_id, rx_logl_xon, cfg);
if (pf_req_flr)
rpm_lmac_internal_loopback(rpm, lmac_id, false);
return 0;
}
void rpm_x2p_reset(void *rpmd, bool enable)
{
rpm_t *rpm = rpmd;
int lmac_id;
u64 cfg;
if (enable) {
for_each_set_bit(lmac_id, &rpm->lmac_bmap, rpm->max_lmac_per_mac)
rpm->mac_ops->mac_enadis_rx(rpm, lmac_id, false);
usleep_range(1000, 2000);
cfg = rpm_read(rpm, 0, RPMX_CMR_GLOBAL_CFG);
rpm_write(rpm, 0, RPMX_CMR_GLOBAL_CFG, cfg | RPM_NIX0_RESET);
} else {
cfg = rpm_read(rpm, 0, RPMX_CMR_GLOBAL_CFG);
cfg &= ~RPM_NIX0_RESET;
rpm_write(rpm, 0, RPMX_CMR_GLOBAL_CFG, cfg);
}
}
int rpm_enadis_rx(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
u64 cfg;
if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
if (enable)
cfg |= RPM_RX_EN;
else
cfg &= ~RPM_RX_EN;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
return 0;
}
@ -17,6 +17,8 @@
/* Registers */
#define RPMX_CMRX_CFG 0x00
#define RPMX_CMR_GLOBAL_CFG 0x08
#define RPM_NIX0_RESET BIT_ULL(3)
#define RPMX_RX_TS_PREPEND BIT_ULL(22)
#define RPMX_TX_PTP_1S_SUPPORT BIT_ULL(17)
#define RPMX_CMRX_RX_ID_MAP 0x80
@ -74,6 +76,7 @@
#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
#define RPM_DEFAULT_PAUSE_TIME 0x7FF
#define RPMX_CMRX_RX_LOGL_XON 0x4100
#define RPMX_MTI_MAC100X_XIF_MODE 0x8100
#define RPMX_ONESTEP_ENABLE BIT_ULL(5)
@ -83,18 +86,23 @@
/* FEC stats */
#define RPMX_MTI_STAT_STATN_CONTROL 0x10018
#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
#define RPMX_RSFEC_RX_CAPTURE BIT_ULL(27)
#define RPMX_RSFEC_RX_CAPTURE BIT_ULL(28)
#define RPMX_CMD_CLEAR_RX BIT_ULL(30)
#define RPMX_CMD_CLEAR_TX BIT_ULL(31)
#define RPMX_MTI_RSFEC_STAT_STATN_CONTROL 0x40018
#define RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC 0x40000
#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2 0x40050
#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3 0x40058
#define RPMX_MTI_FCFECX_VL0_CCW_LO 0x38618
#define RPMX_MTI_FCFECX_VL0_NCCW_LO 0x38620
#define RPMX_MTI_FCFECX_VL1_CCW_LO 0x38628
#define RPMX_MTI_FCFECX_VL1_NCCW_LO 0x38630
#define RPMX_MTI_FCFECX_CW_HI 0x38638
#define RPMX_MTI_FCFECX_VL0_CCW_LO(a) (0x38618 + ((a) * 0x40))
#define RPMX_MTI_FCFECX_VL0_NCCW_LO(a) (0x38620 + ((a) * 0x40))
#define RPMX_MTI_FCFECX_VL1_CCW_LO(a) (0x38628 + ((a) * 0x40))
#define RPMX_MTI_FCFECX_VL1_NCCW_LO(a) (0x38630 + ((a) * 0x40))
#define RPMX_MTI_FCFECX_CW_HI(a) (0x38638 + ((a) * 0x40))
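With the macros above now parameterized by LMAC, each LMAC's FC-FEC counter block sits at a fixed 0x40 stride from the base. A quick sketch of the offsets this produces:

    #include <stdio.h>

    #define RPMX_MTI_FCFECX_VL0_CCW_LO(a)  (0x38618 + ((a) * 0x40))
    #define RPMX_MTI_FCFECX_CW_HI(a)       (0x38638 + ((a) * 0x40))

    int main(void)
    {
        for (int lmac = 0; lmac < 4; lmac++)
            printf("lmac %d: VL0_CCW_LO 0x%x, CW_HI 0x%x\n", lmac,
                   RPMX_MTI_FCFECX_VL0_CCW_LO(lmac),
                   RPMX_MTI_FCFECX_CW_HI(lmac));
        return 0;
    }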
/* CN10KB CSR Declaration */
#define RPM2_CMRX_SW_INT 0x1b0
#define RPM2_CMRX_SW_INT_ENA_W1S 0x1b8
#define RPM2_CMRX_SW_INT_ENA_W1S 0x1c8
#define RPM2_LMAC_FWI 0x12
#define RPM2_CMR_CHAN_MSK_OR 0x3120
#define RPM2_CMR_RX_OVR_BP_EN BIT_ULL(2)
#define RPM2_CMR_RX_OVR_BP_BP BIT_ULL(1)
@ -131,4 +139,8 @@ int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause,
int rpm2_get_nr_lmacs(void *rpmd);
bool is_dev_rpm2(void *rpmd);
int rpm_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr);
int rpm_stats_reset(void *rpmd, int lmac_id);
void rpm_x2p_reset(void *rpmd, bool enable);
int rpm_enadis_rx(void *rpmd, int lmac_id, bool enable);
#endif /* RPM_H */
@ -156,7 +156,7 @@ int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
return start;
}
static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
if (!rsrc->bmap)
return;
@ -817,6 +817,8 @@ static int rvu_fwdata_init(struct rvu *rvu)
err = cgx_get_fwdata_base(&fwdbase);
if (err)
goto fail;
BUILD_BUG_ON(offsetof(struct rvu_fwdata, cgx_fw_data) > FWDATA_CGX_LMAC_OFFSET);
rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
if (!rvu->fwdata)
goto fail;
@ -935,6 +937,9 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
hw->total_vfs = (cfg >> 20) & 0xFFF;
hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;
if (!is_rvu_otx2(rvu))
rvu_apr_block_cn10k_init(rvu);
/* Init NPA LF's bitmap */
block = &hw->block[BLKADDR_NPA];
if (!block->implemented)
@ -1157,6 +1162,7 @@ cpt:
}
rvu_program_channels(rvu);
cgx_start_linkup(rvu);
err = rvu_mcs_init(rvu);
if (err) {
@ -1164,8 +1170,16 @@ cpt:
goto nix_err;
}
err = rvu_cpt_init(rvu);
if (err) {
dev_err(rvu->dev, "%s: Failed to initialize cpt\n", __func__);
goto mcs_err;
}
return 0;
mcs_err:
rvu_mcs_exit(rvu);
nix_err:
rvu_nix_freemem(rvu);
npa_err:
@ -1473,7 +1487,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
/* All CGX mapped PFs are set with assigned NIX block during init */
if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
blkaddr = pf->nix_blkaddr;
} else if (is_afvf(pcifunc)) {
} else if (is_lbk_vf(rvu, pcifunc)) {
vf = pcifunc - 1;
/* Assign NIX based on VF number. All even numbered VFs get
* NIX0 and odd numbered gets NIX1
@ -1630,7 +1644,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
if (req->ssow > block->lf.max) {
dev_err(&rvu->pdev->dev,
"Func 0x%x: Invalid SSOW req, %d > max %d\n",
pcifunc, req->sso, block->lf.max);
pcifunc, req->ssow, block->lf.max);
return -EINVAL;
}
mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
@ -2001,6 +2015,13 @@ int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
return 0;
}
int rvu_ndc_sync(struct rvu *rvu, int lfblkaddr, int lfidx, u64 lfoffset)
{
/* Sync cached info for this LF in NDC to LLC/DRAM */
rvu_write64(rvu, lfblkaddr, lfoffset, BIT_ULL(12) | lfidx);
return rvu_poll_reg(rvu, lfblkaddr, lfoffset, BIT_ULL(12), true);
}
int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
struct get_hw_cap_rsp *rsp)
{
@ -2023,7 +2044,7 @@ int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
u16 target;
/* Only PF can add VF permissions */
if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_lbk_vf(rvu, pcifunc))
return -EOPNOTSUPP;
target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
@ -2055,6 +2076,65 @@ int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
return 0;
}
int rvu_mbox_handler_ndc_sync_op(struct rvu *rvu,
struct ndc_sync_op *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int err, lfidx, lfblkaddr;
if (req->npa_lf_sync) {
/* Get NPA LF data */
lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
if (lfblkaddr < 0)
return NPA_AF_ERR_AF_LF_INVALID;
lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0);
if (lfidx < 0)
return NPA_AF_ERR_AF_LF_INVALID;
/* Sync NPA NDC */
err = rvu_ndc_sync(rvu, lfblkaddr,
lfidx, NPA_AF_NDC_SYNC);
if (err)
dev_err(rvu->dev,
"NDC-NPA sync failed for LF %u\n", lfidx);
}
if (!req->nix_lf_tx_sync && !req->nix_lf_rx_sync)
return 0;
/* Get NIX LF data */
lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (lfblkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0);
if (lfidx < 0)
return NIX_AF_ERR_AF_LF_INVALID;
if (req->nix_lf_tx_sync) {
/* Sync NIX TX NDC */
err = rvu_ndc_sync(rvu, lfblkaddr,
lfidx, NIX_AF_NDC_TX_SYNC);
if (err)
dev_err(rvu->dev,
"NDC-NIX-TX sync fail for LF %u\n", lfidx);
}
if (req->nix_lf_rx_sync) {
/* Sync NIX RX NDC */
err = rvu_ndc_sync(rvu, lfblkaddr,
lfidx, NIX_AF_NDC_RX_SYNC);
if (err)
dev_err(rvu->dev,
"NDC-NIX-RX sync failed for LF %u\n", lfidx);
}
return 0;
}
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *req)
{
@ -2106,7 +2186,7 @@ bad_message:
}
}
static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
{
struct rvu *rvu = mwork->rvu;
int offset, err, id, devid;
@ -2173,6 +2253,9 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
}
mw->mbox_wrk[devid].num_msgs = 0;
if (poll)
otx2_mbox_wait_for_zero(mbox, devid);
/* Send mbox responses to VF/PF */
otx2_mbox_msg_send(mbox, devid);
}
@ -2180,15 +2263,18 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
static inline void rvu_afpf_mbox_handler(struct work_struct *work)
{
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
struct rvu *rvu = mwork->rvu;
__rvu_mbox_handler(mwork, TYPE_AFPF);
mutex_lock(&rvu->mbox_lock);
__rvu_mbox_handler(mwork, TYPE_AFPF, true);
mutex_unlock(&rvu->mbox_lock);
}
static inline void rvu_afvf_mbox_handler(struct work_struct *work)
{
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
__rvu_mbox_handler(mwork, TYPE_AFVF);
__rvu_mbox_handler(mwork, TYPE_AFVF, false);
}
static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
@ -2363,6 +2449,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
}
}
mutex_init(&rvu->mbox_lock);
mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
if (!mbox_regions) {
err = -ENOMEM;
@ -2392,9 +2480,9 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
goto free_regions;
}
mw->mbox_wq = alloc_workqueue(name,
mw->mbox_wq = alloc_workqueue("%s",
WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
num);
num, name);
if (!mw->mbox_wq) {
err = -ENOMEM;
goto unmap_regions;
@ -2617,6 +2705,13 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
* 2. Flush and reset SSO/SSOW
* 3. Cleanup pools (NPA)
*/
/* Free allocated BPIDs */
rvu_nix_flr_free_bpids(rvu, pcifunc);
/* Free multicast/mirror node associated with the 'pcifunc' */
rvu_nix_mcast_flr_free_entries(rvu, pcifunc);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
@ -2632,6 +2727,10 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
* Since LF is detached use LF number as -1.
*/
rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
rvu_mac_reset(rvu, pcifunc);
if (rvu->mcs_blk_cnt)
rvu_mcs_flr_handler(rvu, pcifunc);
mutex_unlock(&rvu->flr_lock);
}
@ -3143,6 +3242,7 @@ static int rvu_enable_sriov(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
int err, chans, vfs;
int pos = 0;
if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
dev_warn(&pdev->dev,
@ -3150,6 +3250,12 @@ static int rvu_enable_sriov(struct rvu *rvu)
return 0;
}
/* Get RVU VFs device id */
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
if (!pos)
return 0;
pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &rvu->vf_devid);
chans = rvu_get_num_lbk_chans();
if (chans < 0)
return chans;
@ -3254,7 +3360,7 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rvu->ptp = ptp_get();
if (IS_ERR(rvu->ptp)) {
err = PTR_ERR(rvu->ptp);
if (err == -EPROBE_DEFER)
if (err)
goto err_release_regions;
rvu->ptp = NULL;
}
@ -3324,7 +3430,7 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&rvu->rswitch.switch_lock);
if (rvu->fwdata)
ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
ptp_start(rvu, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
rvu->fwdata->ptp_ext_tstamp);
return 0;
@ -17,14 +17,17 @@
#include "mbox.h"
#include "npc.h"
#include "rvu_reg.h"
#include "ptp.h"
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
#define PCI_DEVID_OCTEONTX2_LBK 0xA061
/* Subsystem Device ID */
#define PCI_SUBSYS_DEVID_98XX 0xB100
#define PCI_SUBSYS_DEVID_96XX 0xB200
#define PCI_SUBSYS_DEVID_CN10K_A 0xB900
#define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00
#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00
@ -73,6 +76,7 @@ struct rvu_debugfs {
struct dump_ctx nix_cq_ctx;
struct dump_ctx nix_rq_ctx;
struct dump_ctx nix_sq_ctx;
struct dump_ctx nix_tm_ctx;
struct cpt_ctx cpt_ctx[MAX_CPT_BLKS];
int npa_qsize_id;
int nix_qsize_id;
@ -108,14 +112,17 @@ struct rvu_block {
u64 lfreset_reg;
unsigned char name[NAME_SIZE];
struct rvu *rvu;
u64 cpt_flt_eng_map[3];
u64 cpt_rcvrd_eng_map[3];
};
struct nix_mcast {
struct qmem *mce_ctx;
struct qmem *mcast_buf;
int replay_pkind;
int next_free_mce;
struct mutex mce_lock; /* Serialize MCE updates */
struct qmem *mce_ctx;
struct qmem *mcast_buf;
int replay_pkind;
struct rsrc_bmap mce_counter[2];
/* Counters for both ingress and egress mcast lists */
struct mutex mce_lock; /* Serialize MCE updates */
};
struct nix_mce_list {
@ -124,6 +131,23 @@ struct nix_mce_list {
int max;
};
struct nix_mcast_grp_elem {
struct nix_mce_list mcast_mce_list;
u32 mcast_grp_idx;
u32 pcifunc;
int mcam_index;
int mce_start_index;
struct list_head list;
u8 dir;
};
struct nix_mcast_grp {
struct list_head mcast_grp_head;
int count;
int next_grp_index;
struct mutex mcast_grp_lock; /* Serialize MCE updates */
};
/* layer metadata to uniquely identify a packet header field */
struct npc_layer_mdata {
u8 lid;
@ -265,6 +289,16 @@ enum rvu_pfvf_flags {
#define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC)
struct nix_bp {
struct rsrc_bmap bpids; /* free bpids bitmap */
u16 cgx_bpid_cnt;
u16 sdp_bpid_cnt;
u16 free_pool_base;
u16 *fn_map; /* pcifunc mapping */
u8 *intf_map; /* interface type map */
u8 *ref_cnt;
};
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
@ -283,6 +317,21 @@ struct nix_mark_format {
u32 *cfg;
};
/* smq(flush) to tl1 cir/pir info */
struct nix_smq_tree_ctx {
u16 schq;
u64 cir_off;
u64 cir_val;
u64 pir_off;
u64 pir_val;
};
/* smq flush context */
struct nix_smq_flush_ctx {
int smq;
struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT];
};
struct npc_pkind {
struct rsrc_bmap rsrc;
u32 *pfchan_map;
@ -318,12 +367,15 @@ struct nix_hw {
struct rvu *rvu;
struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
struct nix_mcast mcast;
struct nix_mcast_grp mcast_grp;
struct nix_flowkey flowkey;
struct nix_mark_format mark_format;
struct nix_lso lso;
struct nix_txvlan txvlan;
struct nix_ipolicer *ipolicer;
struct nix_bp bp;
u64 *tx_credits;
u8 cc_mcs_cnt;
};
/* RVU block's capabilities or functionality,
@ -344,8 +396,10 @@ struct hw_cap {
bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
bool programmable_chans; /* Channels programmable ? */
bool ipolicer;
bool nix_multiple_dwrr_mtu; /* Multiple DWRR_MTU to choose from */
bool npc_hash_extract; /* Hash extract enabled ? */
bool npc_exact_match_enabled; /* Exact match supported ? */
bool cpt_rxc; /* Is CPT-RXC supported */
};
struct rvu_hwinfo {
@ -390,6 +444,13 @@ struct mbox_wq_info {
struct workqueue_struct *mbox_wq;
};
struct channel_fwdata {
struct sdp_node_info info;
u8 valid;
#define RVU_CHANL_INFO_RESERVED 379
u8 reserved[RVU_CHANL_INFO_RESERVED];
};
struct rvu_fwdata {
#define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/
#define RVU_FWDATA_VERSION 0x0001
@ -408,11 +469,13 @@ struct rvu_fwdata {
u64 msixtr_base;
u32 ptp_ext_clk_rate;
u32 ptp_ext_tstamp;
#define FWDATA_RESERVED_MEM 1022
struct channel_fwdata channel_data;
#define FWDATA_RESERVED_MEM 958
u64 reserved[FWDATA_RESERVED_MEM];
#define CGX_MAX 9
#define CGX_LMACS_MAX 4
#define CGX_LMACS_USX 8
#define FWDATA_CGX_LMAC_OFFSET 10536
union {
struct cgx_lmac_fwdata_s
cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
@ -450,6 +513,11 @@ struct rvu_switch {
u16 start_entry;
};
struct rep_evtq_ent {
struct list_head node;
struct rep_event event;
};
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@ -459,7 +527,9 @@ struct rvu {
struct rvu_pfvf *pf;
struct rvu_pfvf *hwvf;
struct mutex rsrc_lock; /* Serialize resource alloc/free */
struct mutex alias_lock; /* Serialize bar2 alias access */
int vfs; /* Number of VFs attached to RVU */
u16 vf_devid; /* VF devices id */
int nix_blkaddr[MAX_NIX_BLKS];
/* Mbox */
@ -510,6 +580,7 @@ struct rvu {
struct ptp *ptp;
int mcs_blk_cnt;
int cpt_pf_num;
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
@ -524,6 +595,19 @@ struct rvu {
struct list_head mcs_intrq_head;
/* mcs interrupt queue lock */
spinlock_t mcs_intrq_lock;
/* CPT interrupt lock */
spinlock_t cpt_intr_lock;
struct mutex mbox_lock; /* Serialize mbox up and down msgs */
u16 rep_pcifunc;
int rep_cnt;
u16 *rep2pfvf_map;
u8 rep_mode;
struct work_struct rep_evt_work;
struct workqueue_struct *rep_evt_wq;
struct list_head rep_evtq_head;
/* Representor event lock */
spinlock_t rep_evtq_lock;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@ -546,6 +630,17 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
return readq(rvu->pfreg_base + offset);
}
static inline void rvu_bar2_sel_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
{
/* HW requires read back of RVU_AF_BAR2_SEL register to make sure completion of
* write operation.
*/
rvu_write64(rvu, block, offset, val);
rvu_read64(rvu, block, offset);
/* Barrier to ensure read completes before accessing LF registers */
mb();
}
/* Silicon revisions */
static inline bool is_rvu_pre_96xx_C0(struct rvu *rvu)
{
@ -599,6 +694,45 @@ static inline bool is_rvu_otx2(struct rvu *rvu)
midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}
static inline bool is_cnf10ka_a0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A &&
(pdev->revision & 0x0F) == 0x0)
return true;
return false;
}
static inline bool is_cn10ka_a0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A &&
(pdev->revision & 0x0F) == 0x0)
return true;
return false;
}
static inline bool is_cn10ka_a1(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A &&
(pdev->revision & 0x0F) == 0x1)
return true;
return false;
}
static inline bool is_cn10kb(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
return true;
return false;
}
static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu)
{
u64 npc_const3;
@ -652,12 +786,24 @@ static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan)
return rvu->hw->cpt_chan_base + chan;
}
static inline bool is_rvu_supports_nix1(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
if (pdev->subsystem_device == PCI_SUBSYS_DEVID_98XX)
return true;
return false;
}
/* Function Prototypes
* RVU
*/
static inline bool is_afvf(u16 pcifunc)
#define RVU_LBK_VF_DEVID 0xA0F8
static inline bool is_lbk_vf(struct rvu *rvu, u16 pcifunc)
{
return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
return (!(pcifunc & ~RVU_PFVF_FUNC_MASK) &&
(rvu->vf_devid == RVU_LBK_VF_DEVID));
}
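As a hedged illustration of the check above, assuming the 10-bit function field (RVU_PFVF_FUNC_MASK of 0x3ff) used elsewhere in rvu.h: an LBK VF is a pcifunc whose PF bits are all zero and whose SR-IOV VF device ID matches 0xA0F8. The pcifunc values in the sketch are made up:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout: low 10 bits = function (VF index + 1), bits above = PF. */
    #define RVU_PFVF_FUNC_MASK 0x3ff
    #define RVU_LBK_VF_DEVID   0xA0F8

    static bool is_lbk_vf(uint16_t vf_devid, uint16_t pcifunc)
    {
        return !(pcifunc & ~RVU_PFVF_FUNC_MASK) && vf_devid == RVU_LBK_VF_DEVID;
    }

    int main(void)
    {
        /* PF0/VF1 (pcifunc 0x0002) with the LBK VF device id qualifies... */
        printf("%d\n", is_lbk_vf(0xA0F8, 0x0002));
        /* ...but the same function number under PF1 does not. */
        printf("%d\n", is_lbk_vf(0xA0F8, (1 << 10) | 0x0002));
        return 0;
    }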
static inline bool is_vf(u16 pcifunc)
@ -684,6 +830,7 @@ void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
bool is_rsrc_free(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr);
int rvu_get_pf(u16 pcifunc);
@ -696,6 +843,7 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
int rvu_get_num_lbk_chans(void);
int rvu_ndc_sync(struct rvu *rvu, int lfblkid, int lfidx, u64 lfoffset);
int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
u16 global_slot, u16 *slot_in_block);
@ -716,7 +864,15 @@ void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
int rvu_sdp_init(struct rvu *rvu);
bool is_sdp_pfvf(u16 pcifunc);
bool is_sdp_pf(u16 pcifunc);
bool is_sdp_vf(u16 pcifunc);
bool is_sdp_vf(struct rvu *rvu, u16 pcifunc);
static inline bool is_rep_dev(struct rvu *rvu, u16 pcifunc)
{
if (rvu->rep_pcifunc && rvu->rep_pcifunc == pcifunc)
return true;
return false;
}
/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
@ -785,8 +941,17 @@ int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
struct nix_cn10k_aq_enq_rsp *aq_rsp,
u16 pcifunc, u8 ctype, u32 qidx);
int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc);
int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type);
u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu);
u32 convert_bytes_to_dwrr_mtu(u32 bytes);
void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
struct nix_txsch *txsch, bool enable);
void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc);
int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx);
int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx, u16 mcam_index);
void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc);
/* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu);
@ -817,7 +982,11 @@ void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index);
void __rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
struct rvu_npc_mcam_rule *rule);
void __rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
struct rvu_npc_mcam_rule *rule,
struct npc_install_flow_rsp *rsp);
void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
@ -835,6 +1004,10 @@ void npc_mcam_enable_flows(struct rvu *rvu, u16 target);
void npc_mcam_disable_flows(struct rvu *rvu, u16 target);
void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, bool enable);
u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index);
void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, u64 cfg);
void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, struct mcam_entry *entry,
u8 *intf, u8 *ena);
@ -844,10 +1017,13 @@ u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable);
int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
u16 pfc_en);
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
void rvu_mac_reset(struct rvu *rvu, u16 pcifunc);
u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac);
void cgx_start_linkup(struct rvu *rvu);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
@ -865,6 +1041,7 @@ void rvu_cpt_unregister_interrupts(struct rvu *rvu);
int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
int slot);
int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
int rvu_cpt_init(struct rvu *rvu);
#define NDC_AF_BANK_MASK GENMASK_ULL(7, 0)
#define NDC_AF_BANK_LINE_MASK GENMASK_ULL(31, 16)
@ -878,6 +1055,7 @@ void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw);
/* CN10K RVU - LMT*/
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
void rvu_apr_block_cn10k_init(struct rvu *rvu);
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
@ -892,7 +1070,8 @@ int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr);
/* RVU Switch */
void rvu_switch_enable(struct rvu *rvu);
void rvu_switch_disable(struct rvu *rvu);
void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena);
void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool ena);
int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
u64 pkind, u8 var_len_off, u8 var_len_off_mask,
@ -905,4 +1084,9 @@ int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena);
void rvu_mcs_exit(struct rvu *rvu);
/* Representor APIs */
int rvu_rep_pf_init(struct rvu *rvu);
int rvu_rep_install_mcam_rules(struct rvu *rvu);
void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena);
int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable);
#endif /* RVU_H */
@ -114,7 +114,7 @@ static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
/* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
pfvf->nix_blkaddr = BLKADDR_NIX0;
if (p2x == CMR_P2X_SEL_NIX1)
if (is_rvu_supports_nix1(rvu) && p2x == CMR_P2X_SEL_NIX1)
pfvf->nix_blkaddr = BLKADDR_NIX1;
}
@ -160,6 +160,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
continue;
lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
if (iter >= MAX_LMAC_COUNT)
continue;
lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
iter);
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
@ -232,10 +234,15 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
struct cgx_link_user_info *linfo;
struct cgx_link_info_msg *msg;
unsigned long pfmap;
int err, pfid;
int pfid;
linfo = &event->link_uinfo;
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
if (!pfmap) {
dev_err(rvu->dev, "CGX port%d:%d not mapped with PF\n",
event->cgx_id, event->lmac_id);
return;
}
do {
pfid = find_first_bit(&pfmap,
@ -250,16 +257,22 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
continue;
}
mutex_lock(&rvu->mbox_lock);
/* Send mbox message to PF */
msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
if (!msg)
if (!msg) {
mutex_unlock(&rvu->mbox_lock);
continue;
}
msg->link_info = *linfo;
otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
if (err)
dev_warn(rvu->dev, "notification to pf %d failed\n",
pfid);
otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pfid);
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);
mutex_unlock(&rvu->mbox_lock);
} while (pfmap);
}
@ -336,6 +349,7 @@ static void rvu_cgx_wq_destroy(struct rvu *rvu)
int rvu_cgx_init(struct rvu *rvu)
{
struct mac_ops *mac_ops;
int cgx, err;
void *cgxd;
@ -345,7 +359,7 @@ int rvu_cgx_init(struct rvu *rvu)
rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
if (!rvu->cgx_cnt_max) {
dev_info(rvu->dev, "No CGX devices found!\n");
return -ENODEV;
return 0;
}
rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
@ -362,6 +376,15 @@ int rvu_cgx_init(struct rvu *rvu)
if (err)
return err;
/* Clear X2P reset on all MAC blocks */
for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
cgxd = rvu_cgx_pdata(cgx, rvu);
if (!cgxd)
continue;
mac_ops = get_mac_ops(cgxd);
mac_ops->mac_x2p_reset(cgxd, false);
}
/* Register for CGX events */
err = cgx_lmac_event_handler_init(rvu);
if (err)
@ -369,10 +392,26 @@ int rvu_cgx_init(struct rvu *rvu)
mutex_init(&rvu->cgx_cfg_lock);
/* Ensure event handler registration is completed, before
* we turn on the links
*/
mb();
return 0;
}
void cgx_start_linkup(struct rvu *rvu)
{
unsigned long lmac_bmap;
struct mac_ops *mac_ops;
int cgx, lmac, err;
void *cgxd;
/* Enable receive on all LMACS */
for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
cgxd = rvu_cgx_pdata(cgx, rvu);
if (!cgxd)
continue;
mac_ops = get_mac_ops(cgxd);
lmac_bmap = cgx_get_lmac_bmap(cgxd);
for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
mac_ops->mac_enadis_rx(cgxd, lmac, true);
}
/* Do link up for all CGX ports */
for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
@ -385,8 +424,6 @@ int rvu_cgx_init(struct rvu *rvu)
"Link up process failed to start on cgx %d\n",
cgx);
}
return 0;
}
int rvu_cgx_exit(struct rvu *rvu)
@ -460,6 +497,23 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
}
int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable)
{
int pf = rvu_get_pf(pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
if (!is_cgx_config_permitted(rvu, pcifunc))
return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgxd = rvu_cgx_pdata(cgx_id, rvu);
mac_ops = get_mac_ops(cgxd);
return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}
int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
{
struct mac_ops *mac_ops;
@ -574,6 +628,35 @@ int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}
int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
struct rvu_pfvf *parent_pf;
struct mac_ops *mac_ops;
u8 cgx_idx, lmac;
void *cgxd;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return LMAC_AF_ERR_PERM_DENIED;
parent_pf = &rvu->pf[pf];
/* To ensure that resetting the CGX stats won't affect VF stats,
* check whether the interface is used only by the PF.
* If not, return.
*/
if (parent_pf->cgx_users > 1) {
dev_info(rvu->dev, "CGX busy, could not reset statistics\n");
return 0;
}
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
cgxd = rvu_cgx_pdata(cgx_idx, rvu);
mac_ops = get_mac_ops(cgxd);
return mac_ops->mac_stats_reset(cgxd, lmac);
}
int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
struct msg_req *req,
struct cgx_fec_stats_rsp *rsp)
@ -686,7 +769,7 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
int rc = 0, i;
int rc = 0;
u64 cfg;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@ -697,8 +780,7 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
rsp->hdr.rc = rc;
cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
/* copy 48 bit mac address to req->mac_addr */
for (i = 0; i < ETH_ALEN; i++)
rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
u64_to_ether_addr(cfg, rsp->mac_addr);
return 0;
}
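u64_to_ether_addr() performs the same big-endian byte unpacking that the removed open-coded loop did: the most significant of the six low bytes becomes octet 0. A tiny stand-alone equivalent, with a made-up MAC value:

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_ALEN 6

    static void u64_to_mac(uint64_t u, uint8_t *addr)
    {
        for (int i = ETH_ALEN - 1; i >= 0; i--) {
            addr[i] = u & 0xff;   /* lowest byte is the last octet */
            u >>= 8;
        }
    }

    int main(void)
    {
        uint8_t mac[ETH_ALEN];

        u64_to_mac(0x001122334455ULL, mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
    }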
@ -752,18 +834,17 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
return 0;
/* This msg is expected only from PFs that are mapped to CGX LMACs,
/* This msg is expected only from PF/VFs that are mapped to CGX/RPM LMACs,
* if received from other PF/VF simply ACK, nothing to do.
*/
if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
!is_pf_cgxmapped(rvu, pf))
return -ENODEV;
if (!is_pf_cgxmapped(rvu, pf))
return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgxd = rvu_cgx_pdata(cgx_id, rvu);
mac_ops = get_mac_ops(cgxd);
mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, true);
mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
/* If PTP is enabled then inform NPC that packets to be
* parsed by this PF will have their data shifted by 8 bytes
* and if PTP is disabled then no shift is required
@ -866,13 +947,12 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
u32 rvu_cgx_get_fifolen(struct rvu *rvu)
{
struct mac_ops *mac_ops;
u32 fifo_len;
void *cgxd = rvu_first_cgx_pdata(rvu);
mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
fifo_len = mac_ops ? mac_ops->fifo_len : 0;
if (!cgxd)
return 0;
return fifo_len;
return cgx_get_fifo_len(cgxd);
}
u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
@ -1250,3 +1330,21 @@ int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
return err;
}
void rvu_mac_reset(struct rvu *rvu, u16 pcifunc)
{
int pf = rvu_get_pf(pcifunc);
struct mac_ops *mac_ops;
struct cgx *cgxd;
u8 cgx, lmac;
if (!is_pf_cgxmapped(rvu, pf))
return;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
cgxd = rvu_cgx_pdata(cgx, rvu);
mac_ops = get_mac_ops(cgxd);
if (mac_ops->mac_reset(cgxd, lmac, !is_vf(pcifunc)))
dev_err(rvu->dev, "Failed to reset MAC\n");
}
@ -559,3 +559,12 @@ void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
cfg |= BIT_ULL(1) | BIT_ULL(2);
rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg);
}
void rvu_apr_block_cn10k_init(struct rvu *rvu)
{
u64 reg;
reg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
reg |= FIELD_PREP(LMTST_THROTTLE_MASK, LMTST_WR_PEND_MAX);
rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CFG, reg);
}
@ -17,7 +17,13 @@
#define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2
/* Length of initial context fetch in 128 byte words */
#define CPT_CTX_ILEN 2ULL
#define CPT_CTX_ILEN 1ULL
/* Interrupt vector count of CPT RVU and RAS interrupts */
#define CPT_10K_AF_RVU_RAS_INT_VEC_CNT 2
/* Default CPT_AF_RXC_CFG1:max_rxc_icb_cnt */
#define CPT_DFLT_MAX_RXC_ICB_CNT 0xC0ULL
#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
({ \
@ -37,34 +43,103 @@
(_rsp)->free_sts_##etype = free_sts; \
})
static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr)
#define MAX_AE GENMASK_ULL(47, 32)
#define MAX_IE GENMASK_ULL(31, 16)
#define MAX_SE GENMASK_ULL(15, 0)
static u16 cpt_max_engines_get(struct rvu *rvu)
{
u16 max_ses, max_ies, max_aes;
u64 reg;
reg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS1);
max_ses = FIELD_GET(MAX_SE, reg);
max_ies = FIELD_GET(MAX_IE, reg);
max_aes = FIELD_GET(MAX_AE, reg);
return max_ses + max_ies + max_aes;
}
/* The number of flt interrupt vectors depends on the number of engines
* the chip has. Each flt vector represents 64 engines.
*/
static int cpt_10k_flt_nvecs_get(struct rvu *rvu, u16 max_engs)
{
int flt_vecs;
flt_vecs = DIV_ROUND_UP(max_engs, 64);
if (flt_vecs > CPT_10K_AF_INT_VEC_FLT_MAX) {
dev_warn_once(rvu->dev, "flt_vecs:%d exceeds the max vectors:%d\n",
flt_vecs, CPT_10K_AF_INT_VEC_FLT_MAX);
flt_vecs = CPT_10K_AF_INT_VEC_FLT_MAX;
}
return flt_vecs;
}
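A worked example of the two helpers above, assuming CPT_AF_CONSTANTS1 were to report 112 SEs, 16 IEs and 16 AEs (numbers chosen only for illustration): 144 engines need DIV_ROUND_UP(144, 64) = 3 fault vectors, and the per-vector enable masks end up 64, 64 and 16 bits wide, matching the INTR_MASK(nr) writes further down.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* Hypothetical engine counts extracted from CPT_AF_CONSTANTS1 */
        unsigned int max_ses = 112, max_ies = 16, max_aes = 16;
        unsigned int max_engs = max_ses + max_ies + max_aes;
        unsigned int flt_vecs = DIV_ROUND_UP(max_engs, 64);

        printf("%u engines -> %u FLT vectors\n", max_engs, flt_vecs);

        for (unsigned int vec = 0; vec < flt_vecs; vec++) {
            unsigned int nr = max_engs > 64 ? 64 : max_engs;

            max_engs -= nr;
            printf("vec %u enables %u engine bits\n", vec, nr);
        }
        return 0;
    }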
static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr)
{
struct rvu_block *block = ptr;
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
u64 reg0, reg1, reg2;
u64 reg, val;
int i, eng;
u8 grp;
reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
if (!is_rvu_otx2(rvu)) {
reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2));
dev_err_ratelimited(rvu->dev,
"Received CPTAF FLT irq : 0x%llx, 0x%llx, 0x%llx",
reg0, reg1, reg2);
} else {
dev_err_ratelimited(rvu->dev,
"Received CPTAF FLT irq : 0x%llx, 0x%llx",
reg0, reg1);
reg = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(vec));
dev_err_ratelimited(rvu->dev, "Received CPTAF FLT%d irq : 0x%llx", vec, reg);
i = -1;
while ((i = find_next_bit((unsigned long *)&reg, 64, i + 1)) < 64) {
switch (vec) {
case 0:
eng = i;
break;
case 1:
eng = i + 64;
break;
case 2:
eng = i + 128;
break;
}
grp = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng)) & 0xFF;
/* Disable and enable the engine which triggers fault */
rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), 0x0);
val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng));
rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val & ~1ULL);
rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), grp);
rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val | 1ULL);
spin_lock(&rvu->cpt_intr_lock);
block->cpt_flt_eng_map[vec] |= BIT_ULL(i);
val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(eng));
val = val & 0x3;
if (val == 0x1 || val == 0x2)
block->cpt_rcvrd_eng_map[vec] |= BIT_ULL(i);
spin_unlock(&rvu->cpt_intr_lock);
}
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0);
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1);
if (!is_rvu_otx2(rvu))
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(2), reg2);
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(vec), reg);
return IRQ_HANDLED;
}
static irqreturn_t rvu_cpt_af_flt0_intr_handler(int irq, void *ptr)
{
return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT0, ptr);
}
static irqreturn_t rvu_cpt_af_flt1_intr_handler(int irq, void *ptr)
{
return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT1, ptr);
}
static irqreturn_t rvu_cpt_af_flt2_intr_handler(int irq, void *ptr)
{
return cpt_af_flt_intr_handler(CPT_10K_AF_INT_VEC_FLT2, ptr);
}
static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
{
struct rvu_block *block = ptr;
@ -116,15 +191,26 @@ static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
{
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
int i;
int i, flt_vecs;
u16 max_engs;
u8 nr;
max_engs = cpt_max_engines_get(rvu);
flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs);
/* Disable all CPT AF interrupts */
for (i = 0; i < CPT_10K_AF_INT_VEC_RVU; i++)
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
for (i = CPT_10K_AF_INT_VEC_FLT0; i < flt_vecs; i++) {
nr = (max_engs > 64) ? 64 : max_engs;
max_engs -= nr;
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i),
INTR_MASK(nr));
}
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++)
/* CPT AF interrupt vectors are flt_int, rvu_int and ras_int. */
for (i = 0; i < flt_vecs + CPT_10K_AF_RVU_RAS_INT_VEC_CNT; i++)
if (rvu->irq_allocated[off + i]) {
free_irq(pci_irq_vector(rvu->pdev, off + i), block);
rvu->irq_allocated[off + i] = false;
@ -151,7 +237,7 @@ static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
/* Disable all CPT AF interrupts */
for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), ~0ULL);
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
@ -170,28 +256,53 @@ void rvu_cpt_unregister_interrupts(struct rvu *rvu)
static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
{
int rvu_intr_vec, ras_intr_vec;
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
int i, ret;
irq_handler_t flt_fn;
int i, ret, flt_vecs;
u16 max_engs;
u8 nr;
for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
max_engs = cpt_max_engines_get(rvu);
flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs);
for (i = CPT_10K_AF_INT_VEC_FLT0; i < flt_vecs; i++) {
sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);
switch (i) {
case CPT_10K_AF_INT_VEC_FLT0:
flt_fn = rvu_cpt_af_flt0_intr_handler;
break;
case CPT_10K_AF_INT_VEC_FLT1:
flt_fn = rvu_cpt_af_flt1_intr_handler;
break;
case CPT_10K_AF_INT_VEC_FLT2:
flt_fn = rvu_cpt_af_flt2_intr_handler;
break;
}
ret = rvu_cpt_do_register_interrupt(block, off + i,
rvu_cpt_af_flt_intr_handler,
&rvu->irq_name[(off + i) * NAME_SIZE]);
flt_fn, &rvu->irq_name[(off + i) * NAME_SIZE]);
if (ret)
goto err;
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
nr = (max_engs > 64) ? 64 : max_engs;
max_engs -= nr;
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i),
INTR_MASK(nr));
}
ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
rvu_intr_vec = flt_vecs;
ras_intr_vec = rvu_intr_vec + 1;
ret = rvu_cpt_do_register_interrupt(block, off + rvu_intr_vec,
rvu_cpt_af_rvu_intr_handler,
"CPTAF RVU");
if (ret)
goto err;
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS,
ret = rvu_cpt_do_register_interrupt(block, off + ras_intr_vec,
rvu_cpt_af_ras_intr_handler,
"CPTAF RAS");
if (ret)
@ -208,8 +319,8 @@ static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
irq_handler_t flt_fn;
int i, offs, ret = 0;
char irq_name[16];
if (!is_block_implemented(rvu->hw, blkaddr))
return 0;
@ -226,13 +337,20 @@ static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
return cpt_10k_register_interrupts(block, offs);
for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
sprintf(&rvu->irq_name[(offs + i) * NAME_SIZE], "CPTAF FLT%d", i);
switch (i) {
case CPT_AF_INT_VEC_FLT0:
flt_fn = rvu_cpt_af_flt0_intr_handler;
break;
case CPT_AF_INT_VEC_FLT1:
flt_fn = rvu_cpt_af_flt1_intr_handler;
break;
}
ret = rvu_cpt_do_register_interrupt(block, offs + i,
rvu_cpt_af_flt_intr_handler,
irq_name);
flt_fn, &rvu->irq_name[(offs + i) * NAME_SIZE]);
if (ret)
goto err;
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
}
ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
@ -290,7 +408,7 @@ static int get_cpt_pf_num(struct rvu *rvu)
static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
{
int cpt_pf_num = get_cpt_pf_num(rvu);
int cpt_pf_num = rvu->cpt_pf_num;
if (rvu_get_pf(pcifunc) != cpt_pf_num)
return false;
@ -302,7 +420,7 @@ static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
{
int cpt_pf_num = get_cpt_pf_num(rvu);
int cpt_pf_num = rvu->cpt_pf_num;
if (rvu_get_pf(pcifunc) != cpt_pf_num)
return false;
@ -371,8 +489,12 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
/* Set CPT LF group and priority */
val = (u64)req->eng_grpmsk << 48 | 1;
if (!is_rvu_otx2(rvu))
val |= (CPT_CTX_ILEN << 17);
if (!is_rvu_otx2(rvu)) {
if (req->ctx_ilen_valid)
val |= (req->ctx_ilen << 17);
else
val |= (CPT_CTX_ILEN << 17);
}
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
@ -570,7 +692,9 @@ int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
return ret;
}
static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
static bool validate_and_update_reg_offset(struct rvu *rvu,
struct cpt_rd_wr_reg_msg *req,
u64 *reg_offset)
{
u64 offset = req->reg_offset;
int blkaddr, num_lfs, lf;
@ -601,6 +725,11 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
if (lf < 0)
return false;
/* Translate local LF's offset to global CPT LF's offset to
* access LFX register.
*/
*reg_offset = (req->reg_offset & 0xFF000) + (lf << 3);
return true;
} else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
/* Registers that can be accessed from PF */
@ -611,6 +740,7 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
case CPT_AF_BLK_RST:
case CPT_AF_CONSTANTS1:
case CPT_AF_CTX_FLUSH_TIMER:
case CPT_AF_RXC_CFG1:
return true;
}
@ -634,6 +764,7 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
struct cpt_rd_wr_reg_msg *req,
struct cpt_rd_wr_reg_msg *rsp)
{
u64 offset = req->reg_offset;
int blkaddr;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
@ -645,23 +776,25 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
!is_cpt_vf(rvu, req->hdr.pcifunc))
return CPT_AF_ERR_ACCESS_DENIED;
if (!validate_and_update_reg_offset(rvu, req, &offset))
return CPT_AF_ERR_ACCESS_DENIED;
rsp->reg_offset = req->reg_offset;
rsp->ret_val = req->ret_val;
rsp->is_write = req->is_write;
if (!is_valid_offset(rvu, req))
return CPT_AF_ERR_ACCESS_DENIED;
if (req->is_write)
rvu_write64(rvu, blkaddr, req->reg_offset, req->val);
rvu_write64(rvu, blkaddr, offset, req->val);
else
rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset);
rsp->val = rvu_read64(rvu, blkaddr, offset);
return 0;
}
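The rewrite in validate_and_update_reg_offset() keeps the register-selector bits ([19:12], the 0xFF000 mask) from the caller's offset and substitutes the global LF number at bit 3 and up. With made-up numbers: a request for offset 0x41000 from a function whose local slot maps to global LF 5 is rewritten to 0x41028.

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the offset rewrite only; the CPT register map is not modelled. */
    static uint64_t to_global_lf_offset(uint64_t req_offset, int global_lf)
    {
        return (req_offset & 0xFF000) + ((uint64_t)global_lf << 3);
    }

    int main(void)
    {
        printf("0x%llx\n",
               (unsigned long long)to_global_lf_offset(0x41000, 5)); /* 0x41028 */
        return 0;
    }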
static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
if (is_rvu_otx2(rvu))
return;
@ -685,14 +818,16 @@ static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
rsp->ctx_err = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ERR);
rsp->ctx_enc_id = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ENC_ID);
rsp->ctx_flush_timer = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FLUSH_TIMER);
rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1));
if (!hw->cap.cpt_rxc)
return;
rsp->rxc_time = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME);
rsp->rxc_time_cfg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
rsp->rxc_active_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
rsp->rxc_zombie_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
rsp->rxc_dfrg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1));
}
static void get_eng_sts(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
@ -762,10 +897,21 @@ int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req,
#define RXC_ZOMBIE_COUNT GENMASK_ULL(60, 48)
static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req,
int blkaddr)
int blkaddr, struct cpt_rxc_time_cfg_req *save)
{
u64 dfrg_reg;
if (save) {
/* Save older config */
dfrg_reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
save->zombie_thres = FIELD_GET(RXC_ZOMBIE_THRES, dfrg_reg);
save->zombie_limit = FIELD_GET(RXC_ZOMBIE_LIMIT, dfrg_reg);
save->active_thres = FIELD_GET(RXC_ACTIVE_THRES, dfrg_reg);
save->active_limit = FIELD_GET(RXC_ACTIVE_LIMIT, dfrg_reg);
save->step = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
}
dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres);
dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit);
dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres);
@ -790,7 +936,7 @@ int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
!is_cpt_vf(rvu, req->hdr.pcifunc))
return CPT_AF_ERR_ACCESS_DENIED;
cpt_rxc_time_cfg(rvu, req, blkaddr);
cpt_rxc_time_cfg(rvu, req, blkaddr, NULL);
return 0;
}
@ -834,13 +980,43 @@ int rvu_mbox_handler_cpt_lf_reset(struct rvu *rvu, struct cpt_lf_rst_req *req,
return 0;
}
int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_req *req,
struct cpt_flt_eng_info_rsp *rsp)
{
struct rvu_block *block;
unsigned long flags;
int blkaddr, vec;
int flt_vecs;
u16 max_engs;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
if (blkaddr < 0)
return blkaddr;
block = &rvu->hw->block[blkaddr];
max_engs = cpt_max_engines_get(rvu);
flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs);
for (vec = 0; vec < flt_vecs; vec++) {
spin_lock_irqsave(&rvu->cpt_intr_lock, flags);
rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec];
rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec];
if (req->reset) {
block->cpt_flt_eng_map[vec] = 0x0;
block->cpt_rcvrd_eng_map[vec] = 0x0;
}
spin_unlock_irqrestore(&rvu->cpt_intr_lock, flags);
}
return 0;
}
static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
{
struct cpt_rxc_time_cfg_req req;
struct cpt_rxc_time_cfg_req req, prev;
struct rvu_hwinfo *hw = rvu->hw;
int timeout = 2000;
u64 reg;
if (is_rvu_otx2(rvu))
if (!hw->cap.cpt_rxc)
return;
/* Set time limit to minimum values, so that rxc entries will be
@ -852,7 +1028,7 @@ static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
req.active_thres = 1;
req.active_limit = 1;
cpt_rxc_time_cfg(rvu, &req, blkaddr);
cpt_rxc_time_cfg(rvu, &req, blkaddr, &prev);
do {
reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
@ -878,70 +1054,68 @@ static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
if (timeout == 0)
dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
/* Restore config */
cpt_rxc_time_cfg(rvu, &prev, blkaddr, NULL);
}
#define INPROG_INFLIGHT(reg) ((reg) & 0x1FF)
#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31))
#define INPROG_GRB(reg) (((reg) >> 32) & 0xFF)
#define INPROG_GWB(reg) (((reg) >> 40) & 0xFF)
#define INFLIGHT GENMASK_ULL(8, 0)
#define GRB_CNT GENMASK_ULL(39, 32)
#define GWB_CNT GENMASK_ULL(47, 40)
#define XQ_XOR GENMASK_ULL(63, 63)
#define DQPTR GENMASK_ULL(19, 0)
#define NQPTR GENMASK_ULL(51, 32)
static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
{
int i = 0, hard_lp_ctr = 100000;
u64 inprog, grp_ptr;
u16 nq_ptr, dq_ptr;
int timeout = 1000000;
u64 inprog, inst_ptr;
u64 qsize, pending;
int i = 0;
/* Disable instructions enqueuing */
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0);
/* Disable executions in the LF's queue */
inprog = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
inprog &= ~BIT_ULL(16);
inprog |= BIT_ULL(16);
rvu_write64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog);
qsize = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_SIZE)) & 0x7FFF;
do {
inst_ptr = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_INST_PTR));
pending = (FIELD_GET(XQ_XOR, inst_ptr) * qsize * 40) +
FIELD_GET(NQPTR, inst_ptr) -
FIELD_GET(DQPTR, inst_ptr);
udelay(1);
timeout--;
} while ((pending != 0) && (timeout != 0));
if (timeout == 0)
dev_warn(rvu->dev, "TIMEOUT: CPT poll on pending instructions\n");
timeout = 1000000;
/* Wait for CPT queue to become execution-quiescent */
do {
inprog = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
if (INPROG_GRB_PARTIAL(inprog)) {
i = 0;
hard_lp_ctr--;
} else {
i++;
}
grp_ptr = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot,
CPT_LF_Q_GRP_PTR));
nq_ptr = (grp_ptr >> 32) & 0x7FFF;
dq_ptr = grp_ptr & 0x7FFF;
} while (hard_lp_ctr && (i < 10) && (nq_ptr != dq_ptr));
if (hard_lp_ctr == 0)
dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
i = 0;
hard_lp_ctr = 100000;
do {
inprog = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
if ((INPROG_INFLIGHT(inprog) == 0) &&
(INPROG_GWB(inprog) < 40) &&
((INPROG_GRB(inprog) == 0) ||
(INPROG_GRB((inprog)) == 40))) {
if ((FIELD_GET(INFLIGHT, inprog) == 0) &&
(FIELD_GET(GRB_CNT, inprog) == 0)) {
i++;
} else {
i = 0;
hard_lp_ctr--;
timeout--;
}
} while (hard_lp_ctr && (i < 10));
} while ((timeout != 0) && (i < 10));
if (hard_lp_ctr == 0)
dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
if (timeout == 0)
dev_warn(rvu->dev, "TIMEOUT: CPT poll on inflight count\n");
/* Wait for 2 us to flush all queue writes to memory */
udelay(2);
}
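As an aside for review: the pending-instruction count above is derived purely from the CPT_LF_Q_INST_PTR fields. The following self-contained sketch reproduces that arithmetic with plain shifts and masks so it can be checked in isolation; the helper names and the example values are illustrative, not driver code.
#include <stdint.h>
#include <stdio.h>
/* Extract bits [msb:lsb] of a 64-bit register value */
static uint64_t get_field(uint64_t reg, int msb, int lsb)
{
	return (reg >> lsb) & ((1ULL << (msb - lsb + 1)) - 1);
}
/* Same arithmetic as above: queue size is given in 40-byte chunks and
 * the XQ_XOR bit flags that the enqueue pointer has wrapped.
 */
static uint64_t pending_instructions(uint64_t inst_ptr, uint64_t qsize)
{
	uint64_t xq_xor = get_field(inst_ptr, 63, 63);	/* wrap indicator */
	uint64_t nqptr  = get_field(inst_ptr, 51, 32);	/* enqueue pointer */
	uint64_t dqptr  = get_field(inst_ptr, 19, 0);	/* dequeue pointer */
	return (xq_xor * qsize * 40) + nqptr - dqptr;
}
int main(void)
{
	/* e.g. nqptr = 100, dqptr = 90, no wrap -> 10 pending instructions */
	uint64_t inst_ptr = (100ULL << 32) | 90ULL;
	printf("pending = %llu\n",
	       (unsigned long long)pending_instructions(inst_ptr, 64));
	return 0;
}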
int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
@ -951,18 +1125,15 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int s
if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
cpt_rxc_teardown(rvu, blkaddr);
mutex_lock(&rvu->alias_lock);
/* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
cpt_lf_disable_iqueue(rvu, blkaddr, slot);
/* Set group drop to help clear out hardware */
reg = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
reg |= BIT_ULL(17);
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), reg);
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
mutex_unlock(&rvu->alias_lock);
return 0;
}
@ -973,7 +1144,7 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int s
static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
int nix_blkaddr)
{
int cpt_pf_num = get_cpt_pf_num(rvu);
int cpt_pf_num = rvu->cpt_pf_num;
struct cpt_inst_lmtst_req *req;
dma_addr_t res_daddr;
int timeout = 3000;
@ -1097,7 +1268,7 @@ int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
/* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
for (i = 0; i < max_ctx_entries; i++) {
cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i));
@ -1110,10 +1281,39 @@ int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
reg);
}
}
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
unlock:
mutex_unlock(&rvu->rsrc_lock);
return 0;
}
#define MAX_RXC_ICB_CNT GENMASK_ULL(40, 32)
int rvu_cpt_init(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
u64 reg_val;
/* Retrieve CPT PF number */
rvu->cpt_pf_num = get_cpt_pf_num(rvu);
if (is_block_implemented(rvu->hw, BLKADDR_CPT0) && !is_rvu_otx2(rvu) &&
!is_cn10kb(rvu))
hw->cap.cpt_rxc = true;
if (hw->cap.cpt_rxc && !is_cn10ka_a0(rvu) && !is_cn10ka_a1(rvu)) {
/* Set CPT_AF_RXC_CFG1:max_rxc_icb_cnt to 0xc0 so as not to affect
* inline inbound peak performance
*/
reg_val = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_RXC_CFG1);
reg_val &= ~MAX_RXC_ICB_CNT;
reg_val |= FIELD_PREP(MAX_RXC_ICB_CNT,
CPT_DFLT_MAX_RXC_ICB_CNT);
rvu_write64(rvu, BLKADDR_CPT0, CPT_AF_RXC_CFG1, reg_val);
}
spin_lock_init(&rvu->cpt_intr_lock);
return 0;
}
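For reviewers less familiar with the GENMASK/FIELD_PREP idiom used in rvu_cpt_init() above, here is a minimal userspace sketch of the same read-modify-write of the 9-bit field at bits [40:32]; the mask definition and the 0xc0 value mirror this hunk, everything else is illustrative.
#include <stdint.h>
#include <stdio.h>
/* 9-bit field at bits [40:32], mirroring GENMASK_ULL(40, 32) above */
#define MAX_RXC_ICB_CNT_MASK	(((1ULL << 9) - 1) << 32)
static uint64_t set_max_rxc_icb_cnt(uint64_t reg_val, uint64_t cnt)
{
	reg_val &= ~MAX_RXC_ICB_CNT_MASK;		/* clear the field */
	reg_val |= (cnt << 32) & MAX_RXC_ICB_CNT_MASK;	/* program new value */
	return reg_val;
}
int main(void)
{
	/* program the assumed default of 0xc0 into an arbitrary register value */
	uint64_t reg = 0x123456789abcdefULL;
	printf("0x%llx\n", (unsigned long long)set_max_rxc_icb_cnt(reg, 0xc0));
	return 0;
}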


@ -45,33 +45,6 @@ enum {
CGX_STAT18,
};
/* NIX TX stats */
enum nix_stat_lf_tx {
TX_UCAST = 0x0,
TX_BCAST = 0x1,
TX_MCAST = 0x2,
TX_DROP = 0x3,
TX_OCTS = 0x4,
TX_STATS_ENUM_LAST,
};
/* NIX RX stats */
enum nix_stat_lf_rx {
RX_OCTS = 0x0,
RX_UCAST = 0x1,
RX_BCAST = 0x2,
RX_MCAST = 0x3,
RX_DROP = 0x4,
RX_DROP_OCTS = 0x5,
RX_FCS = 0x6,
RX_ERR = 0x7,
RX_DRP_BCAST = 0x8,
RX_DRP_MCAST = 0x9,
RX_DRP_L3BCAST = 0xa,
RX_DRP_L3MCAST = 0xb,
RX_STATS_ENUM_LAST,
};
static char *cgx_rx_stats_fields[] = {
[CGX_STAT0] = "Received packets",
[CGX_STAT1] = "Octets of received packets",
@ -663,16 +636,16 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
static void get_lf_str_list(struct rvu_block block, int pcifunc,
static void get_lf_str_list(const struct rvu_block *block, int pcifunc,
char *lfs)
{
int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
int lf = 0, seq = 0, len = 0, prev_lf = block->lf.max;
for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
if (lf >= block.lf.max)
for_each_set_bit(lf, block->lf.bmap, block->lf.max) {
if (lf >= block->lf.max)
break;
if (block.fn_map[lf] != pcifunc)
if (block->fn_map[lf] != pcifunc)
continue;
if (lf == prev_lf + 1) {
@ -719,7 +692,7 @@ static int get_max_column_width(struct rvu *rvu)
if (!strlen(block.name))
continue;
get_lf_str_list(block, pcifunc, buf);
get_lf_str_list(&block, pcifunc, buf);
if (lf_str_size <= strlen(buf))
lf_str_size = strlen(buf) + 1;
}
@ -803,7 +776,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
continue;
len = 0;
lfs[len] = '\0';
get_lf_str_list(block, pcifunc, lfs);
get_lf_str_list(&block, pcifunc, lfs);
if (strlen(lfs))
flag = 1;
@ -838,10 +811,10 @@ RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
{
char cgx[10], lmac[10], chan[10];
struct rvu *rvu = filp->private;
struct pci_dev *pdev = NULL;
struct mac_ops *mac_ops;
char cgx[10], lmac[10];
struct rvu_pfvf *pfvf;
int pf, domain, blkid;
u8 cgx_id, lmac_id;
@ -852,7 +825,7 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
/* There can be no CGX devices at all */
if (!mac_ops)
return 0;
seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\tCHAN\n",
mac_ops->name);
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
if (!is_pf_cgxmapped(rvu, pf))
@ -876,8 +849,11 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
&lmac_id);
sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
sprintf(lmac, "LMAC%d", lmac_id);
seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
sprintf(chan, "%d",
rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0));
seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\t%s\n",
dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac,
chan);
pci_dev_put(pdev);
}
@ -1220,6 +1196,11 @@ static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
for (aura = id; aura < max_id; aura++) {
aq_req.aura_id = aura;
/* Skip if queue is uninitialized */
if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap))
continue;
seq_printf(m, "======%s : %d=======\n",
(ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
aq_req.aura_id);
@ -1598,6 +1579,367 @@ static void print_nix_cn10k_sq_ctx(struct seq_file *m,
(u64)sq_ctx->dropped_pkts);
}
static void print_tm_tree(struct seq_file *m,
struct nix_aq_enq_rsp *rsp, u64 sq)
{
struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
struct nix_hw *nix_hw = m->private;
struct rvu *rvu = nix_hw->rvu;
u16 p1, p2, p3, p4, schq;
int blkaddr;
u64 cfg;
blkaddr = nix_hw->blkaddr;
schq = sq_ctx->smq;
cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq));
p1 = FIELD_GET(NIX_AF_MDQ_PARENT_MASK, cfg);
cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(p1));
p2 = FIELD_GET(NIX_AF_TL4_PARENT_MASK, cfg);
cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(p2));
p3 = FIELD_GET(NIX_AF_TL3_PARENT_MASK, cfg);
cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(p3));
p4 = FIELD_GET(NIX_AF_TL2_PARENT_MASK, cfg);
seq_printf(m,
"SQ(%llu) -> SMQ(%u) -> TL4(%u) -> TL3(%u) -> TL2(%u) -> TL1(%u)\n",
sq, schq, p1, p2, p3, p4);
}
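print_tm_tree() resolves the transmit scheduler path by chaining PARENT register reads level by level. Below is a compact sketch of that walk with the hardware reads replaced by a lookup table; all values and names are illustrative, not driver code.
#include <stdint.h>
#include <stdio.h>
enum { LVL_MDQ, LVL_TL4, LVL_TL3, LVL_TL2, LVL_CNT };
/* parent[level][queue] stands in for the NIX_AF_*_PARENT register reads */
static uint16_t read_parent(const uint16_t parent[][8], int lvl, uint16_t schq)
{
	return parent[lvl][schq];
}
int main(void)
{
	static const uint16_t parent[LVL_CNT][8] = {
		[LVL_MDQ] = { 2, 2, 3, 3 },	/* MDQ -> TL4 */
		[LVL_TL4] = { 0, 0, 1, 1 },	/* TL4 -> TL3 */
		[LVL_TL3] = { 0, 0 },		/* TL3 -> TL2 */
		[LVL_TL2] = { 0 },		/* TL2 -> TL1 */
	};
	uint16_t schq = 1;			/* SMQ attached to the SQ */
	uint16_t hops[LVL_CNT];
	int lvl;
	/* Walk up the hierarchy one PARENT field at a time */
	for (lvl = LVL_MDQ; lvl < LVL_CNT; lvl++) {
		hops[lvl] = read_parent(parent, lvl, schq);
		schq = hops[lvl];
	}
	printf("SMQ(1) -> TL4(%u) -> TL3(%u) -> TL2(%u) -> TL1(%u)\n",
	       (unsigned)hops[LVL_MDQ], (unsigned)hops[LVL_TL4],
	       (unsigned)hops[LVL_TL3], (unsigned)hops[LVL_TL2]);
	return 0;
}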
/* Dumps given tm_tree registers */
static int rvu_dbg_nix_tm_tree_display(struct seq_file *m, void *unused)
{
int qidx, nixlf, rc, id, max_id = 0;
struct nix_hw *nix_hw = m->private;
struct rvu *rvu = nix_hw->rvu;
struct nix_aq_enq_req aq_req;
struct nix_aq_enq_rsp rsp;
struct rvu_pfvf *pfvf;
u16 pcifunc;
nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
id = rvu->rvu_dbg.nix_tm_ctx.id;
if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
max_id = pfvf->sq_ctx->qsize;
memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
aq_req.hdr.pcifunc = pcifunc;
aq_req.ctype = NIX_AQ_CTYPE_SQ;
aq_req.op = NIX_AQ_INSTOP_READ;
seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
for (qidx = id; qidx < max_id; qidx++) {
aq_req.qidx = qidx;
/* Skip SQ's if not initialized */
if (!test_bit(qidx, pfvf->sq_bmap))
continue;
rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
if (rc) {
seq_printf(m, "Failed to read SQ(%d) context\n",
aq_req.qidx);
continue;
}
print_tm_tree(m, &rsp, aq_req.qidx);
}
return 0;
}
static ssize_t rvu_dbg_nix_tm_tree_write(struct file *filp,
const char __user *buffer,
size_t count, loff_t *ppos)
{
struct seq_file *m = filp->private_data;
struct nix_hw *nix_hw = m->private;
struct rvu *rvu = nix_hw->rvu;
struct rvu_pfvf *pfvf;
u16 pcifunc;
u64 nixlf;
int ret;
ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
if (ret)
return ret;
if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
if (!pfvf->sq_ctx) {
dev_warn(rvu->dev, "SQ context is not initialized\n");
return -EINVAL;
}
rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
return count;
}
RVU_DEBUG_SEQ_FOPS(nix_tm_tree, nix_tm_tree_display, nix_tm_tree_write);
static void print_tm_topo(struct seq_file *m, u64 schq, u32 lvl)
{
struct nix_hw *nix_hw = m->private;
struct rvu *rvu = nix_hw->rvu;
int blkaddr, link, link_level;
struct rvu_hwinfo *hw;
hw = rvu->hw;
blkaddr = nix_hw->blkaddr;
if (lvl == NIX_TXSCH_LVL_MDQ) {
seq_printf(m, "NIX_AF_SMQ[%llu]_CFG =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)));
seq_printf(m, "NIX_AF_SMQ[%llu]_STATUS =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_SMQX_STATUS(schq)));
seq_printf(m, "NIX_AF_MDQ[%llu]_OUT_MD_COUNT =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_MDQX_OUT_MD_COUNT(schq)));
seq_printf(m, "NIX_AF_MDQ[%llu]_SCHEDULE =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_MDQX_SCHEDULE(schq)));
seq_printf(m, "NIX_AF_MDQ[%llu]_SHAPE =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SHAPE(schq)));
seq_printf(m, "NIX_AF_MDQ[%llu]_CIR =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_MDQX_CIR(schq)));
seq_printf(m, "NIX_AF_MDQ[%llu]_PIR =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PIR(schq)));
seq_printf(m, "NIX_AF_MDQ[%llu]_SW_XOFF =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(schq)));
seq_printf(m, "NIX_AF_MDQ[%llu]_PARENT =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq)));
seq_puts(m, "\n");
}
if (lvl == NIX_TXSCH_LVL_TL4) {
seq_printf(m, "NIX_AF_TL4[%llu]_SDP_LINK_CFG =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL4X_SDP_LINK_CFG(schq)));
seq_printf(m, "NIX_AF_TL4[%llu]_SCHEDULE =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL4X_SCHEDULE(schq)));
seq_printf(m, "NIX_AF_TL4[%llu]_SHAPE =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SHAPE(schq)));
seq_printf(m, "NIX_AF_TL4[%llu]_CIR =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_TL4X_CIR(schq)));
seq_printf(m, "NIX_AF_TL4[%llu]_PIR =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PIR(schq)));
seq_printf(m, "NIX_AF_TL4[%llu]_SW_XOFF =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(schq)));
seq_printf(m, "NIX_AF_TL4[%llu]_TOPOLOGY =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL4X_TOPOLOGY(schq)));
seq_printf(m, "NIX_AF_TL4[%llu]_PARENT =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(schq)));
seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG0 =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL4X_MD_DEBUG0(schq)));
seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG1 =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL4X_MD_DEBUG1(schq)));
seq_puts(m, "\n");
}
if (lvl == NIX_TXSCH_LVL_TL3) {
seq_printf(m, "NIX_AF_TL3[%llu]_SCHEDULE =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL3X_SCHEDULE(schq)));
seq_printf(m, "NIX_AF_TL3[%llu]_SHAPE =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SHAPE(schq)));
seq_printf(m, "NIX_AF_TL3[%llu]_CIR =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_TL3X_CIR(schq)));
seq_printf(m, "NIX_AF_TL3[%llu]_PIR =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PIR(schq)));
seq_printf(m, "NIX_AF_TL3[%llu]_SW_XOFF =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(schq)));
seq_printf(m, "NIX_AF_TL3[%llu]_TOPOLOGY =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL3X_TOPOLOGY(schq)));
seq_printf(m, "NIX_AF_TL3[%llu]_PARENT =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(schq)));
seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG0 =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL3X_MD_DEBUG0(schq)));
seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG1 =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL3X_MD_DEBUG1(schq)));
link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
& 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
if (lvl == link_level) {
seq_printf(m,
"NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
schq, rvu_read64(rvu, blkaddr,
NIX_AF_TL3_TL2X_BP_STATUS(schq)));
for (link = 0; link < hw->cgx_links; link++)
seq_printf(m,
"NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
schq, link,
rvu_read64(rvu, blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
}
seq_puts(m, "\n");
}
if (lvl == NIX_TXSCH_LVL_TL2) {
seq_printf(m, "NIX_AF_TL2[%llu]_SHAPE =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SHAPE(schq)));
seq_printf(m, "NIX_AF_TL2[%llu]_CIR =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_TL2X_CIR(schq)));
seq_printf(m, "NIX_AF_TL2[%llu]_PIR =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PIR(schq)));
seq_printf(m, "NIX_AF_TL2[%llu]_SW_XOFF =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(schq)));
seq_printf(m, "NIX_AF_TL2[%llu]_TOPOLOGY =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL2X_TOPOLOGY(schq)));
seq_printf(m, "NIX_AF_TL2[%llu]_PARENT =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(schq)));
seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG0 =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL2X_MD_DEBUG0(schq)));
seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG1 =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL2X_MD_DEBUG1(schq)));
link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
& 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
if (lvl == link_level) {
seq_printf(m,
"NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
schq, rvu_read64(rvu, blkaddr,
NIX_AF_TL3_TL2X_BP_STATUS(schq)));
for (link = 0; link < hw->cgx_links; link++)
seq_printf(m,
"NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
schq, link, rvu_read64(rvu, blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
}
seq_puts(m, "\n");
}
if (lvl == NIX_TXSCH_LVL_TL1) {
seq_printf(m, "NIX_AF_TX_LINK[%llu]_NORM_CREDIT =0x%llx\n",
schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TX_LINKX_NORM_CREDIT(schq)));
seq_printf(m, "NIX_AF_TX_LINK[%llu]_HW_XOFF =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TX_LINKX_HW_XOFF(schq)));
seq_printf(m, "NIX_AF_TL1[%llu]_SCHEDULE =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL1X_SCHEDULE(schq)));
seq_printf(m, "NIX_AF_TL1[%llu]_SHAPE =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SHAPE(schq)));
seq_printf(m, "NIX_AF_TL1[%llu]_CIR =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq)));
seq_printf(m, "NIX_AF_TL1[%llu]_SW_XOFF =0x%llx\n", schq,
rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)));
seq_printf(m, "NIX_AF_TL1[%llu]_TOPOLOGY =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL1X_TOPOLOGY(schq)));
seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG0 =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL1X_MD_DEBUG0(schq)));
seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG1 =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL1X_MD_DEBUG1(schq)));
seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_PACKETS =0x%llx\n",
schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL1X_DROPPED_PACKETS(schq)));
seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_BYTES =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL1X_DROPPED_BYTES(schq)));
seq_printf(m, "NIX_AF_TL1[%llu]_RED_PACKETS =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL1X_RED_PACKETS(schq)));
seq_printf(m, "NIX_AF_TL1[%llu]_RED_BYTES =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL1X_RED_BYTES(schq)));
seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_PACKETS =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL1X_YELLOW_PACKETS(schq)));
seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_BYTES =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL1X_YELLOW_BYTES(schq)));
seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_PACKETS =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL1X_GREEN_PACKETS(schq)));
seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_BYTES =0x%llx\n", schq,
rvu_read64(rvu, blkaddr,
NIX_AF_TL1X_GREEN_BYTES(schq)));
seq_puts(m, "\n");
}
}
/* Dumps given tm_topo registers */
static int rvu_dbg_nix_tm_topo_display(struct seq_file *m, void *unused)
{
struct nix_hw *nix_hw = m->private;
struct rvu *rvu = nix_hw->rvu;
struct nix_aq_enq_req aq_req;
struct nix_txsch *txsch;
int nixlf, lvl, schq;
u16 pcifunc;
nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
return -EINVAL;
memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
aq_req.hdr.pcifunc = pcifunc;
aq_req.ctype = NIX_AQ_CTYPE_SQ;
aq_req.op = NIX_AQ_INSTOP_READ;
seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
txsch = &nix_hw->txsch[lvl];
for (schq = 0; schq < txsch->schq.max; schq++) {
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) == pcifunc)
print_tm_topo(m, schq, lvl);
}
}
return 0;
}
static ssize_t rvu_dbg_nix_tm_topo_write(struct file *filp,
const char __user *buffer,
size_t count, loff_t *ppos)
{
struct seq_file *m = filp->private_data;
struct nix_hw *nix_hw = m->private;
struct rvu *rvu = nix_hw->rvu;
struct rvu_pfvf *pfvf;
u16 pcifunc;
u64 nixlf;
int ret;
ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
if (ret)
return ret;
if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
if (!pfvf->sq_ctx) {
dev_warn(rvu->dev, "SQ context is not initialized\n");
return -EINVAL;
}
rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
return count;
}
RVU_DEBUG_SEQ_FOPS(nix_tm_topo, nix_tm_topo_display, nix_tm_topo_write);
/* Dumps given nix_sq's context */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
@ -1818,6 +2160,8 @@ static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
struct nix_hw *nix_hw = m->private;
struct rvu *rvu = nix_hw->rvu;
seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
@ -1829,6 +2173,16 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
cq_ctx->bpid, cq_ctx->bp_ena);
if (!is_rvu_otx2(rvu)) {
seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high);
seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med);
seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low);
seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n",
cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 |
cq_ctx->lbpid_low);
seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena);
}
seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
cq_ctx->update_time, cq_ctx->avg_level);
seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
@ -1840,6 +2194,11 @@ static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
cq_ctx->qsize, cq_ctx->caching);
seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
cq_ctx->substream, cq_ctx->ena);
if (!is_rvu_otx2(rvu)) {
seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac);
seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n",
cq_ctx->cpt_drop_err_en);
}
seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
cq_ctx->drop_ena, cq_ctx->drop);
seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
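A brief worked example of the (W1: lbpid) reassembly printed above, using assumed sub-field values:
/* lbpid_high = 0x2, lbpid_med = 0x5, lbpid_low = 0x3
 *   lbpid = 0x2 << 6 | 0x5 << 3 | 0x3 = 0x80 | 0x28 | 0x3 = 0xab
 * i.e. the three sub-fields are concatenated into a single 9-bit bpid.
 */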
@ -2327,6 +2686,10 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
nix_hw = &rvu->hw->nix[1];
}
debugfs_create_file("tm_tree", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_tm_tree_fops);
debugfs_create_file("tm_topo", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_tm_topo_fops);
debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_sq_ctx_fops);
debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
@ -2749,6 +3112,27 @@ static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
#define RVU_DBG_PRINT_MPLS_TTL(pkt, mask) \
do { \
seq_printf(s, "%ld ", FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, pkt)); \
seq_printf(s, "mask 0x%lx\n", \
FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, mask)); \
} while (0) \
#define RVU_DBG_PRINT_MPLS_LBTCBOS(_pkt, _mask) \
do { \
typeof(_pkt) (pkt) = (_pkt); \
typeof(_mask) (mask) = (_mask); \
seq_printf(s, "%ld %ld %ld\n", \
FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, pkt), \
FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, pkt), \
FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, pkt)); \
seq_printf(s, "\tmask 0x%lx 0x%lx 0x%lx\n", \
FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, mask), \
FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, mask), \
FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, mask)); \
} while (0) \
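The two macros above only split an MPLS label stack entry into its sub-fields. Assuming the OTX2_FLOWER_MASK_MPLS_* masks follow the standard LSE layout (label [31:12], TC [11:9], BoS [8], TTL [7:0]), an equivalent self-contained decode looks like this; the example value is arbitrary.
#include <stdint.h>
#include <stdio.h>
int main(void)
{
	uint32_t lse = 0x12345142;	/* arbitrary example LSE */
	printf("label %u tc %u bos %u ttl %u\n",
	       (lse >> 12) & 0xfffff,	/* 20-bit label */
	       (lse >> 9) & 0x7,	/* 3-bit traffic class */
	       (lse >> 8) & 0x1,	/* bottom-of-stack flag */
	       lse & 0xff);		/* 8-bit TTL */
	return 0;
}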
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
struct rvu_npc_mcam_rule *rule)
{
@ -2780,6 +3164,11 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "mask 0x%x\n",
ntohs(rule->mask.vlan_tci));
break;
case NPC_INNER_VID:
seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci));
seq_printf(s, "mask 0x%x\n",
ntohs(rule->mask.vlan_itci));
break;
case NPC_TOS:
seq_printf(s, "%d ", rule->packet.tos);
seq_printf(s, "mask 0x%x\n", rule->mask.tos);
@ -2820,6 +3209,54 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "%d ", ntohs(rule->packet.dport));
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
break;
case NPC_TCP_FLAGS:
seq_printf(s, "%d ", rule->packet.tcp_flags);
seq_printf(s, "mask 0x%x\n", rule->mask.tcp_flags);
break;
case NPC_IPSEC_SPI:
seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
break;
case NPC_MPLS1_LBTCBOS:
RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[0],
rule->mask.mpls_lse[0]);
break;
case NPC_MPLS1_TTL:
RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[0],
rule->mask.mpls_lse[0]);
break;
case NPC_MPLS2_LBTCBOS:
RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[1],
rule->mask.mpls_lse[1]);
break;
case NPC_MPLS2_TTL:
RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[1],
rule->mask.mpls_lse[1]);
break;
case NPC_MPLS3_LBTCBOS:
RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[2],
rule->mask.mpls_lse[2]);
break;
case NPC_MPLS3_TTL:
RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[2],
rule->mask.mpls_lse[2]);
break;
case NPC_MPLS4_LBTCBOS:
RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[3],
rule->mask.mpls_lse[3]);
break;
case NPC_MPLS4_TTL:
RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3],
rule->mask.mpls_lse[3]);
break;
case NPC_TYPE_ICMP:
seq_printf(s, "%d ", rule->packet.icmp_type);
seq_printf(s, "mask 0x%x\n", rule->mask.icmp_type);
break;
case NPC_CODE_ICMP:
seq_printf(s, "%d ", rule->packet.icmp_code);
seq_printf(s, "mask 0x%x\n", rule->mask.icmp_code);
break;
default:
seq_puts(s, "\n");
break;


@ -5,7 +5,7 @@
*
*/
#include<linux/bitfield.h>
#include <linux/bitfield.h>
#include "rvu.h"
#include "rvu_reg.h"
@ -538,7 +538,7 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
if (!rvu_dl->devlink_wq)
goto err;
return -ENOMEM;
INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
@ -546,9 +546,6 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);
return 0;
err:
rvu_nix_health_reporters_destroy(rvu_dl);
return -ENOMEM;
}
static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
@ -1087,7 +1084,7 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
if (!rvu_dl->devlink_wq)
goto err;
return -ENOMEM;
INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
@ -1095,9 +1092,6 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);
return 0;
err:
rvu_npa_health_reporters_destroy(rvu_dl);
return -ENOMEM;
}
static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
@ -1216,7 +1210,8 @@ static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
u64 dwrr_mtu;
dwrr_mtu = convert_bytes_to_dwrr_mtu(ctx->val.vu32);
rvu_write64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU, dwrr_mtu);
rvu_write64(rvu, BLKADDR_NIX0,
nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM), dwrr_mtu);
return 0;
}
@ -1231,7 +1226,8 @@ static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
if (!rvu->hw->cap.nix_common_dwrr_mtu)
return -EOPNOTSUPP;
dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0,
nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
ctx->val.vu32 = convert_dwrr_mtu_to_bytes(dwrr_mtu);
return 0;
@ -1322,6 +1318,9 @@ static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
struct rvu *rvu = rvu_dl->rvu;
struct rvu_switch *rswitch;
if (rvu->rep_mode)
return -EOPNOTSUPP;
rswitch = &rvu->rswitch;
*mode = rswitch->mode;

File diff suppressed because it is too large


@ -61,28 +61,6 @@ int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
return 0;
}
static int npc_mcam_verify_pf_func(struct rvu *rvu,
struct mcam_entry *entry_data, u8 intf,
u16 pcifunc)
{
u16 pf_func, pf_func_mask;
if (is_npc_intf_rx(intf))
return 0;
pf_func_mask = (entry_data->kw_mask[0] >> 32) &
NPC_KEX_PF_FUNC_MASK;
pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK;
pf_func = be16_to_cpu((__force __be16)pf_func);
if (pf_func_mask != NPC_KEX_PF_FUNC_MASK ||
((pf_func & ~RVU_PFVF_FUNC_MASK) !=
(pcifunc & ~RVU_PFVF_FUNC_MASK)))
return -EINVAL;
return 0;
}
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
int blkaddr;
@ -389,7 +367,13 @@ static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
int bank, nixlf, index;
/* get ucast rule entry index */
nix_get_nixlf(rvu, pf_func, &nixlf, NULL);
if (nix_get_nixlf(rvu, pf_func, &nixlf, NULL)) {
dev_err(rvu->dev, "%s: nixlf not attached to pcifunc:0x%x\n",
__func__, pf_func);
/* Action 0 is drop */
return 0;
}
index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf,
NIXLF_UCAST_ENTRY);
bank = npc_get_bank(mcam, index);
@ -411,7 +395,7 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
owner = mcam->entry2pfvf_map[index];
target_func = (entry->action >> 4) & 0xffff;
/* do nothing when target is LBK/PF or owner is not PF */
if (is_pffunc_af(owner) || is_afvf(target_func) ||
if (is_pffunc_af(owner) || is_lbk_vf(rvu, target_func) ||
(owner & RVU_PFVF_FUNC_MASK) ||
!(target_func & RVU_PFVF_FUNC_MASK))
return;
@ -431,6 +415,10 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
return;
}
/* AF modifies the given action iff the PF/VF has requested it */
if ((entry->action & 0xFULL) != NIX_RX_ACTION_DEFAULT)
return;
/* copy VF default entry action to the VF mcam entry */
rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
target_func);
@ -589,8 +577,8 @@ static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg);
}
static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index)
u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index)
{
int bank = npc_get_bank(mcam, index);
@ -599,6 +587,16 @@ static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
}
void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, u64 cfg)
{
int bank = npc_get_bank(mcam, index);
index &= (mcam->banksize - 1);
return rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank), cfg);
}
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr)
{
@ -610,7 +608,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int blkaddr, index;
/* AF's and SDP VFs work in promiscuous mode */
if (is_afvf(pcifunc) || is_sdp_vf(pcifunc))
if (is_lbk_vf(rvu, pcifunc) || is_sdp_vf(rvu, pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@ -665,6 +663,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
int blkaddr, ucast_idx, index;
struct nix_rx_action action = { 0 };
u64 relaxed_mask;
u8 flow_key_alg;
if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
return;
@ -695,6 +694,8 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
action.op = NIX_RX_ACTIONOP_UCAST;
}
flow_key_alg = action.flow_key_alg;
/* RX_ACTION set to MCAST for CGX PF's */
if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
@ -734,7 +735,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
req.vf = pcifunc;
req.index = action.index;
req.match_id = action.match_id;
req.flow_key_alg = action.flow_key_alg;
req.flow_key_alg = flow_key_alg;
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
@ -772,7 +773,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
return;
/* Skip LBK VFs */
if (is_afvf(pcifunc))
if (is_lbk_vf(rvu, pcifunc))
return;
/* If pkt replication is not supported,
@ -848,10 +849,11 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u8 mac_addr[ETH_ALEN] = { 0 };
struct nix_rx_action action = { 0 };
struct rvu_pfvf *pfvf;
u8 flow_key_alg;
u16 vf_func;
/* Only CGX PF/VF can add allmulticast entry */
if (is_afvf(pcifunc) && is_sdp_vf(pcifunc))
if (is_lbk_vf(rvu, pcifunc) && is_sdp_vf(rvu, pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@ -882,6 +884,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
blkaddr, ucast_idx);
flow_key_alg = action.flow_key_alg;
if (action.op != NIX_RX_ACTIONOP_RSS) {
*(u64 *)&action = 0;
action.op = NIX_RX_ACTIONOP_UCAST;
@ -918,7 +921,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
req.vf = pcifunc | vf_func;
req.index = action.index;
req.match_id = action.match_id;
req.flow_key_alg = action.flow_key_alg;
req.flow_key_alg = flow_key_alg;
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
@ -984,11 +987,38 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
mutex_unlock(&mcam->lock);
}
static void npc_update_rx_action_with_alg_idx(struct rvu *rvu, struct nix_rx_action action,
struct rvu_pfvf *pfvf, int mcam_index, int blkaddr,
int alg_idx)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_hwinfo *hw = rvu->hw;
int bank, op_rss;
if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_index))
return;
op_rss = (!hw->cap.nix_rx_multicast || !pfvf->use_mce_list);
bank = npc_get_bank(mcam, mcam_index);
mcam_index &= (mcam->banksize - 1);
/* If Rx action is MCAST update only RSS algorithm index */
if (!op_rss) {
*(u64 *)&action = rvu_read64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank));
action.flow_key_alg = alg_idx;
}
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank), *(u64 *)&action);
}
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_hwinfo *hw = rvu->hw;
struct nix_rx_action action;
int blkaddr, index, bank;
struct rvu_pfvf *pfvf;
@ -1044,15 +1074,16 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
/* If PF's promiscuous entry is enabled,
* Set RSS action for that entry as well
*/
if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) &&
is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
bank = npc_get_bank(mcam, index);
index &= (mcam->banksize - 1);
npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
alg_idx);
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
*(u64 *)&action);
}
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_ALLMULTI_ENTRY);
/* If PF's allmulti entry is enabled,
* Set RSS action for that entry as well
*/
npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr,
alg_idx);
}
void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
@ -1626,7 +1657,7 @@ static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
struct npc_coalesced_kpu_prfl *img_data = NULL;
int i = 0, rc = -EINVAL;
void __iomem *kpu_prfl_addr;
u16 offset;
u32 offset;
img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
@ -2462,7 +2493,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
* - when available free entries are fewer.
* Lower priority ones out of available free entries are always
* chosen when the 'high vs low' question arises.
*
* For a VF, the base MCAM match rule is set by its PF, and all
* further MCAM rules installed by the VF on its own are
* concatenated with that base rule. Hence PF entries
* should be at lower priority compared to VF entries; otherwise
* the base rule is always hit and the rules installed by the VF
* are of no use. Hence if the request is from a PF, allocate low
* priority entries.
*/
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
goto lprio_alloc;
/* Get the search range for priority allocation request */
if (req->priority) {
@ -2471,17 +2512,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
goto alloc;
}
/* For a VF base MCAM match rule is set by its PF. And all the
* further MCAM rules installed by VF on its own are
* concatenated with the base rule set by its PF. Hence PF entries
* should be at lower priority compared to VF entries. Otherwise
* base rule is hit always and rules installed by VF will be of
* no use. Hence if the request is from PF and NOT a priority
* allocation request then allocate low priority entries.
*/
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
goto lprio_alloc;
/* Find out the search range for non-priority allocation request
*
* Get MCAM free entry count in middle zone.
@ -2511,6 +2541,18 @@ lprio_alloc:
reverse = true;
start = 0;
end = mcam->bmap_entries;
/* Ensure PF requests are always at the bottom. If a PF requests
* a higher/lower priority entry wrt a reference entry, honour
* that criterion and start the search for entries from the bottom,
* not in the mid zone.
*/
if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
req->priority == NPC_MCAM_HIGHER_PRIO)
end = req->ref_entry;
if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
req->priority == NPC_MCAM_LOWER_PRIO)
start = req->ref_entry;
}
alloc:
@ -2638,18 +2680,17 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
rsp->entry = NPC_MCAM_ENTRY_INVALID;
rsp->free_count = 0;
/* Check if ref_entry is within range */
if (req->priority && req->ref_entry >= mcam->bmap_entries) {
dev_err(rvu->dev, "%s: reference entry %d is out of range\n",
__func__, req->ref_entry);
return NPC_MCAM_INVALID_REQ;
}
/* If ref_entry is greater than the range
* then set it to the max value.
*/
if (req->ref_entry > mcam->bmap_entries)
req->ref_entry = mcam->bmap_entries;
/* ref_entry can't be '0' if requested priority is high.
* Can't be last entry if requested priority is low.
*/
if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
((req->ref_entry == (mcam->bmap_entries - 1)) &&
((req->ref_entry == mcam->bmap_entries) &&
req->priority == NPC_MCAM_LOWER_PRIO))
return NPC_MCAM_INVALID_REQ;
@ -2776,12 +2817,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
else
nix_intf = pfvf->nix_rx_intf;
if (!is_pffunc_af(pcifunc) &&
npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) {
rc = NPC_MCAM_INVALID_REQ;
goto exit;
}
/* For AF installed rules, the nix_intf should be set to target NIX */
if (is_pffunc_af(req->hdr.pcifunc))
nix_intf = req->intf;
@ -2914,9 +2949,9 @@ int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
return rc;
}
int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
struct npc_mcam_alloc_counter_req *req,
struct npc_mcam_alloc_counter_rsp *rsp)
static int __npc_mcam_alloc_counter(struct rvu *rvu,
struct npc_mcam_alloc_counter_req *req,
struct npc_mcam_alloc_counter_rsp *rsp)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 pcifunc = req->hdr.pcifunc;
@ -2937,11 +2972,9 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS)
return NPC_MCAM_INVALID_REQ;
mutex_lock(&mcam->lock);
/* Check if unused counters are available or not */
if (!rvu_rsrc_free_count(&mcam->counters)) {
mutex_unlock(&mcam->lock);
return NPC_MCAM_ALLOC_FAILED;
}
@ -2974,12 +3007,27 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
}
}
mutex_unlock(&mcam->lock);
return 0;
}
int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
struct npc_mcam_alloc_counter_req *req,
struct npc_mcam_alloc_counter_rsp *rsp)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int err;
mutex_lock(&mcam->lock);
err = __npc_mcam_alloc_counter(rvu, req, rsp);
mutex_unlock(&mcam->lock);
return err;
}
static int __npc_mcam_free_counter(struct rvu *rvu,
struct npc_mcam_oper_counter_req *req,
struct msg_rsp *rsp)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 index, entry = 0;
@ -2989,10 +3037,8 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
if (blkaddr < 0)
return NPC_MCAM_INVALID_REQ;
mutex_lock(&mcam->lock);
err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
if (err) {
mutex_unlock(&mcam->lock);
return err;
}
@ -3016,10 +3062,66 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
index, req->cntr);
}
mutex_unlock(&mcam->lock);
return 0;
}
int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int err;
mutex_lock(&mcam->lock);
err = __npc_mcam_free_counter(rvu, req, rsp);
mutex_unlock(&mcam->lock);
return err;
}
void __rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
struct rvu_npc_mcam_rule *rule)
{
struct npc_mcam_oper_counter_req free_req = { 0 };
struct msg_rsp free_rsp;
if (!rule->has_cntr)
return;
free_req.hdr.pcifunc = pcifunc;
free_req.cntr = rule->cntr;
__npc_mcam_free_counter(rvu, &free_req, &free_rsp);
rule->has_cntr = false;
}
void __rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
struct rvu_npc_mcam_rule *rule,
struct npc_install_flow_rsp *rsp)
{
struct npc_mcam_alloc_counter_req cntr_req = { 0 };
struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
int err;
cntr_req.hdr.pcifunc = pcifunc;
cntr_req.contig = true;
cntr_req.count = 1;
/* We try to allocate a counter to track the stats of this
* rule. If a counter cannot be allocated then proceed
* without one because counters are more limited than entries.
*/
err = __npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
if (!err && cntr_rsp.count) {
rule->cntr = cntr_rsp.cntr;
rule->has_cntr = true;
rsp->counter = rule->cntr;
} else {
rsp->counter = err;
}
}
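The counter alloc/free paths are now split into lock-taking mbox handlers and __-prefixed helpers that expect mcam->lock to already be held, so rule install/delete can allocate or free a counter inside its own critical section. A generic sketch of that pattern follows; the names and the pthread mutex are illustrative, not driver code.
#include <pthread.h>
static pthread_mutex_t res_lock = PTHREAD_MUTEX_INITIALIZER;
static int free_counters = 16;
/* Caller must hold res_lock */
static int __alloc_counter(void)
{
	if (!free_counters)
		return -1;
	free_counters--;
	return 0;
}
/* Public entry point: takes the lock itself, like the mbox handler */
static int alloc_counter(void)
{
	int err;
	pthread_mutex_lock(&res_lock);
	err = __alloc_counter();
	pthread_mutex_unlock(&res_lock);
	return err;
}
/* Internal path that already holds the lock for a larger operation */
static int install_rule_with_counter(void)
{
	int err;
	pthread_mutex_lock(&res_lock);
	err = __alloc_counter();	/* reuse the helper, no double lock */
	/* ... install the rule inside the same critical section ... */
	pthread_mutex_unlock(&res_lock);
	return err;
}
int main(void)
{
	return alloc_counter() || install_rule_with_counter();
}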
int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp)
{
@ -3133,10 +3235,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
if (!is_npc_interface_valid(rvu, req->intf))
return NPC_MCAM_INVALID_REQ;
if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
req->hdr.pcifunc))
return NPC_MCAM_INVALID_REQ;
/* Try to allocate a MCAM entry */
entry_req.hdr.pcifunc = req->hdr.pcifunc;
entry_req.contig = true;


@ -20,6 +20,7 @@ static const char * const npc_flow_names[] = {
[NPC_VLAN_ETYPE_CTAG] = "vlan ether type ctag",
[NPC_VLAN_ETYPE_STAG] = "vlan ether type stag",
[NPC_OUTER_VID] = "outer vlan id",
[NPC_INNER_VID] = "inner vlan id",
[NPC_TOS] = "tos",
[NPC_IPFRAG_IPV4] = "fragmented IPv4 header ",
[NPC_SIP_IPV4] = "ipv4 source ip",
@ -41,6 +42,18 @@ static const char * const npc_flow_names[] = {
[NPC_SPORT_SCTP] = "sctp source port",
[NPC_DPORT_SCTP] = "sctp destination port",
[NPC_LXMB] = "Mcast/Bcast header ",
[NPC_IPSEC_SPI] = "SPI ",
[NPC_MPLS1_LBTCBOS] = "lse depth 1 label tc bos",
[NPC_MPLS1_TTL] = "lse depth 1 ttl",
[NPC_MPLS2_LBTCBOS] = "lse depth 2 label tc bos",
[NPC_MPLS2_TTL] = "lse depth 2 ttl",
[NPC_MPLS3_LBTCBOS] = "lse depth 3 label tc bos",
[NPC_MPLS3_TTL] = "lse depth 3 ttl",
[NPC_MPLS4_LBTCBOS] = "lse depth 4 label tc bos",
[NPC_MPLS4_TTL] = "lse depth 4 ttl",
[NPC_TYPE_ICMP] = "icmp type",
[NPC_CODE_ICMP] = "icmp code",
[NPC_TCP_FLAGS] = "tcp flags",
[NPC_UNKNOWN] = "unknown",
};
@ -327,6 +340,8 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
*/
struct npc_key_field *vlan_tag1;
struct npc_key_field *vlan_tag2;
/* Inner VLAN TCI for double tagged frames */
struct npc_key_field *vlan_tag3;
u64 *features;
u8 start_lid;
int i;
@ -349,6 +364,7 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
etype_tag2 = &key_fields[NPC_ETYPE_TAG2];
vlan_tag1 = &key_fields[NPC_VLAN_TAG1];
vlan_tag2 = &key_fields[NPC_VLAN_TAG2];
vlan_tag3 = &key_fields[NPC_VLAN_TAG3];
/* if key profile programmed does not extract Ethertype at all */
if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) {
@ -430,6 +446,12 @@ vlan_tci:
goto done;
}
*features |= BIT_ULL(NPC_OUTER_VID);
/* If key profile extracts inner vlan tci */
if (vlan_tag3->nr_kws) {
key_fields[NPC_INNER_VID] = *vlan_tag3;
*features |= BIT_ULL(NPC_INNER_VID);
}
done:
return;
}
@ -507,12 +529,28 @@ do { \
NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2);
NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2);
NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
NPC_SCAN_HDR(NPC_TYPE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 0, 1);
NPC_SCAN_HDR(NPC_CODE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 1, 1);
NPC_SCAN_HDR(NPC_TCP_FLAGS, NPC_LID_LD, NPC_LT_LD_TCP, 12, 2);
NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
NPC_SCAN_HDR(NPC_VLAN_TAG3, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 6, 2);
NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LD, NPC_LT_LD_AH, 4, 4);
NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LE, NPC_LT_LE_ESP, 0, 4);
NPC_SCAN_HDR(NPC_MPLS1_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 0, 3);
NPC_SCAN_HDR(NPC_MPLS1_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 3, 1);
NPC_SCAN_HDR(NPC_MPLS2_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 4, 3);
NPC_SCAN_HDR(NPC_MPLS2_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 7, 1);
NPC_SCAN_HDR(NPC_MPLS3_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 8, 3);
NPC_SCAN_HDR(NPC_MPLS3_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 11, 1);
NPC_SCAN_HDR(NPC_MPLS4_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 12, 3);
NPC_SCAN_HDR(NPC_MPLS4_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 15, 1);
/* SMAC follows the DMAC(which is 6 bytes) */
NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6);
/* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
@ -523,7 +561,7 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u64 *features = &mcam->rx_features;
u64 tcp_udp_sctp;
u64 proto_flags;
int hdr;
if (is_npc_intf_tx(intf))
@ -534,18 +572,22 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
*features |= BIT_ULL(hdr);
}
tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
proto_flags = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);
BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) |
BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP) |
BIT_ULL(NPC_TCP_FLAGS);
/* for tcp/udp/sctp corresponding layer type should be in the key */
if (*features & tcp_udp_sctp) {
if (*features & proto_flags) {
if (!npc_check_field(rvu, blkaddr, NPC_LD, intf))
*features &= ~tcp_udp_sctp;
*features &= ~proto_flags;
else
*features |= BIT_ULL(NPC_IPPROTO_TCP) |
BIT_ULL(NPC_IPPROTO_UDP) |
BIT_ULL(NPC_IPPROTO_SCTP);
BIT_ULL(NPC_IPPROTO_SCTP) |
BIT_ULL(NPC_IPPROTO_ICMP);
}
/* for AH/ICMP/ICMPv6, check if the corresponding layer type is present in the key */
@ -564,6 +606,11 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
*features &= ~BIT_ULL(NPC_OUTER_VID);
/* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */
if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) &&
(*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH))))
*features |= BIT_ULL(NPC_IPSEC_SPI);
/* for vlan ethertypes corresponding layer type should be in the key */
if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
*features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) |
@ -572,6 +619,11 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
/* for L2M/L2B/L3M/L3B, check if the type is present in the key */
if (npc_check_field(rvu, blkaddr, NPC_LXMB, intf))
*features |= BIT_ULL(NPC_LXMB);
for (hdr = NPC_MPLS1_LBTCBOS; hdr <= NPC_MPLS4_TTL; hdr++) {
if (npc_check_field(rvu, blkaddr, hdr, intf))
*features |= BIT_ULL(hdr);
}
}
/* Scan key extraction profile and record how fields of our interest
@ -929,9 +981,60 @@ do { \
ntohs(mask->sport), 0);
NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0,
ntohs(mask->dport), 0);
NPC_WRITE_FLOW(NPC_TYPE_ICMP, icmp_type, pkt->icmp_type, 0,
mask->icmp_type, 0);
NPC_WRITE_FLOW(NPC_CODE_ICMP, icmp_code, pkt->icmp_code, 0,
mask->icmp_code, 0);
NPC_WRITE_FLOW(NPC_TCP_FLAGS, tcp_flags, ntohs(pkt->tcp_flags), 0,
ntohs(mask->tcp_flags), 0);
NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0,
ntohl(mask->spi), 0);
NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
ntohs(mask->vlan_tci), 0);
NPC_WRITE_FLOW(NPC_INNER_VID, vlan_itci, ntohs(pkt->vlan_itci), 0,
ntohs(mask->vlan_itci), 0);
NPC_WRITE_FLOW(NPC_MPLS1_LBTCBOS, mpls_lse,
FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
pkt->mpls_lse[0]), 0,
FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
mask->mpls_lse[0]), 0);
NPC_WRITE_FLOW(NPC_MPLS1_TTL, mpls_lse,
FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
pkt->mpls_lse[0]), 0,
FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
mask->mpls_lse[0]), 0);
NPC_WRITE_FLOW(NPC_MPLS2_LBTCBOS, mpls_lse,
FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
pkt->mpls_lse[1]), 0,
FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
mask->mpls_lse[1]), 0);
NPC_WRITE_FLOW(NPC_MPLS2_TTL, mpls_lse,
FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
pkt->mpls_lse[1]), 0,
FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
mask->mpls_lse[1]), 0);
NPC_WRITE_FLOW(NPC_MPLS3_LBTCBOS, mpls_lse,
FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
pkt->mpls_lse[2]), 0,
FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
mask->mpls_lse[2]), 0);
NPC_WRITE_FLOW(NPC_MPLS3_TTL, mpls_lse,
FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
pkt->mpls_lse[2]), 0,
FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
mask->mpls_lse[2]), 0);
NPC_WRITE_FLOW(NPC_MPLS4_LBTCBOS, mpls_lse,
FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
pkt->mpls_lse[3]), 0,
FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
mask->mpls_lse[3]), 0);
NPC_WRITE_FLOW(NPC_MPLS4_TTL, mpls_lse,
FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
pkt->mpls_lse[3]), 0,
FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
mask->mpls_lse[3]), 0);
NPC_WRITE_FLOW(NPC_IPFRAG_IPV6, next_header, pkt->next_header, 0,
mask->next_header, 0);
@ -978,53 +1081,62 @@ static void rvu_mcam_add_rule(struct npc_mcam *mcam,
static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
struct rvu_npc_mcam_rule *rule)
{
struct npc_mcam_oper_counter_req free_req = { 0 };
struct msg_rsp free_rsp;
struct npc_mcam *mcam = &rvu->hw->mcam;
if (!rule->has_cntr)
return;
mutex_lock(&mcam->lock);
free_req.hdr.pcifunc = pcifunc;
free_req.cntr = rule->cntr;
__rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule);
rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp);
rule->has_cntr = false;
mutex_unlock(&mcam->lock);
}
static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
struct rvu_npc_mcam_rule *rule,
struct npc_install_flow_rsp *rsp)
{
struct npc_mcam_alloc_counter_req cntr_req = { 0 };
struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
int err;
struct npc_mcam *mcam = &rvu->hw->mcam;
cntr_req.hdr.pcifunc = pcifunc;
cntr_req.contig = true;
cntr_req.count = 1;
mutex_lock(&mcam->lock);
/* We try to allocate a counter to track the stats of this
* rule. If a counter cannot be allocated then proceed
* without one because counters are more limited than entries.
*/
err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req,
&cntr_rsp);
if (!err && cntr_rsp.count) {
rule->cntr = cntr_rsp.cntr;
rule->has_cntr = true;
rsp->counter = rule->cntr;
} else {
rsp->counter = err;
}
__rvu_mcam_add_counter_to_rule(rvu, pcifunc, rule, rsp);
mutex_unlock(&mcam->lock);
}
static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct mcam_entry *entry,
struct npc_install_flow_req *req,
u16 target, bool pf_set_vfs_mac)
static int npc_mcast_update_action_index(struct rvu *rvu, struct npc_install_flow_req *req,
u64 op, void *action)
{
int mce_index;
/* If a PF/VF is installing a multicast rule, it is expected
* to have already created a group for the multicast/mirror
* list; otherwise reject the configuration.
* In this case, req->index is set to the multicast/mirror
* group index.
*/
if (req->hdr.pcifunc &&
(op == NIX_RX_ACTIONOP_MCAST || op == NIX_TX_ACTIONOP_MCAST)) {
mce_index = rvu_nix_mcast_get_mce_index(rvu, req->hdr.pcifunc, req->index);
if (mce_index < 0)
return mce_index;
if (op == NIX_RX_ACTIONOP_MCAST)
((struct nix_rx_action *)action)->index = mce_index;
else
((struct nix_tx_action *)action)->index = mce_index;
}
return 0;
}
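npc_mcast_update_action_index() patches only the index field of whichever action word applies, after translating the group index into an MCE index. A simplified, self-contained sketch of that dispatch follows; the bit-field layouts are placeholders, not the real nix_rx_action/nix_tx_action definitions.
#include <stdint.h>
#include <stdio.h>
/* Illustrative layouts only */
struct rx_action { uint64_t op : 4, pf_func : 16, index : 20, rsvd : 24; };
struct tx_action { uint64_t op : 4, rsvd1 : 8, index : 20, rsvd2 : 32; };
/* Patch only the index field that matters for the given direction,
 * mirroring how the helper above rewrites the action with the MCE index.
 */
static int update_mcast_index(int is_rx, void *action, int mce_index)
{
	if (mce_index < 0)
		return mce_index;	/* group lookup failed: reject */
	if (is_rx)
		((struct rx_action *)action)->index = mce_index;
	else
		((struct tx_action *)action)->index = mce_index;
	return 0;
}
int main(void)
{
	struct rx_action rx = { .op = 1 };
	if (!update_mcast_index(1, &rx, 42))
		printf("rx index now %u\n", (unsigned)rx.index);
	return 0;
}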
static int npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct mcam_entry *entry,
struct npc_install_flow_req *req,
u16 target, bool pf_set_vfs_mac)
{
struct rvu_switch *rswitch = &rvu->rswitch;
struct nix_rx_action action;
int ret;
if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac)
req->chan_mask = 0x0; /* Do not care channel */
@ -1036,6 +1148,11 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
action.pf_func = target;
action.op = req->op;
action.index = req->index;
ret = npc_mcast_update_action_index(rvu, req, action.op, (void *)&action);
if (ret)
return ret;
action.match_id = req->match_id;
action.flow_key_alg = req->flow_key_alg;
@ -1052,6 +1169,8 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
action.pf_func = target;
action.op = NIX_RX_ACTIONOP_UCAST;
}
if (req->match_id)
action.match_id = req->match_id;
}
entry->action = *(u64 *)&action;
@ -1067,14 +1186,17 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) |
FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) |
FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4);
return 0;
}
static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct mcam_entry *entry,
struct npc_install_flow_req *req, u16 target)
static int npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct mcam_entry *entry,
struct npc_install_flow_req *req, u16 target)
{
struct nix_tx_action action;
u64 mask = ~0ULL;
int ret;
/* If AF is installing then do not care about
* PF_FUNC in Send Descriptor
@ -1088,6 +1210,11 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
*(u64 *)&action = 0x00;
action.op = req->op;
action.index = req->index;
ret = npc_mcast_update_action_index(rvu, req, action.op, (void *)&action);
if (ret)
return ret;
action.match_id = req->match_id;
entry->action = *(u64 *)&action;
@ -1103,6 +1230,8 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) |
FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) |
FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24);
return 0;
}
static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
@ -1132,10 +1261,15 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
req->intf, blkaddr);
if (is_npc_intf_rx(req->intf))
npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
else
npc_update_tx_entry(rvu, pfvf, entry, req, target);
if (is_npc_intf_rx(req->intf)) {
err = npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
if (err)
return err;
} else {
err = npc_update_tx_entry(rvu, pfvf, entry, req, target);
if (err)
return err;
}
/* Default unicast rules do not exist for TX */
if (is_npc_intf_tx(req->intf))
@ -1192,7 +1326,7 @@ find_rule:
write_req.enable_entry = (u8)enable;
/* if counter is available then clear and use it */
if (req->set_cntr && rule->has_cntr) {
rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00);
rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), req->cntr_val);
write_req.set_cntr = 1;
write_req.cntr = rule->cntr;
}
@ -1252,6 +1386,10 @@ find_rule:
return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc,
req->index, req->match_id);
if (owner && req->op == NIX_RX_ACTIONOP_MCAST)
return rvu_nix_mcast_update_mcam_entry(rvu, req->hdr.pcifunc,
req->index, entry_index);
return 0;
}
@ -1260,6 +1398,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
struct npc_install_flow_rsp *rsp)
{
bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
bool from_rep_dev = !!is_rep_dev(rvu, req->hdr.pcifunc);
struct rvu_switch *rswitch = &rvu->rswitch;
int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
@ -1313,18 +1452,21 @@ process_flow:
* hence modify pcifunc accordingly.
*/
/* AF installing for a PF/VF */
if (!req->hdr.pcifunc)
if (!req->hdr.pcifunc) {
/* AF installing for a PF/VF */
target = req->vf;
/* PF installing for its VF */
else if (!from_vf && req->vf) {
} else if (!from_vf && req->vf && !from_rep_dev) {
/* PF installing for its VF */
target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
pf_set_vfs_mac = req->default_rule &&
(req->features & BIT_ULL(NPC_DMAC));
}
/* msg received from PF/VF */
else
} else if (from_rep_dev && req->vf) {
/* Representor device installing for a representee */
target = req->vf;
} else {
/* msg received from PF/VF */
target = req->hdr.pcifunc;
}
/* ignore chan_mask in case pf func is not AF, revisit later */
if (!is_pffunc_af(req->hdr.pcifunc))
@ -1336,8 +1478,10 @@ process_flow:
pfvf = rvu_get_pfvf(rvu, target);
if (from_rep_dev)
req->channel = pfvf->rx_chan_base;
/* PF installing for its VF */
if (req->hdr.pcifunc && !from_vf && req->vf)
if (req->hdr.pcifunc && !from_vf && req->vf && !from_rep_dev)
set_bit(PF_SET_VF_CFG, &pfvf->flags);
/* update req destination mac addr */
@ -1407,12 +1551,13 @@ static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
struct npc_delete_flow_req *req,
struct msg_rsp *rsp)
struct npc_delete_flow_rsp *rsp)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_npc_mcam_rule *iter, *tmp;
u16 pcifunc = req->hdr.pcifunc;
struct list_head del_list;
int blkaddr;
INIT_LIST_HEAD(&del_list);
@ -1428,6 +1573,10 @@ int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
list_move_tail(&iter->list, &del_list);
/* single rule */
} else if (req->entry == iter->entry) {
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr)
rsp->cntr_val = rvu_read64(rvu, blkaddr,
NPC_AF_MATCH_STATX(iter->cntr));
list_move_tail(&iter->list, &del_list);
break;
}


@ -218,13 +218,54 @@ void npc_config_secret_key(struct rvu *rvu, int blkaddr)
void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
{
struct npc_mcam_kex_hash *mh = rvu->kpu.mkex_hash;
struct hw_cap *hwcap = &rvu->hw->cap;
u8 intf, ld, hdr_offset, byte_len;
struct rvu_hwinfo *hw = rvu->hw;
u8 intf;
u64 cfg;
/* Check if hardware supports hash extraction */
if (!hwcap->npc_hash_extract)
return;
/* Check if the IPv6 source/destination address
* should be hash enabled.
* Hashing reduces the 128-bit SIP/DIP fields to 32 bits
* so that the 224-bit X2 key can be used for IPv6 based filters as
* well, which in turn makes more MCAM entries available for use.
*
* Hashing of IPv6 SIP/DIP is enabled in the following scenarios:
* 1. If the silicon variant supports the hashing feature.
* 2. If the number of bytes of the IP address being extracted is 4,
*    i.e. 32 bits. The assumption here is that if the user wants the
*    8 LSB bytes of the IP address, or the full 16 bytes, the
*    intention is not to use the 32-bit hash.
*/
for (intf = 0; intf < hw->npc_intfs; intf++) {
for (ld = 0; ld < NPC_MAX_LD; ld++) {
cfg = rvu_read64(rvu, blkaddr,
NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf,
NPC_LID_LC,
NPC_LT_LC_IP6,
ld));
hdr_offset = FIELD_GET(NPC_HDR_OFFSET, cfg);
byte_len = FIELD_GET(NPC_BYTESM, cfg);
/* Hashing of IPv6 source/destination address should be
* enabled if,
* hdr_offset == 8 (offset of source IPv6 address) or
* hdr_offset == 24 (offset of destination IPv6
* address) and the number of bytes to be
* extracted is 4. As per the hardware configuration,
* byte_len should be == actual byte_len - 1.
* Hence byte_len is checked against 3 and not 4.
*/
if ((hdr_offset == 8 || hdr_offset == 24) && byte_len == 3)
mh->lid_lt_ld_hash_en[intf][NPC_LID_LC][NPC_LT_LC_IP6][ld] = true;
}
}
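
For reference, a minimal sketch of the per-LD check performed in the loop above, assuming <linux/bitfield.h> and the NPC_HDR_OFFSET/NPC_BYTESM field macros already used there are in scope; the helper name is hypothetical and not part of the patch:

static bool npc_ip6_ld_is_hashable(u64 cfg)
{
	u8 hdr_offset = FIELD_GET(NPC_HDR_OFFSET, cfg);
	u8 byte_len = FIELD_GET(NPC_BYTESM, cfg);

	/* Hardware stores (bytes to extract - 1), so a 4-byte (32-bit)
	 * extraction of the IPv6 SIP (header offset 8) or DIP (header
	 * offset 24) reads back as byte_len == 3.
	 */
	return (hdr_offset == 8 || hdr_offset == 24) && byte_len == 3;
}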
/* Update hash configuration if the field is hash enabled */
for (intf = 0; intf < hw->npc_intfs; intf++) {
npc_program_mkex_hash_rx(rvu, blkaddr, intf);
npc_program_mkex_hash_tx(rvu, blkaddr, intf);
@ -349,22 +390,6 @@ int rvu_mbox_handler_npc_get_field_hash_info(struct rvu *rvu,
return 0;
}
/**
* rvu_npc_exact_mac2u64 - utility function to convert mac address to u64.
* @mac_addr: MAC address.
* Return: mdata for exact match table.
*/
static u64 rvu_npc_exact_mac2u64(u8 *mac_addr)
{
u64 mac = 0;
int index;
for (index = ETH_ALEN - 1; index >= 0; index--)
mac |= ((u64)*mac_addr++) << (8 * index);
return mac;
}
/**
* rvu_exact_prepare_mdata - Make mdata for mcam entry
* @mac: MAC address
@ -375,7 +400,7 @@ static u64 rvu_npc_exact_mac2u64(u8 *mac_addr)
*/
static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask)
{
u64 ldata = rvu_npc_exact_mac2u64(mac);
u64 ldata = ether_addr_to_u64(mac);
/* Please note that mask is 48bit which excludes chan and ctype.
* Increase mask bits if we need to include them as well.
@ -563,7 +588,7 @@ static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable,
u8 ctype, u16 chan, u8 *mac_addr)
{
u64 ldata = rvu_npc_exact_mac2u64(mac_addr);
u64 ldata = ether_addr_to_u64(mac_addr);
/* Enable or disable */
u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0);
@ -1182,7 +1207,9 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i
if (promisc)
goto done;
/* If all rules are deleted and not already in promisc mode; disable cam */
/* If all rules are deleted and not already in promisc mode,
* disable the CAM
*/
if (!*cnt && val < 0) {
*enable_or_disable_cam = true;
goto done;
@ -1443,7 +1470,6 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
u32 drop_mcam_idx;
bool *promisc;
bool rc;
u32 cnt;
table = rvu->hw->table;
@ -1466,17 +1492,14 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
return LMAC_AF_ERR_INVALID_PARAM;
}
*promisc = false;
cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
mutex_unlock(&table->lock);
/* If no dmac filter entries configured, disable drop rule */
if (!cnt)
rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
else
rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
/* Enable drop rule */
rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
true);
dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d, cnt=%d)\n",
__func__, cgx_id, lmac_id, cnt);
dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d)\n",
__func__, cgx_id, lmac_id);
return 0;
}
@ -1494,7 +1517,6 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
u32 drop_mcam_idx;
bool *promisc;
bool rc;
u32 cnt;
table = rvu->hw->table;
@ -1517,17 +1539,14 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
return LMAC_AF_ERR_INVALID_PARAM;
}
*promisc = true;
cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
mutex_unlock(&table->lock);
/* If no dmac filter entries configured, disable drop rule */
if (!cnt)
rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
else
rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
/* disable drop rule */
rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
false);
dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d cnt=%d)\n",
__func__, cgx_id, lmac_id, cnt);
dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d)\n",
__func__, cgx_id, lmac_id);
return 0;
}

View File

@ -70,8 +70,8 @@ static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
[NIX_INTF_RX] = {
[NPC_LID_LC] = {
[NPC_LT_LC_IP6] = {
true,
true,
false,
false,
},
},
},
@ -79,8 +79,8 @@ static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
[NIX_INTF_TX] = {
[NPC_LID_LC] = {
[NPC_LT_LC_IP6] = {
true,
true,
false,
false,
},
},
},

View File

@ -31,8 +31,8 @@ static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
{NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18},
{0x1200, 0x12E0} } },
{NIX_TXSCH_LVL_TL3, 4, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
{0x1610, 0x1618}, {0x1700, 0x17B0} } },
{NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } },
{0x1610, 0x1618}, {0x1700, 0x17C8} } },
{NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17C8} } },
{NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
};

View File

@ -121,6 +121,7 @@
#define NPA_AF_LF_RST (0x0020)
#define NPA_AF_GEN_CFG (0x0030)
#define NPA_AF_NDC_CFG (0x0040)
#define NPA_AF_NDC_SYNC (0x0050)
#define NPA_AF_INP_CTL (0x00D0)
#define NPA_AF_ACTIVE_CYCLES_PC (0x00F0)
#define NPA_AF_AVG_DELAY (0x0100)
@ -239,6 +240,7 @@
#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
#define NIX_AF_RX_CPTX_INST_QSEL(a) (0x0320ull | (uint64_t)(a) << 3)
#define NIX_AF_RX_CPTX_CREDIT(a) (0x0360ull | (uint64_t)(a) << 3)
#define NIX_AF_NDC_RX_SYNC (0x03E0)
#define NIX_AF_NDC_TX_SYNC (0x03F0)
#define NIX_AF_AQ_CFG (0x0400)
#define NIX_AF_AQ_BASE (0x0410)
@ -272,7 +274,8 @@
#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
#define NIX_AF_SQM_DBG_CTL_STATUS (0x750)
#define NIX_AF_DWRR_SDP_MTU (0x790)
#define NIX_AF_DWRR_SDP_MTU (0x790) /* All CN10K except CN10KB */
#define NIX_AF_DWRR_MTUX(a) (0x790 | (a) << 16) /* Only for CN10KB */
#define NIX_AF_DWRR_RPM_MTU (0x7A0)
#define NIX_AF_PSE_CHANNEL_LEVEL (0x800)
#define NIX_AF_PSE_SHAPER_CFG (0x810)
@ -428,6 +431,8 @@
#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
#define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17)
#define NIX_AF_MDQX_IN_MD_COUNT(a) (0x14e0 | (a) << 16)
#define NIX_AF_SMQX_STATUS(a) (0x730 | (a) << 16)
#define NIX_AF_MDQX_OUT_MD_COUNT(a) (0xdb0 | (a) << 16)
#define NIX_PRIV_AF_INT_CFG (0x8000000)
#define NIX_PRIV_LFX_CFG (0x8000010)
@ -436,6 +441,16 @@
#define NIX_AF_LINKX_BASE_MASK GENMASK_ULL(11, 0)
#define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16)
#define NIX_AF_LINKX_MCS_CNT_MASK GENMASK_ULL(33, 32)
#define NIX_CONST_MAX_BPIDS GENMASK_ULL(23, 12)
#define NIX_CONST_SDP_CHANS GENMASK_ULL(11, 0)
#define NIX_VLAN_ETYPE_MASK GENMASK_ULL(63, 48)
#define NIX_AF_MDQ_PARENT_MASK GENMASK_ULL(24, 16)
#define NIX_AF_TL4_PARENT_MASK GENMASK_ULL(23, 16)
#define NIX_AF_TL3_PARENT_MASK GENMASK_ULL(23, 16)
#define NIX_AF_TL2_PARENT_MASK GENMASK_ULL(20, 16)
/* SSO */
#define SSO_AF_CONST (0x1000)
@ -531,6 +546,7 @@
#define CPT_AF_CTX_PSH_PC (0x49450ull)
#define CPT_AF_CTX_PSH_LATENCY_PC (0x49458ull)
#define CPT_AF_CTX_CAM_DATA(a) (0x49800ull | (u64)(a) << 3)
#define CPT_AF_RXC_CFG1 (0x50000ull)
#define CPT_AF_RXC_TIME (0x50010ull)
#define CPT_AF_RXC_TIME_CFG (0x50018ull)
#define CPT_AF_RXC_DFRG (0x50020ull)
@ -547,6 +563,8 @@
#define CPT_LF_CTL 0x10
#define CPT_LF_INPROG 0x40
#define CPT_LF_Q_SIZE 0x100
#define CPT_LF_Q_INST_PTR 0x110
#define CPT_LF_Q_GRP_PTR 0x120
#define CPT_LF_CTX_FLUSH 0x510
@ -730,5 +748,7 @@
#define APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT 23
#define APR_LMT_MAP_ENT_SCH_ENA_SHIFT 22
#define APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT 21
#define LMTST_THROTTLE_MASK GENMASK_ULL(38, 35)
#define LMTST_WR_PEND_MAX 15
#endif /* RVU_REG_H */

View File

@ -0,0 +1,468 @@
// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
*
* Copyright (C) 2024 Marvell.
*
*/
#include <linux/bitfield.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "rvu.h"
#include "rvu_reg.h"
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
static struct _req_type __maybe_unused \
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
{ \
struct _req_type *req; \
\
req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
sizeof(struct _rsp_type)); \
if (!req) \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
return req; \
}
MBOX_UP_REP_MESSAGES
#undef M
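
As a worked example of the template above, each entry of MBOX_UP_REP_MESSAGES expands to an allocator of this shape; the rep_event/msg_rsp types are taken from the call sites below, while the message id constant is an assumed name, not quoted from the mailbox tables:

static struct rep_event __maybe_unused
*otx2_mbox_alloc_msg_rep_event_up_notify(struct rvu *rvu, int devid)
{
	struct rep_event *req;

	req = (struct rep_event *)otx2_mbox_alloc_msg_rsp(
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct rep_event),
		sizeof(struct msg_rsp));
	if (!req)
		return NULL;
	req->hdr.sig = OTX2_MBOX_REQ_SIG;
	req->hdr.id = MBOX_MSG_REP_EVENT_UP_NOTIFY; /* assumed id name */
	return req;
}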
static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, event->pcifunc);
struct rep_event *msg;
int pf;
pf = rvu_get_pf(event->pcifunc);
if (event->event & RVU_EVENT_MAC_ADDR_CHANGE)
ether_addr_copy(pfvf->mac_addr, event->evt_data.mac);
mutex_lock(&rvu->mbox_lock);
msg = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
if (!msg) {
mutex_unlock(&rvu->mbox_lock);
return -ENOMEM;
}
msg->hdr.pcifunc = event->pcifunc;
msg->event = event->event;
memcpy(&msg->evt_data, &event->evt_data, sizeof(struct rep_evt_data));
otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
mutex_unlock(&rvu->mbox_lock);
return 0;
}
static void rvu_rep_wq_handler(struct work_struct *work)
{
struct rvu *rvu = container_of(work, struct rvu, rep_evt_work);
struct rep_evtq_ent *qentry;
struct rep_event *event;
unsigned long flags;
do {
spin_lock_irqsave(&rvu->rep_evtq_lock, flags);
qentry = list_first_entry_or_null(&rvu->rep_evtq_head,
struct rep_evtq_ent,
node);
if (qentry)
list_del(&qentry->node);
spin_unlock_irqrestore(&rvu->rep_evtq_lock, flags);
if (!qentry)
break; /* nothing more to process */
event = &qentry->event;
rvu_rep_up_notify(rvu, event);
kfree(qentry);
} while (1);
}
int rvu_mbox_handler_rep_event_notify(struct rvu *rvu, struct rep_event *req,
struct msg_rsp *rsp)
{
struct rep_evtq_ent *qentry;
qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
if (!qentry)
return -ENOMEM;
qentry->event = *req;
spin_lock(&rvu->rep_evtq_lock);
list_add_tail(&qentry->node, &rvu->rep_evtq_head);
spin_unlock(&rvu->rep_evtq_lock);
queue_work(rvu->rep_evt_wq, &rvu->rep_evt_work);
return 0;
}
int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable)
{
struct rep_event *req;
int pf;
if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
return 0;
pf = rvu_get_pf(rvu->rep_pcifunc);
mutex_lock(&rvu->mbox_lock);
req = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
if (!req) {
mutex_unlock(&rvu->mbox_lock);
return -ENOMEM;
}
req->hdr.pcifunc = rvu->rep_pcifunc;
req->event |= RVU_EVENT_PFVF_STATE;
req->pcifunc = pcifunc;
req->evt_data.vf_state = enable;
otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
mutex_unlock(&rvu->mbox_lock);
return 0;
}
#define RVU_LF_RX_STATS(reg) \
rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, reg))
#define RVU_LF_TX_STATS(reg) \
rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, reg))
int rvu_mbox_handler_nix_lf_stats(struct rvu *rvu,
struct nix_stats_req *req,
struct nix_stats_rsp *rsp)
{
u16 pcifunc = req->pcifunc;
int nixlf, blkaddr, err;
struct msg_req rst_req;
struct msg_rsp rst_rsp;
err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
if (err)
return 0;
if (req->reset) {
rst_req.hdr.pcifunc = pcifunc;
return rvu_mbox_handler_nix_stats_rst(rvu, &rst_req, &rst_rsp);
}
rsp->rx.octs = RVU_LF_RX_STATS(RX_OCTS);
rsp->rx.ucast = RVU_LF_RX_STATS(RX_UCAST);
rsp->rx.bcast = RVU_LF_RX_STATS(RX_BCAST);
rsp->rx.mcast = RVU_LF_RX_STATS(RX_MCAST);
rsp->rx.drop = RVU_LF_RX_STATS(RX_DROP);
rsp->rx.err = RVU_LF_RX_STATS(RX_ERR);
rsp->rx.drop_octs = RVU_LF_RX_STATS(RX_DROP_OCTS);
rsp->rx.drop_mcast = RVU_LF_RX_STATS(RX_DRP_MCAST);
rsp->rx.drop_bcast = RVU_LF_RX_STATS(RX_DRP_BCAST);
rsp->tx.octs = RVU_LF_TX_STATS(TX_OCTS);
rsp->tx.ucast = RVU_LF_TX_STATS(TX_UCAST);
rsp->tx.bcast = RVU_LF_TX_STATS(TX_BCAST);
rsp->tx.mcast = RVU_LF_TX_STATS(TX_MCAST);
rsp->tx.drop = RVU_LF_TX_STATS(TX_DROP);
rsp->pcifunc = req->pcifunc;
return 0;
}
static u16 rvu_rep_get_vlan_id(struct rvu *rvu, u16 pcifunc)
{
int id;
for (id = 0; id < rvu->rep_cnt; id++)
if (rvu->rep2pfvf_map[id] == pcifunc)
return id;
return 0;
}
static int rvu_rep_tx_vlan_cfg(struct rvu *rvu, u16 pcifunc,
u16 vlan_tci, int *vidx)
{
struct nix_vtag_config_rsp rsp = {};
struct nix_vtag_config req = {};
u64 etype = ETH_P_8021Q;
int err;
/* Insert vlan tag */
req.hdr.pcifunc = pcifunc;
req.vtag_size = VTAGSIZE_T4;
req.cfg_type = 0; /* tx vlan cfg */
req.tx.cfg_vtag0 = true;
req.tx.vtag0 = FIELD_PREP(NIX_VLAN_ETYPE_MASK, etype) | vlan_tci;
err = rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp);
if (err) {
dev_err(rvu->dev, "Tx vlan config failed\n");
return err;
}
*vidx = rsp.vtag0_idx;
return 0;
}
static int rvu_rep_rx_vlan_cfg(struct rvu *rvu, u16 pcifunc)
{
struct nix_vtag_config req = {};
struct nix_vtag_config_rsp rsp;
/* config strip, capture and size */
req.hdr.pcifunc = pcifunc;
req.vtag_size = VTAGSIZE_T4;
req.cfg_type = 1; /* rx vlan cfg */
req.rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
req.rx.strip_vtag = true;
req.rx.capture_vtag = false;
return rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp);
}
static int rvu_rep_install_rx_rule(struct rvu *rvu, u16 pcifunc,
u16 entry, bool rte)
{
struct npc_install_flow_req req = {};
struct npc_install_flow_rsp rsp = {};
struct rvu_pfvf *pfvf;
u16 vlan_tci, rep_id;
pfvf = rvu_get_pfvf(rvu, pcifunc);
/* To steer the traffic from Representee to Representor */
rep_id = rvu_rep_get_vlan_id(rvu, pcifunc);
if (rte) {
vlan_tci = rep_id | BIT_ULL(8);
req.vf = rvu->rep_pcifunc;
req.op = NIX_RX_ACTIONOP_UCAST;
req.index = rep_id;
} else {
vlan_tci = rep_id;
req.vf = pcifunc;
req.op = NIX_RX_ACTION_DEFAULT;
}
rvu_rep_rx_vlan_cfg(rvu, req.vf);
req.entry = entry;
req.hdr.pcifunc = 0; /* AF is requester */
req.features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_VLAN_ETYPE_CTAG);
req.vtag0_valid = true;
req.vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
req.packet.vlan_etype = cpu_to_be16(ETH_P_8021Q);
req.mask.vlan_etype = cpu_to_be16(ETH_P_8021Q);
req.packet.vlan_tci = cpu_to_be16(vlan_tci);
req.mask.vlan_tci = cpu_to_be16(0xffff);
req.channel = RVU_SWITCH_LBK_CHAN;
req.chan_mask = 0xffff;
req.intf = pfvf->nix_rx_intf;
return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
static int rvu_rep_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry,
bool rte)
{
struct npc_install_flow_req req = {};
struct npc_install_flow_rsp rsp = {};
struct rvu_pfvf *pfvf;
int vidx, err;
u16 vlan_tci;
u8 lbkid;
pfvf = rvu_get_pfvf(rvu, pcifunc);
vlan_tci = rvu_rep_get_vlan_id(rvu, pcifunc);
if (rte)
vlan_tci |= BIT_ULL(8);
err = rvu_rep_tx_vlan_cfg(rvu, pcifunc, vlan_tci, &vidx);
if (err)
return err;
lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
req.hdr.pcifunc = 0; /* AF is requester */
if (rte) {
req.vf = pcifunc;
} else {
req.vf = rvu->rep_pcifunc;
req.packet.sq_id = vlan_tci;
req.mask.sq_id = 0xffff;
}
req.entry = entry;
req.intf = pfvf->nix_tx_intf;
req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
req.set_cntr = 1;
req.vtag0_def = vidx;
req.vtag0_op = 1;
return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
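
The VLAN TCI used by the two installers above encodes the representor mapping: bits 7:0 carry the index returned by rvu_rep_get_vlan_id() and bit 8 is set only on the "rte" flavour of the rule. A small sketch with a hypothetical helper name:

static u16 rvu_rep_mk_vlan_tci(u16 rep_id, bool rte)
{
	/* rep_id comes from rvu_rep_get_vlan_id(); bit 8 distinguishes
	 * the two rule flavours installed per function.
	 */
	return rte ? (rep_id | BIT_ULL(8)) : rep_id;
}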
int rvu_rep_install_mcam_rules(struct rvu *rvu)
{
struct rvu_switch *rswitch = &rvu->rswitch;
u16 start = rswitch->start_entry;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc, entry = 0;
int pf, vf, numvfs;
int err, nixlf, i;
u8 rep;
for (pf = 1; pf < hw->total_pfs; pf++) {
if (!is_pf_cgxmapped(rvu, pf))
continue;
pcifunc = pf << RVU_PFVF_PF_SHIFT;
rvu_get_nix_blkaddr(rvu, pcifunc);
rep = true;
for (i = 0; i < 2; i++) {
err = rvu_rep_install_rx_rule(rvu, pcifunc,
start + entry, rep);
if (err)
return err;
rswitch->entry2pcifunc[entry++] = pcifunc;
err = rvu_rep_install_tx_rule(rvu, pcifunc,
start + entry, rep);
if (err)
return err;
rswitch->entry2pcifunc[entry++] = pcifunc;
rep = false;
}
rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
for (vf = 0; vf < numvfs; vf++) {
pcifunc = pf << RVU_PFVF_PF_SHIFT |
((vf + 1) & RVU_PFVF_FUNC_MASK);
rvu_get_nix_blkaddr(rvu, pcifunc);
/* Skip installing rules if nixlf is not attached */
err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
continue;
rep = true;
for (i = 0; i < 2; i++) {
err = rvu_rep_install_rx_rule(rvu, pcifunc,
start + entry,
rep);
if (err)
return err;
rswitch->entry2pcifunc[entry++] = pcifunc;
err = rvu_rep_install_tx_rule(rvu, pcifunc,
start + entry,
rep);
if (err)
return err;
rswitch->entry2pcifunc[entry++] = pcifunc;
rep = false;
}
}
}
/* Initialize the wq for handling REP events */
spin_lock_init(&rvu->rep_evtq_lock);
INIT_LIST_HEAD(&rvu->rep_evtq_head);
INIT_WORK(&rvu->rep_evt_work, rvu_rep_wq_handler);
rvu->rep_evt_wq = alloc_workqueue("rep_evt_wq", 0, 0);
if (!rvu->rep_evt_wq) {
dev_err(rvu->dev, "REP workqueue allocation failed\n");
return -ENOMEM;
}
return 0;
}
void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena)
{
struct rvu_switch *rswitch = &rvu->rswitch;
struct npc_mcam *mcam = &rvu->hw->mcam;
u32 max = rswitch->used_entries;
int blkaddr;
u16 entry;
if (!rswitch->used_entries)
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
rvu_switch_enable_lbk_link(rvu, pcifunc, ena);
mutex_lock(&mcam->lock);
for (entry = 0; entry < max; entry++) {
if (rswitch->entry2pcifunc[entry] == pcifunc)
npc_enable_mcam_entry(rvu, mcam, blkaddr, entry, ena);
}
mutex_unlock(&mcam->lock);
}
int rvu_rep_pf_init(struct rvu *rvu)
{
u16 pcifunc = rvu->rep_pcifunc;
struct rvu_pfvf *pfvf;
pfvf = rvu_get_pfvf(rvu, pcifunc);
set_bit(NIXLF_INITIALIZED, &pfvf->flags);
rvu_switch_enable_lbk_link(rvu, pcifunc, true);
rvu_rep_rx_vlan_cfg(rvu, pcifunc);
return 0;
}
int rvu_mbox_handler_esw_cfg(struct rvu *rvu, struct esw_cfg_req *req,
struct msg_rsp *rsp)
{
if (req->hdr.pcifunc != rvu->rep_pcifunc)
return 0;
rvu->rep_mode = req->ena;
if (!rvu->rep_mode)
rvu_npc_free_mcam_entries(rvu, req->hdr.pcifunc, -1);
return 0;
}
int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req,
struct get_rep_cnt_rsp *rsp)
{
int pf, vf, numvfs, hwvf, rep = 0;
u16 pcifunc;
rvu->rep_pcifunc = req->hdr.pcifunc;
rsp->rep_cnt = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
rvu->rep_cnt = rsp->rep_cnt;
rvu->rep2pfvf_map = devm_kzalloc(rvu->dev, rvu->rep_cnt *
sizeof(u16), GFP_KERNEL);
if (!rvu->rep2pfvf_map)
return -ENOMEM;
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
if (!is_pf_cgxmapped(rvu, pf))
continue;
pcifunc = pf << RVU_PFVF_PF_SHIFT;
rvu->rep2pfvf_map[rep] = pcifunc;
rsp->rep_pf_map[rep] = pcifunc;
rep++;
rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
for (vf = 0; vf < numvfs; vf++) {
rvu->rep2pfvf_map[rep] = pcifunc |
((vf + 1) & RVU_PFVF_FUNC_MASK);
rsp->rep_pf_map[rep] = rvu->rep2pfvf_map[rep];
rep++;
}
}
return 0;
}

View File

@ -40,8 +40,12 @@ bool is_sdp_pf(u16 pcifunc)
!(pcifunc & RVU_PFVF_FUNC_MASK));
}
bool is_sdp_vf(u16 pcifunc)
#define RVU_SDP_VF_DEVID 0xA0F7
bool is_sdp_vf(struct rvu *rvu, u16 pcifunc)
{
if (!(pcifunc & ~RVU_PFVF_FUNC_MASK))
return (rvu->vf_devid == RVU_SDP_VF_DEVID);
return (is_sdp_pfvf(pcifunc) &&
!!(pcifunc & RVU_PFVF_FUNC_MASK));
}
@ -52,6 +56,14 @@ int rvu_sdp_init(struct rvu *rvu)
struct rvu_pfvf *pfvf;
u32 i = 0;
if (rvu->fwdata->channel_data.valid) {
sdp_pf_num[0] = 0;
pfvf = &rvu->pf[sdp_pf_num[0]];
pfvf->sdp_info = &rvu->fwdata->channel_data.info;
return 0;
}
while ((i < MAX_SDP) && (pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OTX2_SDP_PF,
pdev)) != NULL) {

View File

@ -71,13 +71,11 @@ enum cpt_af_int_vec_e {
CPT_AF_INT_VEC_CNT = 0x4,
};
enum cpt_10k_af_int_vec_e {
enum cpt_cn10k_flt_int_vec_e {
CPT_10K_AF_INT_VEC_FLT0 = 0x0,
CPT_10K_AF_INT_VEC_FLT1 = 0x1,
CPT_10K_AF_INT_VEC_FLT2 = 0x2,
CPT_10K_AF_INT_VEC_RVU = 0x3,
CPT_10K_AF_INT_VEC_RAS = 0x4,
CPT_10K_AF_INT_VEC_CNT = 0x5,
CPT_10K_AF_INT_VEC_FLT_MAX = 0x3,
};
/* NPA Admin function Interrupt Vector Enumeration */
@ -340,11 +338,12 @@ struct nix_aq_res_s {
/* NIX Completion queue context structure */
struct nix_cq_ctx_s {
u64 base;
u64 rsvd_64_67 : 4;
u64 lbp_ena : 1;
u64 lbpid_low : 3;
u64 bp_ena : 1;
u64 rsvd_69_71 : 3;
u64 lbpid_med : 3;
u64 bpid : 9;
u64 rsvd_81_83 : 3;
u64 lbpid_high : 3;
u64 qint_idx : 7;
u64 cq_err : 1;
u64 cint_idx : 7;
@ -358,10 +357,14 @@ struct nix_cq_ctx_s {
u64 drop : 8;
u64 drop_ena : 1;
u64 ena : 1;
u64 rsvd_210_211 : 2;
u64 substream : 20;
u64 cpt_drop_err_en : 1;
u64 rsvd_211 : 1;
u64 substream : 12;
u64 stash_thresh : 4;
u64 lbp_frac : 4;
u64 caching : 1;
u64 rsvd_233_235 : 3;
u64 stashing : 1;
u64 rsvd_234_235 : 2;
u64 qsize : 4;
u64 cq_err_int : 8;
u64 cq_err_int_ena : 8;
@ -820,4 +823,30 @@ enum nix_tx_vtag_op {
#define VTAG_STRIP BIT_ULL(4)
#define VTAG_CAPTURE BIT_ULL(5)
/* NIX TX stats */
enum nix_stat_lf_tx {
TX_UCAST = 0x0,
TX_BCAST = 0x1,
TX_MCAST = 0x2,
TX_DROP = 0x3,
TX_OCTS = 0x4,
TX_STATS_ENUM_LAST,
};
/* NIX RX stats */
enum nix_stat_lf_rx {
RX_OCTS = 0x0,
RX_UCAST = 0x1,
RX_BCAST = 0x2,
RX_MCAST = 0x3,
RX_DROP = 0x4,
RX_DROP_OCTS = 0x5,
RX_FCS = 0x6,
RX_ERR = 0x7,
RX_DRP_BCAST = 0x8,
RX_DRP_MCAST = 0x9,
RX_DRP_L3BCAST = 0xa,
RX_DRP_L3MCAST = 0xb,
RX_STATS_ENUM_LAST,
};
#endif /* RVU_STRUCT_H */

View File

@ -8,6 +8,17 @@
#include <linux/bitfield.h>
#include "rvu.h"
void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool enable)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct nix_hw *nix_hw;
nix_hw = get_nix_hw(rvu->hw, pfvf->nix_blkaddr);
/* Enable LBK links with channel 63 for TX MCAM rule */
rvu_nix_tx_tl2_cfg(rvu, pfvf->nix_blkaddr, pcifunc,
&nix_hw->txsch[NIX_TXSCH_LVL_TL2], enable);
}
static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc,
u16 chan_mask)
{
@ -52,6 +63,8 @@ static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry)
if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags))
return 0;
rvu_switch_enable_lbk_link(rvu, pcifunc, true);
lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
ether_addr_copy(req.packet.dmac, pfvf->mac_addr);
eth_broadcast_addr((u8 *)&req.mask.dmac);
@ -145,6 +158,7 @@ void rvu_switch_enable(struct rvu *rvu)
struct npc_mcam_alloc_entry_req alloc_req = { 0 };
struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
struct npc_delete_flow_req uninstall_req = { 0 };
struct npc_delete_flow_rsp uninstall_rsp = { 0 };
struct npc_mcam_free_entry_req free_req = { 0 };
struct rvu_switch *rswitch = &rvu->rswitch;
struct msg_rsp rsp;
@ -152,6 +166,8 @@ void rvu_switch_enable(struct rvu *rvu)
alloc_req.contig = true;
alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
if (rvu->rep_mode)
alloc_req.count = alloc_req.count * 4;
ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
&alloc_rsp);
if (ret) {
@ -175,7 +191,12 @@ void rvu_switch_enable(struct rvu *rvu)
rswitch->used_entries = alloc_rsp.count;
rswitch->start_entry = alloc_rsp.entry;
ret = rvu_switch_install_rules(rvu);
if (rvu->rep_mode) {
rvu_rep_pf_init(rvu);
ret = rvu_rep_install_mcam_rules(rvu);
} else {
ret = rvu_switch_install_rules(rvu);
}
if (ret)
goto uninstall_rules;
@ -184,7 +205,7 @@ void rvu_switch_enable(struct rvu *rvu)
uninstall_rules:
uninstall_req.start = rswitch->start_entry;
uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
kfree(rswitch->entry2pcifunc);
free_entries:
free_req.all = 1;
@ -196,6 +217,7 @@ exit:
void rvu_switch_disable(struct rvu *rvu)
{
struct npc_delete_flow_req uninstall_req = { 0 };
struct npc_delete_flow_rsp uninstall_rsp = { 0 };
struct npc_mcam_free_entry_req free_req = { 0 };
struct rvu_switch *rswitch = &rvu->rswitch;
struct rvu_hwinfo *hw = rvu->hw;
@ -207,6 +229,9 @@ void rvu_switch_disable(struct rvu *rvu)
if (!rswitch->used_entries)
return;
if (rvu->rep_mode)
goto free_ents;
for (pf = 1; pf < hw->total_pfs; pf++) {
if (!is_pf_cgxmapped(rvu, pf))
continue;
@ -218,6 +243,9 @@ void rvu_switch_disable(struct rvu *rvu)
"Reverting RX rule for PF%d failed(%d)\n",
pf, err);
/* Disable LBK link */
rvu_switch_enable_lbk_link(rvu, pcifunc, false);
rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
for (vf = 0; vf < numvfs; vf++) {
pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
@ -226,24 +254,30 @@ void rvu_switch_disable(struct rvu *rvu)
dev_err(rvu->dev,
"Reverting RX rule for PF%dVF%d failed(%d)\n",
pf, vf, err);
rvu_switch_enable_lbk_link(rvu, pcifunc, false);
}
}
free_ents:
uninstall_req.start = rswitch->start_entry;
uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
free_req.all = 1;
rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
rswitch->used_entries = 0;
kfree(rswitch->entry2pcifunc);
}
void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc)
void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena)
{
struct rvu_switch *rswitch = &rvu->rswitch;
u32 max = rswitch->used_entries;
u16 entry;
if (rvu->rep_mode)
return rvu_rep_update_rules(rvu, pcifunc, ena);
if (!rswitch->used_entries)
return;

View File

@ -5,13 +5,16 @@
obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o otx2_ptp.o
obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
otx2_devlink.o
rvu_nicvf-y := otx2_vf.o otx2_devlink.o
otx2_devlink.o qos_sq.o qos.o
rvu_nicvf-y := otx2_vf.o
rvu_rep-y := rep.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
rvu_nicpf-$(CONFIG_XFRM_OFFLOAD) += cn10k_ipsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af

View File

@ -72,7 +72,7 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf)
}
EXPORT_SYMBOL(cn10k_lmtst_init);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
{
struct nix_cn10k_aq_enq_req *aq;
struct otx2_nic *pfvf = dev;
@ -88,7 +88,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
aq->sq.sq_int_ena = NIX_SQINT_BITS;
@ -201,6 +201,11 @@ int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf)
rsp = (struct nix_bandprof_alloc_rsp *)
otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
if (IS_ERR(rsp)) {
rc = PTR_ERR(rsp);
goto out;
}
if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) {
rc = -EIO;
goto out;
@ -448,6 +453,9 @@ int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
aq->prof.pebs_mantissa = 0;
aq->prof_mask.pebs_mantissa = 0xFF;
aq->prof.hl_en = 0;
aq->prof_mask.hl_en = 1;
/* Fill AQ info */
aq->qidx = profile;
aq->ctype = NIX_AQ_CTYPE_BANDPROF;

View File

@ -26,7 +26,7 @@ static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu)
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int cn10k_lmtst_init(struct otx2_nic *pfvf);
int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);

File diff suppressed because it is too large

View File

@ -0,0 +1,265 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell IPSEC offload driver
*
* Copyright (C) 2024 Marvell.
*/
#ifndef CN10K_IPSEC_H
#define CN10K_IPSEC_H
#include <linux/types.h>
DECLARE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
/* CPT instruction size in bytes */
#define CN10K_CPT_INST_SIZE 64
/* CPT instruction (CPT_INST_S) queue length */
#define CN10K_CPT_INST_QLEN 8200
/* CPT instruction queue size passed to HW is in units of
* 40*CPT_INST_S messages.
*/
#define CN10K_CPT_SIZE_DIV40 (CN10K_CPT_INST_QLEN / 40)
/* CPT needs 320 free entries */
#define CN10K_CPT_INST_QLEN_EXTRA_BYTES (320 * CN10K_CPT_INST_SIZE)
#define CN10K_CPT_EXTRA_SIZE_DIV40 (320 / 40)
/* CPT instruction queue length in bytes */
#define CN10K_CPT_INST_QLEN_BYTES \
((CN10K_CPT_SIZE_DIV40 * 40 * CN10K_CPT_INST_SIZE) + \
CN10K_CPT_INST_QLEN_EXTRA_BYTES)
/* CPT instruction group queue length in bytes */
#define CN10K_CPT_INST_GRP_QLEN_BYTES \
((CN10K_CPT_SIZE_DIV40 + CN10K_CPT_EXTRA_SIZE_DIV40) * 16)
/* CPT FC length in bytes */
#define CN10K_CPT_Q_FC_LEN 128
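
For concreteness, a worked expansion of the sizing macros above (plain arithmetic on the values shown, not a figure quoted from hardware documentation):

/* CN10K_CPT_SIZE_DIV40          = 8200 / 40               = 205
 * CN10K_CPT_EXTRA_SIZE_DIV40    = 320 / 40                 = 8
 * CN10K_CPT_INST_QLEN_BYTES     = 205 * 40 * 64 + 320 * 64 = 545280
 * CN10K_CPT_INST_GRP_QLEN_BYTES = (205 + 8) * 16           = 3408
 */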
/* Default CPT engine group for ipsec offload */
#define CN10K_DEF_CPT_IPSEC_EGRP 1
/* CN10K CPT LF registers */
#define CPT_LFBASE (BLKTYPE_CPT << RVU_FUNC_BLKADDR_SHIFT)
#define CN10K_CPT_LF_CTL (CPT_LFBASE | 0x10)
#define CN10K_CPT_LF_INPROG (CPT_LFBASE | 0x40)
#define CN10K_CPT_LF_Q_BASE (CPT_LFBASE | 0xf0)
#define CN10K_CPT_LF_Q_SIZE (CPT_LFBASE | 0x100)
#define CN10K_CPT_LF_Q_INST_PTR (CPT_LFBASE | 0x110)
#define CN10K_CPT_LF_Q_GRP_PTR (CPT_LFBASE | 0x120)
#define CN10K_CPT_LF_NQX(a) (CPT_LFBASE | 0x400 | (a) << 3)
#define CN10K_CPT_LF_CTX_FLUSH (CPT_LFBASE | 0x510)
/* IPSEC Instruction opcodes */
#define CN10K_IPSEC_MAJOR_OP_WRITE_SA 0x01UL
#define CN10K_IPSEC_MINOR_OP_WRITE_SA 0x09UL
#define CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC 0x2AUL
enum cn10k_cpt_comp_e {
CN10K_CPT_COMP_E_NOTDONE = 0x00,
CN10K_CPT_COMP_E_GOOD = 0x01,
CN10K_CPT_COMP_E_FAULT = 0x02,
CN10K_CPT_COMP_E_HWERR = 0x04,
CN10K_CPT_COMP_E_INSTERR = 0x05,
CN10K_CPT_COMP_E_WARN = 0x06,
CN10K_CPT_COMP_E_MASK = 0x3F
};
struct cn10k_cpt_inst_queue {
u8 *vaddr;
u8 *real_vaddr;
dma_addr_t dma_addr;
dma_addr_t real_dma_addr;
u32 size;
};
enum cn10k_cpt_hw_state_e {
CN10K_CPT_HW_UNAVAILABLE,
CN10K_CPT_HW_AVAILABLE,
CN10K_CPT_HW_IN_USE
};
struct cn10k_ipsec {
/* Outbound CPT */
u64 io_addr;
atomic_t cpt_state;
struct cn10k_cpt_inst_queue iq;
/* SA info */
u32 sa_size;
u32 outb_sa_count;
struct work_struct sa_work;
struct workqueue_struct *sa_workq;
};
/* CN10K IPSEC Security Association (SA) */
/* SA direction */
#define CN10K_IPSEC_SA_DIR_INB 0
#define CN10K_IPSEC_SA_DIR_OUTB 1
/* SA protocol */
#define CN10K_IPSEC_SA_IPSEC_PROTO_AH 0
#define CN10K_IPSEC_SA_IPSEC_PROTO_ESP 1
/* SA Encryption Type */
#define CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM 5
/* SA IPSEC mode Transport/Tunnel */
#define CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT 0
#define CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL 1
/* SA AES Key Length */
#define CN10K_IPSEC_SA_AES_KEY_LEN_128 1
#define CN10K_IPSEC_SA_AES_KEY_LEN_192 2
#define CN10K_IPSEC_SA_AES_KEY_LEN_256 3
/* IV Source */
#define CN10K_IPSEC_SA_IV_SRC_COUNTER 0
#define CN10K_IPSEC_SA_IV_SRC_PACKET 3
struct cn10k_tx_sa_s {
u64 esn_en : 1; /* W0 */
u64 rsvd_w0_1_8 : 8;
u64 hw_ctx_off : 7;
u64 ctx_id : 16;
u64 rsvd_w0_32_47 : 16;
u64 ctx_push_size : 7;
u64 rsvd_w0_55 : 1;
u64 ctx_hdr_size : 2;
u64 aop_valid : 1;
u64 rsvd_w0_59 : 1;
u64 ctx_size : 4;
u64 w1; /* W1 */
u64 sa_valid : 1; /* W2 */
u64 sa_dir : 1;
u64 rsvd_w2_2_3 : 2;
u64 ipsec_mode : 1;
u64 ipsec_protocol : 1;
u64 aes_key_len : 2;
u64 enc_type : 3;
u64 rsvd_w2_11_19 : 9;
u64 iv_src : 2;
u64 rsvd_w2_22_31 : 10;
u64 rsvd_w2_32_63 : 32;
u64 w3; /* W3 */
u8 cipher_key[32]; /* W4 - W7 */
u32 rsvd_w8_0_31; /* W8 : IV */
u32 iv_gcm_salt;
u64 rsvd_w9_w30[22]; /* W9 - W30 */
u64 hw_ctx[6]; /* W31 - W36 */
};
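
A quick size check on the structure above, from the word annotations alone: W0 through W36 is 37 64-bit words, i.e. 296 bytes per SA, with the last six words (W31-W36) reserved for hardware context.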
/* CPT instruction parameter-1 */
#define CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM 0x1
#define CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM 0x2
#define CN10K_IPSEC_INST_PARAM1_CRYPTO_MODE 0x20
#define CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT 8
/* CPT instruction parameter-2 */
#define CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT 0
#define CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT 8
/* CPT Instruction Structure */
struct cpt_inst_s {
u64 nixtxl : 3; /* W0 */
u64 doneint : 1;
u64 rsvd_w0_4_15 : 12;
u64 dat_offset : 8;
u64 ext_param1 : 8;
u64 nixtx_offset : 20;
u64 rsvd_w0_52_63 : 12;
u64 res_addr; /* W1 */
u64 tag : 32; /* W2 */
u64 tt : 2;
u64 grp : 10;
u64 rsvd_w2_44_47 : 4;
u64 rvu_pf_func : 16;
u64 qord : 1; /* W3 */
u64 rsvd_w3_1_2 : 2;
u64 wqe_ptr : 61;
u64 dlen : 16; /* W4 */
u64 param2 : 16;
u64 param1 : 16;
u64 opcode_major : 8;
u64 opcode_minor : 8;
u64 dptr; /* W5 */
u64 rptr; /* W6 */
u64 cptr : 60; /* W7 */
u64 ctx_val : 1;
u64 egrp : 3;
};
/* CPT Instruction Result Structure */
struct cpt_res_s {
u64 compcode : 7; /* W0 */
u64 doneint : 1;
u64 uc_compcode : 8;
u64 uc_info : 48;
u64 esn; /* W1 */
};
/* CPT SG structure */
struct cpt_sg_s {
u64 seg1_size : 16;
u64 seg2_size : 16;
u64 seg3_size : 16;
u64 segs : 2;
u64 rsvd_63_50 : 14;
};
/* CPT LF_INPROG Register */
#define CPT_LF_INPROG_INFLIGHT GENMASK_ULL(8, 0)
#define CPT_LF_INPROG_GRB_CNT GENMASK_ULL(39, 32)
#define CPT_LF_INPROG_GWB_CNT GENMASK_ULL(47, 40)
/* CPT LF_Q_GRP_PTR Register */
#define CPT_LF_Q_GRP_PTR_DQ_PTR GENMASK_ULL(14, 0)
#define CPT_LF_Q_GRP_PTR_NQ_PTR GENMASK_ULL(46, 32)
/* CPT LF_Q_SIZE Register */
#define CPT_LF_Q_BASE_ADDR GENMASK_ULL(52, 7)
/* CPT LF_Q_SIZE Register */
#define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0)
/* CPT LF CTX Flush Register */
#define CPT_LF_CTX_FLUSH GENMASK_ULL(45, 0)
#ifdef CONFIG_XFRM_OFFLOAD
int cn10k_ipsec_init(struct net_device *netdev);
void cn10k_ipsec_clean(struct otx2_nic *pf);
int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable);
bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
struct sk_buff *skb, int num_segs, int *offset);
bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
struct otx2_snd_queue *sq, struct sk_buff *skb,
int num_segs, int size);
#else
static inline __maybe_unused int cn10k_ipsec_init(struct net_device *netdev)
{
return 0;
}
static inline __maybe_unused void cn10k_ipsec_clean(struct otx2_nic *pf)
{
}
static inline __maybe_unused
int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable)
{
return 0;
}
static inline bool __maybe_unused
otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
struct sk_buff *skb, int num_segs, int *offset)
{
return true;
}
static inline bool __maybe_unused
cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq,
struct otx2_snd_queue *sq, struct sk_buff *skb,
int num_segs, int size)
{
return true;
}
#endif
#endif // CN10K_IPSEC_H

File diff suppressed because it is too large

View File

@ -9,12 +9,20 @@
#include <linux/pci.h>
#include <net/page_pool/helpers.h>
#include <net/tso.h>
#include <linux/bitfield.h>
#include <linux/dcbnl.h>
#include <net/xfrm.h>
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "cn10k.h"
static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf)
{
return IS_ENABLED(CONFIG_DCB) && !!pfvf->pfc_en;
}
static void otx2_nix_rq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
{
@ -82,6 +90,7 @@ int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);
return 1;
}
EXPORT_SYMBOL(otx2_update_rq_stats);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
{
@ -90,9 +99,15 @@ int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
if (!pfvf->qset.sq)
return 0;
if (qidx >= pfvf->hw.non_qos_queues) {
if (!test_bit(qidx - pfvf->hw.non_qos_queues, pfvf->qos.qos_sq_bmap))
return 0;
}
otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx);
return 1;
}
EXPORT_SYMBOL(otx2_update_sq_stats);
void otx2_get_dev_stats(struct otx2_nic *pfvf)
{
@ -221,7 +236,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
u16 maxlen;
int err;
maxlen = otx2_get_max_mtu(pfvf) + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
maxlen = pfvf->hw.max_mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
@ -230,7 +245,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
return -ENOMEM;
}
req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
req->maxlen = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
/* Use max receive length supported by hardware for loopback devices */
if (is_otx2_lbkvf(pfvf->pdev))
@ -240,13 +255,14 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
mutex_unlock(&pfvf->mbox.lock);
return err;
}
EXPORT_SYMBOL(otx2_hw_set_mtu);
int otx2_config_pause_frm(struct otx2_nic *pfvf)
{
struct cgx_pause_frm_cfg *req;
int err;
if (is_otx2_lbkvf(pfvf->pdev))
if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return 0;
mutex_lock(&pfvf->mbox.lock);
@ -514,8 +530,8 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
(pfvf->hw.cq_ecount_wait - 1));
}
int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
dma_addr_t *dma)
static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
dma_addr_t *dma)
{
u8 *buf;
@ -533,8 +549,8 @@ int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
return 0;
}
static int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
dma_addr_t *dma)
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
dma_addr_t *dma)
{
int ret;
@ -617,6 +633,10 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU;
req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
(0x2ULL << 36);
/* Set link type for DWRR MTU selection on CN10K silicons */
if (!is_dev_otx2(pfvf->pdev))
req->regval[0] |= FIELD_PREP(GENMASK_ULL(58, 57),
(u64)hw->smq_link_type);
req->num_regs++;
/* MDQ config */
parent = schq_list[NIX_TXSCH_LVL_TL4][prio];
@ -627,20 +647,31 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
req->regval[2] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL4) {
int sdp_chan = hw->tx_chan_base + prio;
if (is_otx2_sdp_rep(pfvf->pdev))
prio = 0;
parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
req->regval[0] = parent << 16;
req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
if (is_otx2_sdp_rep(pfvf->pdev)) {
req->num_regs++;
req->reg[2] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
req->regval[2] = BIT_ULL(12) | BIT_ULL(13) |
(sdp_chan & 0xff);
}
} else if (lvl == NIX_TXSCH_LVL_TL3) {
parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
req->regval[0] = parent << 16;
req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
if (lvl == hw->txschq_link_cfg_lvl) {
if (lvl == hw->txschq_link_cfg_lvl &&
!is_otx2_sdp_rep(pfvf->pdev)) {
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
/* Enable this queue and backpressure
@ -651,13 +682,14 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
} else if (lvl == NIX_TXSCH_LVL_TL2) {
parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
req->regval[0] = parent << 16;
req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
req->regval[1] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | dwrr_val;
if (lvl == hw->txschq_link_cfg_lvl) {
if (lvl == hw->txschq_link_cfg_lvl &&
!is_otx2_sdp_rep(pfvf->pdev)) {
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
/* Enable this queue and backpressure
@ -679,7 +711,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
req->num_regs++;
req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
req->regval[1] = hw->txschq_aggr_lvl_rr_prio << 1;
req->num_regs++;
req->reg[2] = NIX_AF_TL1X_CIR(schq);
@ -716,8 +748,10 @@ EXPORT_SYMBOL(otx2_smq_flush);
int otx2_txsch_alloc(struct otx2_nic *pfvf)
{
int chan_cnt = pfvf->hw.tx_chan_cnt;
struct nix_txsch_alloc_req *req;
int lvl;
struct nix_txsch_alloc_rsp *rsp;
int lvl, schq, rc;
/* Get memory to put this msg */
req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
@ -728,52 +762,100 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf)
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
req->schq[lvl] = 1;
return otx2_sync_mbox_msg(&pfvf->mbox);
if (is_otx2_sdp_rep(pfvf->pdev) && chan_cnt > 1) {
req->schq[NIX_TXSCH_LVL_SMQ] = chan_cnt;
req->schq[NIX_TXSCH_LVL_TL4] = chan_cnt;
}
rc = otx2_sync_mbox_msg(&pfvf->mbox);
if (rc)
return rc;
rsp = (struct nix_txsch_alloc_rsp *)
otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
if (IS_ERR(rsp))
return PTR_ERR(rsp);
/* Setup transmit scheduler list */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
pfvf->hw.txschq_cnt[lvl] = rsp->schq[lvl];
for (schq = 0; schq < rsp->schq[lvl]; schq++)
pfvf->hw.txschq_list[lvl][schq] =
rsp->schq_list[lvl][schq];
}
pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;
return 0;
}
int otx2_txschq_stop(struct otx2_nic *pfvf)
void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq)
{
struct nix_txsch_free_req *free_req;
int lvl, schq, err;
int err;
mutex_lock(&pfvf->mbox.lock);
/* Free the transmit schedulers */
free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
if (!free_req) {
mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
netdev_err(pfvf->netdev,
"Failed alloc txschq free req\n");
return;
}
free_req->flags = TXSCHQ_FREE_ALL;
free_req->schq_lvl = lvl;
free_req->schq = schq;
err = otx2_sync_mbox_msg(&pfvf->mbox);
if (err) {
netdev_err(pfvf->netdev,
"Failed stop txschq %d at level %d\n", schq, lvl);
}
mutex_unlock(&pfvf->mbox.lock);
}
EXPORT_SYMBOL(otx2_txschq_free_one);
void otx2_txschq_stop(struct otx2_nic *pfvf)
{
int lvl, schq, idx;
/* free non QOS TLx nodes */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
for (idx = 0; idx < pfvf->hw.txschq_cnt[lvl]; idx++) {
otx2_txschq_free_one(pfvf, lvl,
pfvf->hw.txschq_list[lvl][idx]);
}
}
/* Clear the txschq list */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++)
pfvf->hw.txschq_list[lvl][schq] = 0;
}
return err;
}
void otx2_sqb_flush(struct otx2_nic *pfvf)
{
int qidx, sqe_tail, sqe_head;
struct otx2_snd_queue *sq;
u64 incr, *ptr, val;
int timeout = 1000;
ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
sq = &pfvf->qset.sq[qidx];
if (!sq->sqb_ptrs)
continue;
incr = (u64)qidx << 32;
while (timeout) {
val = otx2_atomic64_add(incr, ptr);
sqe_head = (val >> 20) & 0x3F;
sqe_tail = (val >> 28) & 0x3F;
if (sqe_head == sqe_tail)
break;
usleep_range(1, 3);
timeout--;
}
val = otx2_atomic64_add(incr, ptr);
sqe_head = (val >> 20) & 0x3F;
sqe_tail = (val >> 28) & 0x3F;
if (sqe_head != sqe_tail)
usleep_range(50, 60);
}
}
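
The head/tail comparison above can be restated as a predicate; a hypothetical helper, relying only on the NIX_LF_SQ_OP_STATUS layout the code already assumes (SQ index in the upper 32 bits of the atomic operand, SQE head in bits 25:20, tail in bits 33:28):

static bool otx2_sq_is_quiesced(u64 op_status)
{
	u8 sqe_head = (op_status >> 20) & 0x3F;
	u8 sqe_tail = (op_status >> 28) & 0x3F;

	/* Head equal to tail means no SQEs are left in flight */
	return sqe_head == sqe_tail;
}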
@ -826,7 +908,7 @@ static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
return otx2_sync_mbox_msg(&pfvf->mbox);
}
int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
{
struct otx2_nic *pfvf = dev;
struct otx2_snd_queue *sq;
@ -845,7 +927,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
aq->sq.sq_int_ena = NIX_SQINT_BITS;
@ -863,11 +945,12 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
return otx2_sync_mbox_msg(&pfvf->mbox);
}
static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
{
struct otx2_qset *qset = &pfvf->qset;
struct otx2_snd_queue *sq;
struct otx2_pool *pool;
u8 chan_offset;
int err;
pool = &pfvf->qset.pool[sqb_aura];
@ -879,6 +962,29 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (err)
return err;
/* Allocate memory for NIX SQE (which includes NIX SG) and CPT SG.
* The NIX and CPT SG entries are the same size. Memory for the CPT SG
* is allocated alongside the NIX SQE to keep the base addresses aligned.
* Layout of a NIX SQE and CPT SG entry:
* -----------------------------
* | CPT Scatter Gather |
* | (SQE SIZE) |
* | |
* -----------------------------
* | NIX SQE |
* | (SQE SIZE) |
* | |
* -----------------------------
*/
err = qmem_alloc(pfvf->dev, &sq->sqe_ring, qset->sqe_cnt,
sq->sqe_size * 2);
if (err)
return err;
err = qmem_alloc(pfvf->dev, &sq->cpt_resp, qset->sqe_cnt, 64);
if (err)
return err;
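
With the doubled entry size, each slot of sq->sqe_ring holds the CPT SG area followed by the NIX SQE, as in the diagram above. A minimal sketch of the resulting addressing; the helper names are hypothetical and not part of the patch:

static void *otx2_slot_cpt_sg(struct otx2_snd_queue *sq, int idx)
{
	/* Each ring slot is 2 * sqe_size bytes, CPT SG first */
	return sq->sqe_ring->base + (u64)idx * sq->sqe_size * 2;
}

static void *otx2_slot_nix_sqe(struct otx2_snd_queue *sq, int idx)
{
	/* The NIX SQE sits in the second half of the slot */
	return otx2_slot_cpt_sg(sq, idx) + sq->sqe_size;
}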
if (qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
TSO_HEADER_SIZE);
@ -894,8 +1000,11 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
sizeof(*sq->timestamps));
if (err)
if (err) {
kfree(sq->sg);
sq->sg = NULL;
return err;
}
}
sq->head = 0;
@ -911,7 +1020,15 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
sq->stats.bytes = 0;
sq->stats.pkts = 0;
return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
chan_offset = qidx % pfvf->hw.tx_chan_cnt;
err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, chan_offset, sqb_aura);
if (err) {
kfree(sq->sg);
sq->sg = NULL;
return err;
}
return 0;
}
@ -936,9 +1053,17 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
cq->cint_idx = qidx - pfvf->hw.rx_queues;
cq->cqe_cnt = qset->sqe_cnt;
} else {
cq->cq_type = CQ_XDP;
cq->cint_idx = qidx - non_xdp_queues;
cq->cqe_cnt = qset->sqe_cnt;
if (pfvf->hw.xdp_queues &&
qidx < non_xdp_queues + pfvf->hw.xdp_queues) {
cq->cq_type = CQ_XDP;
cq->cint_idx = qidx - non_xdp_queues;
cq->cqe_cnt = qset->sqe_cnt;
} else {
cq->cq_type = CQ_QOS;
cq->cint_idx = qidx - non_xdp_queues -
pfvf->hw.xdp_queues;
cq->cqe_cnt = qset->sqe_cnt;
}
}
cq->cqe_size = pfvf->qset.xqe_size;
@ -1049,7 +1174,7 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
}
/* Initialize TX queues */
for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
for (qidx = 0; qidx < pfvf->hw.non_qos_queues; qidx++) {
u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
err = otx2_sq_init(pfvf, qidx, sqb_aura);
@ -1087,7 +1212,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
struct nix_lf_alloc_rsp *rsp;
int err;
pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512;
pfvf->qset.xqe_size = pfvf->hw.xqe_size;
/* Get memory to put this msg */
nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
@ -1096,11 +1221,11 @@ int otx2_config_nix(struct otx2_nic *pfvf)
/* Set RQ/SQ/CQ counts */
nixlf->rq_cnt = pfvf->hw.rx_queues;
nixlf->sq_cnt = pfvf->hw.tot_tx_queues;
nixlf->sq_cnt = otx2_get_total_tx_queues(pfvf);
nixlf->cq_cnt = pfvf->qset.cq_cnt;
nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
nixlf->rss_grps = MAX_RSS_GROUPS;
nixlf->xqe_sz = NIX_XQESZ_W16;
nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64;
/* We don't know absolute NPA LF idx attached.
* AF will replace 'RVU_DEFAULT_PF_FUNC' with
* NPA LF attached to this RVU PF/VF.
@ -1134,7 +1259,7 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
int sqb, qidx;
u64 iova, pa;
for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
sq = &qset->sq[qidx];
if (!sq->sqb_ptrs)
continue;
@ -1202,8 +1327,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
pfvf->qset.pool = NULL;
}
static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
int pool_id, int numptrs)
int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
int pool_id, int numptrs)
{
struct npa_aq_enq_req *aq;
struct otx2_pool *pool;
@ -1279,8 +1404,8 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
return 0;
}
static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
int stack_pages, int numptrs, int buf_size)
int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
int stack_pages, int numptrs, int buf_size)
{
struct npa_aq_enq_req *aq;
struct otx2_pool *pool;
@ -1350,7 +1475,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
stack_pages =
(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
/* Initialize aura context */
err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
@ -1370,7 +1495,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
goto fail;
/* Allocate pointers and free them to aura/pool */
for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
pool = &pfvf->qset.pool[pool_id];
@ -1497,7 +1622,7 @@ int otx2_detach_resources(struct mbox *mbox)
detach->partial = false;
/* Send detach request to AF */
otx2_mbox_msg_send(&mbox->mbox, 0);
otx2_sync_mbox_msg(mbox);
mutex_unlock(&mbox->lock);
return 0;
}
@ -1598,19 +1723,44 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
return -ENOMEM;
req->chan_base = 0;
#ifdef CONFIG_DCB
req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1;
req->bpid_per_chan = pfvf->pfc_en ? 1 : 0;
#else
req->chan_cnt = 1;
req->bpid_per_chan = 0;
#endif
if (otx2_is_pfc_enabled(pfvf)) {
req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
req->bpid_per_chan = 1;
} else {
req->chan_cnt = 1;
req->bpid_per_chan = 0;
}
return otx2_sync_mbox_msg(&pfvf->mbox);
}
EXPORT_SYMBOL(otx2_nix_config_bp);
int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable)
{
struct nix_bp_cfg_req *req;
if (enable)
req = otx2_mbox_alloc_msg_nix_cpt_bp_enable(&pfvf->mbox);
else
req = otx2_mbox_alloc_msg_nix_cpt_bp_disable(&pfvf->mbox);
if (!req)
return -ENOMEM;
req->chan_base = 0;
if (otx2_is_pfc_enabled(pfvf)) {
req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
req->bpid_per_chan = 1;
} else {
req->chan_cnt = 1;
req->bpid_per_chan = 0;
}
return otx2_sync_mbox_msg(&pfvf->mbox);
}
EXPORT_SYMBOL(otx2_nix_cpt_config_bp);
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp)
@ -1630,21 +1780,6 @@ void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
pfvf->hw.cgx_fec_uncorr_blks += rsp->fec_uncorr_blks;
}
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
struct nix_txsch_alloc_rsp *rsp)
{
int lvl, schq;
/* Setup transmit scheduler list */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
for (schq = 0; schq < rsp->schq[lvl]; schq++)
pf->hw.txschq_list[lvl][schq] =
rsp->schq_list[lvl][schq];
pf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
}
EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
struct npa_lf_alloc_rsp *rsp)
{
@ -1659,6 +1794,8 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
pfvf->hw.sqb_size = rsp->sqb_size;
pfvf->hw.rx_chan_base = rsp->rx_chan_base;
pfvf->hw.tx_chan_base = rsp->tx_chan_base;
pfvf->hw.rx_chan_cnt = rsp->rx_chan_cnt;
pfvf->hw.tx_chan_cnt = rsp->tx_chan_cnt;
pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
pfvf->hw.cgx_links = rsp->cgx_links;
@ -1703,6 +1840,7 @@ void otx2_free_cints(struct otx2_nic *pfvf, int n)
free_irq(vector, &qset->napi[qidx]);
}
}
EXPORT_SYMBOL(otx2_free_cints);
void otx2_set_cints_affinity(struct otx2_nic *pfvf)
{
@ -1728,6 +1866,17 @@ void otx2_set_cints_affinity(struct otx2_nic *pfvf)
}
}
static u32 get_dwrr_mtu(struct otx2_nic *pfvf, struct nix_hw_info *hw)
{
if (is_otx2_lbkvf(pfvf->pdev)) {
pfvf->hw.smq_link_type = SMQ_LINK_TYPE_LBK;
return hw->lbk_dwrr_mtu;
}
pfvf->hw.smq_link_type = SMQ_LINK_TYPE_RPM;
return hw->rpm_dwrr_mtu;
}
u16 otx2_get_max_mtu(struct otx2_nic *pfvf)
{
struct nix_hw_info *rsp;
@ -1747,6 +1896,10 @@ u16 otx2_get_max_mtu(struct otx2_nic *pfvf)
if (!rc) {
rsp = (struct nix_hw_info *)
otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
if (IS_ERR(rsp)) {
rc = PTR_ERR(rsp);
goto out;
}
/* HW counts VLAN insertion bytes (8 for double tag)
* irrespective of whether SQE is requesting to insert VLAN
@ -1757,7 +1910,7 @@ u16 otx2_get_max_mtu(struct otx2_nic *pfvf)
max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN;
/* Also save DWRR MTU, needed for DWRR weight calculation */
pfvf->hw.dwrr_mtu = rsp->rpm_dwrr_mtu;
pfvf->hw.dwrr_mtu = get_dwrr_mtu(pfvf, rsp);
if (!pfvf->hw.dwrr_mtu)
pfvf->hw.dwrr_mtu = 1;
}
@ -1791,31 +1944,16 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t
}
}
if ((changed & NETIF_F_HW_TC) && tc) {
if (!pfvf->flow_cfg->max_flows) {
netdev_err(netdev,
"Can't enable TC, MCAM entries not allocated\n");
return -EINVAL;
}
}
if ((changed & NETIF_F_HW_TC) && !tc &&
pfvf->flow_cfg && pfvf->flow_cfg->nr_flows) {
otx2_tc_flower_rule_cnt(pfvf)) {
netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
return -EBUSY;
}
if ((changed & NETIF_F_NTUPLE) && ntuple &&
(netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
otx2_tc_flower_rule_cnt(pfvf) && !(changed & NETIF_F_HW_TC)) {
netdev_err(netdev,
"Can't enable NTUPLE when TC is active, disable TC and retry\n");
return -EINVAL;
}
if ((changed & NETIF_F_HW_TC) && tc &&
(netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
netdev_err(netdev,
"Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
"Can't enable NTUPLE when TC flower offload is active, disable TC rules and retry\n");
return -EINVAL;
}
@ -1834,4 +1972,50 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
} \
EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
struct sk_buff *skb, int seg, int *len)
{
enum dma_data_direction dir = DMA_TO_DEVICE;
const skb_frag_t *frag;
struct page *page;
int offset;
/* Crypto hardware needs write permission for IPsec crypto offload */
if (unlikely(xfrm_offload(skb))) {
dir = DMA_BIDIRECTIONAL;
skb = skb_unshare(skb, GFP_ATOMIC);
}
/* First segment is always skb->data */
if (!seg) {
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
*len = skb_headlen(skb);
} else {
frag = &skb_shinfo(skb)->frags[seg - 1];
page = skb_frag_page(frag);
offset = skb_frag_off(frag);
*len = skb_frag_size(frag);
}
return otx2_dma_map_page(pfvf, page, offset, *len, dir);
}
void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
{
enum dma_data_direction dir = DMA_TO_DEVICE;
struct sk_buff *skb = NULL;
int seg;
skb = (struct sk_buff *)sg->skb;
if (unlikely(xfrm_offload(skb)))
dir = DMA_BIDIRECTIONAL;
for (seg = 0; seg < sg->num_segs; seg++) {
otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
sg->size[seg], dir);
}
sg->num_segs = 0;
}

View File

@ -15,10 +15,12 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/macsec.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
#include <linux/time64.h>
#include <linux/dim.h>
#include <uapi/linux/if_macsec.h>
#include <mbox.h>
#include <npc.h>
@ -26,6 +28,9 @@
#include "otx2_txrx.h"
#include "otx2_devlink.h"
#include <rvu_trace.h>
#include "qos.h"
#include "rep.h"
#include "cn10k_ipsec.h"
/* IPv4 flag more fragment bit */
#define IPV4_FLAG_MORE 0x20
@ -36,6 +41,10 @@
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF 0xB900
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
#define PCI_DEVID_OCTEONTX2_SDP_REP 0xA0F7
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 2
@ -48,6 +57,9 @@
#define NIX_PF_PFC_PRIO_MAX 8
#endif
/* Number of segments per SG structure */
#define MAX_SEGS_PER_SG 3
enum arua_mapped_qtypes {
AURA_NIX_RQ,
AURA_NIX_SQ,
@ -116,33 +128,6 @@ enum otx2_errcodes_re {
ERRCODE_IL4_CSUM = 0x22,
};
/* NIX TX stats */
enum nix_stat_lf_tx {
TX_UCAST = 0x0,
TX_BCAST = 0x1,
TX_MCAST = 0x2,
TX_DROP = 0x3,
TX_OCTS = 0x4,
TX_STATS_ENUM_LAST,
};
/* NIX RX stats */
enum nix_stat_lf_rx {
RX_OCTS = 0x0,
RX_UCAST = 0x1,
RX_BCAST = 0x2,
RX_MCAST = 0x3,
RX_DROP = 0x4,
RX_DROP_OCTS = 0x5,
RX_FCS = 0x6,
RX_ERR = 0x7,
RX_DRP_BCAST = 0x8,
RX_DRP_MCAST = 0x9,
RX_DRP_L3BCAST = 0xa,
RX_DRP_L3MCAST = 0xb,
RX_STATS_ENUM_LAST,
};
struct otx2_dev_stats {
u64 rx_bytes;
u64 rx_frames;
@ -181,13 +166,29 @@ struct mbox {
int up_num_msgs; /* mbox_up number of messages */
};
/* Egress rate limiting definitions */
#define MAX_BURST_EXPONENT 0x0FULL
#define MAX_BURST_MANTISSA 0xFFULL
#define MAX_BURST_SIZE 130816ULL
#define MAX_RATE_DIVIDER_EXPONENT 12ULL
#define MAX_RATE_EXPONENT 0x0FULL
#define MAX_RATE_MANTISSA 0xFFULL
/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT GENMASK_ULL(12, 9)
#define TLX_RATE_DIVIDER_EXPONENT GENMASK_ULL(16, 13)
#define TLX_BURST_MANTISSA GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT GENMASK_ULL(40, 37)
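The bitfields above describe how a TLx shaper word is laid out: rate mantissa/exponent/divider in the low bits, burst mantissa/exponent higher up. A stand-alone sketch of packing those fields into a 64-bit value follows; the mantissa/exponent numbers are arbitrary placeholders, and the driver itself presumably composes the real value with FIELD_PREP against the same GENMASK_ULL ranges.

/* Stand-alone sketch: pack example shaper fields into a 64-bit
 * PIR/CIR-style value using the bit ranges defined above.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_RATE_MANTISSA_SHIFT   1   /* bits 8:1   */
#define SKETCH_RATE_EXPONENT_SHIFT   9   /* bits 12:9  */
#define SKETCH_RATE_DIV_EXP_SHIFT    13  /* bits 16:13 */
#define SKETCH_BURST_MANTISSA_SHIFT  29  /* bits 36:29 */
#define SKETCH_BURST_EXPONENT_SHIFT  37  /* bits 40:37 */

int main(void)
{
    uint64_t rate_mantissa = 0x80, rate_exp = 0x5, div_exp = 0;
    uint64_t burst_mantissa = 0xFF, burst_exp = 0xF;
    uint64_t regval = 0;

    regval |= rate_mantissa  << SKETCH_RATE_MANTISSA_SHIFT;
    regval |= rate_exp       << SKETCH_RATE_EXPONENT_SHIFT;
    regval |= div_exp        << SKETCH_RATE_DIV_EXP_SHIFT;
    regval |= burst_mantissa << SKETCH_BURST_MANTISSA_SHIFT;
    regval |= burst_exp      << SKETCH_BURST_EXPONENT_SHIFT;

    printf("shaper regval: 0x%016llx\n", (unsigned long long)regval);
    return 0;
}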
struct otx2_hw {
struct pci_dev *pdev;
struct otx2_rss_info rss_info;
u16 rx_queues;
u16 tx_queues;
u16 xdp_queues;
u16 tot_tx_queues;
u16 tc_tx_queues;
u16 non_qos_queues; /* tx queues plus xdp queues */
u16 max_queues;
u16 pool_cnt;
u16 rqpool_cnt;
@ -195,6 +196,7 @@ struct otx2_hw {
#define OTX2_DEFAULT_RBUF_LEN 2048
u16 rbuf_len;
u32 xqe_size;
/* NPA */
u32 stack_pg_ptrs; /* No of ptrs per stack page */
@ -203,13 +205,19 @@ struct otx2_hw {
/* NIX */
u8 txschq_link_cfg_lvl;
u8 txschq_cnt[NIX_TXSCH_LVL_CNT];
u8 txschq_aggr_lvl_rr_prio;
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 matchall_ipolicer;
u32 dwrr_mtu;
u32 max_mtu;
u8 smq_link_type;
/* HW settings, coalescing etc */
u16 rx_chan_base;
u16 tx_chan_base;
u8 rx_chan_cnt;
u8 tx_chan_cnt;
u16 cq_qcount_wait;
u16 cq_ecount_wait;
u16 rq_skid;
@ -246,6 +254,8 @@ struct otx2_hw {
#define CN10K_LMTST 2
#define CN10K_RPM 3
#define CN10K_PTP_ONESTEP 4
#define CN10K_HW_MACSEC 5
#define QOS_CIR_PIR_SUPPORT 6
unsigned long cap_flag;
#define LMT_LINE_SIZE 128
@ -301,6 +311,7 @@ struct otx2_ptp {
struct ptp_pin_desc extts_config;
u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
u64 (*ptp_tstamp2nsec)(const struct timecounter *time_counter, u64 timestamp);
struct delayed_work synctstamp_work;
u64 tstamp;
u32 base_ns;
@ -336,23 +347,85 @@ struct otx2_flow_config {
struct list_head flow_list;
u32 dmacflt_max_flows;
u16 max_flows;
};
struct otx2_tc_info {
/* hash table to store TC offloaded flows */
struct rhashtable flow_table;
struct rhashtable_params flow_ht_params;
unsigned long *tc_entries_bitmap;
refcount_t mark_flows;
struct list_head flow_list_tc;
bool ntuple;
};
struct dev_hw_ops {
int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
int (*sq_aq_init)(void *dev, u16 qidx, u8 chan_offset,
u16 sqb_aura);
void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
void (*aura_freeptr)(void *dev, int aura, u64 buf);
};
#define CN10K_MCS_SA_PER_SC 4
/* Stats which need to be accumulated in software because
* of shared counters in hardware.
*/
struct cn10k_txsc_stats {
u64 InPktsUntagged;
u64 InPktsNoTag;
u64 InPktsBadTag;
u64 InPktsUnknownSCI;
u64 InPktsNoSCI;
u64 InPktsOverrun;
};
struct cn10k_rxsc_stats {
u64 InOctetsValidated;
u64 InOctetsDecrypted;
u64 InPktsUnchecked;
u64 InPktsDelayed;
u64 InPktsOK;
u64 InPktsInvalid;
u64 InPktsLate;
u64 InPktsNotValid;
u64 InPktsNotUsingSA;
u64 InPktsUnusedSA;
};
struct cn10k_mcs_txsc {
struct macsec_secy *sw_secy;
struct cn10k_txsc_stats stats;
struct list_head entry;
enum macsec_validation_type last_validate_frames;
bool last_replay_protect;
u16 hw_secy_id_tx;
u16 hw_secy_id_rx;
u16 hw_flow_id;
u16 hw_sc_id;
u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
u8 sa_bmap;
u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
u8 encoding_sa;
u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
ssci_t ssci[CN10K_MCS_SA_PER_SC];
bool vlan_dev; /* macsec running on VLAN ? */
};
struct cn10k_mcs_rxsc {
struct macsec_secy *sw_secy;
struct macsec_rx_sc *sw_rxsc;
struct cn10k_rxsc_stats stats;
struct list_head entry;
u16 hw_flow_id;
u16 hw_sc_id;
u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
u8 sa_bmap;
u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
u8 salt[CN10K_MCS_SA_PER_SC][MACSEC_SALT_LEN];
ssci_t ssci[CN10K_MCS_SA_PER_SC];
};
struct cn10k_mcs_cfg {
struct list_head txsc_list;
struct list_head rxsc_list;
};
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
@ -378,6 +451,10 @@ struct otx2_nic {
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
#define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17)
#define OTX2_FLAG_REP_MODE_ENABLED BIT_ULL(18)
#define OTX2_FLAG_PORT_UP BIT_ULL(19)
#define OTX2_FLAG_IPSEC_OFFLOAD_ENABLED BIT_ULL(20)
u64 flags;
u64 *cq_op_addr;
@ -402,7 +479,6 @@ struct otx2_nic {
/* NPC MCAM */
struct otx2_flow_config *flow_cfg;
struct otx2_mac_table *mac_table;
struct otx2_tc_info tc_info;
u64 reset_count;
struct work_struct reset_task;
@ -430,21 +506,38 @@ struct otx2_nic {
/* Devlink */
struct otx2_devlink *dl;
#ifdef CONFIG_DCB
/* PFC */
u8 pfc_en;
#ifdef CONFIG_DCB
u8 *queue_to_pfc_map;
u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
#endif
/* qos */
struct otx2_qos qos;
/* napi event count. It is needed for adaptive irq coalescing. */
u32 napi_events;
#if IS_ENABLED(CONFIG_MACSEC)
struct cn10k_mcs_cfg *macsec_cfg;
#endif
#if IS_ENABLED(CONFIG_RVU_ESWITCH)
struct rep_dev **reps;
int rep_cnt;
u16 rep_pf_map[RVU_MAX_REP];
u16 esw_mode;
#endif
/* Inline ipsec */
struct cn10k_ipsec ipsec;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
return (pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF) ||
(pdev->device == PCI_DEVID_RVU_REP);
}
static inline bool is_96xx_A0(struct pci_dev *pdev)
@ -459,6 +552,11 @@ static inline bool is_96xx_B0(struct pci_dev *pdev)
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}
static inline bool is_otx2_sdp_rep(struct pci_dev *pdev)
{
return pdev->device == PCI_DEVID_OCTEONTX2_SDP_REP;
}
/* REVID for PCIe devices.
* Bits 0..1: minor pass, bit 3..2: major pass
* bits 7..4: midr id
@ -479,6 +577,20 @@ static inline bool is_dev_otx2(struct pci_dev *pdev)
midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}
static inline bool is_dev_cn10kb(struct pci_dev *pdev)
{
return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
}
static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
{
if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF &&
(pdev->revision & 0xFF) == 0x54)
return true;
return false;
}
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@ -509,7 +621,11 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
__set_bit(CN10K_LMTST, &hw->cap_flag);
__set_bit(CN10K_RPM, &hw->cap_flag);
__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
__set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag);
}
if (is_dev_cn10kb(pfvf->pdev))
__set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
}
/* Register read/write APIs */
@ -524,6 +640,9 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
case BLKTYPE_NPA:
blkaddr = BLKADDR_NPA;
break;
case BLKTYPE_CPT:
blkaddr = BLKADDR_CPT0;
break;
default:
blkaddr = BLKADDR_RVUM;
break;
@ -669,8 +788,7 @@ static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
u64 *ptr = (u64 *)otx2_get_regaddr(pfvf,
NPA_LF_AURA_OP_ALLOCX(0));
u64 *ptr = (__force u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
u64 incr = (u64)aura | BIT_ULL(63);
return otx2_atomic64_add(incr, ptr);
@ -715,7 +833,7 @@ static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
return 0;
otx2_mbox_msg_send(&mbox->mbox_up, devid);
otx2_mbox_msg_send_up(&mbox->mbox_up, devid);
err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
if (err)
return err;
@ -767,6 +885,7 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
struct _rsp_type *rsp); \
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
/* Time to wait before watchdog kicks off */
@ -811,12 +930,47 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
{
u16 smq;
int idx;
#ifdef CONFIG_DCB
if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
#endif
/* check if qidx falls under QOS queues */
if (qidx >= pfvf->hw.non_qos_queues) {
smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
} else {
idx = qidx % pfvf->hw.txschq_cnt[NIX_TXSCH_LVL_SMQ];
smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][idx];
}
return pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
return smq;
}
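otx2_get_smq_idx() above resolves a send queue to its SMQ: PFC priority queues use the per-priority list, QoS queues use their own qid_to_sqmap, and the remaining TX queues round-robin across the allocated SMQs. A stand-alone sketch of that round-robin mapping, with made-up queue counts:

/* Stand-alone sketch: round-robin mapping of TX queue index to SMQ
 * slot for the non-PFC, non-QoS case. Queue/SMQ counts are made up.
 */
#include <stdio.h>

int main(void)
{
    const int smq_cnt = 4;         /* illustrative SMQ count */
    const int non_qos_queues = 8;  /* tx + xdp queues, made up */
    int qidx;

    for (qidx = 0; qidx < non_qos_queues; qidx++)
        printf("txq %d -> SMQ slot %d\n", qidx, qidx % smq_cnt);
    return 0;
}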
static inline u16 otx2_get_total_tx_queues(struct otx2_nic *pfvf)
{
return pfvf->hw.non_qos_queues + pfvf->hw.tc_tx_queues;
}
static inline u64 otx2_convert_rate(u64 rate)
{
u64 converted_rate;
/* Convert bytes per second to Mbps */
converted_rate = rate * 8;
converted_rate = max_t(u64, converted_rate / 1000000, 1);
return converted_rate;
}
static inline int otx2_tc_flower_rule_cnt(struct otx2_nic *pfvf)
{
/* return here if MCAM entries not allocated */
if (!pfvf->flow_cfg)
return 0;
return pfvf->flow_cfg->nr_flows;
}
/* MSI-X APIs */
@ -829,6 +983,7 @@ void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);
int otx2_reset_mac_stats(struct otx2_nic *pfvf);
/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
@ -843,19 +998,41 @@ int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
void otx2_free_pending_sqe(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
dma_addr_t *dma);
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
dma_addr_t *dma);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
dma_addr_t *dma);
int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
int stack_pages, int numptrs, int buf_size);
int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
int pool_id, int numptrs);
int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf);
void otx2_free_queue_mem(struct otx2_qset *qset);
int otx2_alloc_queue_mem(struct otx2_nic *pf);
int otx2_init_hw_resources(struct otx2_nic *pfvf);
void otx2_free_hw_resources(struct otx2_nic *pf);
int otx2_wq_init(struct otx2_nic *pf);
int otx2_check_pf_usable(struct otx2_nic *pf);
int otx2_pfaf_mbox_init(struct otx2_nic *pf);
int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af);
int otx2_realloc_msix_vectors(struct otx2_nic *pf);
void otx2_pfaf_mbox_destroy(struct otx2_nic *pf);
void otx2_disable_mbox_intr(struct otx2_nic *pf);
void otx2_disable_napi(struct otx2_nic *pf);
irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
@ -929,7 +1106,8 @@ int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);
void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);
/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
@ -949,4 +1127,50 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
#endif
#if IS_ENABLED(CONFIG_MACSEC)
/* MACSEC offload support */
int cn10k_mcs_init(struct otx2_nic *pfvf);
void cn10k_mcs_free(struct otx2_nic *pfvf);
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
#else
static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
struct mcs_intr_info *event)
{}
#endif /* CONFIG_MACSEC */
/* qos support */
static inline void otx2_qos_init(struct otx2_nic *pfvf, int qos_txqs)
{
struct otx2_hw *hw = &pfvf->hw;
hw->tc_tx_queues = qos_txqs;
INIT_LIST_HEAD(&pfvf->qos.qos_tree);
mutex_init(&pfvf->qos.qos_lock);
}
static inline void otx2_shutdown_qos(struct otx2_nic *pfvf)
{
mutex_destroy(&pfvf->qos.qos_lock);
}
u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
struct net_device *sb_dev);
int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid);
void otx2_qos_config_txschq(struct otx2_nic *pfvf);
void otx2_clean_qos_queues(struct otx2_nic *pfvf);
int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info);
int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
struct flow_cls_offload *cls_flower);
static inline int mcam_entry_cmp(const void *a, const void *b)
{
return *(u16 *)a - *(u16 *)b;
}
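mcam_entry_cmp() above is a plain ascending comparator over u16 MCAM entry indices. A stand-alone analogue using qsort() (the kernel side would use sort(); the entry values below are arbitrary) just to show the ordering it produces:

/* Stand-alone analogue of the ascending u16 comparator above. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int mcam_entry_cmp_sketch(const void *a, const void *b)
{
    return *(const uint16_t *)a - *(const uint16_t *)b;
}

int main(void)
{
    uint16_t entries[] = { 0x1f0, 0x010, 0x0c8, 0x044 };
    size_t i, n = sizeof(entries) / sizeof(entries[0]);

    qsort(entries, n, sizeof(entries[0]), mcam_entry_cmp_sketch);

    for (i = 0; i < n; i++)
        printf("0x%03x\n", (unsigned)entries[i]);
    return 0;
}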
dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
struct sk_buff *skb, int seg, int *len);
void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
#endif /* OTX2_COMMON_H */

View File

@ -54,6 +54,7 @@ int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
return 0;
}
EXPORT_SYMBOL(otx2_pfc_txschq_config);
static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
{
@ -70,7 +71,7 @@ static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
* link config level. The rest of the schedulers can be the
* same as hw.txschq_list.
*/
for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++)
for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
req->schq[lvl] = 1;
rc = otx2_sync_mbox_msg(&pfvf->mbox);
@ -83,7 +84,7 @@ static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
return PTR_ERR(rsp);
/* Setup transmit scheduler list */
for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++) {
for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++) {
if (!rsp->schq[lvl])
return -ENOSPC;
@ -122,22 +123,16 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
return 0;
}
EXPORT_SYMBOL(otx2_pfc_txschq_alloc);
static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
{
struct nix_txsch_free_req *free_req;
int lvl;
mutex_lock(&pfvf->mbox.lock);
/* free PFC TLx nodes */
free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
if (!free_req) {
mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
free_req->flags = TXSCHQ_FREE_ALL;
otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
otx2_txschq_free_one(pfvf, lvl,
pfvf->pfc_schq_list[lvl][prio]);
pfvf->pfc_alloc_status[prio] = false;
return 0;
@ -267,6 +262,7 @@ update_sq_smq_map:
return 0;
}
EXPORT_SYMBOL(otx2_pfc_txschq_update);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
{
@ -289,6 +285,7 @@ int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
return 0;
}
EXPORT_SYMBOL(otx2_pfc_txschq_stop);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
{
@ -318,6 +315,11 @@ int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
rsp = (struct cgx_pfc_rsp *)
otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
if (IS_ERR(rsp)) {
err = PTR_ERR(rsp);
goto unlock;
}
if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) {
dev_warn(pfvf->dev,
"Failed to config PFC\n");
@ -328,6 +330,7 @@ unlock:
mutex_unlock(&pfvf->mbox.lock);
return err;
}
EXPORT_SYMBOL(otx2_config_priority_flow_ctrl);
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
bool pfc_enable)
@ -392,6 +395,7 @@ out:
"Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
qidx, err);
}
EXPORT_SYMBOL(otx2_update_bpid_in_rqctx);
static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
@ -406,9 +410,10 @@ static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
struct otx2_nic *pfvf = netdev_priv(dev);
u8 old_pfc_en;
int err;
/* Save PFC configuration to interface */
old_pfc_en = pfvf->pfc_en;
pfvf->pfc_en = pfc->pfc_en;
if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)
@ -418,13 +423,20 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
* supported by the tx queue configuration
*/
err = otx2_check_pfc_config(pfvf);
if (err)
if (err) {
pfvf->pfc_en = old_pfc_en;
return err;
}
process_pfc:
err = otx2_config_priority_flow_ctrl(pfvf);
if (err)
if (err) {
pfvf->pfc_en = old_pfc_en;
return err;
}
/* Default disable backpressure on NIX-CPT */
otx2_nix_cpt_config_bp(pfvf, false);
/* Request Per channel Bpids */
if (pfc->pfc_en)
@ -432,6 +444,12 @@ process_pfc:
err = otx2_pfc_txschq_update(pfvf);
if (err) {
if (pfc->pfc_en)
otx2_nix_config_bp(pfvf, false);
otx2_pfc_txschq_stop(pfvf);
pfvf->pfc_en = old_pfc_en;
otx2_config_priority_flow_ctrl(pfvf);
dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
return err;
}
@ -468,3 +486,4 @@ int otx2_dcbnl_set_ops(struct net_device *dev)
return 0;
}
EXPORT_SYMBOL(otx2_dcbnl_set_ops);

View File

@ -42,7 +42,6 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
return 0;
otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
otx2_tc_alloc_ent_bitmap(pfvf);
return 0;
}
@ -78,7 +77,56 @@ static const struct devlink_param otx2_dl_params[] = {
otx2_dl_mcam_count_validate),
};
#ifdef CONFIG_RVU_ESWITCH
static int otx2_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
struct otx2_devlink *otx2_dl = devlink_priv(devlink);
struct otx2_nic *pfvf = otx2_dl->pfvf;
if (!otx2_rep_dev(pfvf->pdev))
return -EOPNOTSUPP;
*mode = pfvf->esw_mode;
return 0;
}
static int otx2_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
struct otx2_devlink *otx2_dl = devlink_priv(devlink);
struct otx2_nic *pfvf = otx2_dl->pfvf;
int ret = 0;
if (!otx2_rep_dev(pfvf->pdev))
return -EOPNOTSUPP;
if (pfvf->esw_mode == mode)
return 0;
switch (mode) {
case DEVLINK_ESWITCH_MODE_LEGACY:
rvu_rep_destroy(pfvf);
break;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
ret = rvu_rep_create(pfvf, extack);
break;
default:
return -EINVAL;
}
if (!ret)
pfvf->esw_mode = mode;
return ret;
}
#endif
static const struct devlink_ops otx2_devlink_ops = {
#ifdef CONFIG_RVU_ESWITCH
.eswitch_mode_get = otx2_devlink_eswitch_mode_get,
.eswitch_mode_set = otx2_devlink_eswitch_mode_set,
#endif
};
int otx2_register_dl(struct otx2_nic *pfvf)
@ -114,6 +162,7 @@ err_dl:
devlink_free(dl);
return err;
}
EXPORT_SYMBOL(otx2_register_dl);
void otx2_unregister_dl(struct otx2_nic *pfvf)
{
@ -125,3 +174,4 @@ void otx2_unregister_dl(struct otx2_nic *pfvf)
ARRAY_SIZE(otx2_dl_params));
devlink_free(dl);
}
EXPORT_SYMBOL(otx2_unregister_dl);

View File

@ -28,6 +28,11 @@ static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
if (!err) {
rsp = (struct cgx_mac_addr_add_rsp *)
otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
if (IS_ERR(rsp)) {
mutex_unlock(&pf->mbox.lock);
return PTR_ERR(rsp);
}
*dmac_index = rsp->index;
}
@ -200,6 +205,10 @@ int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos)
rsp = (struct cgx_mac_addr_update_rsp *)
otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
if (IS_ERR(rsp)) {
rc = PTR_ERR(rsp);
goto out;
}
pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index;

View File

@ -85,20 +85,22 @@ static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
int start_qidx = qset * pfvf->hw.rx_queues;
int qidx, stats;
for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
for (stats = 0; stats < otx2_n_queue_stats; stats++) {
sprintf(*data, "rxq%d: %s", qidx + start_qidx,
otx2_queue_stats[stats].name);
*data += ETH_GSTRING_LEN;
}
}
for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
for (stats = 0; stats < otx2_n_queue_stats; stats++) {
sprintf(*data, "txq%d: %s", qidx + start_qidx,
otx2_queue_stats[stats].name);
*data += ETH_GSTRING_LEN;
}
}
for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++)
for (stats = 0; stats < otx2_n_queue_stats; stats++)
ethtool_sprintf(data, "rxq%d: %s", qidx + start_qidx,
otx2_queue_stats[stats].name);
for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++)
for (stats = 0; stats < otx2_n_queue_stats; stats++)
if (qidx >= pfvf->hw.non_qos_queues)
ethtool_sprintf(data, "txq_qos%d: %s",
qidx + start_qidx -
pfvf->hw.non_qos_queues,
otx2_queue_stats[stats].name);
else
ethtool_sprintf(data, "txq%d: %s",
qidx + start_qidx,
otx2_queue_stats[stats].name);
}
static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
@ -109,36 +111,25 @@ static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
if (sset != ETH_SS_STATS)
return;
for (stats = 0; stats < otx2_n_dev_stats; stats++) {
memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
for (stats = 0; stats < otx2_n_dev_stats; stats++)
ethtool_puts(&data, otx2_dev_stats[stats].name);
for (stats = 0; stats < otx2_n_drv_stats; stats++) {
memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
for (stats = 0; stats < otx2_n_drv_stats; stats++)
ethtool_puts(&data, otx2_drv_stats[stats].name);
otx2_get_qset_strings(pfvf, &data, 0);
if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
sprintf(data, "cgx_rxstat%d: ", stats);
data += ETH_GSTRING_LEN;
}
for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++)
ethtool_sprintf(&data, "cgx_rxstat%d: ", stats);
for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
sprintf(data, "cgx_txstat%d: ", stats);
data += ETH_GSTRING_LEN;
}
for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++)
ethtool_sprintf(&data, "cgx_txstat%d: ", stats);
}
strcpy(data, "reset_count");
data += ETH_GSTRING_LEN;
sprintf(data, "Fec Corrected Errors: ");
data += ETH_GSTRING_LEN;
sprintf(data, "Fec Uncorrected Errors: ");
data += ETH_GSTRING_LEN;
ethtool_puts(&data, "reset_count");
ethtool_puts(&data, "Fec Corrected Errors: ");
ethtool_puts(&data, "Fec Uncorrected Errors: ");
}
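The conversions above rely on ethtool_puts()/ethtool_sprintf() writing each stat name into a fixed ETH_GSTRING_LEN slot and advancing the buffer cursor by one slot per call. A rough stand-alone model of that behaviour; sketch_puts() is a hypothetical stand-in, not the kernel helper:

/* Rough stand-alone model of how the stats-string helpers advance the
 * buffer: one fixed-size slot per name.
 */
#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32

static void sketch_puts(char **data, const char *str)
{
    strncpy(*data, str, ETH_GSTRING_LEN - 1);
    (*data)[ETH_GSTRING_LEN - 1] = '\0';
    *data += ETH_GSTRING_LEN;
}

int main(void)
{
    char buf[3 * ETH_GSTRING_LEN] = { 0 };
    char *cursor = buf;

    sketch_puts(&cursor, "rx_bytes");
    sketch_puts(&cursor, "rx_frames");
    sketch_puts(&cursor, "reset_count");

    for (int i = 0; i < 3; i++)
        printf("slot %d: %s\n", i, buf + i * ETH_GSTRING_LEN);
    return 0;
}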
static void otx2_get_qset_stats(struct otx2_nic *pfvf,
@ -159,7 +150,7 @@ static void otx2_get_qset_stats(struct otx2_nic *pfvf,
[otx2_queue_stats[stat].index];
}
for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
if (!otx2_update_sq_stats(pfvf, qidx)) {
for (stat = 0; stat < otx2_n_queue_stats; stat++)
*((*data)++) = 0;
@ -254,7 +245,7 @@ static int otx2_get_sset_count(struct net_device *netdev, int sset)
return -EINVAL;
qstats_count = otx2_n_queue_stats *
(pfvf->hw.rx_queues + pfvf->hw.tx_queues);
(pfvf->hw.rx_queues + otx2_get_total_tx_queues(pfvf));
if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag))
mac_stats = CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT;
otx2_update_lmac_fec_stats(pfvf);
@ -282,7 +273,7 @@ static int otx2_set_channels(struct net_device *dev,
{
struct otx2_nic *pfvf = netdev_priv(dev);
bool if_up = netif_running(dev);
int err = 0;
int err, qos_txqs;
if (!channel->rx_count || !channel->tx_count)
return -EINVAL;
@ -296,14 +287,18 @@ static int otx2_set_channels(struct net_device *dev,
if (if_up)
dev->netdev_ops->ndo_stop(dev);
err = otx2_set_real_num_queues(dev, channel->tx_count,
qos_txqs = bitmap_weight(pfvf->qos.qos_sq_bmap,
OTX2_QOS_MAX_LEAF_NODES);
err = otx2_set_real_num_queues(dev, channel->tx_count + qos_txqs,
channel->rx_count);
if (err)
return err;
pfvf->hw.rx_queues = channel->rx_count;
pfvf->hw.tx_queues = channel->tx_count;
pfvf->qset.cq_cnt = pfvf->hw.tx_queues + pfvf->hw.rx_queues;
if (pfvf->xdp_prog)
pfvf->hw.xdp_queues = channel->rx_count;
if (if_up)
err = dev->netdev_ops->ndo_open(dev);
@ -323,16 +318,25 @@ static void otx2_get_pauseparam(struct net_device *netdev,
if (is_otx2_lbkvf(pfvf->pdev))
return;
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
if (!req)
if (!req) {
mutex_unlock(&pfvf->mbox.lock);
return;
}
if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
rsp = (struct cgx_pause_frm_cfg *)
otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
if (IS_ERR(rsp)) {
mutex_unlock(&pfvf->mbox.lock);
return;
}
pause->rx_pause = rsp->rx_pause;
pause->tx_pause = rsp->tx_pause;
}
mutex_unlock(&pfvf->mbox.lock);
}
static int otx2_set_pauseparam(struct net_device *netdev,
@ -372,6 +376,7 @@ static void otx2_get_ringparam(struct net_device *netdev,
ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
kernel_ring->rx_buf_len = pfvf->hw.rbuf_len;
kernel_ring->cqe_size = pfvf->hw.xqe_size;
}
static int otx2_set_ringparam(struct net_device *netdev,
@ -382,6 +387,7 @@ static int otx2_set_ringparam(struct net_device *netdev,
struct otx2_nic *pfvf = netdev_priv(netdev);
u32 rx_buf_len = kernel_ring->rx_buf_len;
u32 old_rx_buf_len = pfvf->hw.rbuf_len;
u32 xqe_size = kernel_ring->cqe_size;
bool if_up = netif_running(netdev);
struct otx2_qset *qs = &pfvf->qset;
u32 rx_count, tx_count;
@ -398,6 +404,12 @@ static int otx2_set_ringparam(struct net_device *netdev,
return -EINVAL;
}
if (xqe_size != 128 && xqe_size != 512) {
netdev_err(netdev,
"Completion event size must be 128 or 512");
return -EINVAL;
}
/* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M */
rx_count = ring->rx_pending;
/* On some silicon variants a skid or reserved CQEs are
@ -416,7 +428,7 @@ static int otx2_set_ringparam(struct net_device *netdev,
tx_count = Q_COUNT(Q_SIZE(tx_count, 3));
if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt &&
rx_buf_len == old_rx_buf_len)
rx_buf_len == old_rx_buf_len && xqe_size == pfvf->hw.xqe_size)
return 0;
if (if_up)
@ -427,6 +439,7 @@ static int otx2_set_ringparam(struct net_device *netdev,
qs->rqe_cnt = rx_count;
pfvf->hw.rbuf_len = rx_buf_len;
pfvf->hw.xqe_size = xqe_size;
if (if_up)
return netdev->netdev_ops->ndo_open(netdev);
@ -744,6 +757,7 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
struct otx2_nic *pfvf = netdev_priv(dev);
int ret = -EOPNOTSUPP;
pfvf->flow_cfg->ntuple = ntuple;
switch (nfc->cmd) {
case ETHTOOL_SRXFH:
ret = otx2_set_rss_hash_opts(pfvf, nfc);
@ -1048,6 +1062,11 @@ static int otx2_set_fecparam(struct net_device *netdev,
rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
0, &req->hdr);
if (IS_ERR(rsp)) {
err = PTR_ERR(rsp);
goto end;
}
if (rsp->fec >= 0)
pfvf->linfo.fec = rsp->fec;
else
@ -1287,7 +1306,8 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN,
.supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
ETHTOOL_RING_USE_CQE_SIZE,
.get_link = otx2_get_link,
.get_drvinfo = otx2_get_drvinfo,
.get_strings = otx2_get_strings,
@ -1340,20 +1360,15 @@ static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
if (sset != ETH_SS_STATS)
return;
for (stats = 0; stats < otx2_n_dev_stats; stats++) {
memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
for (stats = 0; stats < otx2_n_dev_stats; stats++)
ethtool_puts(&data, otx2_dev_stats[stats].name);
for (stats = 0; stats < otx2_n_drv_stats; stats++) {
memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
for (stats = 0; stats < otx2_n_drv_stats; stats++)
ethtool_puts(&data, otx2_drv_stats[stats].name);
otx2_get_qset_strings(vf, &data, 0);
strcpy(data, "reset_count");
data += ETH_GSTRING_LEN;
ethtool_puts(&data, "reset_count");
}
static void otx2vf_get_ethtool_stats(struct net_device *netdev,
@ -1384,7 +1399,7 @@ static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
return -EINVAL;
qstats_count = otx2_n_queue_stats *
(vf->hw.rx_queues + vf->hw.tx_queues);
(vf->hw.rx_queues + otx2_get_total_tx_queues(vf));
return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
}
@ -1408,7 +1423,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN,
.supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
ETHTOOL_RING_USE_CQE_SIZE,
.get_link = otx2_get_link,
.get_drvinfo = otx2vf_get_drvinfo,
.get_strings = otx2vf_get_strings,

View File

@ -66,11 +66,6 @@ static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
return 0;
}
static int mcam_entry_cmp(const void *a, const void *b)
{
return *(u16 *)a - *(u16 *)b;
}
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
@ -121,6 +116,8 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
(&pfvf->mbox.mbox, 0, &req->hdr);
if (IS_ERR(rsp))
goto exit;
for (ent = 0; ent < rsp->count; ent++)
flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
@ -199,6 +196,10 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
(&pfvf->mbox.mbox, 0, &req->hdr);
if (IS_ERR(rsp)) {
mutex_unlock(&pfvf->mbox.lock);
return PTR_ERR(rsp);
}
if (rsp->count != req->count) {
netdev_info(pfvf->netdev,
@ -234,6 +235,10 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
frsp = (struct npc_get_field_status_rsp *)otx2_mbox_get_rsp
(&pfvf->mbox.mbox, 0, &freq->hdr);
if (IS_ERR(frsp)) {
mutex_unlock(&pfvf->mbox.lock);
return PTR_ERR(frsp);
}
if (frsp->enable) {
pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
@ -252,6 +257,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
refcount_set(&flow_cfg->mark_flows, 1);
return 0;
}
@ -276,6 +282,7 @@ int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
flow_cfg = pfvf->flow_cfg;
INIT_LIST_HEAD(&flow_cfg->flow_list);
INIT_LIST_HEAD(&flow_cfg->flow_list_tc);
flow_cfg->max_flows = 0;
return 0;
@ -298,6 +305,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
return -ENOMEM;
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);
/* Allocate bare minimum number of MCAM entries needed for
* unicast and ntuple filters.
@ -872,6 +880,14 @@ static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
return -EINVAL;
vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype);
/* Drop rule with vlan_etype == 802.1Q
* and vlan_id == 0 is not supported
*/
if (vlan_etype == ETH_P_8021Q && !fsp->m_ext.vlan_tci &&
fsp->ring_cookie == RX_CLS_FLOW_DISC)
return -EINVAL;
/* Only ETH_P_8021Q and ETH_P_802AD types supported */
if (vlan_etype != ETH_P_8021Q &&
vlan_etype != ETH_P_8021AD)
@ -1078,6 +1094,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
struct ethhdr *eth_hdr;
bool new = false;
int err = 0;
u64 vf_num;
u32 ring;
if (!flow_cfg->max_flows) {
@ -1090,7 +1107,21 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
return -ENOMEM;
if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
/* The number of queues on a VF can be greater or less than
* the PF's, so the queue count is not checked when the PF is
* installing a rule for one of its VFs. Below is the expected
* vf_num value based on the ethtool command used.
*
* e.g.
* 1. ethtool -U <netdev> ... action -1 ==> vf_num:255
* 2. ethtool -U <netdev> ... action <queue_num> ==> vf_num:0
* 3. ethtool -U <netdev> ... vf <vf_idx> queue <queue_num> ==>
* vf_num:vf_idx+1
*/
vf_num = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
if (!is_otx2_vf(pfvf->pcifunc) && !vf_num &&
ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
if (fsp->location >= otx2_get_maxflows(flow_cfg))
@ -1172,6 +1203,9 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
flow_cfg->nr_flows++;
}
if (flow->is_vf)
netdev_info(pfvf->netdev,
"Make sure that VF's queue number is within its queue limit\n");
return 0;
}
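As the comment above notes, the VF number is recovered from fsp->ring_cookie. A stand-alone sketch of that decoding, assuming the standard ethtool uapi layout where the VF index occupies bits 39:32 of the cookie (cookie values below are illustrative):

/* Stand-alone sketch: decode the VF number from an ethtool ring_cookie,
 * assuming the VF index sits in bits 39:32 (value 0 means "no VF").
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_RING_VF_MASK 0x000000FF00000000ULL
#define SKETCH_RING_VF_OFF  32

static uint64_t ring_cookie_to_vf(uint64_t ring_cookie)
{
    return (ring_cookie & SKETCH_RING_VF_MASK) >> SKETCH_RING_VF_OFF;
}

int main(void)
{
    /* action -1 (drop): cookie is all ones -> vf_num 255 */
    printf("drop rule     -> vf %llu\n",
           (unsigned long long)ring_cookie_to_vf(~0ULL));
    /* action <queue> on the PF: VF bits are zero -> vf_num 0 */
    printf("queue on PF   -> vf %llu\n",
           (unsigned long long)ring_cookie_to_vf(3));
    /* vf 2 queue 5: VF field carries vf_idx + 1 -> vf_num 3 */
    printf("vf 2, queue 5 -> vf %llu\n",
           (unsigned long long)ring_cookie_to_vf(((uint64_t)3 << 32) | 5));
    return 0;
}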

File diff suppressed because it is too large

View File

@ -10,6 +10,65 @@
#include "otx2_common.h"
#include "otx2_ptp.h"
static bool is_tstmp_atomic_update_supported(struct otx2_ptp *ptp)
{
struct ptp_get_cap_rsp *rsp;
struct msg_req *req;
int err;
if (!ptp->nic)
return false;
mutex_lock(&ptp->nic->mbox.lock);
req = otx2_mbox_alloc_msg_ptp_get_cap(&ptp->nic->mbox);
if (!req) {
mutex_unlock(&ptp->nic->mbox.lock);
return false;
}
err = otx2_sync_mbox_msg(&ptp->nic->mbox);
if (err) {
mutex_unlock(&ptp->nic->mbox.lock);
return false;
}
rsp = (struct ptp_get_cap_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
&req->hdr);
mutex_unlock(&ptp->nic->mbox.lock);
if (IS_ERR(rsp))
return false;
if (rsp->cap & PTP_CAP_HW_ATOMIC_UPDATE)
return true;
return false;
}
static int otx2_ptp_hw_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
struct otx2_nic *pfvf = ptp->nic;
struct ptp_req *req;
int rc;
if (!ptp->nic)
return -ENODEV;
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
if (!req) {
mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
req->op = PTP_OP_ADJTIME;
req->delta = delta;
rc = otx2_sync_mbox_msg(&ptp->nic->mbox);
mutex_unlock(&pfvf->mbox.lock);
return rc;
}
static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp)
{
struct ptp_req *req;
@ -37,6 +96,49 @@ static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp)
return rsp->clk;
}
static int otx2_ptp_hw_gettime(struct ptp_clock_info *ptp_info,
struct timespec64 *ts)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
u64 tstamp;
tstamp = otx2_ptp_get_clock(ptp);
*ts = ns_to_timespec64(tstamp);
return 0;
}
static int otx2_ptp_hw_settime(struct ptp_clock_info *ptp_info,
const struct timespec64 *ts)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
struct otx2_nic *pfvf = ptp->nic;
struct ptp_req *req;
u64 nsec;
int rc;
if (!ptp->nic)
return -ENODEV;
nsec = timespec64_to_ns(ts);
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
if (!req) {
mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
req->op = PTP_OP_SET_CLOCK;
req->clk = nsec;
rc = otx2_sync_mbox_msg(&ptp->nic->mbox);
mutex_unlock(&pfvf->mbox.lock);
return rc;
}
static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@ -73,7 +175,7 @@ static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh)
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
static int ptp_extts_on(struct otx2_ptp *ptp, int on)
static int ptp_pps_on(struct otx2_ptp *ptp, int on, u64 period)
{
struct ptp_req *req;
@ -84,8 +186,9 @@ static int ptp_extts_on(struct otx2_ptp *ptp, int on)
if (!req)
return -ENOMEM;
req->op = PTP_OP_EXTTS_ON;
req->extts_on = on;
req->op = PTP_OP_PPS_ON;
req->pps_on = on;
req->period = period;
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
@ -124,16 +227,7 @@ static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
return rsp->clk;
}
static void otx2_get_ptpclock(struct otx2_ptp *ptp, u64 *tstamp)
{
struct otx2_nic *pfvf = ptp->nic;
mutex_lock(&pfvf->mbox.lock);
*tstamp = timecounter_read(&ptp->time_counter);
mutex_unlock(&pfvf->mbox.lock);
}
static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
static int otx2_ptp_tc_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
@ -146,32 +240,33 @@ static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
return 0;
}
static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
struct timespec64 *ts)
static int otx2_ptp_tc_gettime(struct ptp_clock_info *ptp_info,
struct timespec64 *ts)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
u64 tstamp;
otx2_get_ptpclock(ptp, &tstamp);
mutex_lock(&ptp->nic->mbox.lock);
tstamp = timecounter_read(&ptp->time_counter);
mutex_unlock(&ptp->nic->mbox.lock);
*ts = ns_to_timespec64(tstamp);
return 0;
}
static int otx2_ptp_settime(struct ptp_clock_info *ptp_info,
const struct timespec64 *ts)
static int otx2_ptp_tc_settime(struct ptp_clock_info *ptp_info,
const struct timespec64 *ts)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
struct otx2_nic *pfvf = ptp->nic;
u64 nsec;
nsec = timespec64_to_ns(ts);
mutex_lock(&pfvf->mbox.lock);
mutex_lock(&ptp->nic->mbox.lock);
timecounter_init(&ptp->time_counter, &ptp->cycle_counter, nsec);
mutex_unlock(&pfvf->mbox.lock);
mutex_unlock(&ptp->nic->mbox.lock);
return 0;
}
@ -182,14 +277,20 @@ static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
switch (func) {
case PTP_PF_NONE:
case PTP_PF_EXTTS:
break;
case PTP_PF_PEROUT:
break;
case PTP_PF_PHYSYNC:
return -1;
}
return 0;
}
static u64 otx2_ptp_hw_tstamp2time(const struct timecounter *time_counter, u64 tstamp)
{
/* On HW which supports atomic updates, timecounter is not initialized */
return tstamp;
}
static void otx2_ptp_extts_check(struct work_struct *work)
{
struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
@ -204,7 +305,7 @@ static void otx2_ptp_extts_check(struct work_struct *work)
if (tstmp != ptp->last_extts) {
event.type = PTP_CLOCK_EXTTS;
event.index = 0;
event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp);
event.timestamp = ptp->ptp_tstamp2nsec(&ptp->time_counter, tstmp);
ptp_clock_event(ptp->ptp_clock, &event);
new_thresh = tstmp % 500000000;
if (ptp->thresh != new_thresh) {
@ -229,7 +330,7 @@ static void otx2_sync_tstamp(struct work_struct *work)
tstamp = otx2_ptp_get_clock(ptp);
mutex_unlock(&pfvf->mbox.lock);
ptp->tstamp = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
ptp->tstamp = ptp->ptp_tstamp2nsec(&ptp->time_counter, tstamp);
ptp->base_ns = tstamp % NSEC_PER_SEC;
schedule_delayed_work(&ptp->synctstamp_work, msecs_to_jiffies(250));
@ -240,6 +341,7 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
u64 period = 0;
int pin;
if (!ptp->nic)
@ -251,12 +353,24 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
rq->extts.index);
if (pin < 0)
return -EBUSY;
if (on) {
ptp_extts_on(ptp, on);
if (on)
schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
} else {
ptp_extts_on(ptp, on);
else
cancel_delayed_work_sync(&ptp->extts_work);
return 0;
case PTP_CLK_REQ_PEROUT:
if (rq->perout.flags)
return -EOPNOTSUPP;
if (rq->perout.index >= ptp_info->n_pins)
return -EINVAL;
if (on) {
period = rq->perout.period.sec * NSEC_PER_SEC +
rq->perout.period.nsec;
ptp_pps_on(ptp, on, period);
} else {
ptp_pps_on(ptp, on, period);
}
return 0;
default:
@ -302,15 +416,6 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
ptp_ptr->nic = pfvf;
cc = &ptp_ptr->cycle_counter;
cc->read = ptp_cc_read;
cc->mask = CYCLECOUNTER_MASK(64);
cc->mult = 1;
cc->shift = 0;
timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
ktime_to_ns(ktime_get_real()));
snprintf(ptp_ptr->extts_config.name, sizeof(ptp_ptr->extts_config.name), "TSTAMP");
ptp_ptr->extts_config.index = 0;
ptp_ptr->extts_config.func = PTP_PF_NONE;
@ -320,17 +425,38 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
.name = "OcteonTX2 PTP",
.max_adj = 1000000000ull,
.n_ext_ts = 1,
.n_per_out = 1,
.n_pins = 1,
.pps = 0,
.pin_config = &ptp_ptr->extts_config,
.adjfine = otx2_ptp_adjfine,
.adjtime = otx2_ptp_adjtime,
.gettime64 = otx2_ptp_gettime,
.settime64 = otx2_ptp_settime,
.enable = otx2_ptp_enable,
.verify = otx2_ptp_verify_pin,
};
/* Check whether hardware supports atomic updates to timestamp */
if (is_tstmp_atomic_update_supported(ptp_ptr)) {
ptp_ptr->ptp_info.adjtime = otx2_ptp_hw_adjtime;
ptp_ptr->ptp_info.gettime64 = otx2_ptp_hw_gettime;
ptp_ptr->ptp_info.settime64 = otx2_ptp_hw_settime;
ptp_ptr->ptp_tstamp2nsec = otx2_ptp_hw_tstamp2time;
} else {
ptp_ptr->ptp_info.adjtime = otx2_ptp_tc_adjtime;
ptp_ptr->ptp_info.gettime64 = otx2_ptp_tc_gettime;
ptp_ptr->ptp_info.settime64 = otx2_ptp_tc_settime;
cc = &ptp_ptr->cycle_counter;
cc->read = ptp_cc_read;
cc->mask = CYCLECOUNTER_MASK(64);
cc->mult = 1;
cc->shift = 0;
ptp_ptr->ptp_tstamp2nsec = timecounter_cyc2time;
timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
ktime_to_ns(ktime_get_real()));
}
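On hardware without atomic timestamp updates the driver falls back to a timecounter with mult = 1 and shift = 0, i.e. the PTP counter is treated as counting nanoseconds directly and only the initial wall-clock offset matters. A stand-alone sketch of that conversion with made-up numbers:

/* Stand-alone sketch of the timecounter fallback: with mult = 1 and
 * shift = 0 the counter delta is already nanoseconds, so a timestamp
 * is just the init-time wall clock plus the counter delta.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t base_ns    = 1700000000ULL * 1000000000ULL; /* init time, made up */
    uint64_t cycle_last = 0;         /* counter value at init */
    uint64_t cycle_now  = 123456789; /* counter value read later */
    uint32_t mult = 1, shift = 0;

    uint64_t ns = base_ns + (((cycle_now - cycle_last) * mult) >> shift);

    printf("timestamp: %llu ns\n", (unsigned long long)ns);
    return 0;
}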
INIT_DELAYED_WORK(&ptp_ptr->extts_work, otx2_ptp_extts_check);
ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev);
@ -387,7 +513,7 @@ int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
if (!pfvf->ptp)
return -ENODEV;
*tsns = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
*tsns = pfvf->ptp->ptp_tstamp2nsec(&pfvf->ptp->time_counter, tstamp);
return 0;
}

View File

@ -139,20 +139,34 @@
#define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12)
/* NIX AF transmit scheduler registers */
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16)
#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
#define NIX_AF_SMQX_CFG(a) (0x700 | (u64)(a) << 16)
#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (u64)(a) << 16)
#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (u64)(a) << 16)
#define NIX_AF_TL1X_CIR(a) (0xC20 | (u64)(a) << 16)
#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (u64)(a) << 16)
#define NIX_AF_TL2X_PARENT(a) (0xE88 | (u64)(a) << 16)
#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (u64)(a) << 16)
#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (u64)(a) << 16)
#define NIX_AF_TL2X_CIR(a) (0xE20 | (u64)(a) << 16)
#define NIX_AF_TL2X_PIR(a) (0xE30 | (u64)(a) << 16)
#define NIX_AF_TL3X_PARENT(a) (0x1088 | (u64)(a) << 16)
#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (u64)(a) << 16)
#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (u64)(a) << 16)
#define NIX_AF_TL3X_CIR(a) (0x1020 | (u64)(a) << 16)
#define NIX_AF_TL3X_PIR(a) (0x1030 | (u64)(a) << 16)
#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (u64)(a) << 16)
#define NIX_AF_TL4X_PARENT(a) (0x1288 | (u64)(a) << 16)
#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (u64)(a) << 16)
#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (u64)(a) << 16)
#define NIX_AF_TL4X_CIR(a) (0x1220 | (u64)(a) << 16)
#define NIX_AF_TL4X_PIR(a) (0x1230 | (u64)(a) << 16)
#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (u64)(a) << 16)
#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (u64)(a) << 16)
#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (u64)(a) << 16)
#define NIX_AF_MDQX_CIR(a) (0x1420 | (u64)(a) << 16)
#define NIX_AF_MDQX_PIR(a) (0x1430 | (u64)(a) << 16)
#define NIX_AF_MDQX_PARENT(a) (0x1480 | (u64)(a) << 16)
#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (u64)(a) << 16 | (b) << 3)
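A note on the (u64) casts added above: without them the shift is evaluated in 32-bit int, so an index large enough to reach bit 31 or beyond is truncated before being OR-ed into the 64-bit register offset. A stand-alone demonstration, with the index chosen only to make the truncation visible:

/* Stand-alone demonstration of the truncation the casts avoid. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned int a = 0x12345; /* oversized index, purely illustrative */
    uint64_t without_cast = 0xC00 | (a) << 16;           /* 32-bit shift: high bits lost */
    uint64_t with_cast    = 0xC00 | (uint64_t)(a) << 16; /* 64-bit shift: intact */

    printf("without cast: 0x%016llx\n", (unsigned long long)without_cast);
    printf("with cast   : 0x%016llx\n", (unsigned long long)with_cast);
    return 0;
}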
/* LMT LF registers */
#define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT)

View File

@ -318,23 +318,23 @@ enum nix_snd_status_e {
NIX_SND_STATUS_EXT_ERR = 0x6,
NIX_SND_STATUS_JUMP_FAULT = 0x7,
NIX_SND_STATUS_JUMP_POISON = 0x8,
NIX_SND_STATUS_CRC_ERR = 0x9,
NIX_SND_STATUS_IMM_ERR = 0x10,
NIX_SND_STATUS_SG_ERR = 0x11,
NIX_SND_STATUS_MEM_ERR = 0x12,
NIX_SND_STATUS_INVALID_SUBDC = 0x13,
NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14,
NIX_SND_STATUS_DATA_FAULT = 0x15,
NIX_SND_STATUS_DATA_POISON = 0x16,
NIX_SND_STATUS_NPC_DROP_ACTION = 0x17,
NIX_SND_STATUS_LOCK_VIOL = 0x18,
NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19,
NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20,
NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21,
NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22,
NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23,
NIX_SND_STATUS_SEND_MEM_FAULT = 0x24,
NIX_SND_STATUS_SEND_STATS_ERR = 0x25,
NIX_SND_STATUS_CRC_ERR = 0x10,
NIX_SND_STATUS_IMM_ERR = 0x11,
NIX_SND_STATUS_SG_ERR = 0x12,
NIX_SND_STATUS_MEM_ERR = 0x13,
NIX_SND_STATUS_INVALID_SUBDC = 0x14,
NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x15,
NIX_SND_STATUS_DATA_FAULT = 0x16,
NIX_SND_STATUS_DATA_POISON = 0x17,
NIX_SND_STATUS_NPC_DROP_ACTION = 0x20,
NIX_SND_STATUS_LOCK_VIOL = 0x21,
NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x22,
NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x23,
NIX_SND_STATUS_NPC_MCAST_ABORT = 0x24,
NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x25,
NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x26,
NIX_SND_STATUS_SEND_MEM_FAULT = 0x27,
NIX_SND_STATUS_SEND_STATS_ERR = 0x28,
NIX_SND_STATUS_MAX,
};

File diff suppressed because it is too large

View File

@ -11,6 +11,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@ -26,10 +27,24 @@
*/
#define PTP_SYNC_SEC_OFFSET 34
DEFINE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
struct otx2_cq_queue *cq);
struct otx2_cq_queue *cq,
bool *need_xdp_flush);
static void otx2_sq_set_sqe_base(struct otx2_snd_queue *sq,
struct sk_buff *skb)
{
if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
(xfrm_offload(skb)))
sq->sqe_base = sq->sqe_ring->base + sq->sqe_size +
(sq->head * (sq->sqe_size * 2));
else
sq->sqe_base = sq->sqe->base;
}
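otx2_sq_set_sqe_base() above points the SQE base into a double-width ring when the skb carries an IPsec offload state: each head position owns a pair of sqe_size slots and the crypto path uses the second slot of the pair. A stand-alone sketch of that offset arithmetic (sqe_size is made up):

/* Stand-alone sketch: per-head offset of the crypto SQE in the
 * double-width ring (second sqe_size slot of each two-slot pair).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t sqe_size = 128; /* illustrative; the real size is sq->sqe_size */
    uint32_t head;

    for (head = 0; head < 4; head++)
        printf("head %u -> ipsec SQE offset %u\n",
               head, sqe_size + head * (sqe_size * 2));
    return 0;
}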
static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq)
@ -79,38 +94,6 @@ static unsigned int frag_num(unsigned int i)
#endif
}
static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
struct sk_buff *skb, int seg, int *len)
{
const skb_frag_t *frag;
struct page *page;
int offset;
/* First segment is always skb->data */
if (!seg) {
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
*len = skb_headlen(skb);
} else {
frag = &skb_shinfo(skb)->frags[seg - 1];
page = skb_frag_page(frag);
offset = skb_frag_off(frag);
*len = skb_frag_size(frag);
}
return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
}
static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
{
int seg;
for (seg = 0; seg < sg->num_segs; seg++) {
otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
sg->size[seg], DMA_TO_DEVICE);
}
sg->num_segs = 0;
}
static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_snd_queue *sq,
struct nix_cqe_tx_s *cqe)
@ -340,7 +323,7 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct napi_struct *napi,
struct otx2_cq_queue *cq,
struct nix_cqe_rx_s *cqe)
struct nix_cqe_rx_s *cqe, bool *need_xdp_flush)
{
struct nix_rx_parse_s *parse = &cqe->parse;
struct nix_rx_sg_s *sg = &cqe->sg;
@ -356,7 +339,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
}
if (pfvf->xdp_prog)
if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
return;
skb = napi_get_frags(napi);
@ -378,9 +361,14 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
}
otx2_set_rxhash(pfvf, cqe, skb);
skb_record_rx_queue(skb, cq->cq_idx);
if (pfvf->netdev->features & NETIF_F_RXCSUM)
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (!(pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)) {
skb_record_rx_queue(skb, cq->cq_idx);
if (pfvf->netdev->features & NETIF_F_RXCSUM)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
if (pfvf->flags & OTX2_FLAG_TC_MARK_ENABLED)
skb->mark = parse->match_id;
napi_gro_frags(napi);
}
@ -389,6 +377,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
struct napi_struct *napi,
struct otx2_cq_queue *cq, int budget)
{
bool need_xdp_flush = false;
struct nix_cqe_rx_s *cqe;
int processed_cqe = 0;
@ -410,13 +399,15 @@ process_cqe:
cq->cq_head++;
cq->cq_head &= (cq->cqe_cnt - 1);
otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);
otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush);
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
cqe->sg.seg_addr = 0x00;
processed_cqe++;
cq->pend_cqe--;
}
if (need_xdp_flush)
xdp_do_flush();
/* Free CQEs to HW */
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
@ -444,6 +435,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
int tx_pkts = 0, tx_bytes = 0, qidx;
struct otx2_snd_queue *sq;
struct nix_cqe_tx_s *cqe;
struct net_device *ndev;
int processed_cqe = 0;
if (cq->pend_cqe >= budget)
@ -464,12 +456,13 @@ process_cqe:
break;
}
if (cq->cq_type == CQ_XDP) {
qidx = cq->cq_idx - pfvf->hw.rx_queues;
if (cq->cq_type == CQ_XDP)
otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
} else {
otx2_snd_pkt_handler(pfvf, cq, sq, cqe, budget,
&tx_pkts, &tx_bytes);
}
else
otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx],
cqe, budget, &tx_pkts, &tx_bytes);
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
processed_cqe++;
@ -483,15 +476,28 @@ process_cqe:
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
((u64)cq->cq_idx << 32) | processed_cqe);
#if IS_ENABLED(CONFIG_RVU_ESWITCH)
if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)
ndev = pfvf->reps[qidx]->netdev;
else
#endif
ndev = pfvf->netdev;
if (likely(tx_pkts)) {
struct netdev_queue *txq;
txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
qidx = cq->cq_idx - pfvf->hw.rx_queues;
if (qidx >= pfvf->hw.tx_queues)
qidx -= pfvf->hw.xdp_queues;
if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)
qidx = 0;
txq = netdev_get_tx_queue(ndev, qidx);
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
/* Check if queue was stopped earlier due to ring full */
smp_mb();
if (netif_tx_queue_stopped(txq) &&
netif_carrier_ok(pfvf->netdev))
netif_carrier_ok(ndev))
netif_tx_wake_queue(txq);
}
return 0;
@ -499,13 +505,20 @@ process_cqe:
static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
{
struct dim_sample dim_sample;
struct dim_sample dim_sample = { 0 };
u64 rx_frames, rx_bytes;
u64 tx_frames, tx_bytes;
rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
OTX2_GET_RX_STATS(RX_UCAST);
rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample);
tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
tx_frames = OTX2_GET_TX_STATS(TX_UCAST);
dim_update_sample(pfvf->napi_events,
rx_frames + tx_frames,
rx_bytes + tx_bytes,
&dim_sample);
net_dim(&cq_poll->dim, dim_sample);
}
@ -546,16 +559,9 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
return workdone;
/* Check for adaptive interrupt coalesce */
if (workdone != 0 &&
((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
/* Adjust irq coalese using net_dim */
/* Adjust irq coalese using net_dim */
if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED)
otx2_adjust_adaptive_coalese(pfvf, cq_poll);
/* Update irq coalescing */
for (i = 0; i < pfvf->hw.cint_cnt; i++)
otx2_config_irq_coalescing(pfvf, i);
}
/* Re-enable interrupts */
otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
@ -563,6 +569,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
}
return workdone;
}
EXPORT_SYMBOL(otx2_napi_handler);
void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
int size, int qidx)
@ -581,7 +588,6 @@ void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
sq->head &= (sq->sqe_cnt - 1);
}
#define MAX_SEGS_PER_SG 3
/* Add SQE scatter/gather subdescriptor structure */
static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
struct sk_buff *skb, int num_segs, int *offset)
@ -656,7 +662,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
__be16 l3_proto = vlan_get_protocol(skb);
struct udphdr *udph = udp_hdr(skb);
u16 iplen;
__be16 iplen;
ext->lso_sb = skb_transport_offset(skb) +
sizeof(struct udphdr);
@ -734,7 +740,8 @@ static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
sqe_hdr->aura = sq->aura_id;
/* Post a CQE Tx after pkt transmission */
sqe_hdr->pnc = 1;
sqe_hdr->sq = qidx;
sqe_hdr->sq = (qidx >= pfvf->hw.tx_queues) ?
qidx + pfvf->hw.xdp_queues : qidx;
}
sqe_hdr->total = skb->len;
/* Set SQE identifier which will be used later for freeing SKB */
@ -1109,13 +1116,14 @@ static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
}
}
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx)
{
struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
struct otx2_nic *pfvf = netdev_priv(netdev);
int offset, num_segs, free_desc;
struct nix_sqe_hdr_s *sqe_hdr;
struct otx2_nic *pfvf = dev;
bool ret;
/* Check if there is enough room between producer
* and consumer index.
@ -1132,6 +1140,7 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
/* If SKB doesn't fit in a single SQE, linearize it.
* TODO: Consider adding JUMP descriptor instead.
*/
if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
if (__skb_linearize(skb)) {
dev_kfree_skb_any(skb);
@ -1142,12 +1151,18 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
/* Insert vlan tag before giving pkt to tso */
if (skb_vlan_tag_present(skb))
if (skb_vlan_tag_present(skb)) {
skb = __vlan_hwaccel_push_inside(skb);
if (!skb)
return true;
}
otx2_sq_append_tso(pfvf, sq, skb, qidx);
return true;
}
/* Set sqe base address */
otx2_sq_set_sqe_base(sq, skb);
/* Set SQE's SEND_HDR.
* Do not clear the first 64bit as it contains constant info.
*/
@ -1160,7 +1175,13 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
otx2_sqe_add_ext(pfvf, sq, skb, &offset);
/* Add SG subdesc with data frags */
if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
(xfrm_offload(skb)))
ret = otx2_sqe_add_sg_ipsec(pfvf, sq, skb, num_segs, &offset);
else
ret = otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset);
if (!ret) {
otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
return false;
}
@ -1169,11 +1190,15 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
sqe_hdr->sizem1 = (offset / 16) - 1;
if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) &&
(xfrm_offload(skb)))
return cn10k_ipsec_transmit(pfvf, txq, sq, skb, num_segs,
offset);
netdev_tx_sent_queue(txq, skb->len);
/* Flush SQE to HW */
pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
return true;
}
EXPORT_SYMBOL(otx2_sq_append_skb);
@ -1214,13 +1239,17 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
int tx_pkts = 0, tx_bytes = 0;
struct sk_buff *skb = NULL;
struct otx2_snd_queue *sq;
struct nix_cqe_tx_s *cqe;
struct netdev_queue *txq;
int processed_cqe = 0;
struct sg_list *sg;
int qidx;
sq = &pfvf->qset.sq[cq->cint_idx];
qidx = cq->cq_idx - pfvf->hw.rx_queues;
sq = &pfvf->qset.sq[qidx];
if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
return;
@ -1235,12 +1264,20 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
sg = &sq->sg[cqe->comp.sqe_id];
skb = (struct sk_buff *)sg->skb;
if (skb) {
tx_bytes += skb->len;
tx_pkts++;
otx2_dma_unmap_skb_frags(pfvf, sg);
dev_kfree_skb_any(skb);
sg->skb = (u64)NULL;
}
}
if (likely(tx_pkts)) {
if (qidx >= pfvf->hw.tx_queues)
qidx -= pfvf->hw.xdp_queues;
txq = netdev_get_tx_queue(pfvf->netdev, qidx);
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
}
/* Free CQEs to HW */
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
((u64)cq->cq_idx << 32) | processed_cqe);
@ -1267,6 +1304,38 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
return err;
}
void otx2_free_pending_sqe(struct otx2_nic *pfvf)
{
int tx_pkts = 0, tx_bytes = 0;
struct sk_buff *skb = NULL;
struct otx2_snd_queue *sq;
struct netdev_queue *txq;
struct sg_list *sg;
int sq_idx, sqe;
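/* Drop SQEs that were queued but never completed (e.g. on interface
 * teardown) and return their byte/packet credit to BQL so the txq is
 * not left throttled.
 */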
for (sq_idx = 0; sq_idx < pfvf->hw.tx_queues; sq_idx++) {
sq = &pfvf->qset.sq[sq_idx];
for (sqe = 0; sqe < sq->sqe_cnt; sqe++) {
sg = &sq->sg[sqe];
skb = (struct sk_buff *)sg->skb;
if (skb) {
tx_bytes += skb->len;
tx_pkts++;
otx2_dma_unmap_skb_frags(pfvf, sg);
dev_kfree_skb_any(skb);
sg->skb = (u64)NULL;
}
}
if (!tx_pkts)
continue;
txq = netdev_get_tx_queue(pfvf->netdev, sq_idx);
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
tx_pkts = 0;
tx_bytes = 0;
}
}
static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
int len, int *offset)
{
@ -1323,9 +1392,10 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
struct otx2_cq_queue *cq)
struct otx2_cq_queue *cq,
bool *need_xdp_flush)
{
unsigned char *hard_start, *data;
unsigned char *hard_start;
int qidx = cq->cq_idx;
struct xdp_buff xdp;
struct page *page;
@ -1339,9 +1409,8 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);
data = (unsigned char *)phys_to_virt(pa);
hard_start = page_address(page);
xdp_prepare_buff(&xdp, hard_start, data - hard_start,
hard_start = (unsigned char *)phys_to_virt(pa);
xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM,
cqe->sg.seg_size, false);
act = bpf_prog_run_xdp(prog, &xdp);
@ -1360,8 +1429,10 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
DMA_FROM_DEVICE);
if (!err)
if (!err) {
*need_xdp_flush = true;
return true;
}
put_page(page);
break;
default:

View File

@ -60,6 +60,9 @@
#define CQ_OP_STAT_OP_ERR 63
#define CQ_OP_STAT_CQ_ERR 46
/* Packet mark mask */
#define OTX2_RX_MATCH_ID_MASK 0x0000ffff
struct queue_stats {
u64 bytes;
u64 pkts;
@ -96,13 +99,17 @@ struct otx2_snd_queue {
struct queue_stats stats;
u16 sqb_count;
u64 *sqb_ptrs;
/* SQE ring and CPT response queue for Inline IPSEC */
struct qmem *sqe_ring;
struct qmem *cpt_resp;
} ____cacheline_aligned_in_smp;
enum cq_type {
CQ_RX,
CQ_TX,
CQ_XDP,
CQS_PER_CINT = 3, /* RQ + SQ + XDP */
CQ_QOS,
CQS_PER_CINT = 4, /* RQ + SQ + XDP + QOS_SQ */
};
struct otx2_cq_poll {
@ -160,7 +167,8 @@ static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr)
}
int otx2_napi_handler(struct napi_struct *napi, int budget);
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);

View File

@ -14,6 +14,7 @@
#include "otx2_reg.h"
#include "otx2_ptp.h"
#include "cn10k.h"
#include "cn10k_ipsec.h"
#define DRV_NAME "rvu_nicvf"
#define DRV_STRING "Marvell RVU NIC Virtual Function Driver"
@ -21,6 +22,7 @@
static const struct pci_device_id otx2_vf_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_SDP_REP) },
{ }
};
@ -70,10 +72,6 @@ static void otx2vf_process_vfaf_mbox_msg(struct otx2_nic *vf,
case MBOX_MSG_NIX_LF_ALLOC:
mbox_handler_nix_lf_alloc(vf, (struct nix_lf_alloc_rsp *)msg);
break;
case MBOX_MSG_NIX_TXSCH_ALLOC:
mbox_handler_nix_txsch_alloc(vf,
(struct nix_txsch_alloc_rsp *)msg);
break;
case MBOX_MSG_NIX_BP_ENABLE:
mbox_handler_nix_bp_enable(vf, (struct nix_bp_cfg_rsp *)msg);
break;
@ -93,16 +91,20 @@ static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
struct otx2_mbox *mbox;
struct mbox *af_mbox;
int offset, id;
u16 num_msgs;
af_mbox = container_of(work, struct mbox, mbox_wrk);
mbox = &af_mbox->mbox;
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
if (af_mbox->num_msgs == 0)
num_msgs = rsp_hdr->num_msgs;
if (num_msgs == 0)
return;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
for (id = 0; id < af_mbox->num_msgs; id++) {
for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
offset = mbox->rx_start + msg->next_msgoff;
@ -155,6 +157,7 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
struct mbox *vf_mbox;
struct otx2_nic *vf;
int offset, id;
u16 num_msgs;
vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
vf = vf_mbox->pfvf;
@ -162,12 +165,14 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
if (vf_mbox->up_num_msgs == 0)
num_msgs = rsp_hdr->num_msgs;
if (num_msgs == 0)
return;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
for (id = 0; id < vf_mbox->up_num_msgs; id++) {
for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2vf_process_mbox_msg_up(vf, msg);
offset = mbox->rx_start + msg->next_msgoff;
@ -182,40 +187,48 @@ static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
struct mbox_hdr *hdr;
u64 mbox_data;
/* Clear the IRQ */
otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
mbox_data = otx2_read64(vf, RVU_VF_VFPF_MBOX0);
/* Read latest mbox data */
smp_rmb();
/* Check for PF => VF response messages */
mbox = &vf->mbox.mbox;
mdev = &mbox->dev[0];
otx2_sync_mbox_bbuf(mbox, 0);
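/* RVU_VF_VFPF_MBOX0 carries two flags: MBOX_DOWN_MSG for replies to
 * VF-originated requests and MBOX_UP_MSG for PF-initiated notifications.
 * Each flag is acknowledged by clearing it and writing the register back.
 */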
if (mbox_data & MBOX_DOWN_MSG) {
mbox_data &= ~MBOX_DOWN_MSG;
otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0));
/* Check for PF => VF response messages */
mbox = &vf->mbox.mbox;
mdev = &mbox->dev[0];
otx2_sync_mbox_bbuf(mbox, 0);
hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
if (hdr->num_msgs) {
vf->mbox.num_msgs = hdr->num_msgs;
hdr->num_msgs = 0;
memset(mbox->hwbase + mbox->rx_start, 0,
ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
if (hdr->num_msgs)
queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF to VF",
BIT_ULL(0));
}
/* Check for PF => VF notification messages */
mbox = &vf->mbox.mbox_up;
mdev = &mbox->dev[0];
otx2_sync_mbox_bbuf(mbox, 0);
hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
if (hdr->num_msgs) {
vf->mbox.up_num_msgs = hdr->num_msgs;
hdr->num_msgs = 0;
memset(mbox->hwbase + mbox->rx_start, 0,
ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
if (mbox_data & MBOX_UP_MSG) {
mbox_data &= ~MBOX_UP_MSG;
otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
/* Check for PF => VF notification messages */
mbox = &vf->mbox.mbox_up;
mdev = &mbox->dev[0];
otx2_sync_mbox_bbuf(mbox, 0);
hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
if (hdr->num_msgs)
queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF to VF",
BIT_ULL(0));
}
return IRQ_HANDLED;
@ -360,7 +373,7 @@ static int otx2vf_open(struct net_device *netdev)
/* LBKs do not receive link events so tell everyone we are up here */
vf = netdev_priv(netdev);
if (is_otx2_lbkvf(vf->pdev)) {
if (is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev)) {
pr_info("%s NIC Link is UP\n", netdev->name);
netif_carrier_on(netdev);
netif_tx_start_all_queues(netdev);
@ -384,7 +397,7 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
sq = &vf->qset.sq[qidx];
txq = netdev_get_tx_queue(netdev, qidx);
if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
if (!otx2_sq_append_skb(vf, txq, sq, skb, qidx)) {
netif_tx_stop_queue(txq);
/* Check again, in case SQBs got freed up */
@ -478,6 +491,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_open = otx2vf_open,
.ndo_stop = otx2vf_stop,
.ndo_start_xmit = otx2vf_xmit,
.ndo_select_queue = otx2_select_queue,
.ndo_set_rx_mode = otx2vf_set_rx_mode,
.ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2vf_change_mtu,
@ -488,7 +502,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_setup_tc = otx2_setup_tc,
};
static int otx2_wq_init(struct otx2_nic *vf)
static int otx2_vf_wq_init(struct otx2_nic *vf)
{
vf->otx2_wq = create_singlethread_workqueue("otx2vf_wq");
if (!vf->otx2_wq)
@ -523,10 +537,10 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int num_vec = pci_msix_vec_count(pdev);
struct device *dev = &pdev->dev;
int err, qcount, qos_txqs;
struct net_device *netdev;
struct otx2_nic *vf;
struct otx2_hw *hw;
int err, qcount;
err = pcim_enable_device(pdev);
if (err) {
@ -549,7 +563,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
qcount = num_online_cpus();
netdev = alloc_etherdev_mqs(sizeof(*vf), qcount, qcount);
qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
netdev = alloc_etherdev_mqs(sizeof(*vf), qcount + qos_txqs, qcount);
if (!netdev) {
err = -ENOMEM;
goto err_release_regions;
@ -569,8 +584,10 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->rx_queues = qcount;
hw->tx_queues = qcount;
hw->max_queues = qcount;
hw->tot_tx_queues = qcount;
hw->non_qos_queues = qcount;
hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
/* Use CQE of 128 byte descriptor size by default */
hw->xqe_size = 128;
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
@ -656,6 +673,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(vf);
hw->max_mtu = netdev->max_mtu;
/* For LBK VFs, set the netdev name explicitly so they can be distinguished */
if (is_otx2_lbkvf(vf->pdev)) {
@ -667,13 +685,26 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
}
if (is_otx2_sdp_rep(vf->pdev)) {
int n;
n = vf->pcifunc & RVU_PFVF_FUNC_MASK;
n -= 1;
snprintf(netdev->name, sizeof(netdev->name), "sdp%d-%d",
pdev->bus->number, n);
}
err = cn10k_ipsec_init(netdev);
if (err)
goto err_ptp_destroy;
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
goto err_ptp_destroy;
goto err_ipsec_clean;
}
err = otx2_wq_init(vf);
err = otx2_vf_wq_init(vf);
if (err)
goto err_unreg_netdev;
@ -696,6 +727,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_shutdown_tc;
#endif
otx2_qos_init(vf, qos_txqs);
return 0;
@ -703,6 +735,8 @@ err_shutdown_tc:
otx2_shutdown_tc(vf);
err_unreg_netdev:
unregister_netdev(netdev);
err_ipsec_clean:
cn10k_ipsec_clean(vf);
err_ptp_destroy:
otx2_ptp_destroy(vf);
err_detach_rsrc:
@ -755,11 +789,13 @@ static void otx2vf_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
if (vf->otx2_wq)
destroy_workqueue(vf->otx2_wq);
cn10k_ipsec_clean(vf);
otx2_ptp_destroy(vf);
otx2_mcam_flow_del(vf);
otx2_shutdown_tc(vf);
otx2vf_disable_mbox_intr(vf);
otx2_shutdown_qos(vf);
otx2_detach_resources(&vf->mbox);
otx2vf_disable_mbox_intr(vf);
free_percpu(vf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);

File diff suppressed because it is too large

View File

@ -0,0 +1,78 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU Ethernet driver
*
* Copyright (C) 2023 Marvell.
*
*/
#ifndef OTX2_QOS_H
#define OTX2_QOS_H
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
#define OTX2_QOS_MAX_LVL 4
#define OTX2_QOS_MAX_PRIO 7
#define OTX2_QOS_MAX_LEAF_NODES 16
enum qos_smq_operations {
QOS_CFG_SQ,
QOS_SMQ_FLUSH,
};
u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic, u64 maxrate, u32 burst);
int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb);
int otx2_qos_get_qid(struct otx2_nic *pfvf);
void otx2_qos_free_qid(struct otx2_nic *pfvf, int qidx);
int otx2_qos_enable_sq(struct otx2_nic *pfvf, int qidx);
void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx);
struct otx2_qos_cfg {
u16 schq[NIX_TXSCH_LVL_CNT];
u16 schq_contig[NIX_TXSCH_LVL_CNT];
int static_node_pos[NIX_TXSCH_LVL_CNT];
int dwrr_node_pos[NIX_TXSCH_LVL_CNT];
u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
bool schq_index_used[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
};
struct otx2_qos {
DECLARE_HASHTABLE(qos_hlist, order_base_2(OTX2_QOS_MAX_LEAF_NODES));
struct mutex qos_lock; /* child list lock */
u16 qid_to_sqmap[OTX2_QOS_MAX_LEAF_NODES];
struct list_head qos_tree;
DECLARE_BITMAP(qos_sq_bmap, OTX2_QOS_MAX_LEAF_NODES);
u16 maj_id;
u16 defcls;
u8 link_cfg_lvl; /* LINKX_CFG CSRs mapped to TL3 or TL2's index? */
};
struct otx2_qos_node {
struct list_head list; /* list management */
struct list_head child_list;
struct list_head child_schq_list;
struct hlist_node hlist;
DECLARE_BITMAP(prio_bmap, OTX2_QOS_MAX_PRIO + 1);
struct otx2_qos_node *parent; /* parent qos node */
u64 rate; /* htb params */
u64 ceil;
u32 classid;
u32 prio;
u32 quantum;
/* hw txschq */
u16 schq;
u16 qid;
u16 prio_anchor;
u16 max_static_prio;
u16 child_dwrr_cnt;
u16 child_static_cnt;
u16 child_dwrr_prio;
u16 txschq_idx; /* txschq allocation index */
u8 level;
bool is_static;
};
#endif

View File

@ -0,0 +1,296 @@
// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Physical Function ethernet driver
*
* Copyright (C) 2023 Marvell.
*
*/
#include <linux/netdevice.h>
#include <net/tso.h>
#include "cn10k.h"
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_txrx.h"
#include "otx2_struct.h"
#define OTX2_QOS_MAX_LEAF_NODES 16
static void otx2_qos_aura_pool_free(struct otx2_nic *pfvf, int pool_id)
{
struct otx2_pool *pool;
if (!pfvf->qset.pool)
return;
pool = &pfvf->qset.pool[pool_id];
qmem_free(pfvf->dev, pool->stack);
qmem_free(pfvf->dev, pool->fc_addr);
pool->stack = NULL;
pool->fc_addr = NULL;
}
static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx)
{
struct otx2_qset *qset = &pfvf->qset;
int pool_id, stack_pages, num_sqbs;
struct otx2_hw *hw = &pfvf->hw;
struct otx2_snd_queue *sq;
struct otx2_pool *pool;
dma_addr_t bufptr;
int err, ptr;
u64 iova, pa;
/* Calculate number of SQBs needed.
*
* For a 128-byte SQE and a 4KB SQB, 31 SQEs fit in one SQB; the last
* SQE slot is used to point to the next SQB.
*/
num_sqbs = (hw->sqb_size / 128) - 1;
num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;
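/* Example (illustrative values): with a 4096-byte SQB and 4096 SQEs
 * requested, 4096 / 128 - 1 = 31 usable SQEs per SQB, so
 * (4096 + 31) / 31 = 133 SQBs are allocated.
 */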
/* Get no of stack pages needed */
stack_pages =
(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
pool = &pfvf->qset.pool[pool_id];
/* Initialize aura context */
err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
if (err)
return err;
/* Initialize pool context */
err = otx2_pool_init(pfvf, pool_id, stack_pages,
num_sqbs, hw->sqb_size);
if (err)
goto aura_free;
/* Flush accumulated messages */
err = otx2_sync_mbox_msg(&pfvf->mbox);
if (err)
goto pool_free;
/* Allocate pointers and free them to aura/pool */
sq = &qset->sq[qidx];
sq->sqb_count = 0;
sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
if (!sq->sqb_ptrs) {
err = -ENOMEM;
goto pool_free;
}
for (ptr = 0; ptr < num_sqbs; ptr++) {
err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
if (err)
goto sqb_free;
pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
return 0;
sqb_free:
while (ptr--) {
if (!sq->sqb_ptrs[ptr])
continue;
iova = sq->sqb_ptrs[ptr];
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
put_page(virt_to_page(phys_to_virt(pa)));
otx2_aura_allocptr(pfvf, pool_id);
}
sq->sqb_count = 0;
kfree(sq->sqb_ptrs);
pool_free:
qmem_free(pfvf->dev, pool->stack);
aura_free:
qmem_free(pfvf->dev, pool->fc_addr);
otx2_mbox_reset(&pfvf->mbox.mbox, 0);
return err;
}
static void otx2_qos_sq_free_sqbs(struct otx2_nic *pfvf, int qidx)
{
struct otx2_qset *qset = &pfvf->qset;
struct otx2_hw *hw = &pfvf->hw;
struct otx2_snd_queue *sq;
u64 iova, pa;
int sqb;
sq = &qset->sq[qidx];
if (!sq->sqb_ptrs)
return;
for (sqb = 0; sqb < sq->sqb_count; sqb++) {
if (!sq->sqb_ptrs[sqb])
continue;
iova = sq->sqb_ptrs[sqb];
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
put_page(virt_to_page(phys_to_virt(pa)));
}
sq->sqb_count = 0;
sq = &qset->sq[qidx];
qmem_free(pfvf->dev, sq->sqe);
qmem_free(pfvf->dev, sq->tso_hdrs);
kfree(sq->sg);
kfree(sq->sqb_ptrs);
qmem_free(pfvf->dev, sq->timestamps);
memset((void *)sq, 0, sizeof(*sq));
}
/* send queue id */
static void otx2_qos_sqb_flush(struct otx2_nic *pfvf, int qidx)
{
int sqe_tail, sqe_head;
u64 incr, *ptr, val;
ptr = (__force u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
incr = (u64)qidx << 32;
val = otx2_atomic64_add(incr, ptr);
sqe_head = (val >> 20) & 0x3F;
sqe_tail = (val >> 28) & 0x3F;
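/* Head and tail pointers are packed into the OP_STATUS result; if they
 * differ, SQEs are still in flight, so give the hardware a short window
 * to drain them.
 */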
if (sqe_head != sqe_tail)
usleep_range(50, 60);
}
static int otx2_qos_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id)
{
struct nix_cn10k_aq_enq_req *cn10k_sq_aq;
struct npa_aq_enq_req *aura_aq;
struct npa_aq_enq_req *pool_aq;
struct nix_aq_enq_req *sq_aq;
if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
if (!cn10k_sq_aq)
return -ENOMEM;
cn10k_sq_aq->qidx = qidx;
cn10k_sq_aq->sq.ena = 0;
cn10k_sq_aq->sq_mask.ena = 1;
cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ;
cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE;
} else {
sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
if (!sq_aq)
return -ENOMEM;
sq_aq->qidx = qidx;
sq_aq->sq.ena = 0;
sq_aq->sq_mask.ena = 1;
sq_aq->ctype = NIX_AQ_CTYPE_SQ;
sq_aq->op = NIX_AQ_INSTOP_WRITE;
}
aura_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
if (!aura_aq) {
otx2_mbox_reset(&pfvf->mbox.mbox, 0);
return -ENOMEM;
}
aura_aq->aura_id = aura_id;
aura_aq->aura.ena = 0;
aura_aq->aura_mask.ena = 1;
aura_aq->ctype = NPA_AQ_CTYPE_AURA;
aura_aq->op = NPA_AQ_INSTOP_WRITE;
pool_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
if (!pool_aq) {
otx2_mbox_reset(&pfvf->mbox.mbox, 0);
return -ENOMEM;
}
pool_aq->aura_id = aura_id;
pool_aq->pool.ena = 0;
pool_aq->pool_mask.ena = 1;
pool_aq->ctype = NPA_AQ_CTYPE_POOL;
pool_aq->op = NPA_AQ_INSTOP_WRITE;
return otx2_sync_mbox_msg(&pfvf->mbox);
}
int otx2_qos_get_qid(struct otx2_nic *pfvf)
{
int qidx;
qidx = find_first_zero_bit(pfvf->qos.qos_sq_bmap,
pfvf->hw.tc_tx_queues);
return qidx == pfvf->hw.tc_tx_queues ? -ENOSPC : qidx;
}
void otx2_qos_free_qid(struct otx2_nic *pfvf, int qidx)
{
clear_bit(qidx, pfvf->qos.qos_sq_bmap);
}
int otx2_qos_enable_sq(struct otx2_nic *pfvf, int qidx)
{
struct otx2_hw *hw = &pfvf->hw;
int pool_id, sq_idx, err;
if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
return -EPERM;
sq_idx = hw->non_qos_queues + qidx;
mutex_lock(&pfvf->mbox.lock);
err = otx2_qos_sq_aura_pool_init(pfvf, sq_idx);
if (err)
goto out;
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, sq_idx);
err = otx2_sq_init(pfvf, sq_idx, pool_id);
if (err)
goto out;
out:
mutex_unlock(&pfvf->mbox.lock);
return err;
}
void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
{
struct otx2_qset *qset = &pfvf->qset;
struct otx2_hw *hw = &pfvf->hw;
struct otx2_snd_queue *sq;
struct otx2_cq_queue *cq;
int pool_id, sq_idx;
sq_idx = hw->non_qos_queues + qidx;
/* If the DOWN flag is set, SQs are already freed */
if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
return;
sq = &pfvf->qset.sq[sq_idx];
if (!sq->sqb_ptrs)
return;
if (sq_idx < hw->non_qos_queues ||
sq_idx >= otx2_get_total_tx_queues(pfvf)) {
netdev_err(pfvf->netdev, "Send Queue is not a QoS queue\n");
return;
}
cq = &qset->cq[pfvf->hw.rx_queues + sq_idx];
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, sq_idx);
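/* Teardown order: wait for in-flight SQEs to drain, flush the SMQ,
 * reap completed Tx CQEs, disable the SQ/aura/pool contexts, then
 * free the SQBs and pool memory.
 */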
otx2_qos_sqb_flush(pfvf, sq_idx);
otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx));
otx2_cleanup_tx_cqes(pfvf, cq);
mutex_lock(&pfvf->mbox.lock);
otx2_qos_ctx_disable(pfvf, sq_idx, pool_id);
mutex_unlock(&pfvf->mbox.lock);
otx2_qos_sq_free_sqbs(pfvf, sq_idx);
otx2_qos_aura_pool_free(pfvf, pool_id);
}

View File

@ -0,0 +1,867 @@
// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU representor driver
*
* Copyright (C) 2024 Marvell.
*
*/
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/net_tstamp.h>
#include <linux/sort.h>
#include "otx2_common.h"
#include "cn10k.h"
#include "otx2_reg.h"
#include "rep.h"
#define DRV_NAME "rvu_rep"
#define DRV_STRING "Marvell RVU Representor Driver"
static const struct pci_device_id rvu_rep_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_RVU_REP) },
{ }
};
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, rvu_rep_id_table);
static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event,
struct rep_event *data);
static int rvu_rep_mcam_flow_init(struct rep_dev *rep)
{
struct npc_mcam_alloc_entry_req *req;
struct npc_mcam_alloc_entry_rsp *rsp;
struct otx2_nic *priv = rep->mdev;
int ent, allocated = 0;
int count;
rep->flow_cfg = kcalloc(1, sizeof(struct otx2_flow_config), GFP_KERNEL);
if (!rep->flow_cfg)
return -ENOMEM;
count = OTX2_DEFAULT_FLOWCOUNT;
rep->flow_cfg->flow_ent = kcalloc(count, sizeof(u16), GFP_KERNEL);
if (!rep->flow_cfg->flow_ent)
return -ENOMEM;
while (allocated < count) {
req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&priv->mbox);
if (!req)
goto exit;
req->hdr.pcifunc = rep->pcifunc;
req->contig = false;
req->ref_entry = 0;
req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
NPC_MAX_NONCONTIG_ENTRIES : count - allocated;
if (otx2_sync_mbox_msg(&priv->mbox))
goto exit;
rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
(&priv->mbox.mbox, 0, &req->hdr);
for (ent = 0; ent < rsp->count; ent++)
rep->flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
allocated += rsp->count;
if (rsp->count != req->count)
break;
}
exit:
/* Multiple MCAM entry alloc requests could result in non-sequential
* MCAM entries in the flow_ent[] array. Sort them in ascending order,
* otherwise the user-installed ntuple filter index and the MCAM entry
* index will not be in sync.
*/
if (allocated)
sort(&rep->flow_cfg->flow_ent[0], allocated,
sizeof(rep->flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);
mutex_unlock(&priv->mbox.lock);
rep->flow_cfg->max_flows = allocated;
if (allocated) {
rep->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
rep->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
rep->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
}
INIT_LIST_HEAD(&rep->flow_cfg->flow_list);
INIT_LIST_HEAD(&rep->flow_cfg->flow_list_tc);
return 0;
}
static int rvu_rep_setup_tc_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct rep_dev *rep = cb_priv;
struct otx2_nic *priv = rep->mdev;
if (!(rep->flags & RVU_REP_VF_INITIALIZED))
return -EINVAL;
if (!(rep->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
rvu_rep_mcam_flow_init(rep);
priv->netdev = rep->netdev;
priv->flags = rep->flags;
priv->pcifunc = rep->pcifunc;
priv->flow_cfg = rep->flow_cfg;
switch (type) {
case TC_SETUP_CLSFLOWER:
return otx2_setup_tc_cls_flower(priv, type_data);
default:
return -EOPNOTSUPP;
}
}
static LIST_HEAD(rvu_rep_block_cb_list);
static int rvu_rep_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
struct rvu_rep *rep = netdev_priv(netdev);
switch (type) {
case TC_SETUP_BLOCK:
return flow_block_cb_setup_simple(type_data,
&rvu_rep_block_cb_list,
rvu_rep_setup_tc_cb,
rep, rep, true);
default:
return -EOPNOTSUPP;
}
}
static int
rvu_rep_sp_stats64(const struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct rep_dev *rep = netdev_priv(dev);
struct otx2_nic *priv = rep->mdev;
struct otx2_rcv_queue *rq;
struct otx2_snd_queue *sq;
u16 qidx = rep->rep_id;
otx2_update_rq_stats(priv, qidx);
rq = &priv->qset.rq[qidx];
otx2_update_sq_stats(priv, qidx);
sq = &priv->qset.sq[qidx];
stats->tx_bytes = sq->stats.bytes;
stats->tx_packets = sq->stats.pkts;
stats->rx_bytes = rq->stats.bytes;
stats->rx_packets = rq->stats.pkts;
return 0;
}
static bool
rvu_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}
static int
rvu_rep_get_offload_stats(int attr_id, const struct net_device *dev,
void *sp)
{
if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
return rvu_rep_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);
return -EINVAL;
}
static int rvu_rep_dl_port_fn_hw_addr_get(struct devlink_port *port,
u8 *hw_addr, int *hw_addr_len,
struct netlink_ext_ack *extack)
{
struct rep_dev *rep = container_of(port, struct rep_dev, dl_port);
ether_addr_copy(hw_addr, rep->mac);
*hw_addr_len = ETH_ALEN;
return 0;
}
static int rvu_rep_dl_port_fn_hw_addr_set(struct devlink_port *port,
const u8 *hw_addr, int hw_addr_len,
struct netlink_ext_ack *extack)
{
struct rep_dev *rep = container_of(port, struct rep_dev, dl_port);
struct otx2_nic *priv = rep->mdev;
struct rep_event evt = {0};
eth_hw_addr_set(rep->netdev, hw_addr);
ether_addr_copy(rep->mac, hw_addr);
ether_addr_copy(evt.evt_data.mac, hw_addr);
evt.pcifunc = rep->pcifunc;
rvu_rep_notify_pfvf(priv, RVU_EVENT_MAC_ADDR_CHANGE, &evt);
return 0;
}
static const struct devlink_port_ops rvu_rep_dl_port_ops = {
.port_fn_hw_addr_get = rvu_rep_dl_port_fn_hw_addr_get,
.port_fn_hw_addr_set = rvu_rep_dl_port_fn_hw_addr_set,
};
static void
rvu_rep_devlink_set_switch_id(struct otx2_nic *priv,
struct netdev_phys_item_id *ppid)
{
struct pci_dev *pdev = priv->pdev;
u64 id;
id = pci_get_dsn(pdev);
ppid->id_len = sizeof(id);
put_unaligned_be64(id, &ppid->id);
}
static void rvu_rep_devlink_port_unregister(struct rep_dev *rep)
{
devlink_port_unregister(&rep->dl_port);
}
static int rvu_rep_devlink_port_register(struct rep_dev *rep)
{
struct devlink_port_attrs attrs = {};
struct otx2_nic *priv = rep->mdev;
struct devlink *dl = priv->dl->dl;
int err;
if (!(rep->pcifunc & RVU_PFVF_FUNC_MASK)) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = rvu_get_pf(rep->pcifunc);
} else {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
attrs.pci_vf.pf = rvu_get_pf(rep->pcifunc);
attrs.pci_vf.vf = rep->pcifunc & RVU_PFVF_FUNC_MASK;
}
rvu_rep_devlink_set_switch_id(priv, &attrs.switch_id);
devlink_port_attrs_set(&rep->dl_port, &attrs);
err = devl_port_register_with_ops(dl, &rep->dl_port, rep->rep_id,
&rvu_rep_dl_port_ops);
if (err) {
dev_err(rep->mdev->dev, "devlink_port_register failed: %d\n",
err);
return err;
}
return 0;
}
static int rvu_rep_get_repid(struct otx2_nic *priv, u16 pcifunc)
{
int rep_id;
for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++)
if (priv->rep_pf_map[rep_id] == pcifunc)
return rep_id;
return -EINVAL;
}
static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event,
struct rep_event *data)
{
struct rep_event *req;
mutex_lock(&priv->mbox.lock);
req = otx2_mbox_alloc_msg_rep_event_notify(&priv->mbox);
if (!req) {
mutex_unlock(&priv->mbox.lock);
return -ENOMEM;
}
req->event = event;
req->pcifunc = data->pcifunc;
memcpy(&req->evt_data, &data->evt_data, sizeof(struct rep_evt_data));
otx2_sync_mbox_msg(&priv->mbox);
mutex_unlock(&priv->mbox.lock);
return 0;
}
static void rvu_rep_state_evt_handler(struct otx2_nic *priv,
struct rep_event *info)
{
struct rep_dev *rep;
int rep_id;
rep_id = rvu_rep_get_repid(priv, info->pcifunc);
rep = priv->reps[rep_id];
if (info->evt_data.vf_state)
rep->flags |= RVU_REP_VF_INITIALIZED;
else
rep->flags &= ~RVU_REP_VF_INITIALIZED;
}
int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info)
{
if (info->event & RVU_EVENT_PFVF_STATE)
rvu_rep_state_evt_handler(pf, info);
return 0;
}
static int rvu_rep_change_mtu(struct net_device *dev, int new_mtu)
{
struct rep_dev *rep = netdev_priv(dev);
struct otx2_nic *priv = rep->mdev;
struct rep_event evt = {0};
netdev_info(dev, "Changing MTU from %d to %d\n",
dev->mtu, new_mtu);
dev->mtu = new_mtu;
evt.evt_data.mtu = new_mtu;
evt.pcifunc = rep->pcifunc;
rvu_rep_notify_pfvf(priv, RVU_EVENT_MTU_CHANGE, &evt);
return 0;
}
static void rvu_rep_get_stats(struct work_struct *work)
{
struct delayed_work *del_work = to_delayed_work(work);
struct nix_stats_req *req;
struct nix_stats_rsp *rsp;
struct rep_stats *stats;
struct otx2_nic *priv;
struct rep_dev *rep;
int err;
rep = container_of(del_work, struct rep_dev, stats_wrk);
priv = rep->mdev;
mutex_lock(&priv->mbox.lock);
req = otx2_mbox_alloc_msg_nix_lf_stats(&priv->mbox);
if (!req) {
mutex_unlock(&priv->mbox.lock);
return;
}
req->pcifunc = rep->pcifunc;
err = otx2_sync_mbox_msg_busy_poll(&priv->mbox);
if (err)
goto exit;
rsp = (struct nix_stats_rsp *)
otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr);
if (IS_ERR(rsp)) {
err = PTR_ERR(rsp);
goto exit;
}
stats = &rep->stats;
stats->rx_bytes = rsp->rx.octs;
stats->rx_frames = rsp->rx.ucast + rsp->rx.bcast +
rsp->rx.mcast;
stats->rx_drops = rsp->rx.drop;
stats->rx_mcast_frames = rsp->rx.mcast;
stats->tx_bytes = rsp->tx.octs;
stats->tx_frames = rsp->tx.ucast + rsp->tx.bcast + rsp->tx.mcast;
stats->tx_drops = rsp->tx.drop;
exit:
mutex_unlock(&priv->mbox.lock);
}
static void rvu_rep_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct rep_dev *rep = netdev_priv(dev);
if (!(rep->flags & RVU_REP_VF_INITIALIZED))
return;
stats->rx_packets = rep->stats.rx_frames;
stats->rx_bytes = rep->stats.rx_bytes;
stats->rx_dropped = rep->stats.rx_drops;
stats->multicast = rep->stats.rx_mcast_frames;
stats->tx_packets = rep->stats.tx_frames;
stats->tx_bytes = rep->stats.tx_bytes;
stats->tx_dropped = rep->stats.tx_drops;
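/* Fresh counters are fetched from the AF over the mailbox in delayed
 * work; the values reported above are the most recently cached ones.
 */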
schedule_delayed_work(&rep->stats_wrk, msecs_to_jiffies(100));
}
static int rvu_eswitch_config(struct otx2_nic *priv, u8 ena)
{
struct esw_cfg_req *req;
mutex_lock(&priv->mbox.lock);
req = otx2_mbox_alloc_msg_esw_cfg(&priv->mbox);
if (!req) {
mutex_unlock(&priv->mbox.lock);
return -ENOMEM;
}
req->ena = ena;
otx2_sync_mbox_msg(&priv->mbox);
mutex_unlock(&priv->mbox.lock);
return 0;
}
static netdev_tx_t rvu_rep_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct rep_dev *rep = netdev_priv(dev);
struct otx2_nic *pf = rep->mdev;
struct otx2_snd_queue *sq;
struct netdev_queue *txq;
sq = &pf->qset.sq[rep->rep_id];
txq = netdev_get_tx_queue(dev, 0);
if (!otx2_sq_append_skb(pf, txq, sq, skb, rep->rep_id)) {
netif_tx_stop_queue(txq);
/* Check again, in case SQBs got freed up */
smp_mb();
if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
> sq->sqe_thresh)
netif_tx_wake_queue(txq);
return NETDEV_TX_BUSY;
}
return NETDEV_TX_OK;
}
static int rvu_rep_open(struct net_device *dev)
{
struct rep_dev *rep = netdev_priv(dev);
struct otx2_nic *priv = rep->mdev;
struct rep_event evt = {0};
if (!(rep->flags & RVU_REP_VF_INITIALIZED))
return 0;
netif_carrier_on(dev);
netif_tx_start_all_queues(dev);
evt.event = RVU_EVENT_PORT_STATE;
evt.evt_data.port_state = 1;
evt.pcifunc = rep->pcifunc;
rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt);
return 0;
}
static int rvu_rep_stop(struct net_device *dev)
{
struct rep_dev *rep = netdev_priv(dev);
struct otx2_nic *priv = rep->mdev;
struct rep_event evt = {0};
if (!(rep->flags & RVU_REP_VF_INITIALIZED))
return 0;
netif_carrier_off(dev);
netif_tx_disable(dev);
evt.event = RVU_EVENT_PORT_STATE;
evt.pcifunc = rep->pcifunc;
rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt);
return 0;
}
static const struct net_device_ops rvu_rep_netdev_ops = {
.ndo_open = rvu_rep_open,
.ndo_stop = rvu_rep_stop,
.ndo_start_xmit = rvu_rep_xmit,
.ndo_get_stats64 = rvu_rep_get_stats64,
.ndo_change_mtu = rvu_rep_change_mtu,
.ndo_has_offload_stats = rvu_rep_has_offload_stats,
.ndo_get_offload_stats = rvu_rep_get_offload_stats,
.ndo_setup_tc = rvu_rep_setup_tc,
};
static int rvu_rep_napi_init(struct otx2_nic *priv,
struct netlink_ext_ack *extack)
{
struct otx2_qset *qset = &priv->qset;
struct otx2_cq_poll *cq_poll = NULL;
struct otx2_hw *hw = &priv->hw;
int err = 0, qidx, vec;
char *irq_name;
qset->napi = kcalloc(hw->cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
if (!qset->napi)
return -ENOMEM;
/* Register NAPI handler */
for (qidx = 0; qidx < hw->cint_cnt; qidx++) {
cq_poll = &qset->napi[qidx];
cq_poll->cint_idx = qidx;
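/* Each completion interrupt services one Rx CQ and one Tx CQ; the
 * representor does not use XDP or QoS CQs.
 */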
cq_poll->cq_ids[CQ_RX] =
(qidx < hw->rx_queues) ? qidx : CINT_INVALID_CQ;
cq_poll->cq_ids[CQ_TX] = (qidx < hw->tx_queues) ?
qidx + hw->rx_queues :
CINT_INVALID_CQ;
cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
cq_poll->cq_ids[CQ_QOS] = CINT_INVALID_CQ;
cq_poll->dev = (void *)priv;
netif_napi_add(priv->reps[qidx]->netdev, &cq_poll->napi,
otx2_napi_handler);
napi_enable(&cq_poll->napi);
}
/* Register CQ IRQ handlers */
vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
for (qidx = 0; qidx < hw->cint_cnt; qidx++) {
irq_name = &hw->irq_name[vec * NAME_SIZE];
snprintf(irq_name, NAME_SIZE, "rep%d-rxtx-%d", qidx, qidx);
err = request_irq(pci_irq_vector(priv->pdev, vec),
otx2_cq_intr_handler, 0, irq_name,
&qset->napi[qidx]);
if (err) {
NL_SET_ERR_MSG_FMT_MOD(extack,
"RVU REP IRQ registration failed for CQ%d",
qidx);
goto err_free_cints;
}
vec++;
/* Enable CQ IRQ */
otx2_write64(priv, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
otx2_write64(priv, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
}
priv->flags &= ~OTX2_FLAG_INTF_DOWN;
return 0;
err_free_cints:
otx2_free_cints(priv, qidx);
otx2_disable_napi(priv);
return err;
}
static void rvu_rep_free_cq_rsrc(struct otx2_nic *priv)
{
struct otx2_qset *qset = &priv->qset;
struct otx2_cq_poll *cq_poll = NULL;
int qidx, vec;
/* Cleanup CQ NAPI and IRQ */
vec = priv->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
for (qidx = 0; qidx < priv->hw.cint_cnt; qidx++) {
/* Disable interrupt */
otx2_write64(priv, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
synchronize_irq(pci_irq_vector(priv->pdev, vec));
cq_poll = &qset->napi[qidx];
napi_synchronize(&cq_poll->napi);
vec++;
}
otx2_free_cints(priv, priv->hw.cint_cnt);
otx2_disable_napi(priv);
}
static void rvu_rep_rsrc_free(struct otx2_nic *priv)
{
struct otx2_qset *qset = &priv->qset;
struct delayed_work *work;
int wrk;
for (wrk = 0; wrk < priv->qset.cq_cnt; wrk++) {
work = &priv->refill_wrk[wrk].pool_refill_work;
cancel_delayed_work_sync(work);
}
devm_kfree(priv->dev, priv->refill_wrk);
otx2_free_hw_resources(priv);
otx2_free_queue_mem(qset);
}
static int rvu_rep_rsrc_init(struct otx2_nic *priv)
{
struct otx2_qset *qset = &priv->qset;
int err;
err = otx2_alloc_queue_mem(priv);
if (err)
return err;
priv->hw.max_mtu = otx2_get_max_mtu(priv);
priv->tx_max_pktlen = priv->hw.max_mtu + OTX2_ETH_HLEN;
priv->rbsize = ALIGN(priv->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;
err = otx2_init_hw_resources(priv);
if (err)
goto err_free_rsrc;
/* Set maximum frame size allowed in HW */
err = otx2_hw_set_mtu(priv, priv->hw.max_mtu);
if (err) {
dev_err(priv->dev, "Failed to set HW MTU\n");
goto err_free_rsrc;
}
return 0;
err_free_rsrc:
otx2_free_hw_resources(priv);
otx2_free_queue_mem(qset);
return err;
}
void rvu_rep_destroy(struct otx2_nic *priv)
{
struct rep_dev *rep;
int rep_id;
rvu_eswitch_config(priv, false);
priv->flags |= OTX2_FLAG_INTF_DOWN;
rvu_rep_free_cq_rsrc(priv);
for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++) {
rep = priv->reps[rep_id];
unregister_netdev(rep->netdev);
rvu_rep_devlink_port_unregister(rep);
free_netdev(rep->netdev);
kfree(rep->flow_cfg);
}
kfree(priv->reps);
rvu_rep_rsrc_free(priv);
}
int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack)
{
int rep_cnt = priv->rep_cnt;
struct net_device *ndev;
struct rep_dev *rep;
int rep_id, err;
u16 pcifunc;
err = rvu_rep_rsrc_init(priv);
if (err)
return -ENOMEM;
priv->reps = kcalloc(rep_cnt, sizeof(struct rep_dev *), GFP_KERNEL);
if (!priv->reps)
return -ENOMEM;
for (rep_id = 0; rep_id < rep_cnt; rep_id++) {
ndev = alloc_etherdev(sizeof(*rep));
if (!ndev) {
NL_SET_ERR_MSG_FMT_MOD(extack,
"PFVF representor:%d creation failed",
rep_id);
err = -ENOMEM;
goto exit;
}
rep = netdev_priv(ndev);
priv->reps[rep_id] = rep;
rep->mdev = priv;
rep->netdev = ndev;
rep->rep_id = rep_id;
ndev->min_mtu = OTX2_MIN_MTU;
ndev->max_mtu = priv->hw.max_mtu;
ndev->netdev_ops = &rvu_rep_netdev_ops;
pcifunc = priv->rep_pf_map[rep_id];
rep->pcifunc = pcifunc;
snprintf(ndev->name, sizeof(ndev->name), "Rpf%dvf%d",
rvu_get_pf(pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK));
ndev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);
ndev->hw_features |= NETIF_F_HW_TC;
ndev->features |= ndev->hw_features;
eth_hw_addr_random(ndev);
err = rvu_rep_devlink_port_register(rep);
if (err) {
free_netdev(ndev);
goto exit;
}
SET_NETDEV_DEVLINK_PORT(ndev, &rep->dl_port);
err = register_netdev(ndev);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"PFVF representor registration failed");
rvu_rep_devlink_port_unregister(rep);
free_netdev(ndev);
goto exit;
}
INIT_DELAYED_WORK(&rep->stats_wrk, rvu_rep_get_stats);
}
err = rvu_rep_napi_init(priv, extack);
if (err)
goto exit;
rvu_eswitch_config(priv, true);
return 0;
exit:
while (--rep_id >= 0) {
rep = priv->reps[rep_id];
unregister_netdev(rep->netdev);
rvu_rep_devlink_port_unregister(rep);
free_netdev(rep->netdev);
}
kfree(priv->reps);
rvu_rep_rsrc_free(priv);
return err;
}
static int rvu_get_rep_cnt(struct otx2_nic *priv)
{
struct get_rep_cnt_rsp *rsp;
struct mbox_msghdr *msghdr;
struct msg_req *req;
int err, rep;
mutex_lock(&priv->mbox.lock);
req = otx2_mbox_alloc_msg_get_rep_cnt(&priv->mbox);
if (!req) {
mutex_unlock(&priv->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&priv->mbox);
if (err)
goto exit;
msghdr = otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr);
if (IS_ERR(msghdr)) {
err = PTR_ERR(msghdr);
goto exit;
}
rsp = (struct get_rep_cnt_rsp *)msghdr;
priv->hw.tx_queues = rsp->rep_cnt;
priv->hw.rx_queues = rsp->rep_cnt;
priv->rep_cnt = rsp->rep_cnt;
for (rep = 0; rep < priv->rep_cnt; rep++)
priv->rep_pf_map[rep] = rsp->rep_pf_map[rep];
exit:
mutex_unlock(&priv->mbox.lock);
return err;
}
static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
struct otx2_nic *priv;
struct otx2_hw *hw;
int err;
err = pcim_enable_device(pdev);
if (err) {
dev_err(dev, "Failed to enable PCI device\n");
return err;
}
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
dev_err(dev, "PCI request regions failed 0x%x\n", err);
return err;
}
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (err) {
dev_err(dev, "DMA mask config failed, abort\n");
goto err_release_regions;
}
pci_set_master(pdev);
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
err = -ENOMEM;
goto err_release_regions;
}
pci_set_drvdata(pdev, priv);
priv->pdev = pdev;
priv->dev = dev;
priv->flags |= OTX2_FLAG_INTF_DOWN;
priv->flags |= OTX2_FLAG_REP_MODE_ENABLED;
hw = &priv->hw;
hw->pdev = pdev;
hw->max_queues = OTX2_MAX_CQ_CNT;
hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
hw->xqe_size = 128;
err = otx2_init_rsrc(pdev, priv);
if (err)
goto err_release_regions;
priv->iommu_domain = iommu_get_domain_for_dev(dev);
err = rvu_get_rep_cnt(priv);
if (err)
goto err_detach_rsrc;
err = otx2_register_dl(priv);
if (err)
goto err_detach_rsrc;
return 0;
err_detach_rsrc:
if (priv->hw.lmt_info)
free_percpu(priv->hw.lmt_info);
if (test_bit(CN10K_LMTST, &priv->hw.cap_flag))
qmem_free(priv->dev, priv->dync_lmt);
otx2_detach_resources(&priv->mbox);
otx2_disable_mbox_intr(priv);
otx2_pfaf_mbox_destroy(priv);
pci_free_irq_vectors(pdev);
err_release_regions:
pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
return err;
}
static void rvu_rep_remove(struct pci_dev *pdev)
{
struct otx2_nic *priv = pci_get_drvdata(pdev);
otx2_unregister_dl(priv);
if (!(priv->flags & OTX2_FLAG_INTF_DOWN))
rvu_rep_destroy(priv);
otx2_detach_resources(&priv->mbox);
if (priv->hw.lmt_info)
free_percpu(priv->hw.lmt_info);
if (test_bit(CN10K_LMTST, &priv->hw.cap_flag))
qmem_free(priv->dev, priv->dync_lmt);
otx2_disable_mbox_intr(priv);
otx2_pfaf_mbox_destroy(priv);
pci_free_irq_vectors(priv->pdev);
pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
}
static struct pci_driver rvu_rep_driver = {
.name = DRV_NAME,
.id_table = rvu_rep_id_table,
.probe = rvu_rep_probe,
.remove = rvu_rep_remove,
.shutdown = rvu_rep_remove,
};
static int __init rvu_rep_init_module(void)
{
return pci_register_driver(&rvu_rep_driver);
}
static void __exit rvu_rep_cleanup_module(void)
{
pci_unregister_driver(&rvu_rep_driver);
}
module_init(rvu_rep_init_module);
module_exit(rvu_rep_cleanup_module);

View File

@ -0,0 +1,54 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU REPRESENTOR driver
*
* Copyright (C) 2024 Marvell.
*
*/
#ifndef REP_H
#define REP_H
#include <linux/pci.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include "otx2_common.h"
#define PCI_DEVID_RVU_REP 0xA0E0
#define RVU_MAX_REP OTX2_MAX_CQ_CNT
struct rep_stats {
u64 rx_bytes;
u64 rx_frames;
u64 rx_drops;
u64 rx_mcast_frames;
u64 tx_bytes;
u64 tx_frames;
u64 tx_drops;
};
struct rep_dev {
struct otx2_nic *mdev;
struct net_device *netdev;
struct rep_stats stats;
struct delayed_work stats_wrk;
struct devlink_port dl_port;
struct otx2_flow_config *flow_cfg;
#define RVU_REP_VF_INITIALIZED BIT_ULL(0)
u64 flags;
u16 rep_id;
u16 pcifunc;
u8 mac[ETH_ALEN];
};
static inline bool otx2_rep_dev(struct pci_dev *pdev)
{
return pdev->device == PCI_DEVID_RVU_REP;
}
int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack);
void rvu_rep_destroy(struct otx2_nic *priv);
int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info);
#endif /* REP_H */

View File

@ -484,8 +484,7 @@ static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
memcpy(data + i * ETH_GSTRING_LEN,
skge_stats[i].name, ETH_GSTRING_LEN);
ethtool_puts(&data, skge_stats[i].name);
break;
}
}

View File

@ -3800,8 +3800,7 @@ static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
memcpy(data + i * ETH_GSTRING_LEN,
sky2_stats[i].name, ETH_GSTRING_LEN);
ethtool_puts(&data, sky2_stats[i].name);
break;
}
}

View File

@ -0,0 +1 @@
CONFIG_RVU_ESWITCH=m