-rw-r--r--  Documentation/devicetree/bindings/net/macb.txt                  |    2
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml  |    2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_atu.c                         |    2
-rw-r--r--  drivers/net/dsa/rtl8366.c                                       |    7
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c                       |  391
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.h                       |   23
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c                   |   71
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.h                   |   23
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c                   |    3
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c                    |  403
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h                    |   12
-rw-r--r--  drivers/net/ethernet/cadence/macb.h                             |   11
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c                        |  134
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_cq.c                       |    8
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c                     |    8
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c                  |    6
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c                    |    2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c              |   12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c         |    2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c                  |    8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c             |    8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c           |    7
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_main.c           |    7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c      |    2
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_common.c                     |    2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.c                    |    6
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c                          |    6
-rw-r--r--  drivers/net/ipa/ipa_qmi.c                                       |    2
-rw-r--r--  drivers/net/thunderbolt.c                                       |    2
-rw-r--r--  drivers/net/usb/cdc-phonet.c                                    |    2
-rw-r--r--  drivers/net/vrf.c                                               |  110
-rw-r--r--  drivers/nfc/s3fwrn5/i2c.c                                       |    7
-rw-r--r--  drivers/ptp/idt8a340_reg.h                                      |    1
-rw-r--r--  drivers/ptp/ptp_clockmatrix.c                                   |  330
-rw-r--r--  drivers/ptp/ptp_clockmatrix.h                                   |   24
-rw-r--r--  net/rxrpc/recvmsg.c                                             |    2
-rw-r--r--  net/sched/Kconfig                                               |    6

37 files changed, 1003 insertions(+), 651 deletions(-)
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 0b61a90f1592..e08c5a9d53da 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -16,6 +16,8 @@ Required properties:
Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC.
Use "sifive,fu540-c000-gem" for SiFive FU540-C000 SoC.
+ Use "microchip,sama7g5-emac" for Microchip SAMA7G5 ethernet interface.
+ Use "microchip,sama7g5-gem" for Microchip SAMA7G5 gigabit ethernet interface.
Or the generic form: "cdns,emac".
- reg: Address and length of the register set for the device
For "sifive,fu540-c000-gem", second range is required to specify the
diff --git a/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml b/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
index ca3904bf90e0..477066e2b821 100644
--- a/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
@@ -76,7 +76,7 @@ examples:
reg = <0x27>;
interrupt-parent = <&gpa1>;
- interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <3 IRQ_TYPE_EDGE_RISING>;
en-gpios = <&gpf1 4 GPIO_ACTIVE_HIGH>;
wake-gpios = <&gpj0 2 GPIO_ACTIVE_HIGH>;
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index bac9a8a68e50..40bd67a5c8e9 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -333,7 +333,7 @@ static int mv88e6xxx_g1_atu_move(struct mv88e6xxx_chip *chip, u16 fid,
mask = chip->info->atu_move_port_mask;
shift = bitmap_weight(&mask, 16);
- entry.state = 0xf, /* Full EntryState means Move */
+ entry.state = 0xf; /* Full EntryState means Move */
entry.portvec = from_port & mask;
entry.portvec |= (to_port & mask) << shift;
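
The one-character mv88e6xxx fix above swaps a comma for a semicolon. The original line still compiled because `a = x, b = y;` is a single expression statement built with the comma operator, so behavior happened to be identical here; the hazard is that a comma-joined statement changes meaning in other contexts. A minimal standalone C sketch of the trap (illustrative only, not kernel code):

#include <stdio.h>

int main(void)
{
	int state = 0, portvec = 0, ready = 0;

	/* With the comma operator, both assignments form ONE statement,
	 * so both are guarded by the if. Swap the comma for a semicolon
	 * and "portvec = 1;" would run unconditionally.
	 */
	if (ready)
		state = 0xf,
		portvec = 1;

	printf("state=%d portvec=%d\n", state, portvec); /* prints 0 0 */
	return 0;
}

In global1_atu.c the statement stood alone, so the fix does not change behavior, but it removes exactly this trap for future edits.
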
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
index 307466b90489..83d481ef9273 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/rtl8366.c
@@ -384,7 +384,6 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
{
struct realtek_smi *smi = ds->priv;
u16 vid;
- int ret;
for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
if (!smi->ops->is_vlan_valid(smi, vid))
@@ -397,11 +396,7 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
* FIXME: what's with this 4k business?
* Just rtl8366_enable_vlan() seems inconclusive.
*/
- ret = rtl8366_enable_vlan4k(smi, true);
- if (ret)
- return ret;
-
- return 0;
+ return rtl8366_enable_vlan4k(smi, true);
}
EXPORT_SYMBOL_GPL(rtl8366_vlan_prepare);
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 5f8769aa469d..02087d443e73 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -71,7 +71,8 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
dma_addr_t addr)
{
if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
- pr_err("DMA address has more bits that the device supports\n");
+ netdev_err(ena_dev->net_device,
+ "DMA address has more bits that the device supports\n");
return -EINVAL;
}
@@ -83,6 +84,7 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
{
+ struct ena_com_dev *ena_dev = admin_queue->ena_dev;
struct ena_com_admin_sq *sq = &admin_queue->sq;
u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
@@ -90,7 +92,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
&sq->dma_addr, GFP_KERNEL);
if (!sq->entries) {
- pr_err("Memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
}
@@ -105,6 +107,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
{
+ struct ena_com_dev *ena_dev = admin_queue->ena_dev;
struct ena_com_admin_cq *cq = &admin_queue->cq;
u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
@@ -112,7 +115,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
&cq->dma_addr, GFP_KERNEL);
if (!cq->entries) {
- pr_err("Memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
}
@@ -135,7 +138,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
&aenq->dma_addr, GFP_KERNEL);
if (!aenq->entries) {
- pr_err("Memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
}
@@ -156,7 +159,8 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
if (unlikely(!aenq_handlers)) {
- pr_err("AENQ handlers pointer is NULL\n");
+ netdev_err(ena_dev->net_device,
+ "AENQ handlers pointer is NULL\n");
return -EINVAL;
}
@@ -176,18 +180,21 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queu
u16 command_id, bool capture)
{
if (unlikely(command_id >= admin_queue->q_depth)) {
- pr_err("Command id is larger than the queue size. cmd_id: %u queue size %d\n",
- command_id, admin_queue->q_depth);
+ netdev_err(admin_queue->ena_dev->net_device,
+ "Command id is larger than the queue size. cmd_id: %u queue size %d\n",
+ command_id, admin_queue->q_depth);
return NULL;
}
if (unlikely(!admin_queue->comp_ctx)) {
- pr_err("Completion context is NULL\n");
+ netdev_err(admin_queue->ena_dev->net_device,
+ "Completion context is NULL\n");
return NULL;
}
if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
- pr_err("Completion context is occupied\n");
+ netdev_err(admin_queue->ena_dev->net_device,
+ "Completion context is occupied\n");
return NULL;
}
@@ -217,7 +224,8 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
/* In case of queue FULL */
cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
- pr_debug("Admin queue is full.\n");
+ netdev_dbg(admin_queue->ena_dev->net_device,
+ "Admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(-ENOSPC);
}
@@ -259,6 +267,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
{
+ struct ena_com_dev *ena_dev = admin_queue->ena_dev;
size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
struct ena_comp_ctx *comp_ctx;
u16 i;
@@ -266,7 +275,7 @@ static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
admin_queue->comp_ctx =
devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
if (unlikely(!admin_queue->comp_ctx)) {
- pr_err("Memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
}
@@ -337,7 +346,8 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
}
if (!io_sq->desc_addr.virt_addr) {
- pr_err("Memory allocation failed\n");
+ netdev_err(ena_dev->net_device,
+ "Memory allocation failed\n");
return -ENOMEM;
}
}
@@ -363,7 +373,8 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
if (!io_sq->bounce_buf_ctrl.base_buffer) {
- pr_err("Bounce buffer memory allocation failed\n");
+ netdev_err(ena_dev->net_device,
+ "Bounce buffer memory allocation failed\n");
return -ENOMEM;
}
@@ -423,7 +434,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
}
if (!io_cq->cdesc_addr.virt_addr) {
- pr_err("Memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
}
@@ -444,7 +455,8 @@ static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *a
comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
if (unlikely(!comp_ctx)) {
- pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
+ netdev_err(admin_queue->ena_dev->net_device,
+ "comp_ctx is NULL. Changing the admin queue running state\n");
admin_queue->running_state = false;
return;
}
@@ -496,10 +508,12 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
admin_queue->stats.completed_cmd += comp_num;
}
-static int ena_com_comp_status_to_errno(u8 comp_status)
+static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
+ u8 comp_status)
{
if (unlikely(comp_status != 0))
- pr_err("Admin command failed[%u]\n", comp_status);
+ netdev_err(admin_queue->ena_dev->net_device,
+ "Admin command failed[%u]\n", comp_status);
switch (comp_status) {
case ENA_ADMIN_SUCCESS:
@@ -546,7 +560,8 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
break;
if (time_is_before_jiffies(timeout)) {
- pr_err("Wait for completion (polling) timeout\n");
+ netdev_err(admin_queue->ena_dev->net_device,
+ "Wait for completion (polling) timeout\n");
/* ENA didn't have any completion */
spin_lock_irqsave(&admin_queue->q_lock, flags);
admin_queue->stats.no_completion++;
@@ -562,7 +577,8 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
}
if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
- pr_err("Command was aborted\n");
+ netdev_err(admin_queue->ena_dev->net_device,
+ "Command was aborted\n");
spin_lock_irqsave(&admin_queue->q_lock, flags);
admin_queue->stats.aborted_cmd++;
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
@@ -573,7 +589,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
comp_ctx->status);
- ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+ ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
comp_ctxt_release(admin_queue, comp_ctx);
return ret;
@@ -615,7 +631,8 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- pr_err("Failed to set LLQ configurations: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to set LLQ configurations: %d\n", ret);
return ret;
}
@@ -637,8 +654,9 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_info->header_location_ctrl =
llq_default_cfg->llq_header_location;
} else {
- pr_err("Invalid header location control, supported: 0x%x\n",
- supported_feat);
+ netdev_err(ena_dev->net_device,
+ "Invalid header location control, supported: 0x%x\n",
+ supported_feat);
return -EINVAL;
}
@@ -652,14 +670,16 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
} else {
- pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
- supported_feat);
+ netdev_err(ena_dev->net_device,
+ "Invalid desc_stride_ctrl, supported: 0x%x\n",
+ supported_feat);
return -EINVAL;
}
- pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_stride_ctrl, supported_feat,
- llq_info->desc_stride_ctrl);
+ netdev_err(ena_dev->net_device,
+ "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+ llq_default_cfg->llq_stride_ctrl,
+ supported_feat, llq_info->desc_stride_ctrl);
}
} else {
llq_info->desc_stride_ctrl = 0;
@@ -680,20 +700,23 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
llq_info->desc_list_entry_size = 256;
} else {
- pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
- supported_feat);
+ netdev_err(ena_dev->net_device,
+ "Invalid entry_size_ctrl, supported: 0x%x\n",
+ supported_feat);
return -EINVAL;
}
- pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_ring_entry_size, supported_feat,
- llq_info->desc_list_entry_size);
+ netdev_err(ena_dev->net_device,
+ "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+ llq_default_cfg->llq_ring_entry_size, supported_feat,
+ llq_info->desc_list_entry_size);
}
if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
/* The desc list entry size should be whole multiply of 8
* This requirement comes from __iowrite64_copy()
*/
- pr_err("Illegal entry size %d\n", llq_info->desc_list_entry_size);
+ netdev_err(ena_dev->net_device, "Illegal entry size %d\n",
+ llq_info->desc_list_entry_size);
return -EINVAL;
}
@@ -716,14 +739,16 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
} else {
- pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
- supported_feat);
+ netdev_err(ena_dev->net_device,
+ "Invalid descs_num_before_header, supported: 0x%x\n",
+ supported_feat);
return -EINVAL;
}
- pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_num_decs_before_header,
- supported_feat, llq_info->descs_num_before_header);
+ netdev_err(ena_dev->net_device,
+ "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+ llq_default_cfg->llq_num_decs_before_header,
+ supported_feat, llq_info->descs_num_before_header);
}
/* Check for accelerated queue supported */
llq_accel_mode_get = llq_features->accel_mode.u.get;
@@ -739,7 +764,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
rc = ena_com_set_llq(ena_dev);
if (rc)
- pr_err("Cannot set LLQ configuration: %d\n", rc);
+ netdev_err(ena_dev->net_device,
+ "Cannot set LLQ configuration: %d\n", rc);
return rc;
}
@@ -766,15 +792,17 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
if (comp_ctx->status == ENA_CMD_COMPLETED) {
- pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
- comp_ctx->cmd_opcode,
- admin_queue->auto_polling ? "ON" : "OFF");
+ netdev_err(admin_queue->ena_dev->net_device,
+ "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
+ comp_ctx->cmd_opcode,
+ admin_queue->auto_polling ? "ON" : "OFF");
/* Check if fallback to polling is enabled */
if (admin_queue->auto_polling)
admin_queue->polling = true;
} else {
- pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
- comp_ctx->cmd_opcode, comp_ctx->status);
+ netdev_err(admin_queue->ena_dev->net_device,
+ "The ena device didn't send a completion for the admin cmd %d status %d\n",
+ comp_ctx->cmd_opcode, comp_ctx->status);
}
/* Check if shifted to polling mode.
* This will happen if there is a completion without an interrupt
@@ -787,7 +815,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
}
}
- ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
+ ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
comp_ctxt_release(admin_queue, comp_ctx);
return ret;
@@ -834,15 +862,17 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
}
if (unlikely(i == timeout)) {
- pr_err("Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
- mmio_read->seq_num, offset, read_resp->req_id,
- read_resp->reg_off);
+ netdev_err(ena_dev->net_device,
+ "Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
+ mmio_read->seq_num, offset, read_resp->req_id,
+ read_resp->reg_off);
ret = ENA_MMIO_READ_TIMEOUT;
goto err;
}
if (read_resp->reg_off != offset) {
- pr_err("Read failure: wrong offset provided\n");
+ netdev_err(ena_dev->net_device,
+ "Read failure: wrong offset provided\n");
ret = ENA_MMIO_READ_TIMEOUT;
} else {
ret = read_resp->reg_val;
@@ -901,7 +931,8 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != -ENODEV)))
- pr_err("Failed to destroy io sq error: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to destroy io sq error: %d\n", ret);
return ret;
}
@@ -951,7 +982,8 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
- pr_err("Reg read timeout occurred\n");
+ netdev_err(ena_dev->net_device,
+ "Reg read timeout occurred\n");
return -ETIME;
}
@@ -991,7 +1023,8 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
- pr_debug("Feature %d isn't supported\n", feature_id);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ feature_id);
return -EOPNOTSUPP;
}
@@ -1010,7 +1043,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
&get_cmd.control_buffer.address,
control_buf_dma_addr);
if (unlikely(ret)) {
- pr_err("Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
@@ -1027,8 +1060,9 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
sizeof(*get_resp));
if (unlikely(ret))
- pr_err("Failed to submit get_feature command %d error: %d\n",
- feature_id, ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to submit get_feature command %d error: %d\n",
+ feature_id, ret);
return ret;
}
@@ -1130,9 +1164,10 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
if ((get_resp.u.ind_table.min_size > log_size) ||
(get_resp.u.ind_table.max_size < log_size)) {
- pr_err("Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
- 1 << log_size, 1 << get_resp.u.ind_table.min_size,
- 1 << get_resp.u.ind_table.max_size);
+ netdev_err(ena_dev->net_device,
+ "Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
+ 1 << log_size, 1 << get_resp.u.ind_table.min_size,
+ 1 << get_resp.u.ind_table.max_size);
return -EINVAL;
}
@@ -1223,7 +1258,8 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
&create_cmd.sq_ba,
io_sq->desc_addr.phys_addr);
if (unlikely(ret)) {
- pr_err("Memory address set failed\n");
+ netdev_err(ena_dev->net_device,
+ "Memory address set failed\n");
return ret;
}
}
@@ -1234,7 +1270,8 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
(struct ena_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (unlikely(ret)) {
- pr_err("Failed to create IO SQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to create IO SQ. error: %d\n", ret);
return ret;
}
@@ -1252,7 +1289,8 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
cmd_completion.llq_descriptors_offset);
}
- pr_debug("Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
+ netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n",
+ io_sq->idx, io_sq->q_depth);
return ret;
}
@@ -1286,7 +1324,8 @@ static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
if (unlikely(!intr_delay_resolution)) {
- pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
+ netdev_err(ena_dev->net_device,
+ "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
}
@@ -1321,22 +1360,25 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
comp, comp_size);
if (IS_ERR(comp_ctx)) {
- if (comp_ctx == ERR_PTR(-ENODEV))
- pr_debug("Failed to submit command [%ld]\n",
- PTR_ERR(comp_ctx));
+ ret = PTR_ERR(comp_ctx);
+ if (ret == -ENODEV)
+ netdev_dbg(admin_queue->ena_dev->net_device,
+ "Failed to submit command [%d]\n", ret);
else
- pr_err("Failed to submit command [%ld]\n",
- PTR_ERR(comp_ctx));
+ netdev_err(admin_queue->ena_dev->net_device,
+ "Failed to submit command [%d]\n", ret);
- return PTR_ERR(comp_ctx);
+ return ret;
}
ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
if (unlikely(ret)) {
if (admin_queue->running_state)
- pr_err("Failed to process command. ret = %d\n", ret);
+ netdev_err(admin_queue->ena_dev->net_device,
+ "Failed to process command. ret = %d\n", ret);
else
- pr_debug("Failed to process command. ret = %d\n", ret);
+ netdev_dbg(admin_queue->ena_dev->net_device,
+ "Failed to process command. ret = %d\n", ret);
}
return ret;
}
@@ -1365,7 +1407,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
&create_cmd.cq_ba,
io_cq->cdesc_addr.phys_addr);
if (unlikely(ret)) {
- pr_err("Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
@@ -1375,7 +1417,8 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
(struct ena_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (unlikely(ret)) {
- pr_err("Failed to create IO CQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to create IO CQ. error: %d\n", ret);
return ret;
}
@@ -1394,7 +1437,8 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
cmd_completion.numa_node_register_offset);
- pr_debug("Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+ netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n",
+ io_cq->idx, io_cq->q_depth);
return ret;
}
@@ -1404,8 +1448,9 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
struct ena_com_io_cq **io_cq)
{
if (qid >= ENA_TOTAL_NUM_QUEUES) {
- pr_err("Invalid queue number %d but the max is %d\n", qid,
- ENA_TOTAL_NUM_QUEUES);
+ netdev_err(ena_dev->net_device,
+ "Invalid queue number %d but the max is %d\n", qid,
+ ENA_TOTAL_NUM_QUEUES);
return -EINVAL;
}
@@ -1471,7 +1516,8 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != -ENODEV)))
- pr_err("Failed to destroy IO CQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to destroy IO CQ. error: %d\n", ret);
return ret;
}
@@ -1513,13 +1559,14 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
if (ret) {
- pr_info("Can't get aenq configuration\n");
+ dev_info(ena_dev->dmadev, "Can't get aenq configuration\n");
return ret;
}
if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
- pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
- get_resp.u.aenq.supported_groups, groups_flag);
+ netdev_warn(ena_dev->net_device,
+ "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
+ get_resp.u.aenq.supported_groups, groups_flag);
return -EOPNOTSUPP;
}
@@ -1538,7 +1585,8 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
sizeof(resp));
if (unlikely(ret))
- pr_err("Failed to config AENQ ret: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to config AENQ ret: %d\n", ret);
return ret;
}
@@ -1546,20 +1594,21 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
- int width;
+ u32 width;
if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
- pr_err("Reg read timeout occurred\n");
+ netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
return -ETIME;
}
width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
- pr_debug("ENA dma width: %d\n", width);
+ netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
- pr_err("DMA width illegal value: %d\n", width);
+ netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n",
+ width);
return -EINVAL;
}
@@ -1583,23 +1632,24 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
(ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
- pr_err("Reg read timeout occurred\n");
+ netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
return -ETIME;
}
- pr_info("ENA device version: %d.%d\n",
- (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
- ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
- ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
+ dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
+ (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
+ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+ ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
- pr_info("ENA controller version: %d.%d.%d implementation version %d\n",
- (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
- ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
- (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
- ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
- (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
- (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
- ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
+ dev_info(ena_dev->dmadev,
+ "ENA controller version: %d.%d.%d implementation version %d\n",
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
+ ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
+ ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
+ ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
ctrl_ver_masked =
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
@@ -1608,7 +1658,8 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
/* Validate the ctrl version without the implementation ID */
if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
- pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
+ netdev_err(ena_dev->net_device,
+ "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
return -1;
}
@@ -1741,12 +1792,13 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
- pr_err("Reg read timeout occurred\n");
+ netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
return -ETIME;
}
if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
- pr_err("Device isn't ready, abort com init\n");
+ netdev_err(ena_dev->net_device,
+ "Device isn't ready, abort com init\n");
return -ENODEV;
}
@@ -1823,8 +1875,9 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
int ret;
if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
- pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
- ctx->qid, ENA_TOTAL_NUM_QUEUES);
+ netdev_err(ena_dev->net_device,
+ "Qid (%d) is bigger than max num of queues (%d)\n",
+ ctx->qid, ENA_TOTAL_NUM_QUEUES);
return -EINVAL;
}
@@ -1882,8 +1935,9 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
struct ena_com_io_cq *io_cq;
if (qid >= ENA_TOTAL_NUM_QUEUES) {
- pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
- ENA_TOTAL_NUM_QUEUES);
+ netdev_err(ena_dev->net_device,
+ "Qid (%d) is bigger than max num of queues (%d)\n",
+ qid, ENA_TOTAL_NUM_QUEUES);
return;
}
@@ -2035,8 +2089,9 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
timestamp = (u64)aenq_common->timestamp_low |
((u64)aenq_common->timestamp_high << 32);
- pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
- aenq_common->group, aenq_common->syndrome, timestamp);
+ netdev_dbg(ena_dev->net_device,
+ "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
+ aenq_common->group, aenq_common->syndrome, timestamp);
/* Handle specific event*/
handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
@@ -2079,19 +2134,20 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
(cap == ENA_MMIO_READ_TIMEOUT))) {
- pr_err("Reg read32 timeout occurred\n");
+ netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
return -ETIME;
}
if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
- pr_err("Device isn't ready, can't reset device\n");
+ netdev_err(ena_dev->net_device,
+ "Device isn't ready, can't reset device\n");
return -EINVAL;
}
timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
if (timeout == 0) {
- pr_err("Invalid timeout value\n");
+ netdev_err(ena_dev->net_device, "Invalid timeout value\n");
return -EINVAL;
}
@@ -2107,7 +2163,8 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
rc = wait_for_reset_state(ena_dev, timeout,
ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
if (rc != 0) {
- pr_err("Reset indication didn't turn on\n");
+ netdev_err(ena_dev->net_device,
+ "Reset indication didn't turn on\n");
return rc;
}
@@ -2115,7 +2172,8 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
rc = wait_for_reset_state(ena_dev, timeout, 0);
if (rc != 0) {
- pr_err("Reset indication didn't turn off\n");
+ netdev_err(ena_dev->net_device,
+ "Reset indication didn't turn off\n");
return rc;
}
@@ -2152,7 +2210,8 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
sizeof(*get_resp));
if (unlikely(ret))
- pr_err("Failed to get stats. error: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to get stats. error: %d\n", ret);
return ret;
}
@@ -2187,7 +2246,7 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
return ret;
}
-int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
{
struct ena_com_admin_queue *admin_queue;
struct ena_admin_set_feat_cmd cmd;
@@ -2195,7 +2254,8 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
- pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ ENA_ADMIN_MTU);
return -EOPNOTSUPP;
}
@@ -2214,7 +2274,8 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
sizeof(resp));
if (unlikely(ret))
- pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to set mtu %d. error: %d\n", mtu, ret);
return ret;
}
@@ -2228,7 +2289,8 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
ret = ena_com_get_feature(ena_dev, &resp,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
if (unlikely(ret)) {
- pr_err("Failed to get offload capabilities %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to get offload capabilities %d\n", ret);
return ret;
}
@@ -2248,8 +2310,8 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
if (!ena_com_check_supported_feature_id(ena_dev,
ENA_ADMIN_RSS_HASH_FUNCTION)) {
- pr_debug("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_HASH_FUNCTION);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_FUNCTION);
return -EOPNOTSUPP;
}
@@ -2260,8 +2322,9 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
return ret;
if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
- pr_err("Func hash %d isn't supported by device, abort\n",
- rss->hash_func);
+ netdev_err(ena_dev->net_device,
+ "Func hash %d isn't supported by device, abort\n",
+ rss->hash_func);
return -EOPNOTSUPP;
}
@@ -2278,7 +2341,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->hash_key_dma_addr);
if (unlikely(ret)) {
- pr_err("Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
@@ -2290,8 +2353,9 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
if (unlikely(ret)) {
- pr_err("Failed to set hash function %d. error: %d\n",
- rss->hash_func, ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to set hash function %d. error: %d\n",
+ rss->hash_func, ret);
return -EINVAL;
}
@@ -2322,7 +2386,8 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return rc;
if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
- pr_err("Flow hash function %d isn't supported\n", func);
+ netdev_err(ena_dev->net_device,
+ "Flow hash function %d isn't supported\n", func);
return -EOPNOTSUPP;
}
@@ -2330,8 +2395,9 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
case ENA_ADMIN_TOEPLITZ:
if (key) {
if (key_len != sizeof(hash_key->key)) {
- pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
- key_len, sizeof(hash_key->key));
+ netdev_err(ena_dev->net_device,
+ "key len (%hu) doesn't equal the supported size (%zu)\n",
+ key_len, sizeof(hash_key->key));
return -EINVAL;
}
memcpy(hash_key->key, key, key_len);
@@ -2343,7 +2409,8 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
rss->hash_init_val = init_val;
break;
default:
- pr_err("Invalid hash function (%d)\n", func);
+ netdev_err(ena_dev->net_device, "Invalid hash function (%d)\n",
+ func);
return -EINVAL;
}
@@ -2429,8 +2496,8 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
if (!ena_com_check_supported_feature_id(ena_dev,
ENA_ADMIN_RSS_HASH_INPUT)) {
- pr_debug("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_HASH_INPUT);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_INPUT);
return -EOPNOTSUPP;
}
@@ -2448,7 +2515,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->hash_ctrl_dma_addr);
if (unlikely(ret)) {
- pr_err("Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
cmd.control_buffer.length = sizeof(*hash_ctrl);
@@ -2459,7 +2526,8 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
if (unlikely(ret))
- pr_err("Failed to set hash input. error: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to set hash input. error: %d\n", ret);
return ret;
}
@@ -2509,9 +2577,10 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
available_fields = hash_ctrl->selected_fields[i].fields &
hash_ctrl->supported_fields[i].fields;
if (available_fields != hash_ctrl->selected_fields[i].fields) {
- pr_err("Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
- i, hash_ctrl->supported_fields[i].fields,
- hash_ctrl->selected_fields[i].fields);
+ netdev_err(ena_dev->net_device,
+ "Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
+ i, hash_ctrl->supported_fields[i].fields,
+ hash_ctrl->selected_fields[i].fields);
return -EOPNOTSUPP;
}
}
@@ -2535,7 +2604,8 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
int rc;
if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
- pr_err("Invalid proto num (%u)\n", proto);
+ netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
+ proto);
return -EINVAL;
}
@@ -2547,8 +2617,9 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
/* Make sure all the fields are supported */
supported_fields = hash_ctrl->supported_fields[proto].fields;
if ((hash_fields & supported_fields) != hash_fields) {
- pr_err("Proto %d doesn't support the required fields %x. supports only: %x\n",
- proto, hash_fields, supported_fields);
+ netdev_err(ena_dev->net_device,
+ "Proto %d doesn't support the required fields %x. supports only: %x\n",
+ proto, hash_fields, supported_fields);
}
hash_ctrl->selected_fields[proto].fields = hash_fields;
@@ -2588,14 +2659,15 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
if (!ena_com_check_supported_feature_id(
ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
- pr_debug("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
return -EOPNOTSUPP;
}
ret = ena_com_ind_tbl_convert_to_device(ena_dev);
if (ret) {
- pr_err("Failed to convert host indirection table to device table\n");
+ netdev_err(ena_dev->net_device,
+ "Failed to convert host indirection table to device table\n");
return ret;
}
@@ -2612,7 +2684,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->rss_ind_tbl_dma_addr);
if (unlikely(ret)) {
- pr_err("Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
@@ -2626,7 +2698,8 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- pr_err("Failed to set indirect table. error: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to set indirect table. error: %d\n", ret);
return ret;
}
@@ -2782,7 +2855,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
&cmd.u.host_attr.debug_ba,
host_attr->debug_area_dma_addr);
if (unlikely(ret)) {
- pr_err("Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
@@ -2790,7 +2863,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
&cmd.u.host_attr.os_info_ba,
host_attr->host_info_dma_addr);
if (unlikely(ret)) {
- pr_err("Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
@@ -2803,7 +2876,8 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- pr_err("Failed to set host attributes: %d\n", ret);
+ netdev_err(ena_dev->net_device,
+ "Failed to set host attributes: %d\n", ret);
return ret;
}
@@ -2815,12 +2889,14 @@ bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
ENA_ADMIN_INTERRUPT_MODERATION);
}
-static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
+static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
+ u32 coalesce_usecs,
u32 intr_delay_resolution,
u32 *intr_moder_interval)
{
if (!intr_delay_resolution) {
- pr_err("Illegal interrupt delay granularity value\n");
+ netdev_err(ena_dev->net_device,
+ "Illegal interrupt delay granularity value\n");
return -EFAULT;
}
@@ -2832,7 +2908,8 @@ static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
u32 tx_coalesce_usecs)
{
- return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
+ return ena_com_update_nonadaptive_moderation_interval(ena_dev,
+ tx_coalesce_usecs,
ena_dev->intr_delay_resolution,
&ena_dev->intr_moder_tx_interval);
}
@@ -2840,7 +2917,8 @@ int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_de
int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
u32 rx_coalesce_usecs)
{
- return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
+ return ena_com_update_nonadaptive_moderation_interval(ena_dev,
+ rx_coalesce_usecs,
ena_dev->intr_delay_resolution,
&ena_dev->intr_moder_rx_interval);
}
@@ -2856,12 +2934,14 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
if (rc) {
if (rc == -EOPNOTSUPP) {
- pr_debug("Feature %d isn't supported\n",
- ENA_ADMIN_INTERRUPT_MODERATION);
+ netdev_dbg(ena_dev->net_device,
+ "Feature %d isn't supported\n",
+ ENA_ADMIN_INTERRUPT_MODERATION);
rc = 0;
} else {
- pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
- rc);
+ netdev_err(ena_dev->net_device,
+ "Failed to get interrupt moderation admin cmd. rc: %d\n",
+ rc);
}
/* no moderation supported, disable adaptive support */
@@ -2909,7 +2989,8 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
if (unlikely(ena_dev->tx_max_header_size == 0)) {
- pr_err("The size of the LLQ entry is smaller than needed\n");
+ netdev_err(ena_dev->net_device,
+ "The size of the LLQ entry is smaller than needed\n");
return -EINVAL;
}
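
The bulk of the ena_com.c churn above converts bare pr_err()/pr_debug()/pr_info() calls to their netdev_*() counterparts, which prefix every message with the interface name. That requires a struct net_device pointer to be reachable from each logging site, which is why a net_device member is added to struct ena_com_dev (see the header change below) and why helpers such as ena_com_comp_status_to_errno() and ena_com_update_nonadaptive_moderation_interval() grow an extra queue/device argument. A minimal sketch of the pattern; the names my_dev, my_queue and my_queue_check_id are hypothetical, not the ENA API:

#include <linux/netdevice.h>

struct my_dev {
	struct net_device *net_device;	/* stashed once at setup time */
};

struct my_queue {
	struct my_dev *dev;		/* back-pointer so helpers can log */
};

static int my_queue_check_id(struct my_queue *q, u16 id, u16 depth)
{
	if (unlikely(id >= depth)) {
		/* netdev_err() prefixes the message with the interface
		 * name ("eth0: ..."), unlike the bare pr_err() it replaces.
		 */
		netdev_err(q->dev->net_device,
			   "id %u exceeds queue depth %u\n", id, depth);
		return -EINVAL;
	}
	return 0;
}

Note the deliberate exceptions in the diff: the version prints and the AENQ-configuration message go through dev_info() on the underlying dmadev instead, presumably because those paths can run before the netdev is usable for logging.
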
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 55097750d062..343caf41e709 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -303,6 +303,7 @@ struct ena_com_dev {
u8 __iomem *reg_bar;
void __iomem *mem_bar;
void *dmadev;
+ struct net_device *net_device;
enum ena_admin_placement_policy_type tx_mem_queue_type;
u32 tx_max_header_size;
@@ -604,7 +605,7 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
*
* @return: 0 on Success and negative value otherwise.
*/
-int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu);
+int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu);
/* ena_com_get_offload_settings - Retrieve the device offloads capabilities
* @ena_dev: ENA communication layer struct
@@ -931,6 +932,26 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
struct ena_admin_feature_llq_desc *llq_features,
struct ena_llq_configurations *llq_default_config);
+/* ena_com_io_sq_to_ena_dev - Extract ena_com_dev using contained field io_sq.
+ * @io_sq: IO submit queue struct
+ *
+ * @return - ena_com_dev struct extracted from io_sq
+ */
+static inline struct ena_com_dev *ena_com_io_sq_to_ena_dev(struct ena_com_io_sq *io_sq)
+{
+ return container_of(io_sq, struct ena_com_dev, io_sq_queues[io_sq->qid]);
+}
+
+/* ena_com_io_cq_to_ena_dev - Extract ena_com_dev using contained field io_cq.
+ * @io_sq: IO submit queue struct
+ *
+ * @return - ena_com_dev struct extracted from io_sq
+ */
+static inline struct ena_com_dev *ena_com_io_cq_to_ena_dev(struct ena_com_io_cq *io_cq)
+{
+ return container_of(io_cq, struct ena_com_dev, io_cq_queues[io_cq->qid]);
+}
+
static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
return ena_dev->adaptive_coalescing;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 032ab9f20438..c3be751e7379 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -58,13 +58,15 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
if (is_llq_max_tx_burst_exists(io_sq)) {
if (unlikely(!io_sq->entries_in_tx_burst_left)) {
- pr_err("Error: trying to send more packets than tx burst allows\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Error: trying to send more packets than tx burst allows\n");
return -ENOSPC;
}
io_sq->entries_in_tx_burst_left--;
- pr_debug("Decreasing entries_in_tx_burst_left of queue %d to %d\n",
- io_sq->qid, io_sq->entries_in_tx_burst_left);
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
+ io_sq->qid, io_sq->entries_in_tx_burst_left);
}
/* Make sure everything was written into the bounce buffer before
@@ -102,12 +104,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
if (unlikely((header_offset + header_len) >
llq_info->desc_list_entry_size)) {
- pr_err("Trying to write header larger than llq entry can accommodate\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Trying to write header larger than llq entry can accommodate\n");
return -EFAULT;
}
if (unlikely(!bounce_buffer)) {
- pr_err("Bounce buffer is NULL\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Bounce buffer is NULL\n");
return -EFAULT;
}
@@ -125,7 +129,8 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
bounce_buffer = pkt_ctrl->curr_bounce_buf;
if (unlikely(!bounce_buffer)) {
- pr_err("Bounce buffer is NULL\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Bounce buffer is NULL\n");
return NULL;
}
@@ -250,8 +255,9 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
io_cq->cur_rx_pkt_cdesc_count = 0;
io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
- pr_debug("ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
- io_cq->qid, *first_cdesc_idx, count);
+ netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+ "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
+ io_cq->qid, *first_cdesc_idx, count);
} else {
io_cq->cur_rx_pkt_cdesc_count += count;
count = 0;
@@ -335,7 +341,8 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
return 0;
}
-static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
+static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
+ struct ena_com_rx_ctx *ena_rx_ctx,
struct ena_eth_io_rx_cdesc_base *cdesc)
{
ena_rx_ctx->l3_proto = cdesc->status &
@@ -357,10 +364,11 @@ static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
- pr_debug("l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
- ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
- ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
- ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
+ netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+ "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
+ ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
+ ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
+ ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}
/*****************************************************************************/
@@ -385,13 +393,15 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
/* num_bufs +1 for potential meta desc */
if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
- pr_debug("Not enough space in the tx queue\n");
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Not enough space in the tx queue\n");
return -ENOMEM;
}
if (unlikely(header_len > io_sq->tx_max_header_size)) {
- pr_err("Header size is too large %d max header: %d\n",
- header_len, io_sq->tx_max_header_size);
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Header size is too large %d max header: %d\n",
+ header_len, io_sq->tx_max_header_size);
return -EINVAL;
}
@@ -405,7 +415,8 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
if (unlikely(rc)) {
- pr_err("Failed to create and store tx meta desc\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Failed to create and store tx meta desc\n");
return rc;
}
@@ -529,12 +540,14 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
return 0;
}
- pr_debug("Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
- nb_hw_desc);
+ netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+ "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
+ nb_hw_desc);
if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
- pr_err("Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
- ena_rx_ctx->max_bufs);
+ netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+ "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
+ ena_rx_ctx->max_bufs);
return -ENOSPC;
}
@@ -557,13 +570,15 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
/* Update SQ head ptr */
io_sq->next_to_comp += nb_hw_desc;
- pr_debug("[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
- io_sq->next_to_comp);
+ netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+ "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
+ io_sq->qid, io_sq->next_to_comp);
/* Get rx flags from the last pkt */
- ena_com_rx_set_flags(ena_rx_ctx, cdesc);
+ ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);
ena_rx_ctx->descs = nb_hw_desc;
+
return 0;
}
@@ -588,11 +603,15 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
ENA_ETH_IO_RX_DESC_LAST_MASK |
- (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
- ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+ ENA_ETH_IO_RX_DESC_COMP_REQ_MASK |
+ (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK);
desc->req_id = req_id;
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
+ __func__, io_sq->qid, req_id);
+
desc->buff_addr_lo = (u32)ena_buf->paddr;
desc->buff_addr_hi =
((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 2c16c218818a..689313ee25a8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -140,8 +140,9 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
llq_info->descs_per_entry);
}
- pr_debug("Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
- num_descs, num_entries_needed);
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Queue: %d num_descs: %d num_entries_needed: %d\n",
+ io_sq->qid, num_descs, num_entries_needed);
return num_entries_needed > io_sq->entries_in_tx_burst_left;
}
@@ -151,14 +152,16 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
u16 tail = io_sq->tail;
- pr_debug("Write submission queue doorbell for queue: %d tail: %d\n",
- io_sq->qid, tail);
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Write submission queue doorbell for queue: %d tail: %d\n",
+ io_sq->qid, tail);
writel(tail, io_sq->db_addr);
if (is_llq_max_tx_burst_exists(io_sq)) {
- pr_debug("Reset available entries in tx burst for queue %d to %d\n",
- io_sq->qid, max_entries_in_tx_burst);
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Reset available entries in tx burst for queue %d to %d\n",
+ io_sq->qid, max_entries_in_tx_burst);
io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
}
@@ -176,8 +179,9 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
if (unlikely(need_update)) {
- pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
- io_cq->qid, head);
+ netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+ "Write completion queue doorbell for queue %d: head: %d\n",
+ io_cq->qid, head);
writel(head, io_cq->cq_head_db_reg);
io_cq->last_head_update = head;
}
@@ -240,7 +244,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
*req_id = READ_ONCE(cdesc->req_id);
if (unlikely(*req_id >= io_cq->q_depth)) {
- pr_err("Invalid req id %d\n", cdesc->req_id);
+ netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+ "Invalid req id %d\n", cdesc->req_id);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 6cdd9efe8df3..d6cc7aa612b7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -95,6 +95,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
ENA_STAT_RX_ENTRY(xdp_pass),
ENA_STAT_RX_ENTRY(xdp_tx),
ENA_STAT_RX_ENTRY(xdp_invalid),
+ ENA_STAT_RX_ENTRY(xdp_redirect),
};
static const struct ena_stats ena_stats_ena_com_strings[] = {
@@ -839,7 +840,7 @@ static int ena_set_channels(struct net_device *netdev,
/* The check for max value is already done in ethtool */
if (count < ENA_MIN_NUM_IO_QUEUES ||
(ena_xdp_present(adapter) &&
- !ena_xdp_legal_queue_count(adapter, channels->combined_count)))
+ !ena_xdp_legal_queue_count(adapter, count)))
return -EINVAL;
return ena_update_queue_count(adapter, count);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 0e98f45c2b22..06596fa1f9fe 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -29,6 +29,8 @@ MODULE_LICENSE("GPL");
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5 * HZ)
+#define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())
+
#define ENA_NAPI_BUDGET 64
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
@@ -78,6 +80,15 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
int first_index, int count);
+/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
+static void ena_increase_stat(u64 *statp, u64 cnt,
+ struct u64_stats_sync *syncp)
+{
+ u64_stats_update_begin(syncp);
+ (*statp) += cnt;
+ u64_stats_update_end(syncp);
+}
+
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ena_adapter *adapter = netdev_priv(dev);
@@ -90,9 +101,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
return;
adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
- u64_stats_update_begin(&adapter->syncp);
- adapter->dev_stats.tx_timeout++;
- u64_stats_update_end(&adapter->syncp);
+ ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
netif_err(adapter, tx_err, dev, "Transmit time out\n");
}
@@ -152,9 +161,8 @@ static int ena_xmit_common(struct net_device *dev,
if (unlikely(rc)) {
netif_err(adapter, tx_queued, dev,
"Failed to prepare tx bufs\n");
- u64_stats_update_begin(&ring->syncp);
- ring->tx_stats.prepare_ctx_err++;
- u64_stats_update_end(&ring->syncp);
+ ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
+ &ring->syncp);
if (rc != -ENOMEM) {
adapter->reset_reason =
ENA_REGS_RESET_DRIVER_INVALID_STATE;
@@ -225,18 +233,18 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
return ret;
}
-static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
- struct ena_tx_buffer *tx_info,
- struct xdp_buff *xdp,
- void **push_hdr,
- u32 *push_len)
+static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
+ struct ena_tx_buffer *tx_info,
+ struct xdp_frame *xdpf,
+ void **push_hdr,
+ u32 *push_len)
{
struct ena_adapter *adapter = xdp_ring->adapter;
struct ena_com_buf *ena_buf;
dma_addr_t dma = 0;
u32 size;
- tx_info->xdpf = xdp_convert_buff_to_frame(xdp);
+ tx_info->xdpf = xdpf;
size = tx_info->xdpf->len;
ena_buf = tx_info->bufs;
@@ -262,9 +270,8 @@ static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
return 0;
error_report_dma_error:
- u64_stats_update_begin(&xdp_ring->syncp);
- xdp_ring->tx_stats.dma_mapping_err++;
- u64_stats_update_end(&xdp_ring->syncp);
+ ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1,
+ &xdp_ring->syncp);
netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
xdp_return_frame_rx_napi(tx_info->xdpf);
@@ -274,29 +281,24 @@ error_report_dma_error:
return -EINVAL;
}
-static int ena_xdp_xmit_buff(struct net_device *dev,
- struct xdp_buff *xdp,
- int qid,
- struct ena_rx_buffer *rx_info)
+static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
+ struct net_device *dev,
+ struct xdp_frame *xdpf,
+ int flags)
{
- struct ena_adapter *adapter = netdev_priv(dev);
struct ena_com_tx_ctx ena_tx_ctx = {};
struct ena_tx_buffer *tx_info;
- struct ena_ring *xdp_ring;
u16 next_to_use, req_id;
- int rc;
void *push_hdr;
u32 push_len;
+ int rc;
- xdp_ring = &adapter->tx_ring[qid];
next_to_use = xdp_ring->next_to_use;
req_id = xdp_ring->free_ids[next_to_use];
tx_info = &xdp_ring->tx_buffer_info[req_id];
tx_info->num_of_bufs = 0;
- page_ref_inc(rx_info->page);
- tx_info->xdp_rx_page = rx_info->page;
- rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len);
+ rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
if (unlikely(rc))
goto error_drop_packet;
@@ -311,34 +313,82 @@ static int ena_xdp_xmit_buff(struct net_device *dev,
tx_info,
&ena_tx_ctx,
next_to_use,
- xdp->data_end - xdp->data);
+ xdpf->len);
if (rc)
goto error_unmap_dma;
/* trigger the dma engine. ena_com_write_sq_doorbell()
* has a mb
*/
- ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
- u64_stats_update_begin(&xdp_ring->syncp);
- xdp_ring->tx_stats.doorbells++;
- u64_stats_update_end(&xdp_ring->syncp);
+ if (flags & XDP_XMIT_FLUSH) {
+ ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
+ ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1,
+ &xdp_ring->syncp);
+ }
- return NETDEV_TX_OK;
+ return rc;
error_unmap_dma:
ena_unmap_tx_buff(xdp_ring, tx_info);
tx_info->xdpf = NULL;
error_drop_packet:
- __free_page(tx_info->xdp_rx_page);
- return NETDEV_TX_OK;
+ xdp_return_frame(xdpf);
+ return rc;
}
-static int ena_xdp_execute(struct ena_ring *rx_ring,
- struct xdp_buff *xdp,
- struct ena_rx_buffer *rx_info)
+static int ena_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+ int qid, i, err, drops = 0;
+ struct ena_ring *xdp_ring;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ return -ENETDOWN;
+
+ /* We assume that all rings have the same XDP program */
+ if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
+ return -ENXIO;
+
+ qid = smp_processor_id() % adapter->xdp_num_queues;
+ qid += adapter->xdp_first_ring;
+ xdp_ring = &adapter->tx_ring[qid];
+
+	/* Other CPU ids might try to send through this queue */
+ spin_lock(&xdp_ring->xdp_tx_lock);
+
+ for (i = 0; i < n; i++) {
+ err = ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0);
+ /* The descriptor is freed by ena_xdp_xmit_frame in case
+ * of an error.
+ */
+ if (err)
+ drops++;
+ }
+
+ /* Ring doorbell to make device aware of the packets */
+ if (flags & XDP_XMIT_FLUSH) {
+ ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
+ ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1,
+ &xdp_ring->syncp);
+ }
+
+ spin_unlock(&xdp_ring->xdp_tx_lock);
+
+ /* Return number of packets sent */
+ return n - drops;
+}
+
+static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
struct bpf_prog *xdp_prog;
+ struct ena_ring *xdp_ring;
u32 verdict = XDP_PASS;
+ struct xdp_frame *xdpf;
u64 *xdp_stat;
+ int qid;
rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
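
ena_xdp_xmit() above is wired up as .ndo_xdp_xmit later in the patch, making it the receiving end of XDP_REDIRECT: frames land here when an XDP program on some other netdev redirects to this device. Since any CPU can drive that path, the qid derived from smp_processor_id() is not exclusive, which is why xdp_tx_lock serializes ring access. For illustration, a minimal redirecting program (map and section names are examples, not taken from this patch):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);	/* target ifindex */
} tx_port SEC(".maps");

SEC("xdp")
int xdp_redirect_example(struct xdp_md *ctx)
{
	__u32 key = 0;

	/* forward every frame to the device stored in tx_port[0] */
	return bpf_redirect_map(&tx_port, key, 0);
}

char _license[] SEC("license") = "GPL";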
@@ -348,28 +398,49 @@ static int ena_xdp_execute(struct ena_ring *rx_ring,
verdict = bpf_prog_run_xdp(xdp_prog, xdp);
- if (verdict == XDP_TX) {
- ena_xdp_xmit_buff(rx_ring->netdev,
- xdp,
- rx_ring->qid + rx_ring->adapter->num_io_queues,
- rx_info);
+ switch (verdict) {
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ break;
+ }
+
+ /* Find xmit queue */
+ qid = rx_ring->qid + rx_ring->adapter->num_io_queues;
+ xdp_ring = &rx_ring->adapter->tx_ring[qid];
+
+ /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
+ spin_lock(&xdp_ring->xdp_tx_lock);
+
+ ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf, XDP_XMIT_FLUSH);
+ spin_unlock(&xdp_ring->xdp_tx_lock);
xdp_stat = &rx_ring->rx_stats.xdp_tx;
- } else if (unlikely(verdict == XDP_ABORTED)) {
+ break;
+ case XDP_REDIRECT:
+ if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
+ xdp_stat = &rx_ring->rx_stats.xdp_redirect;
+ break;
+ }
+ fallthrough;
+ case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
xdp_stat = &rx_ring->rx_stats.xdp_aborted;
- } else if (unlikely(verdict == XDP_DROP)) {
+ break;
+ case XDP_DROP:
xdp_stat = &rx_ring->rx_stats.xdp_drop;
- } else if (unlikely(verdict == XDP_PASS)) {
+ break;
+ case XDP_PASS:
xdp_stat = &rx_ring->rx_stats.xdp_pass;
- } else {
+ break;
+ default:
bpf_warn_invalid_xdp_action(verdict);
xdp_stat = &rx_ring->rx_stats.xdp_invalid;
}
- u64_stats_update_begin(&rx_ring->syncp);
- (*xdp_stat)++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
out:
rcu_read_unlock();
@@ -638,6 +709,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter,
txr->smoothed_interval =
ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
txr->disable_meta_caching = adapter->disable_meta_caching;
+ spin_lock_init(&txr->xdp_tx_lock);
/* Don't init RX queues for xdp queues */
if (!ENA_IS_XDP_INDEX(adapter, i)) {
@@ -922,9 +994,8 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
page = alloc_page(gfp);
if (unlikely(!page)) {
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.page_alloc_fail++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
+ &rx_ring->syncp);
return -ENOMEM;
}
@@ -934,9 +1005,8 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.dma_mapping_err++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
+ &rx_ring->syncp);
__free_page(page);
return -EIO;
@@ -952,11 +1022,20 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
return 0;
}
+static void ena_unmap_rx_buff(struct ena_ring *rx_ring,
+ struct ena_rx_buffer *rx_info)
+{
+ struct ena_com_buf *ena_buf = &rx_info->ena_buf;
+
+ dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
+ ENA_PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+}
+
static void ena_free_rx_page(struct ena_ring *rx_ring,
struct ena_rx_buffer *rx_info)
{
struct page *page = rx_info->page;
- struct ena_com_buf *ena_buf = &rx_info->ena_buf;
if (unlikely(!page)) {
netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
@@ -964,9 +1043,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
return;
}
- dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
- ENA_PAGE_SIZE,
- DMA_BIDIRECTIONAL);
+ ena_unmap_rx_buff(rx_ring, rx_info);
__free_page(page);
rx_info->page = NULL;
@@ -1009,9 +1086,8 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
}
if (unlikely(i < num)) {
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.refil_partial++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1,
+ &rx_ring->syncp);
netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
"Refilled rx qid %d with only %d buffers (from %d)\n",
rx_ring->qid, i, num);
@@ -1187,9 +1263,7 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
"Invalid req_id: %hu\n",
req_id);
- u64_stats_update_begin(&ring->syncp);
- ring->tx_stats.bad_req_id++;
- u64_stats_update_end(&ring->syncp);
+ ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp);
/* Trigger device reset */
ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
@@ -1300,9 +1374,8 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
if (netif_tx_queue_stopped(txq) && above_thresh &&
test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
netif_tx_wake_queue(txq);
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.queue_wakeup++;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
+ &tx_ring->syncp);
}
__netif_tx_unlock(txq);
}
@@ -1321,9 +1394,8 @@ static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
rx_ring->rx_copybreak);
if (unlikely(!skb)) {
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.skb_alloc_fail++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
+ &rx_ring->syncp);
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"Failed to allocate skb. frags: %d\n", frags);
return NULL;
@@ -1395,9 +1467,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
return NULL;
do {
- dma_unmap_page(rx_ring->dev,
- dma_unmap_addr(&rx_info->ena_buf, paddr),
- ENA_PAGE_SIZE, DMA_BIDIRECTIONAL);
+ ena_unmap_rx_buff(rx_ring, rx_info);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
rx_info->page_offset, len, ENA_PAGE_SIZE);
@@ -1451,9 +1521,8 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
(ena_rx_ctx->l3_csum_err))) {
/* ipv4 checksum error */
skb->ip_summed = CHECKSUM_NONE;
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.bad_csum++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
+ &rx_ring->syncp);
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"RX IPv4 header checksum error\n");
return;
@@ -1464,9 +1533,8 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
(ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
if (unlikely(ena_rx_ctx->l4_csum_err)) {
/* TCP/UDP checksum error */
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.bad_csum++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
+ &rx_ring->syncp);
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"RX L4 checksum error\n");
skb->ip_summed = CHECKSUM_NONE;
@@ -1475,13 +1543,11 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
if (likely(ena_rx_ctx->l4_csum_checked)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.csum_good++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.csum_good, 1,
+ &rx_ring->syncp);
} else {
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.csum_unchecked++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1,
+ &rx_ring->syncp);
skb->ip_summed = CHECKSUM_NONE;
}
} else {
@@ -1529,7 +1595,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
return XDP_DROP;
- ret = ena_xdp_execute(rx_ring, xdp, rx_info);
+ ret = ena_xdp_execute(rx_ring, xdp);
/* The xdp program might expand the headers */
if (ret == XDP_PASS) {
@@ -1559,6 +1625,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
struct sk_buff *skb;
int refill_required;
struct xdp_buff xdp;
+ int xdp_flags = 0;
int total_len = 0;
int xdp_verdict;
int rc = 0;
@@ -1606,22 +1673,25 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
&next_to_clean);
if (unlikely(!skb)) {
- /* The page might not actually be freed here since the
- * page reference count is incremented in
- * ena_xdp_xmit_buff(), and it will be decreased only
- * when send completion was received from the device
- */
- if (xdp_verdict == XDP_TX)
- ena_free_rx_page(rx_ring,
- &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
for (i = 0; i < ena_rx_ctx.descs; i++) {
- rx_ring->free_ids[next_to_clean] =
- rx_ring->ena_bufs[i].req_id;
+ int req_id = rx_ring->ena_bufs[i].req_id;
+
+ rx_ring->free_ids[next_to_clean] = req_id;
next_to_clean =
ENA_RX_RING_IDX_NEXT(next_to_clean,
rx_ring->ring_size);
+
+			/* The packet was passed for transmission, unmap it
+			 * from the RX side.
+			 */
+ if (xdp_verdict == XDP_TX || xdp_verdict == XDP_REDIRECT) {
+ ena_unmap_rx_buff(rx_ring,
+ &rx_ring->rx_buffer_info[req_id]);
+ rx_ring->rx_buffer_info[req_id].page = NULL;
+ }
}
if (xdp_verdict != XDP_PASS) {
+ xdp_flags |= xdp_verdict;
res_budget--;
continue;
}
@@ -1667,20 +1737,21 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
ena_refill_rx_bufs(rx_ring, refill_required);
}
+ if (xdp_flags & XDP_REDIRECT)
+ xdp_do_flush_map();
+
return work_done;
error:
adapter = netdev_priv(rx_ring->netdev);
if (rc == -ENOSPC) {
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.bad_desc_num++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
+ &rx_ring->syncp);
adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
} else {
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.bad_req_id++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
+ &rx_ring->syncp);
adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
}
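
xdp_do_redirect() only queues a frame on a per-CPU bulk structure; the xdp_do_flush_map() call added above is what actually drains it, and it has to run before the NAPI poll returns. A sketch of that ordering contract, with have_rx_work() as a hypothetical stand-in for the driver's real descriptor check:

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	bool redirected = false;
	int work = 0;

	while (work < budget && have_rx_work()) {	/* hypothetical helper */
		/* ... run the XDP program; on XDP_REDIRECT: ... */
		redirected = true;
		work++;
	}

	if (redirected)
		xdp_do_flush_map();	/* drain the per-CPU redirect queues */

	return work;
}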
@@ -1741,9 +1812,8 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
tx_ring->smoothed_interval,
true);
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.unmask_interrupt++;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1,
+ &tx_ring->syncp);
/* It is a shared MSI-X.
* Tx and Rx CQ have pointer to it.
@@ -1823,7 +1893,7 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
tx_pkts++;
total_done += tx_info->tx_descs;
- __free_page(tx_info->xdp_rx_page);
+ xdp_return_frame(xdpf);
xdp_ring->free_ids[next_to_clean] = req_id;
next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
xdp_ring->ring_size);
@@ -2550,9 +2620,8 @@ static int ena_up(struct ena_adapter *adapter)
if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
netif_carrier_on(adapter->netdev);
- u64_stats_update_begin(&adapter->syncp);
- adapter->dev_stats.interface_up++;
- u64_stats_update_end(&adapter->syncp);
+ ena_increase_stat(&adapter->dev_stats.interface_up, 1,
+ &adapter->syncp);
set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
@@ -2590,9 +2659,8 @@ static void ena_down(struct ena_adapter *adapter)
clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
- u64_stats_update_begin(&adapter->syncp);
- adapter->dev_stats.interface_down++;
- u64_stats_update_end(&adapter->syncp);
+ ena_increase_stat(&adapter->dev_stats.interface_down, 1,
+ &adapter->syncp);
netif_carrier_off(adapter->netdev);
netif_tx_disable(adapter->netdev);
@@ -2820,15 +2888,12 @@ static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
(header_len < tx_ring->tx_max_header_size))
return 0;
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.linearize++;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp);
rc = skb_linearize(skb);
if (unlikely(rc)) {
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.linearize_failed++;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1,
+ &tx_ring->syncp);
}
return rc;
@@ -2868,9 +2933,8 @@ static int ena_tx_map_skb(struct ena_ring *tx_ring,
tx_ring->push_buf_intermediate_buf);
*header_len = push_len;
if (unlikely(skb->data != *push_hdr)) {
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.llq_buffer_copy++;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1,
+ &tx_ring->syncp);
delta = push_len - skb_head_len;
}
@@ -2927,9 +2991,8 @@ static int ena_tx_map_skb(struct ena_ring *tx_ring,
return 0;
error_report_dma_error:
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.dma_mapping_err++;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
+ &tx_ring->syncp);
netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n");
tx_info->skb = NULL;
@@ -3006,9 +3069,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
__func__, qid);
netif_tx_stop_queue(txq);
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.queue_stop++;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1,
+ &tx_ring->syncp);
		/* There is a rare condition where this function decides to
		 * stop the queue but meanwhile clean_tx_irq updates
@@ -3023,9 +3085,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
ENA_TX_WAKEUP_THRESH)) {
netif_tx_wake_queue(txq);
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.queue_wakeup++;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
+ &tx_ring->syncp);
}
}
@@ -3034,9 +3095,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
* has a mb
*/
ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.doorbells++;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.doorbells, 1,
+ &tx_ring->syncp);
}
return NETDEV_TX_OK;
@@ -3242,6 +3302,7 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_set_mac_address = NULL,
.ndo_validate_addr = eth_validate_addr,
.ndo_bpf = ena_xdp,
+ .ndo_xdp_xmit = ena_xdp_xmit,
};
static int ena_device_validate_params(struct ena_adapter *adapter,
@@ -3671,9 +3732,8 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
rc = -EIO;
}
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->tx_stats.missed_tx += missed_tx;
- u64_stats_update_end(&tx_ring->syncp);
+ ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx,
+ &tx_ring->syncp);
return rc;
}
@@ -3756,9 +3816,8 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
rx_ring->empty_rx_queue++;
if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->rx_stats.empty_rx_ring++;
- u64_stats_update_end(&rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1,
+ &rx_ring->syncp);
netif_err(adapter, drv, adapter->netdev,
"Trigger refill for ring %d\n", i);
@@ -3788,9 +3847,8 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
netif_err(adapter, drv, adapter->netdev,
"Keep alive watchdog timeout.\n");
- u64_stats_update_begin(&adapter->syncp);
- adapter->dev_stats.wd_expired++;
- u64_stats_update_end(&adapter->syncp);
+ ena_increase_stat(&adapter->dev_stats.wd_expired, 1,
+ &adapter->syncp);
adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
@@ -3801,9 +3859,8 @@ static void check_for_admin_com_state(struct ena_adapter *adapter)
if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
netif_err(adapter, drv, adapter->netdev,
"ENA admin queue is not in running state!\n");
- u64_stats_update_begin(&adapter->syncp);
- adapter->dev_stats.admin_q_pause++;
- u64_stats_update_end(&adapter->syncp);
+ ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1,
+ &adapter->syncp);
adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
@@ -4176,18 +4233,36 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_dev->dmadev = &pdev->dev;
+ netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), ENA_MAX_RINGS);
+ if (!netdev) {
+ dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
+ rc = -ENOMEM;
+ goto err_free_region;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ adapter = netdev_priv(netdev);
+ adapter->ena_dev = ena_dev;
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
+ ena_dev->net_device = netdev;
+
+ pci_set_drvdata(pdev, adapter);
+
rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
if (rc) {
dev_err(&pdev->dev, "ENA device init failed\n");
if (rc == -ETIME)
rc = -EPROBE_DEFER;
- goto err_free_region;
+ goto err_netdev_destroy;
}
rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
if (rc) {
dev_err(&pdev->dev, "ENA llq bar mapping failed\n");
- goto err_free_ena_dev;
+ goto err_device_destroy;
}
calc_queue_ctx.ena_dev = ena_dev;
@@ -4207,26 +4282,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_device_destroy;
}
- /* dev zeroed in init_etherdev */
- netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), max_num_io_queues);
- if (!netdev) {
- dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
- rc = -ENOMEM;
- goto err_device_destroy;
- }
-
- SET_NETDEV_DEV(netdev, &pdev->dev);
-
- adapter = netdev_priv(netdev);
- pci_set_drvdata(pdev, adapter);
-
- adapter->ena_dev = ena_dev;
- adapter->netdev = netdev;
- adapter->pdev = pdev;
-
ena_set_conf_feat_params(adapter, &get_feat_ctx);
- adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
adapter->reset_reason = ENA_REGS_RESET_NORMAL;
adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
@@ -4257,7 +4314,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc) {
dev_err(&pdev->dev,
"Failed to query interrupt moderation feature\n");
- goto err_netdev_destroy;
+ goto err_device_destroy;
}
ena_init_io_rings(adapter,
0,
@@ -4335,11 +4392,11 @@ err_free_msix:
ena_disable_msix(adapter);
err_worker_destroy:
del_timer(&adapter->timer_service);
-err_netdev_destroy:
- free_netdev(netdev);
err_device_destroy:
ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
+err_netdev_destroy:
+ free_netdev(netdev);
err_free_region:
ena_release_bars(ena_dev, pdev);
err_free_ena_dev:
@@ -4439,9 +4496,7 @@ static int __maybe_unused ena_suspend(struct device *dev_d)
struct pci_dev *pdev = to_pci_dev(dev_d);
struct ena_adapter *adapter = pci_get_drvdata(pdev);
- u64_stats_update_begin(&adapter->syncp);
- adapter->dev_stats.suspend++;
- u64_stats_update_end(&adapter->syncp);
+ ena_increase_stat(&adapter->dev_stats.suspend, 1, &adapter->syncp);
rtnl_lock();
if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
@@ -4462,9 +4517,7 @@ static int __maybe_unused ena_resume(struct device *dev_d)
struct ena_adapter *adapter = dev_get_drvdata(dev_d);
int rc;
- u64_stats_update_begin(&adapter->syncp);
- adapter->dev_stats.resume++;
- u64_stats_update_end(&adapter->syncp);
+ ena_increase_stat(&adapter->dev_stats.resume, 1, &adapter->syncp);
rtnl_lock();
rc = ena_restore_device(adapter);
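
The probe reordering above front-loads the netdev allocation, sized by the ENA_MAX_RINGS upper bound instead of the device-reported queue count, so ena_dev->net_device is already valid when ena_device_init() runs; the error-unwind labels swap to match. The allocation pattern itself, reduced to essentials (MY_MAX_QUEUES is a stand-in for ENA_MAX_RINGS):

struct my_adapter {
	struct net_device *netdev;
};

static struct my_adapter *example_alloc(struct device *parent)
{
	struct net_device *netdev;
	struct my_adapter *adapter;

	netdev = alloc_etherdev_mq(sizeof(*adapter), MY_MAX_QUEUES);
	if (!netdev)
		return NULL;

	SET_NETDEV_DEV(netdev, parent);
	adapter = netdev_priv(netdev);	/* private area trails struct net_device */
	adapter->netdev = netdev;

	return adapter;
}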
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 30eb686749dc..74af15d62ee1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -170,12 +170,6 @@ struct ena_tx_buffer {
* the xdp queues
*/
struct xdp_frame *xdpf;
- /* The rx page for the rx buffer that was received in rx and
- * re transmitted on xdp tx queues as a result of XDP_TX action.
- * We need to free the page once we finished cleaning the buffer in
- * clean_xdp_irq()
- */
- struct page *xdp_rx_page;
/* Indicate if bufs[0] map the linear data of the skb. */
u8 map_linear_data;
@@ -239,6 +233,7 @@ struct ena_stats_rx {
u64 xdp_pass;
u64 xdp_tx;
u64 xdp_invalid;
+ u64 xdp_redirect;
};
struct ena_ring {
@@ -263,6 +258,7 @@ struct ena_ring {
struct ena_com_io_sq *ena_com_io_sq;
struct bpf_prog *xdp_bpf_prog;
struct xdp_rxq_info xdp_rxq;
+ spinlock_t xdp_tx_lock; /* synchronize XDP TX/Redirect traffic */
u16 next_to_use;
u16 next_to_clean;
@@ -433,8 +429,8 @@ static inline bool ena_xdp_present_ring(struct ena_ring *ring)
return !!ring->xdp_bpf_prog;
}
-static inline int ena_xdp_legal_queue_count(struct ena_adapter *adapter,
- u32 queues)
+static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
+ u32 queues)
{
return 2 * queues <= adapter->max_num_io_queues;
}
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 1f5da4e4f4b2..769694c7f86c 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -699,6 +699,7 @@
#define MACB_CAPS_GEM_HAS_PTP 0x00000040
#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
+#define MACB_CAPS_CLK_HW_CHG 0x04000000
#define MACB_CAPS_MACB_IS_EMAC 0x08000000
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
@@ -1147,6 +1148,14 @@ struct macb_pm_data {
u32 usrio;
};
+struct macb_usrio_config {
+ u32 mii;
+ u32 rmii;
+ u32 rgmii;
+ u32 refclk;
+ u32 hdfctlen;
+};
+
struct macb_config {
u32 caps;
unsigned int dma_burst_length;
@@ -1155,6 +1164,7 @@ struct macb_config {
struct clk **rx_clk, struct clk **tsu_clk);
int (*init)(struct platform_device *pdev);
int jumbo_max_len;
+ const struct macb_usrio_config *usrio;
};
struct tsu_incr {
@@ -1288,6 +1298,7 @@ struct macb {
u32 rx_intr_mask;
struct macb_pm_data pm_data;
+ const struct macb_usrio_config *usrio;
};
#ifdef CONFIG_MACB_USE_HWSTAMP
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 7b1d195787dc..995a13489276 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -460,15 +460,14 @@ static void macb_init_buffers(struct macb *bp)
/**
* macb_set_tx_clk() - Set a clock to a new frequency
- * @clk: Pointer to the clock to change
+ * @bp: pointer to struct macb
* @speed: New frequency in Hz
- * @dev: Pointer to the struct net_device
*/
-static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
+static void macb_set_tx_clk(struct macb *bp, int speed)
{
long ferr, rate, rate_rounded;
- if (!clk)
+ if (!bp->tx_clk || !(bp->caps & MACB_CAPS_CLK_HW_CHG))
return;
switch (speed) {
@@ -485,7 +484,7 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
return;
}
- rate_rounded = clk_round_rate(clk, rate);
+ rate_rounded = clk_round_rate(bp->tx_clk, rate);
if (rate_rounded < 0)
return;
@@ -495,11 +494,12 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
ferr = abs(rate_rounded - rate);
ferr = DIV_ROUND_UP(ferr, rate / 100000);
if (ferr > 5)
- netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
+ netdev_warn(bp->dev,
+ "unable to generate target frequency: %ld Hz\n",
rate);
- if (clk_set_rate(clk, rate_rounded))
- netdev_err(dev, "adjusting tx_clk failed.\n");
+ if (clk_set_rate(bp->tx_clk, rate_rounded))
+ netdev_err(bp->dev, "adjusting tx_clk failed.\n");
}
static void macb_validate(struct phylink_config *config,
@@ -751,7 +751,7 @@ static void macb_mac_link_up(struct phylink_config *config,
if (rx_pause)
ctrl |= MACB_BIT(PAE);
- macb_set_tx_clk(bp->tx_clk, speed, ndev);
+ macb_set_tx_clk(bp, speed);
/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
* cleared the pipeline and control registers.
@@ -3694,6 +3694,20 @@ static void macb_probe_queues(void __iomem *mem,
*num_queues = hweight32(*queue_mask);
}
+static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk,
+ struct clk *rx_clk, struct clk *tsu_clk)
+{
+ struct clk_bulk_data clks[] = {
+ { .clk = tsu_clk, },
+ { .clk = rx_clk, },
+ { .clk = pclk, },
+ { .clk = hclk, },
+ { .clk = tx_clk },
+ };
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks);
+}
+
static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
struct clk **hclk, struct clk **tx_clk,
struct clk **rx_clk, struct clk **tsu_clk)
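
macb_clks_disable() relies on clk_disable_unprepare() treating a NULL struct clk as a no-op, which is what lets macb_runtime_suspend() further down pass NULL for everything except tsu_clk. A reduced sketch of the same bulk pattern:

static void example_disable_pair(struct clk *a, struct clk *b)
{
	struct clk_bulk_data clks[] = {
		{ .clk = a, },
		{ .clk = b, },	/* NULL entries are silently skipped */
	};

	clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks);
}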
@@ -3913,15 +3927,15 @@ static int macb_init(struct platform_device *pdev)
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
val = 0;
if (phy_interface_mode_is_rgmii(bp->phy_interface))
- val = GEM_BIT(RGMII);
+ val = bp->usrio->rgmii;
else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
- val = MACB_BIT(RMII);
+ val = bp->usrio->rmii;
else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
- val = MACB_BIT(MII);
+ val = bp->usrio->mii;
if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
- val |= MACB_BIT(CLKEN);
+ val |= bp->usrio->refclk;
macb_or_gem_writel(bp, USRIO, val);
}
@@ -4406,8 +4420,10 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
return err;
mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
- if (!mgmt)
- return -ENOMEM;
+ if (!mgmt) {
+ err = -ENOMEM;
+ goto err_disable_clks;
+ }
init.name = "sifive-gemgxl-mgmt";
init.ops = &fu540_c000_ops;
@@ -4418,16 +4434,26 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
mgmt->hw.init = &init;
*tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
- if (IS_ERR(*tx_clk))
- return PTR_ERR(*tx_clk);
+ if (IS_ERR(*tx_clk)) {
+ err = PTR_ERR(*tx_clk);
+ goto err_disable_clks;
+ }
err = clk_prepare_enable(*tx_clk);
- if (err)
+ if (err) {
dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
- else
+ *tx_clk = NULL;
+ goto err_disable_clks;
+ } else {
dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);
+ }
return 0;
+
+err_disable_clks:
+ macb_clks_disable(*pclk, *hclk, *tx_clk, *rx_clk, *tsu_clk);
+
+ return err;
}
static int fu540_c000_init(struct platform_device *pdev)
@@ -4439,6 +4465,21 @@ static int fu540_c000_init(struct platform_device *pdev)
return macb_init(pdev);
}
+static const struct macb_usrio_config macb_default_usrio = {
+ .mii = MACB_BIT(MII),
+ .rmii = MACB_BIT(RMII),
+ .rgmii = GEM_BIT(RGMII),
+ .refclk = MACB_BIT(CLKEN),
+};
+
+static const struct macb_usrio_config sama7g5_usrio = {
+ .mii = 0,
+ .rmii = 1,
+ .rgmii = 2,
+ .refclk = BIT(2),
+ .hdfctlen = BIT(6),
+};
+
static const struct macb_config fu540_c000_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
MACB_CAPS_GEM_HAS_PTP,
@@ -4446,12 +4487,14 @@ static const struct macb_config fu540_c000_config = {
.clk_init = fu540_c000_clk_init,
.init = fu540_c000_init,
.jumbo_max_len = 10240,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config at91sam9260_config = {
.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config sama5d3macb_config = {
@@ -4459,6 +4502,7 @@ static const struct macb_config sama5d3macb_config = {
| MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config pc302gem_config = {
@@ -4466,6 +4510,7 @@ static const struct macb_config pc302gem_config = {
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config sama5d2_config = {
@@ -4473,6 +4518,7 @@ static const struct macb_config sama5d2_config = {
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config sama5d3_config = {
@@ -4482,6 +4528,7 @@ static const struct macb_config sama5d3_config = {
.clk_init = macb_clk_init,
.init = macb_init,
.jumbo_max_len = 10240,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config sama5d4_config = {
@@ -4489,18 +4536,21 @@ static const struct macb_config sama5d4_config = {
.dma_burst_length = 4,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config emac_config = {
.caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC,
.clk_init = at91ether_clk_init,
.init = at91ether_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config np4_config = {
.caps = MACB_CAPS_USRIO_DISABLED,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config zynqmp_config = {
@@ -4511,6 +4561,7 @@ static const struct macb_config zynqmp_config = {
.clk_init = macb_clk_init,
.init = macb_init,
.jumbo_max_len = 10240,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config zynq_config = {
@@ -4519,6 +4570,23 @@ static const struct macb_config zynq_config = {
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
+};
+
+static const struct macb_config sama7g5_gem_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .usrio = &sama7g5_usrio,
+};
+
+static const struct macb_config sama7g5_emac_config = {
+ .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_USRIO_HAS_CLKEN,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .usrio = &sama7g5_usrio,
};
static const struct of_device_id macb_dt_ids[] = {
@@ -4538,6 +4606,8 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
+ { .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
+ { .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
@@ -4640,6 +4710,8 @@ static int macb_probe(struct platform_device *pdev)
bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+ bp->usrio = macb_config->usrio;
+
spin_lock_init(&bp->lock);
/* setup capabilities */
@@ -4735,11 +4807,7 @@ err_out_free_netdev:
free_netdev(dev);
err_disable_clocks:
- clk_disable_unprepare(tx_clk);
- clk_disable_unprepare(hclk);
- clk_disable_unprepare(pclk);
- clk_disable_unprepare(rx_clk);
- clk_disable_unprepare(tsu_clk);
+ macb_clks_disable(pclk, hclk, tx_clk, rx_clk, tsu_clk);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
@@ -4764,11 +4832,8 @@ static int macb_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (!pm_runtime_suspended(&pdev->dev)) {
- clk_disable_unprepare(bp->tx_clk);
- clk_disable_unprepare(bp->hclk);
- clk_disable_unprepare(bp->pclk);
- clk_disable_unprepare(bp->rx_clk);
- clk_disable_unprepare(bp->tsu_clk);
+ macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk,
+ bp->rx_clk, bp->tsu_clk);
pm_runtime_set_suspended(&pdev->dev);
}
phylink_destroy(bp->phylink);
@@ -4947,13 +5012,10 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
- if (!(device_may_wakeup(dev))) {
- clk_disable_unprepare(bp->tx_clk);
- clk_disable_unprepare(bp->hclk);
- clk_disable_unprepare(bp->pclk);
- clk_disable_unprepare(bp->rx_clk);
- }
- clk_disable_unprepare(bp->tsu_clk);
+ if (!(device_may_wakeup(dev)))
+ macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk);
+ else
+ macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk);
return 0;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c
index 9c682aff3834..519323460f26 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c
@@ -36,8 +36,6 @@ void vnic_cq_free(struct vnic_cq *cq)
int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
unsigned int desc_count, unsigned int desc_size)
{
- int err;
-
cq->index = index;
cq->vdev = vdev;
@@ -47,11 +45,7 @@ int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
return -EINVAL;
}
- err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
- if (err)
- return err;
-
- return 0;
+ return vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
}
void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 676e437d78f6..d402d83d9edd 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4677,7 +4677,6 @@ static int be_if_create(struct be_adapter *adapter)
{
u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
u32 cap_flags = be_if_cap_flags(adapter);
- int status;
/* alloc required memory for other filtering fields */
adapter->pmac_id = kcalloc(be_max_uc(adapter),
@@ -4700,13 +4699,8 @@ static int be_if_create(struct be_adapter *adapter)
en_flags &= cap_flags;
/* will enable all the needed filter flags in be_open() */
- status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
+ return be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
&adapter->if_handle, 0);
-
- if (status)
- return status;
-
- return 0;
}
int be_update_queues(struct be_adapter *adapter)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index e28510c282e5..4360ce4d3fb6 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1625,17 +1625,13 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
{
struct dpaa_bp *dpaa_bp;
int *countptr;
- int res;
dpaa_bp = priv->dpaa_bp;
if (!dpaa_bp)
return -EINVAL;
countptr = this_cpu_ptr(dpaa_bp->percpu_count);
- res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
- if (res)
- return res;
- return 0;
+ return dpaa_eth_refill_bpool(dpaa_bp, countptr);
}
/* Cleanup function for outgoing frame descriptors that were built on Tx path,
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index c6481bd61239..9d58d8334467 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -430,7 +430,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
return -ENOMEM;
priv = new_bus->priv;
- new_bus->name = "Freescale PowerQUICC MII Bus",
+ new_bus->name = "Freescale PowerQUICC MII Bus";
new_bus->read = &fsl_pq_mdio_read;
new_bus->write = &fsl_pq_mdio_write;
new_bus->reset = &fsl_pq_mdio_reset;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index a9aca8c24e90..173d6966c1a3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -546,9 +546,9 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
obj_args.integer.type = ACPI_TYPE_INTEGER;
obj_args.integer.value = mac_cb->mac_id;
- argv4.type = ACPI_TYPE_PACKAGE,
- argv4.package.count = 1,
- argv4.package.elements = &obj_args,
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 1;
+ argv4.package.elements = &obj_args;
obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
&hns_dsaf_acpi_dsm_guid, 0,
@@ -593,9 +593,9 @@ static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
obj_args.integer.type = ACPI_TYPE_INTEGER;
obj_args.integer.value = mac_cb->mac_id;
- argv4.type = ACPI_TYPE_PACKAGE,
- argv4.package.count = 1,
- argv4.package.elements = &obj_args,
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 1;
+ argv4.package.elements = &obj_args;
obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
&hns_dsaf_acpi_dsm_guid, 0,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index a70f44d6e396..92cd4446a82e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -5010,7 +5010,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
}
key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
- key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
+ key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
key_cfg->outer_sipv6_word_en = 0;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 350225bbe0be..9a9b09401d01 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -313,13 +313,7 @@ static void free_rxqs(struct hinic_dev *nic_dev)
static int hinic_configure_max_qnum(struct hinic_dev *nic_dev)
{
- int err;
-
- err = hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps);
- if (err)
- return err;
-
- return 0;
+ return hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps);
}
static int hinic_rss_init(struct hinic_dev *nic_dev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 67471cb2b129..24c2bfdfec4e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -497,18 +497,14 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
int rvu_npa_init(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
- int blkaddr, err;
+ int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
if (blkaddr < 0)
return 0;
/* Initialize admin queue */
- err = npa_aq_init(rvu, &hw->block[blkaddr]);
- if (err)
- return err;
-
- return 0;
+ return npa_aq_init(rvu, &hw->block[blkaddr]);
}
void rvu_npa_freemem(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 7bcf5246350f..56390a664517 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -12,7 +12,6 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
struct ptp_req *req;
- int err;
if (!ptp->nic)
return -ENODEV;
@@ -24,11 +23,7 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
req->op = PTP_OP_ADJFINE;
req->scaled_ppm = scaled_ppm;
- err = otx2_sync_mbox_msg(&ptp->nic->mbox);
- if (err)
- return err;
-
- return 0;
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
static u64 ptp_cc_read(const struct cyclecounter *cc)
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 0f20e0788cce..13bd58dafc6c 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -93,15 +93,10 @@ static int prestera_port_open(struct net_device *dev)
static int prestera_port_close(struct net_device *dev)
{
struct prestera_port *port = netdev_priv(dev);
- int err;
netif_stop_queue(dev);
- err = prestera_hw_port_state_set(port, false);
- if (err)
- return err;
-
- return 0;
+ return prestera_hw_port_state_set(port, false);
}
static netdev_tx_t prestera_port_xmit(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f68a4afeefb8..2f6a0ae20650 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -484,7 +484,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
}
}
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = fwd_fdb,
+ dest[i].ft = fwd_fdb;
i++;
mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr);
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index d65872172229..6fc7483aea03 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -1112,7 +1112,7 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
/* setup mii state */
ks->mii.dev = netdev;
- ks->mii.phy_id = 1,
+ ks->mii.phy_id = 1;
ks->mii.phy_id_mask = 1;
ks->mii.reg_num_mask = 0xf;
ks->mii.mdio_read = ks8851_phy_read;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 67ba67ed0cb9..03e79a677c8b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -305,17 +305,13 @@ int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats,
static int dwmac5_rxp_disable(void __iomem *ioaddr)
{
u32 val;
- int ret;
val = readl(ioaddr + MTL_OPERATION_MODE);
val &= ~MTL_FRPE;
writel(val, ioaddr + MTL_OPERATION_MODE);
- ret = readl_poll_timeout(ioaddr + MTL_RXP_CONTROL_STATUS, val,
+ return readl_poll_timeout(ioaddr + MTL_RXP_CONTROL_STATUS, val,
val & RXPI, 1, 10000);
- if (ret)
- return ret;
- return 0;
}
static void dwmac5_rxp_enable(void __iomem *ioaddr)
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 702fdc393da0..cfff3d48807a 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -381,9 +381,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
}
data->bus->name = dev_name(dev);
- data->bus->read = davinci_mdio_read,
- data->bus->write = davinci_mdio_write,
- data->bus->reset = davinci_mdio_reset,
+ data->bus->read = davinci_mdio_read;
+ data->bus->write = davinci_mdio_write;
+ data->bus->reset = davinci_mdio_reset;
data->bus->parent = dev;
data->bus->priv = data;
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index d2c3f273c233..2fc64483f275 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -413,7 +413,7 @@ static void ipa_client_init_driver_work(struct work_struct *work)
int ret;
ipa_qmi = container_of(work, struct ipa_qmi, init_driver_work);
- qmi = &ipa_qmi->client_handle,
+ qmi = &ipa_qmi->client_handle;
ipa = container_of(ipa_qmi, struct ipa, qmi);
dev = &ipa->pdev->dev;
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index 3160443ef3b9..ae83d66195a5 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -1241,7 +1241,7 @@ static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN;
net->handler.uuid = &tbnet_svc_uuid;
- net->handler.callback = tbnet_handle_packet,
+ net->handler.callback = tbnet_handle_packet;
net->handler.data = net;
tb_register_protocol_handler(&net->handler);
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index dba847f28096..02e6bbb17b15 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -275,7 +275,7 @@ static const struct net_device_ops usbpn_ops = {
static void usbpn_setup(struct net_device *dev)
{
dev->features = 0;
- dev->netdev_ops = &usbpn_ops,
+ dev->netdev_ops = &usbpn_ops;
dev->header_ops = &phonet_header_ops;
dev->type = ARPHRD_PHONET;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 259d5cbacf2c..571bd03ebd81 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1237,6 +1237,61 @@ static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
return skb;
}
+static int vrf_prepare_mac_header(struct sk_buff *skb,
+ struct net_device *vrf_dev, u16 proto)
+{
+ struct ethhdr *eth;
+ int err;
+
+ /* in general, we do not know if there is enough space in the head of
+ * the packet for hosting the mac header.
+ */
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(vrf_dev));
+ if (unlikely(err))
+ /* no space in the skb head */
+ return -ENOBUFS;
+
+ __skb_push(skb, ETH_HLEN);
+ eth = (struct ethhdr *)skb->data;
+
+ skb_reset_mac_header(skb);
+
+ /* we set the ethernet destination and the source addresses to the
+ * address of the VRF device.
+ */
+ ether_addr_copy(eth->h_dest, vrf_dev->dev_addr);
+ ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
+ eth->h_proto = htons(proto);
+
+ /* the destination address of the Ethernet frame corresponds to the
+ * address set on the VRF interface; therefore, the packet is intended
+ * to be processed locally.
+ */
+ skb->protocol = eth->h_proto;
+ skb->pkt_type = PACKET_HOST;
+
+ skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
+
+ skb_pull_inline(skb, ETH_HLEN);
+
+ return 0;
+}
+
+/* prepare and add the mac header to the packet if it was not set previously.
+ * In this way, packet sniffers such as tcpdump can parse the packet correctly.
+ * If the mac header was already set, the original mac header is left
+ * untouched and the function returns immediately.
+ */
+static int vrf_add_mac_header_if_unset(struct sk_buff *skb,
+ struct net_device *vrf_dev,
+ u16 proto)
+{
+ if (skb_mac_header_was_set(skb))
+ return 0;
+
+ return vrf_prepare_mac_header(skb, vrf_dev, proto);
+}
+
#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
* to flip skb->dev for those ndisc packets. This really fails
@@ -1310,61 +1365,6 @@ static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
skb_dst_set(skb, &rt6->dst);
}
-static int vrf_prepare_mac_header(struct sk_buff *skb,
- struct net_device *vrf_dev, u16 proto)
-{
- struct ethhdr *eth;
- int err;
-
- /* in general, we do not know if there is enough space in the head of
- * the packet for hosting the mac header.
- */
- err = skb_cow_head(skb, LL_RESERVED_SPACE(vrf_dev));
- if (unlikely(err))
- /* no space in the skb head */
- return -ENOBUFS;
-
- __skb_push(skb, ETH_HLEN);
- eth = (struct ethhdr *)skb->data;
-
- skb_reset_mac_header(skb);
-
- /* we set the ethernet destination and the source addresses to the
- * address of the VRF device.
- */
- ether_addr_copy(eth->h_dest, vrf_dev->dev_addr);
- ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
- eth->h_proto = htons(proto);
-
- /* the destination address of the Ethernet frame corresponds to the
- * address set on the VRF interface; therefore, the packet is intended
- * to be processed locally.
- */
- skb->protocol = eth->h_proto;
- skb->pkt_type = PACKET_HOST;
-
- skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
-
- skb_pull_inline(skb, ETH_HLEN);
-
- return 0;
-}
-
-/* prepare and add the mac header to the packet if it was not set previously.
- * In this way, packet sniffers such as tcpdump can parse the packet correctly.
- * If the mac header was already set, the original mac header is left
- * untouched and the function returns immediately.
- */
-static int vrf_add_mac_header_if_unset(struct sk_buff *skb,
- struct net_device *vrf_dev,
- u16 proto)
-{
- if (skb_mac_header_was_set(skb))
- return 0;
-
- return vrf_prepare_mac_header(skb, vrf_dev, proto);
-}
-
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
struct sk_buff *skb)
{
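
The vrf.c hunks above only move vrf_prepare_mac_header() and vrf_add_mac_header_if_unset() out of the IS_ENABLED(CONFIG_IPV6) region, making them available to the IPv4 receive path and to IPV6=n builds. A hypothetical IPv4-side caller (not quoted from this patch) would follow the same shape:

static struct sk_buff *example_ip_rcv(struct net_device *vrf_dev,
				      struct sk_buff *skb)
{
	skb->dev = vrf_dev;

	/* give sniffers a parsable L2 header; on -ENOBUFS this sketch
	 * simply continues without one
	 */
	vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP);

	return skb;
}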
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index e1bdde105f24..42f1f610ac2c 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -179,6 +179,8 @@ static int s3fwrn5_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct s3fwrn5_i2c_phy *phy;
+ struct irq_data *irq_data;
+ unsigned long irqflags;
int ret;
phy = devm_kzalloc(&client->dev, sizeof(*phy), GFP_KERNEL);
@@ -212,8 +214,11 @@ static int s3fwrn5_i2c_probe(struct i2c_client *client,
if (ret < 0)
return ret;
+ irq_data = irq_get_irq_data(client->irq);
+ irqflags = irqd_get_trigger_type(irq_data) | IRQF_ONESHOT;
+
ret = devm_request_threaded_irq(&client->dev, phy->i2c_dev->irq, NULL,
- s3fwrn5_i2c_irq_thread_fn, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ s3fwrn5_i2c_irq_thread_fn, irqflags,
S3FWRN5_I2C_DRIVER_NAME, phy);
if (ret)
s3fwrn5_remove(phy->common.ndev);
diff --git a/drivers/ptp/idt8a340_reg.h b/drivers/ptp/idt8a340_reg.h
index b297c4aba2ba..a664dfe5fd2f 100644
--- a/drivers/ptp/idt8a340_reg.h
+++ b/drivers/ptp/idt8a340_reg.h
@@ -103,6 +103,7 @@
#define SM_RESET_CMD 0x5A
#define GENERAL_STATUS 0xc014
+#define BOOT_STATUS 0x0000
#define HW_REV_ID 0x000A
#define BOND_ID 0x000B
#define HW_CSR_ID 0x000C
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index 663255774c0b..051511f5e1f2 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -33,14 +33,41 @@ module_param(firmware, charp, 0);
#define SETTIME_CORRECTION (0)
-static long set_write_phase_ready(struct ptp_clock_info *ptp)
+static int contains_full_configuration(const struct firmware *fw)
{
- struct idtcm_channel *channel =
- container_of(ptp, struct idtcm_channel, caps);
+ s32 full_count = FULL_FW_CFG_BYTES - FULL_FW_CFG_SKIPPED_BYTES;
+ struct idtcm_fwrc *rec = (struct idtcm_fwrc *)fw->data;
+ s32 count = 0;
+ u16 regaddr;
+ u8 loaddr;
+ s32 len;
+
+	/* If the firmware contains 'full configuration', SM_RESET can be used
+ * to ensure proper configuration.
+ *
+ * Full configuration is defined as the number of programmable
+ * bytes within the configuration range minus page offset addr range.
+ */
+ for (len = fw->size; len > 0; len -= sizeof(*rec)) {
+ regaddr = rec->hiaddr << 8;
+ regaddr |= rec->loaddr;
- channel->write_phase_ready = 1;
+ loaddr = rec->loaddr;
- return 0;
+ rec++;
+
+ /* Top (status registers) and bottom are read-only */
+ if (regaddr < GPIO_USER_CONTROL || regaddr >= SCRATCH)
+ continue;
+
+ /* Page size 128, last 4 bytes of page skipped */
+ if ((loaddr > 0x7b && loaddr <= 0x7f) || loaddr > 0xfb)
+ continue;
+
+ count++;
+ }
+
+ return (count >= full_count);
}
static int char_array_to_timespec(u8 *buf,
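
contains_full_configuration() walks fw->data as an array of fixed-size records. From the 8-bit hiaddr/loaddr arithmetic above, the implied layout is four bytes per record, roughly as follows (field widths inferred from this diff, not quoted from the driver header):

struct idtcm_fwrc_sketch {
	u8 hiaddr;	/* high byte of the target register address */
	u8 loaddr;	/* low byte; doubles as the offset within a 128-byte page */
	u8 value;	/* byte to program */
	u8 reserved;
} __packed;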
@@ -261,6 +288,53 @@ static int idtcm_write(struct idtcm *idtcm,
return _idtcm_rdwr(idtcm, module + regaddr, buf, count, true);
}
+static int clear_boot_status(struct idtcm *idtcm)
+{
+ int err;
+ u8 buf[4] = {0};
+
+ err = idtcm_write(idtcm, GENERAL_STATUS, BOOT_STATUS, buf, sizeof(buf));
+
+ return err;
+}
+
+static int read_boot_status(struct idtcm *idtcm, u32 *status)
+{
+ int err;
+ u8 buf[4] = {0};
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, BOOT_STATUS, buf, sizeof(buf));
+
+ *status = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int wait_for_boot_status_ready(struct idtcm *idtcm)
+{
+ u32 status = 0;
+ u8 i = 30; /* 30 * 100ms = 3s */
+ int err;
+
+ do {
+ err = read_boot_status(idtcm, &status);
+
+ if (err)
+ return err;
+
+ if (status == 0xA0)
+ return 0;
+
+ msleep(100);
+ i--;
+
+ } while (i);
+
+ dev_warn(&idtcm->client->dev, "%s timed out\n", __func__);
+
+ return -EBUSY;
+}
+
static int _idtcm_gettime(struct idtcm_channel *channel,
struct timespec64 *ts)
{
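
wait_for_boot_status_ready() above open-codes a 30 x 100 ms poll. The same loop could be phrased with readx_poll_timeout() from <linux/iopoll.h>; a sketch under that assumption (note the wrapper drops read_boot_status()'s error code, which the hand-rolled loop preserves):

static u32 boot_status_of(struct idtcm *idtcm)
{
	u32 status = 0;

	read_boot_status(idtcm, &status);	/* read errors ignored here */
	return status;
}

static int wait_for_boot_ready_alt(struct idtcm *idtcm)
{
	u32 status;

	/* sleep ~10 ms between reads, give up after 3 s */
	return readx_poll_timeout(boot_status_of, idtcm, status,
				  status == 0xA0, 10000, 3000000);
}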
@@ -599,8 +673,9 @@ static int _idtcm_set_dpll_hw_tod(struct idtcm_channel *channel,
if (idtcm->calculate_overhead_flag) {
/* Assumption: I2C @ 400KHz */
- total_overhead_ns = ktime_to_ns(ktime_get_raw()
- - idtcm->start_time)
+ ktime_t diff = ktime_sub(ktime_get_raw(),
+ idtcm->start_time);
+ total_overhead_ns = ktime_to_ns(diff)
+ idtcm->tod_write_overhead_ns
+ SETTIME_CORRECTION;
@@ -670,7 +745,7 @@ static int _idtcm_set_dpll_scsr_tod(struct idtcm_channel *channel,
if (err)
return err;
- if (cmd == 0)
+ if ((cmd & TOD_WRITE_SELECTION_MASK) == 0)
break;
if (++count > 20) {
@@ -683,49 +758,74 @@ static int _idtcm_set_dpll_scsr_tod(struct idtcm_channel *channel,
return 0;
}
-static int _idtcm_settime(struct idtcm_channel *channel,
- struct timespec64 const *ts,
- enum hw_tod_write_trig_sel wr_trig)
+static int get_output_base_addr(u8 outn)
{
- struct idtcm *idtcm = channel->idtcm;
- int err;
- int i;
- u8 trig_sel;
+ int base;
- err = _idtcm_set_dpll_hw_tod(channel, ts, wr_trig);
-
- if (err)
- return err;
-
- /* Wait for the operation to complete. */
- for (i = 0; i < 10000; i++) {
- err = idtcm_read(idtcm, channel->hw_dpll_n,
- HW_DPLL_TOD_CTRL_1, &trig_sel,
- sizeof(trig_sel));
+ switch (outn) {
+ case 0:
+ base = OUTPUT_0;
+ break;
+ case 1:
+ base = OUTPUT_1;
+ break;
+ case 2:
+ base = OUTPUT_2;
+ break;
+ case 3:
+ base = OUTPUT_3;
+ break;
+ case 4:
+ base = OUTPUT_4;
+ break;
+ case 5:
+ base = OUTPUT_5;
+ break;
+ case 6:
+ base = OUTPUT_6;
+ break;
+ case 7:
+ base = OUTPUT_7;
+ break;
+ case 8:
+ base = OUTPUT_8;
+ break;
+ case 9:
+ base = OUTPUT_9;
+ break;
+ case 10:
+ base = OUTPUT_10;
+ break;
+ case 11:
+ base = OUTPUT_11;
+ break;
+ default:
+ base = -EINVAL;
+ }
- if (err)
- return err;
+ return base;
+}
- if (trig_sel == 0x4a)
- break;
+static int _idtcm_settime_deprecated(struct idtcm_channel *channel,
+ struct timespec64 const *ts)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
- err = 1;
- }
+ err = _idtcm_set_dpll_hw_tod(channel, ts, HW_TOD_WR_TRIG_SEL_MSB);
if (err) {
dev_err(&idtcm->client->dev,
- "Failed at line %d in func %s!\n",
- __LINE__,
- __func__);
+ "%s: Set HW ToD failed\n", __func__);
return err;
}
return idtcm_sync_pps_output(channel);
}
-static int _idtcm_settime_v487(struct idtcm_channel *channel,
- struct timespec64 const *ts,
- enum scsr_tod_write_type_sel wr_type)
+static int _idtcm_settime(struct idtcm_channel *channel,
+ struct timespec64 const *ts,
+ enum scsr_tod_write_type_sel wr_type)
{
return _idtcm_set_dpll_scsr_tod(channel, ts,
SCSR_TOD_WR_TRIG_SEL_IMMEDIATE,
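
get_output_base_addr() above replaces the old OUTPUT_MODULE_FROM_INDEX() arithmetic, presumably because the OUTPUT_n register bases are not uniformly strided. The twelve-way switch could equally be a lookup table; a sketch using the same OUTPUT_* constants:

static const u16 output_bases[] = {
	OUTPUT_0, OUTPUT_1, OUTPUT_2,  OUTPUT_3,
	OUTPUT_4, OUTPUT_5, OUTPUT_6,  OUTPUT_7,
	OUTPUT_8, OUTPUT_9, OUTPUT_10, OUTPUT_11,
};

static int get_output_base_addr_alt(u8 outn)
{
	if (outn >= ARRAY_SIZE(output_bases))
		return -EINVAL;

	return output_bases[outn];
}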
@@ -830,6 +930,7 @@ static int set_tod_write_overhead(struct idtcm_channel *channel)
ktime_t start;
ktime_t stop;
+ ktime_t diff;
char buf[TOD_BYTE_COUNT] = {0};
@@ -849,7 +950,9 @@ static int set_tod_write_overhead(struct idtcm_channel *channel)
stop = ktime_get_raw();
- current_ns = ktime_to_ns(stop - start);
+ diff = ktime_sub(stop, start);
+
+ current_ns = ktime_to_ns(diff);
if (i == 0) {
lowest_ns = current_ns;
@@ -864,14 +967,14 @@ static int set_tod_write_overhead(struct idtcm_channel *channel)
return err;
}
-static int _idtcm_adjtime(struct idtcm_channel *channel, s64 delta)
+static int _idtcm_adjtime_deprecated(struct idtcm_channel *channel, s64 delta)
{
int err;
struct idtcm *idtcm = channel->idtcm;
struct timespec64 ts;
s64 now;
- if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) {
+ if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS_DEPRECATED) {
err = idtcm_do_phase_pull_in(channel, delta, 0);
} else {
idtcm->calculate_overhead_flag = 1;
@@ -891,7 +994,7 @@ static int _idtcm_adjtime(struct idtcm_channel *channel, s64 delta)
ts = ns_to_timespec64(now);
- err = _idtcm_settime(channel, &ts, HW_TOD_WR_TRIG_SEL_MSB);
+ err = _idtcm_settime_deprecated(channel, &ts);
}
return err;
@@ -899,13 +1002,31 @@ static int _idtcm_adjtime(struct idtcm_channel *channel, s64 delta)
static int idtcm_state_machine_reset(struct idtcm *idtcm)
{
- int err;
u8 byte = SM_RESET_CMD;
+ u32 status = 0;
+ int err;
+ u8 i;
+
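+ /* Best effort: clear stale status; the reset proceeds even if this fails. */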
+ clear_boot_status(idtcm);
err = idtcm_write(idtcm, RESET_CTRL, SM_RESET, &byte, sizeof(byte));
- if (!err)
- msleep_interruptible(POST_SM_RESET_DELAY_MS);
+ if (!err) {
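+ /* Poll BOOT_STATUS instead of sleeping a fixed 3 s; exit early once ready. */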
+ for (i = 0; i < 30; i++) {
+ msleep_interruptible(100);
+ read_boot_status(idtcm, &status);
+
+ if (status == 0xA0) {
+ dev_dbg(&idtcm->client->dev,
+ "SM_RESET completed in %d ms\n",
+ i * 100);
+ break;
+ }
+ }
+
+ if (status != 0xA0)
+ dev_err(&idtcm->client->dev,
+ "Timed out waiting for SM_RESET to complete\n");
+ }
return err;
}
@@ -1099,7 +1220,7 @@ static int idtcm_load_firmware(struct idtcm *idtcm,
rec = (struct idtcm_fwrc *) fw->data;
- if (fw->size > 0)
+ if (contains_full_configuration(fw))
idtcm_state_machine_reset(idtcm);
for (len = fw->size; len > 0; len -= sizeof(*rec)) {
@@ -1151,11 +1272,19 @@ static int idtcm_output_enable(struct idtcm_channel *channel,
bool enable, unsigned int outn)
{
struct idtcm *idtcm = channel->idtcm;
+ int base;
int err;
u8 val;
- err = idtcm_read(idtcm, OUTPUT_MODULE_FROM_INDEX(outn),
- OUT_CTRL_1, &val, sizeof(val));
+ base = get_output_base_addr(outn);
+
+ if (base < 0) {
+ dev_err(&idtcm->client->dev,
+ "%s - Unsupported out%d\n", __func__, outn);
+ return base;
+ }
+
+ err = idtcm_read(idtcm, (u16)base, OUT_CTRL_1, &val, sizeof(val));
if (err)
return err;
@@ -1165,8 +1294,7 @@ static int idtcm_output_enable(struct idtcm_channel *channel,
else
val &= ~SQUELCH_DISABLE;
- return idtcm_write(idtcm, OUTPUT_MODULE_FROM_INDEX(outn),
- OUT_CTRL_1, &val, sizeof(val));
+ return idtcm_write(idtcm, (u16)base, OUT_CTRL_1, &val, sizeof(val));
}
static int idtcm_output_mask_enable(struct idtcm_channel *channel,
@@ -1209,6 +1337,23 @@ static int idtcm_perout_enable(struct idtcm_channel *channel,
return idtcm_output_enable(channel, enable, perout->index);
}
+static int idtcm_get_pll_mode(struct idtcm_channel *channel,
+ enum pll_mode *pll_mode)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+ u8 dpll_mode;
+
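+ /* Read the DPLL's operating mode so callers can sync cached state with hardware. */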
+ err = idtcm_read(idtcm, channel->dpll_n, DPLL_MODE,
+ &dpll_mode, sizeof(dpll_mode));
+ if (err)
+ return err;
+
+ *pll_mode = (dpll_mode >> PLL_MODE_SHIFT) & PLL_MODE_MASK;
+
+ return 0;
+}
+
static int idtcm_set_pll_mode(struct idtcm_channel *channel,
enum pll_mode pll_mode)
{
@@ -1260,16 +1405,8 @@ static int _idtcm_adjphase(struct idtcm_channel *channel, s32 delta_ns)
if (err)
return err;
-
- channel->write_phase_ready = 0;
-
- ptp_schedule_worker(channel->ptp_clock,
- msecs_to_jiffies(WR_PHASE_SETUP_MS));
}
- if (!channel->write_phase_ready)
- delta_ns = 0;
-
offset_ps = (s64)delta_ns * 1000;
/*
@@ -1282,7 +1419,7 @@ static int _idtcm_adjphase(struct idtcm_channel *channel, s32 delta_ns)
else if (offset_ps < -MAX_ABS_WRITE_PHASE_PICOSECONDS)
offset_ps = -MAX_ABS_WRITE_PHASE_PICOSECONDS;
- phase_50ps = DIV_ROUND_CLOSEST(div64_s64(offset_ps, 50), 1);
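+ /* The write-phase register takes the offset in 50 ps units. */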
+ phase_50ps = div_s64(offset_ps, 50);
for (i = 0; i < 4; i++) {
buf[i] = phase_50ps & 0xff;
@@ -1299,7 +1436,6 @@ static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm)
{
struct idtcm *idtcm = channel->idtcm;
u8 i;
- bool neg_adj = 0;
int err;
u8 buf[6] = {0};
s64 fcw;
@@ -1323,18 +1459,11 @@ static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm)
* FCW = -------------
* 111 * 2^4
*/
- if (scaled_ppm < 0) {
- neg_adj = 1;
- scaled_ppm = -scaled_ppm;
- }
/* 2 ^ -53 = 1.1102230246251565404236316680908e-16 */
fcw = scaled_ppm * 244140625ULL;
- fcw = div_u64(fcw, 1776);
-
- if (neg_adj)
- fcw = -fcw;
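+ /* Signed division preserves the sign of scaled_ppm; no separate neg_adj bookkeeping needed. */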
+ fcw = div_s64(fcw, 1776);
for (i = 0; i < 6; i++) {
buf[i] = fcw & 0xff;
@@ -1369,8 +1498,8 @@ static int idtcm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
return err;
}
-static int idtcm_settime(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
+static int idtcm_settime_deprecated(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
{
struct idtcm_channel *channel =
container_of(ptp, struct idtcm_channel, caps);
@@ -1379,7 +1508,7 @@ static int idtcm_settime(struct ptp_clock_info *ptp,
mutex_lock(&idtcm->reg_lock);
- err = _idtcm_settime(channel, ts, HW_TOD_WR_TRIG_SEL_MSB);
+ err = _idtcm_settime_deprecated(channel, ts);
if (err)
dev_err(&idtcm->client->dev,
@@ -1392,7 +1521,7 @@ static int idtcm_settime(struct ptp_clock_info *ptp,
return err;
}
-static int idtcm_settime_v487(struct ptp_clock_info *ptp,
+static int idtcm_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct idtcm_channel *channel =
@@ -1402,7 +1531,7 @@ static int idtcm_settime_v487(struct ptp_clock_info *ptp,
mutex_lock(&idtcm->reg_lock);
- err = _idtcm_settime_v487(channel, ts, SCSR_TOD_WR_TYPE_SEL_ABSOLUTE);
+ err = _idtcm_settime(channel, ts, SCSR_TOD_WR_TYPE_SEL_ABSOLUTE);
if (err)
dev_err(&idtcm->client->dev,
@@ -1415,7 +1544,7 @@ static int idtcm_settime_v487(struct ptp_clock_info *ptp,
return err;
}
-static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
+static int idtcm_adjtime_deprecated(struct ptp_clock_info *ptp, s64 delta)
{
struct idtcm_channel *channel =
container_of(ptp, struct idtcm_channel, caps);
@@ -1424,7 +1553,7 @@ static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
mutex_lock(&idtcm->reg_lock);
- err = _idtcm_adjtime(channel, delta);
+ err = _idtcm_adjtime_deprecated(channel, delta);
if (err)
dev_err(&idtcm->client->dev,
@@ -1437,7 +1566,7 @@ static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
return err;
}
-static int idtcm_adjtime_v487(struct ptp_clock_info *ptp, s64 delta)
+static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct idtcm_channel *channel =
container_of(ptp, struct idtcm_channel, caps);
@@ -1446,7 +1575,7 @@ static int idtcm_adjtime_v487(struct ptp_clock_info *ptp, s64 delta)
enum scsr_tod_write_type_sel type;
int err;
- if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS_V487) {
+ if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) {
err = idtcm_do_phase_pull_in(channel, delta, 0);
if (err)
dev_err(&idtcm->client->dev,
@@ -1466,7 +1595,7 @@ static int idtcm_adjtime_v487(struct ptp_clock_info *ptp, s64 delta)
mutex_lock(&idtcm->reg_lock);
- err = _idtcm_settime_v487(channel, &ts, type);
+ err = _idtcm_settime(channel, &ts, type);
if (err)
dev_err(&idtcm->client->dev,
@@ -1810,10 +1939,14 @@ static int idtcm_enable_tod(struct idtcm_channel *channel)
if (err)
return err;
- return _idtcm_settime(channel, &ts, HW_TOD_WR_TRIG_SEL_MSB);
+ if (idtcm->deprecated)
+ return _idtcm_settime_deprecated(channel, &ts);
+ else
+ return _idtcm_settime(channel, &ts,
+ SCSR_TOD_WR_TYPE_SEL_ABSOLUTE);
}
-static void idtcm_display_version_info(struct idtcm *idtcm)
+static void idtcm_set_version_info(struct idtcm *idtcm)
{
u8 major;
u8 minor;
@@ -1835,34 +1968,37 @@ static void idtcm_display_version_info(struct idtcm *idtcm)
snprintf(idtcm->version, sizeof(idtcm->version), "%u.%u.%u",
major, minor, hotfix);
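+ /*
+  * Firmware 4.8.7 and later uses the SCSR ToD interface; older
+  * firmware keeps the deprecated HW ToD write path.
+  */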
+ if (idtcm_strverscmp(idtcm->version, "4.8.7") >= 0)
+ idtcm->deprecated = 0;
+ else
+ idtcm->deprecated = 1;
+
dev_info(&idtcm->client->dev, fmt, major, minor, hotfix,
product_id, hw_rev_id, config_select);
}
-static const struct ptp_clock_info idtcm_caps_v487 = {
+static const struct ptp_clock_info idtcm_caps = {
.owner = THIS_MODULE,
.max_adj = 244000,
.n_per_out = 12,
.adjphase = &idtcm_adjphase,
.adjfine = &idtcm_adjfine,
- .adjtime = &idtcm_adjtime_v487,
+ .adjtime = &idtcm_adjtime,
.gettime64 = &idtcm_gettime,
- .settime64 = &idtcm_settime_v487,
+ .settime64 = &idtcm_settime,
.enable = &idtcm_enable,
- .do_aux_work = &set_write_phase_ready,
};
-static const struct ptp_clock_info idtcm_caps = {
+static const struct ptp_clock_info idtcm_caps_deprecated = {
.owner = THIS_MODULE,
.max_adj = 244000,
.n_per_out = 12,
.adjphase = &idtcm_adjphase,
.adjfine = &idtcm_adjfine,
- .adjtime = &idtcm_adjtime,
+ .adjtime = &idtcm_adjtime_deprecated,
.gettime64 = &idtcm_gettime,
- .settime64 = &idtcm_settime,
+ .settime64 = &idtcm_settime_deprecated,
.enable = &idtcm_enable,
- .do_aux_work = &set_write_phase_ready,
};
static int configure_channel_pll(struct idtcm_channel *channel)
@@ -1984,15 +2120,15 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
channel->idtcm = idtcm;
- if (idtcm_strverscmp(idtcm->version, "4.8.7") >= 0)
- channel->caps = idtcm_caps_v487;
+ if (idtcm->deprecated)
+ channel->caps = idtcm_caps_deprecated;
else
channel->caps = idtcm_caps;
snprintf(channel->caps.name, sizeof(channel->caps.name),
"IDT CM TOD%u", index);
- if (idtcm_strverscmp(idtcm->version, "4.8.7") >= 0) {
+ if (!idtcm->deprecated) {
err = idtcm_enable_tod_sync(channel);
if (err) {
dev_err(&idtcm->client->dev,
@@ -2003,12 +2139,11 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
}
}
- err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
+ /* Sync pll mode with hardware */
+ err = idtcm_get_pll_mode(channel, &channel->pll_mode);
if (err) {
dev_err(&idtcm->client->dev,
- "Failed at line %d in func %s!\n",
- __LINE__,
- __func__);
+ "Error: %s - Unable to read pll mode\n", __func__);
return err;
}
@@ -2032,8 +2167,6 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
if (!channel->ptp_clock)
return -ENOTSUPP;
- channel->write_phase_ready = 0;
-
dev_info(&idtcm->client->dev, "PLL%d registered as ptp%d\n",
index, channel->ptp_clock->index);
@@ -2094,7 +2227,7 @@ static int idtcm_probe(struct i2c_client *client,
mutex_init(&idtcm->reg_lock);
mutex_lock(&idtcm->reg_lock);
- idtcm_display_version_info(idtcm);
+ idtcm_set_version_info(idtcm);
err = idtcm_load_firmware(idtcm, &client->dev);
@@ -2102,6 +2235,9 @@ static int idtcm_probe(struct i2c_client *client,
dev_warn(&idtcm->client->dev,
"loading firmware failed with %d\n", err);
+ if (wait_for_boot_status_ready(idtcm))
+ dev_warn(&idtcm->client->dev, "BOOT_STATUS != 0xA0\n");
+
if (idtcm->tod_mask) {
for (i = 0; i < MAX_TOD; i++) {
if (idtcm->tod_mask & (1 << i)) {
diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h
index 82840d72364a..645de2c66b64 100644
--- a/drivers/ptp/ptp_clockmatrix.h
+++ b/drivers/ptp/ptp_clockmatrix.h
@@ -15,6 +15,7 @@
#define FW_FILENAME "idtcm.bin"
#define MAX_TOD (4)
#define MAX_PLL (8)
+#define MAX_OUTPUT (12)
#define MAX_ABS_WRITE_PHASE_PICOSECONDS (107374182350LL)
@@ -44,18 +45,20 @@
#define DEFAULT_TOD2_PTP_PLL (2)
#define DEFAULT_TOD3_PTP_PLL (3)
-#define POST_SM_RESET_DELAY_MS (3000)
-#define PHASE_PULL_IN_THRESHOLD_NS (150000)
-#define PHASE_PULL_IN_THRESHOLD_NS_V487 (15000)
-#define TOD_WRITE_OVERHEAD_COUNT_MAX (2)
-#define TOD_BYTE_COUNT (11)
-#define WR_PHASE_SETUP_MS (5000)
+#define POST_SM_RESET_DELAY_MS (3000)
+#define PHASE_PULL_IN_THRESHOLD_NS_DEPRECATED (150000)
+#define PHASE_PULL_IN_THRESHOLD_NS (15000)
+#define TOD_WRITE_OVERHEAD_COUNT_MAX (2)
+#define TOD_BYTE_COUNT (11)
-#define OUTPUT_MODULE_FROM_INDEX(index) (OUTPUT_0 + (index) * 0x10)
+#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef)
-#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef)
+#define IDTCM_MAX_WRITE_COUNT (512)
-#define IDTCM_MAX_WRITE_COUNT (512)
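+/* A full configuration image covers GPIO_USER_CONTROL through SCRATCH. */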
+#define FULL_FW_CFG_BYTES (SCRATCH - GPIO_USER_CONTROL)
+#define FULL_FW_CFG_SKIPPED_BYTES (((SCRATCH >> 7) \
+ - (GPIO_USER_CONTROL >> 7)) \
+ * 4) /* 4 bytes skipped every 0x80 */
/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
enum pll_mode {
@@ -120,7 +123,7 @@ struct idtcm_channel {
enum pll_mode pll_mode;
u8 pll;
u16 output_mask;
- int write_phase_ready;
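+ /* Per-output 4-byte phase-adjustment value. */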
+ u8 output_phase_adj[MAX_OUTPUT][4];
};
struct idtcm {
@@ -129,6 +132,7 @@ struct idtcm {
u8 page_offset;
u8 tod_mask;
char version[16];
+ u8 deprecated;
/* Overhead calculation for adjtime */
u8 calculate_overhead_flag;
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 2c842851d72e..fef3573fdc8b 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -69,7 +69,7 @@ bool __rxrpc_set_call_completion(struct rxrpc_call *call,
if (call->state < RXRPC_CALL_COMPLETE) {
call->abort_code = abort_code;
call->error = error;
- call->completion = compl,
+ call->completion = compl;
call->state = RXRPC_CALL_COMPLETE;
trace_rxrpc_call_complete(call);
wake_up(&call->waitq);
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 8a4542387bbd..1e8ab4749c6c 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -813,7 +813,7 @@ config NET_ACT_SAMPLE
config NET_ACT_IPT
tristate "IPtables targets"
- depends on NET_CLS_ACT && NETFILTER && IP_NF_IPTABLES
+ depends on NET_CLS_ACT && NETFILTER && NETFILTER_XTABLES
help
Say Y here to be able to invoke iptables targets after successful
classification.
@@ -912,7 +912,7 @@ config NET_ACT_BPF
config NET_ACT_CONNMARK
tristate "Netfilter Connection Mark Retriever"
- depends on NET_CLS_ACT && NETFILTER && IP_NF_IPTABLES
+ depends on NET_CLS_ACT && NETFILTER
depends on NF_CONNTRACK && NF_CONNTRACK_MARK
help
Say Y here to allow retrieving of conn mark
@@ -924,7 +924,7 @@ config NET_ACT_CONNMARK
config NET_ACT_CTINFO
tristate "Netfilter Connection Mark Actions"
- depends on NET_CLS_ACT && NETFILTER && IP_NF_IPTABLES
+ depends on NET_CLS_ACT && NETFILTER
depends on NF_CONNTRACK && NF_CONNTRACK_MARK
help
Say Y here to allow transfer of connmark-stored information.