Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

No conflicts!

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
diff --git a/Documentation/ABI/testing/sysfs-timecard b/Documentation/ABI/testing/sysfs-timecard
new file mode 100644
index 0000000..97f6773
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-timecard
@@ -0,0 +1,174 @@
+What:		/sys/class/timecard/
+Date:		September 2021
+Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:	This directory contains files and directories
+		providing a standardized interface to the ancillary
+		features of the OpenCompute timecard.
+
+What:		/sys/class/timecard/ocpN/
+Date:		September 2021
+Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:	This directory contains the attributes of the Nth timecard
+		registered.
+
+What:		/sys/class/timecard/ocpN/available_clock_sources
+Date:		September 2021
+Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:	(RO) The list of available time sources that the PHC
+		uses for clock adjustments.
+
+		====  =================================================
+                NONE  no adjustments
+                PPS   adjustments come from the PPS1 selector (default)
+                TOD   adjustments from the GNSS/TOD module
+                IRIG  adjustments from external IRIG-B signal
+                DCF   adjustments from external DCF signal
+                ====  =================================================
+
+What:		/sys/class/timecard/ocpN/available_sma_inputs
+Date:		September 2021
+Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:	(RO) Set of available destinations (sinks) for a SMA
+		input signal.
+
+                =====  ================================================
+                10Mhz  signal is used as the 10Mhz reference clock
+                PPS1   signal is sent to the PPS1 selector
+                PPS2   signal is sent to the PPS2 selector
+                TS1    signal is sent to timestamper 1
+                TS2    signal is sent to timestamper 2
+                IRIG   signal is sent to the IRIG-B module
+                DCF    signal is sent to the DCF module
+                =====  ================================================
+
+What:		/sys/class/timecard/ocpN/available_sma_outputs
+Date:		May 2021
+Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:	(RO) Set of available sources for a SMA output signal.
+
+                =====  ================================================
+                10Mhz  output is from the 10Mhz reference clock
+                PHC    output PPS is from the PHC clock
+                MAC    output PPS is from the Miniature Atomic Clock
+                GNSS   output PPS is from the GNSS module
+                GNSS2  output PPS is from the second GNSS module
+                IRIG   output is from the PHC, in IRIG-B format
+                DCF    output is from the PHC, in DCF format
+                =====  ================================================
+
+What:		/sys/class/timecard/ocpN/clock_source
+Date:		September 2021
+Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:	(RW) Contains the current synchronization source used by
+		the PHC.  May be changed by writing one of the listed
+		values from the available_clock_sources attribute set.
+
+What:		/sys/class/timecard/ocpN/gnss_sync
+Date:		September 2021
+Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:	(RO) Indicates whether a valid GNSS signal is received,
+		or when the signal was lost.
+
+What:		/sys/class/timecard/ocpN/i2c
+Date:		September 2021
+Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:	This optional attribute links to the associated i2c device.
+
+What:		/sys/class/timecard/ocpN/irig_b_mode
+Date:		September 2021
+Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:	(RW) An integer from 0-7 indicating the timecode format
+		of the IRIG-B output signal: B00<n>
+
+What:		/sys/class/timecard/ocpN/pps
+Date:		September 2021
+Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:	This optional attribute links to the associated PPS device.
+
+What:		/sys/class/timecard/ocpN/ptp
+Date:		September 2021
+Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:	This attribute links to the associated PTP device.
+
+What:		/sys/class/timecard/ocpN/serialnum
+Date:		September 2021
+Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:	(RO) Provides the serial number of the timecard.
+
+What:		/sys/class/timecard/ocpN/sma1
+What:		/sys/class/timecard/ocpN/sma2
+What:		/sys/class/timecard/ocpN/sma3
+What:		/sys/class/timecard/ocpN/sma4
+Date:		September 2021
+Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:	(RW) These attributes specify the direction of the signal
+		on the associated SMA connectors, and also the signal sink
+		or source.
+
+		The display format of the attribute is a space separated
+		list of signals, prefixed by the input/output direction.
+
+		The signal direction may be changed (if supported) by
+		prefixing the signal list with either "in:" or "out:".
+		If neither prefix is present, then the direction is unchanged.
+
+		The output signal may be changed by writing one of the listed
+		values from the available_sma_outputs attribute set.
+
+		The input destinations may be changed by writing multiple
+		values from the available_sma_inputs attribute set,
+		separated by spaces.  If there are duplicated input
+		destinations between connectors, the lowest numbered SMA
+		connector is given priority.
+
+		Note that not all input combinations may make sense.
+
+		The 10Mhz reference clock input is currently only valid
+		on SMA1 and may not be combined with other destination sinks.
+
+What:		/sys/class/timecard/ocpN/ts_window_adjust
+Date:		September 2021
+Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:	(RW) When retrieving the PHC with the PTP SYS_OFFSET_EXTENDED
+		ioctl, a system timestamp is made before and after the PHC
+		time is retrieved.  The midpoint between the two system
+		timestamps is usually taken to be the SYS time associated
+		with the PHC time.  This estimate may be wrong, as it depends
+		on PCI latencies and on when the PHC time was latched.
+
+		The attribute value reduces the end timestamp by the given
+		number of nanoseconds, so the computed midpoint matches the
+		retrieved PHC time.
+
+		The initial value is set based on measured PCI latency and
+		the estimated point where the FPGA latches the PHC time.  This
+		value may be changed by writing an unsigned integer.
+
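
An illustrative sketch of the computation described above (the function and
parameter names are invented; this is not driver code):

static long long phc_sys_time_ns(long long sys_before_ns, long long sys_after_ns,
                                 unsigned int ts_window_adjust_ns)
{
        /* Pull the post-read system timestamp back so the computed midpoint
         * lines up with the point where the FPGA latched the PHC time.
         */
        sys_after_ns -= ts_window_adjust_ns;
        return sys_before_ns + (sys_after_ns - sys_before_ns) / 2;
}
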
+What:		/sys/class/timecard/ocpN/ttyGNSS
+What:		/sys/class/timecard/ocpN/ttyGNSS2
+Date:		September 2021
+Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:	These optional attributes link to the TTY serial ports
+		associated with the GNSS devices.
+
+What:		/sys/class/timecard/ocpN/ttyMAC
+Date:		September 2021
+Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:	This optional attribute links to the TTY serial port
+		associated with the Miniature Atomic Clock.
+
+What:		/sys/class/timecard/ocpN/ttyNMEA
+Date:		September 2021
+Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:	This optional attribute links to the TTY serial port
+		which outputs the PHC time in NMEA ZDA format.
+
+What:		/sys/class/timecard/ocpN/utc_tai_offset
+Date:		September 2021
+Contact:	Jonathan Lemon <jonathan.lemon@gmail.com>
+Description:	(RW) The DCF and IRIG output signals are in UTC, while the
+		TimeCard operates on TAI.  This attribute allows setting the
+		offset in seconds, which is added to the TAI timebase for
+		these formats.
+
+		The offset may be changed by writing an unsigned integer.
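
For illustration only, a small user-space sketch of driving the attributes
above (the ocp0 instance name and the particular signal routing are assumed
examples, not mandated by the ABI):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a value to one attribute of the first registered timecard. */
static int timecard_write(const char *attr, const char *val)
{
        char path[128];
        ssize_t n;
        int fd;

        snprintf(path, sizeof(path), "/sys/class/timecard/ocp0/%s", attr);
        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        n = write(fd, val, strlen(val));
        close(fd);
        return n < 0 ? -1 : 0;
}

int main(void)
{
        timecard_write("sma1", "in: PPS1");     /* feed SMA1 into the PPS1 selector */
        timecard_write("sma3", "out: PHC");     /* PPS output from the PHC on SMA3 */
        return timecard_write("clock_source", "PPS");
}
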
diff --git a/Documentation/devicetree/bindings/net/lantiq,etop-xway.yaml b/Documentation/devicetree/bindings/net/lantiq,etop-xway.yaml
new file mode 100644
index 0000000..437502c
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/lantiq,etop-xway.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/lantiq,etop-xway.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Lantiq Xway ETOP Ethernet driver
+
+maintainers:
+  - John Crispin <john@phrozen.org>
+
+properties:
+  $nodename:
+    pattern: "^ethernet@[0-9a-f]+$"
+
+  compatible:
+    const: lantiq,etop-xway
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    items:
+      - description: TX interrupt
+      - description: RX interrupt
+
+  interrupt-names:
+    items:
+      - const: tx
+      - const: rx
+
+  lantiq,tx-burst-length:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: |
+      TX programmable burst length.
+    enum: [2, 4, 8]
+
+  lantiq,rx-burst-length:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: |
+      RX programmable burst length.
+    enum: [2, 4, 8]
+
+  phy-mode: true
+
+required:
+  - compatible
+  - reg
+  - interrupt-parent
+  - interrupts
+  - interrupt-names
+  - lantiq,tx-burst-length
+  - lantiq,rx-burst-length
+  - phy-mode
+
+additionalProperties: false
+
+examples:
+  - |
+    ethernet@e180000 {
+        compatible = "lantiq,etop-xway";
+        reg = <0xe180000 0x40000>;
+        interrupt-parent = <&icu0>;
+        interrupts = <73>, <78>;
+        interrupt-names = "tx", "rx";
+        lantiq,tx-burst-length = <8>;
+        lantiq,rx-burst-length = <8>;
+        phy-mode = "rmii";
+    };
diff --git a/Documentation/devicetree/bindings/net/lantiq,xrx200-net.txt b/Documentation/devicetree/bindings/net/lantiq,xrx200-net.txt
deleted file mode 100644
index 5ff5e68..0000000
--- a/Documentation/devicetree/bindings/net/lantiq,xrx200-net.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-Lantiq xRX200 GSWIP PMAC Ethernet driver
-==================================
-
-Required properties:
-
-- compatible	: "lantiq,xrx200-net" for the PMAC of the embedded
-		: GSWIP in the xXR200
-- reg		: memory range of the PMAC core inside of the GSWIP core
-- interrupts	: TX and RX DMA interrupts. Use interrupt-names "tx" for
-		: the TX interrupt and "rx" for the RX interrupt.
-
-Example:
-
-ethernet@e10b308 {
-	#address-cells = <1>;
-	#size-cells = <0>;
-	compatible = "lantiq,xrx200-net";
-	reg = <0xe10b308 0xcf8>;
-	interrupts = <73>, <72>;
-	interrupt-names = "tx", "rx";
-};
diff --git a/Documentation/devicetree/bindings/net/lantiq,xrx200-net.yaml b/Documentation/devicetree/bindings/net/lantiq,xrx200-net.yaml
new file mode 100644
index 0000000..16d831f
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/lantiq,xrx200-net.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/lantiq,xrx200-net.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Lantiq xRX200 GSWIP PMAC Ethernet driver
+
+maintainers:
+  - Hauke Mehrtens <hauke@hauke-m.de>
+
+properties:
+  $nodename:
+    pattern: "^ethernet@[0-9a-f]+$"
+
+  compatible:
+    const: lantiq,xrx200-net
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    items:
+      - description: TX interrupt
+      - description: RX interrupt
+
+  interrupt-names:
+    items:
+      - const: tx
+      - const: rx
+
+  lantiq,tx-burst-length:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: |
+      TX programmable burst length.
+    enum: [2, 4, 8]
+
+  lantiq,rx-burst-length:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: |
+      RX programmable burst length.
+    enum: [2, 4, 8]
+
+  '#address-cells':
+    const: 1
+
+  '#size-cells':
+    const: 0
+
+required:
+  - compatible
+  - reg
+  - interrupt-parent
+  - interrupts
+  - interrupt-names
+  - lantiq,tx-burst-length
+  - lantiq,rx-burst-length
+  - "#address-cells"
+  - "#size-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    ethernet@e10b308 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        compatible = "lantiq,xrx200-net";
+        reg = <0xe10b308 0xcf8>;
+        interrupt-parent = <&icu0>;
+        interrupts = <73>, <72>;
+        interrupt-names = "tx", "rx";
+        lantiq,tx-burst-length = <8>;
+        lantiq,rx-burst-length = <8>;
+    };
diff --git a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
index 8218a13..31ca915 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
@@ -45,6 +45,6 @@
 extern void ltq_dma_alloc_tx(struct ltq_dma_channel *ch);
 extern void ltq_dma_alloc_rx(struct ltq_dma_channel *ch);
 extern void ltq_dma_free(struct ltq_dma_channel *ch);
-extern void ltq_dma_init_port(int p);
+extern void ltq_dma_init_port(int p, int tx_burst, int rx_burst);
 
 #endif
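
A hedged sketch of how a driver might use the extended prototype (the function
name and the fallback defaults are illustrative; the property names are the
ones from the lantiq,etop-xway / lantiq,xrx200-net bindings added above, and
DMA_PORT_ETOP is the existing ETOP port selector used by the Lantiq Ethernet
drivers):

#include <linux/of.h>
#include <linux/platform_device.h>

#include <xway_dma.h>

static void example_dma_port_setup(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        u32 tx_burst = 8, rx_burst = 8; /* fallback values, assumed */

        /* The bindings list the burst lengths as required properties; the
         * fallbacks above only cover device trees that predate them.
         */
        of_property_read_u32(np, "lantiq,tx-burst-length", &tx_burst);
        of_property_read_u32(np, "lantiq,rx-burst-length", &rx_burst);

        ltq_dma_init_port(DMA_PORT_ETOP, tx_burst, rx_burst);
}
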
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index 63dccb2..f8eedeb 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -11,6 +11,7 @@
 #include <linux/export.h>
 #include <linux/spinlock.h>
 #include <linux/clk.h>
+#include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/of.h>
 
@@ -30,6 +31,7 @@
 #define LTQ_DMA_PCTRL		0x44
 #define LTQ_DMA_IRNEN		0xf4
 
+#define DMA_ID_CHNR		GENMASK(26, 20)	/* channel number */
 #define DMA_DESCPT		BIT(3)		/* descriptor complete irq */
 #define DMA_TX			BIT(8)		/* TX channel direction */
 #define DMA_CHAN_ON		BIT(0)		/* channel on / off bit */
@@ -39,8 +41,11 @@
 #define DMA_IRQ_ACK		0x7e		/* IRQ status register */
 #define DMA_POLL		BIT(31)		/* turn on channel polling */
 #define DMA_CLK_DIV4		BIT(6)		/* polling clock divider */
-#define DMA_2W_BURST		BIT(1)		/* 2 word burst length */
-#define DMA_MAX_CHANNEL		20		/* the soc has 20 channels */
+#define DMA_PCTRL_2W_BURST	0x1		/* 2 word burst length */
+#define DMA_PCTRL_4W_BURST	0x2		/* 4 word burst length */
+#define DMA_PCTRL_8W_BURST	0x3		/* 8 word burst length */
+#define DMA_TX_BURST_SHIFT	4		/* tx burst shift */
+#define DMA_RX_BURST_SHIFT	2		/* rx burst shift */
 #define DMA_ETOP_ENDIANNESS	(0xf << 8) /* endianness swap etop channels */
 #define DMA_WEIGHT	(BIT(17) | BIT(16))	/* default channel wheight */
 
@@ -177,7 +182,7 @@
 EXPORT_SYMBOL_GPL(ltq_dma_free);
 
 void
-ltq_dma_init_port(int p)
+ltq_dma_init_port(int p, int tx_burst, int rx_burst)
 {
 	ltq_dma_w32(p, LTQ_DMA_PS);
 	switch (p) {
@@ -186,15 +191,44 @@
 		 * Tell the DMA engine to swap the endianness of data frames and
 		 * drop packets if the channel arbitration fails.
 		 */
-		ltq_dma_w32_mask(0, DMA_ETOP_ENDIANNESS | DMA_PDEN,
+		ltq_dma_w32_mask(0, (DMA_ETOP_ENDIANNESS | DMA_PDEN),
 			LTQ_DMA_PCTRL);
 		break;
 
-	case DMA_PORT_DEU:
-		ltq_dma_w32((DMA_2W_BURST << 4) | (DMA_2W_BURST << 2),
+	default:
+		break;
+	}
+
+	switch (rx_burst) {
+	case 8:
+		ltq_dma_w32_mask(0x0c, (DMA_PCTRL_8W_BURST << DMA_RX_BURST_SHIFT),
 			LTQ_DMA_PCTRL);
 		break;
+	case 4:
+		ltq_dma_w32_mask(0x0c, (DMA_PCTRL_4W_BURST << DMA_RX_BURST_SHIFT),
+			LTQ_DMA_PCTRL);
+		break;
+	case 2:
+		ltq_dma_w32_mask(0x0c, (DMA_PCTRL_2W_BURST << DMA_RX_BURST_SHIFT),
+			LTQ_DMA_PCTRL);
+		break;
+	default:
+		break;
+	}
 
+	switch (tx_burst) {
+	case 8:
+		ltq_dma_w32_mask(0x30, (DMA_PCTRL_8W_BURST << DMA_TX_BURST_SHIFT),
+			LTQ_DMA_PCTRL);
+		break;
+	case 4:
+		ltq_dma_w32_mask(0x30, (DMA_PCTRL_4W_BURST << DMA_TX_BURST_SHIFT),
+			LTQ_DMA_PCTRL);
+		break;
+	case 2:
+		ltq_dma_w32_mask(0x30, (DMA_PCTRL_2W_BURST << DMA_TX_BURST_SHIFT),
+			LTQ_DMA_PCTRL);
+		break;
 	default:
 		break;
 	}
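
As an aside, the burst encoding used here (2/4/8 words map to field values
1/2/3, RX field in PCTRL bits 3:2 and TX field in bits 5:4, i.e. the 0x0c and
0x30 clear masks) could equivalently be expressed with the bitfield helpers.
This is only an illustrative restatement, not what the patch does:

#include <linux/bitfield.h>
#include <linux/log2.h>

#define DMA_PCTRL_RX_BURST      GENMASK(3, 2)   /* same bits as the 0x0c mask */
#define DMA_PCTRL_TX_BURST      GENMASK(5, 4)   /* same bits as the 0x30 mask */

static void example_set_burst_lengths(int tx_burst, int rx_burst)
{
        /* ilog2(2) = 1, ilog2(4) = 2, ilog2(8) = 3, matching the
         * DMA_PCTRL_{2,4,8}W_BURST values defined above.
         */
        ltq_dma_w32_mask(DMA_PCTRL_RX_BURST,
                         FIELD_PREP(DMA_PCTRL_RX_BURST, ilog2(rx_burst)),
                         LTQ_DMA_PCTRL);
        ltq_dma_w32_mask(DMA_PCTRL_TX_BURST,
                         FIELD_PREP(DMA_PCTRL_TX_BURST, ilog2(tx_burst)),
                         LTQ_DMA_PCTRL);
}
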
@@ -206,7 +240,7 @@
 {
 	struct clk *clk;
 	struct resource *res;
-	unsigned id;
+	unsigned int id, nchannels;
 	int i;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -222,21 +256,24 @@
 	clk_enable(clk);
 	ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL);
 
+	usleep_range(1, 10);
+
 	/* disable all interrupts */
 	ltq_dma_w32(0, LTQ_DMA_IRNEN);
 
 	/* reset/configure each channel */
-	for (i = 0; i < DMA_MAX_CHANNEL; i++) {
+	id = ltq_dma_r32(LTQ_DMA_ID);
+	nchannels = ((id & DMA_ID_CHNR) >> 20);
+	for (i = 0; i < nchannels; i++) {
 		ltq_dma_w32(i, LTQ_DMA_CS);
 		ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL);
 		ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL);
 		ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
 	}
 
-	id = ltq_dma_r32(LTQ_DMA_ID);
 	dev_info(&pdev->dev,
 		"Init done - hw rev: %X, ports: %d, channels: %d\n",
-		id & 0x1f, (id >> 16) & 0xf, id >> 20);
+		id & 0x1f, (id >> 16) & 0xf, nchannels);
 
 	return 0;
 }
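
Since DMA_ID_CHNR is now a GENMASK() definition, the open-coded
"(id & DMA_ID_CHNR) >> 20" above could equally be written with FIELD_GET();
shown only as an illustrative equivalence:

#include <linux/bitfield.h>

static unsigned int example_dma_channel_count(void)
{
        /* bits 26:20 of the ID register hold the number of channels */
        return FIELD_GET(DMA_ID_CHNR, ltq_dma_r32(LTQ_DMA_ID));
}
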
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 54cdafd..9acf589 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -151,10 +151,9 @@
 	data->reset_gpio = devm_gpiod_get_optional(priv->dev, "phy-reset",
 						   GPIOD_OUT_LOW);
 	if (IS_ERR(data->reset_gpio)) {
-		error = PTR_ERR(data->reset_gpio);
-		dev_err(priv->dev, "Failed to request gpio: %d\n", error);
 		mdiobus_free(bus);
-		return error;
+		return dev_err_probe(priv->dev, PTR_ERR(data->reset_gpio),
+				     "Failed to request gpio\n");
 	}
 
 	of_property_read_u32(np, "phy-reset-duration", &data->msec);
@@ -166,9 +165,9 @@
 
 	error = of_mdiobus_register(bus, priv->dev->of_node);
 	if (error) {
-		dev_err(priv->dev, "cannot register MDIO bus %s\n", bus->name);
 		mdiobus_free(bus);
-		return error;
+		return dev_err_probe(priv->dev, error,
+				     "cannot register MDIO bus %s\n", bus->name);
 	}
 
 	return 0;
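
The same dev_err_probe() conversion is applied to a number of drivers below.
In isolation the pattern looks like the following sketch (the clock lookup is
just a stand-in resource request): dev_err_probe() logs the message, records
the reason quietly when the error is -EPROBE_DEFER instead of flooding the
log, and returns the error code, so the separate dev_err() plus return
collapses into a single statement.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_clock(struct device *dev, struct clk **clk)
{
        *clk = devm_clk_get(dev, NULL);
        if (IS_ERR(*clk))
                return dev_err_probe(dev, PTR_ERR(*clk),
                                     "failed to get clock\n");
        return 0;
}
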
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 3b51b17..20c032a 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2662,10 +2662,8 @@
 
 	/* enable device (incl. PCI PM wakeup and hotplug setup) */
 	err = pci_enable_device_mem(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "cannot enable PCI device\n");
-		return err;
-	}
+	if (err)
+		return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n");
 
 	/*
 	 * The atl1c chip can DMA to 64-bit addresses, but it uses a single
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 753973a..2e22483 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2297,10 +2297,8 @@
 	int err = 0;
 
 	err = pci_enable_device(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "cannot enable PCI device\n");
-		return err;
-	}
+	if (err)
+		return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n");
 
 	/*
 	 * The atl1e chip can DMA to 64-bit addresses, but it uses a single
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 691e147..b3d7d1a 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -1311,9 +1311,8 @@
 
 	err = pci_enable_device(pdev);
 	if (err) {
-		dev_err(dev, "Failed to enable PCI device\n");
 		pci_set_drvdata(pdev, NULL);
-		return err;
+		return dev_err_probe(dev, err, "Failed to enable PCI device\n");
 	}
 
 	err = pci_request_regions(pdev, DRV_NAME);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index d1667b7..2b87565 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -2119,10 +2119,8 @@
 	}
 
 	err = pci_enable_device(pdev);
-	if (err) {
-		dev_err(dev, "Failed to enable PCI device\n");
-		return err;
-	}
+	if (err)
+		return dev_err_probe(dev, err, "Failed to enable PCI device\n");
 
 	err = pci_request_regions(pdev, DRV_NAME);
 	if (err) {
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index c36fed9..db66d4b 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1597,9 +1597,8 @@
 
 	err = pcim_enable_device(pdev);
 	if (err) {
-		dev_err(dev, "Failed to enable PCI device\n");
 		pci_set_drvdata(pdev, NULL);
-		return err;
+		return dev_err_probe(dev, err, "Failed to enable PCI device\n");
 	}
 
 	err = pci_request_regions(pdev, DRV_NAME);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 49b76fd..4920a80 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2902,10 +2902,8 @@
 	 * Initialize generic PCI device state.
 	 */
 	err = pci_enable_device(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "cannot enable PCI device\n");
-		return err;
-	}
+	if (err)
+		return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n");
 
 	/*
 	 * Reserve PCI resources for the device.  If we can't get them some
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index ed1ed48..0064ebd 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -707,20 +707,16 @@
 	else
 		phy = phy_find_first(priv->mdio);
 
-	if (!phy) {
-		dev_err(&dev->dev, "no PHY found\n");
-		return -ENXIO;
-	}
+	if (!phy)
+		return dev_err_probe(&dev->dev, -ENXIO, "no PHY found\n");
 
 	priv->old_duplex = -1;
 	priv->old_link = -1;
 
 	err = phy_connect_direct(dev, phy, ethoc_mdio_poll,
 				 PHY_INTERFACE_MODE_GMII);
-	if (err) {
-		dev_err(&dev->dev, "could not attach to PHY\n");
-		return err;
-	}
+	if (err)
+		return dev_err_probe(&dev->dev, err, "could not attach to PHY\n");
 
 	phy_set_max_speed(phy, SPEED_100);
 
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 3ca93ad..0c69409 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2610,10 +2610,8 @@
 
 	pcie_flr(pdev);
 	err = pci_enable_device_mem(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "device enable failed\n");
-		return err;
-	}
+	if (err)
+		return dev_err_probe(&pdev->dev, err, "device enable failed\n");
 
 	/* set up for high or low dma */
 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 60d94e0..f0ff950 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -804,10 +804,8 @@
 	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
 
 	err = of_mdiobus_register(bus, np);
-	if (err) {
-		dev_err(dev, "cannot register MDIO bus\n");
-		return err;
-	}
+	if (err)
+		return dev_err_probe(dev, err, "cannot register MDIO bus\n");
 
 	pf->mdio = bus;
 
@@ -1216,10 +1214,8 @@
 			 ERR_PTR(err));
 
 	err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
-	if (err) {
-		dev_err(&pdev->dev, "PCI probing failed\n");
-		return err;
-	}
+	if (err)
+		return dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
 
 	si = pci_get_drvdata(pdev);
 	if (!si->hw.port || !si->hw.global) {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index bc59489..36b4f51 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -39,10 +39,8 @@
 	}
 
 	err = pci_enable_device_mem(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "device enable failed\n");
-		return err;
-	}
+	if (err)
+		return dev_err_probe(&pdev->dev, err, "device enable failed\n");
 
 	/* set up for high or low dma */
 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 1a9d1e8..0704f6b 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -143,10 +143,8 @@
 	int err;
 
 	err = enetc_pci_probe(pdev, KBUILD_MODNAME, 0);
-	if (err) {
-		dev_err(&pdev->dev, "PCI probing failed\n");
-		return err;
-	}
+	if (err)
+		return dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
 
 	si = pci_get_drvdata(pdev);
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 546a605..6ccb010 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -95,6 +95,7 @@
 	HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
 	HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
 	HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
+	HNAE3_DEV_SUPPORT_MC_MAC_MNG_B,
 };
 
 #define hnae3_dev_fd_supported(hdev) \
@@ -151,6 +152,9 @@
 #define hnae3_ae_dev_rxd_adv_layout_supported(ae_dev) \
 	test_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, (ae_dev)->caps)
 
+#define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \
+	test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps)
+
 enum HNAE3_PF_CAP_BITS {
 	HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
 };
@@ -341,6 +345,8 @@
 	u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */
 	u16 max_frm_size;
 	u16 max_qset_num;
+	u16 umv_size;
+	u16 mc_mac_size;
 };
 
 struct hnae3_client_ops {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 2b66c59..a1555f0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -924,6 +924,10 @@
 			  dev_specs->max_tm_rate);
 	*pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n",
 			  dev_specs->max_qset_num);
+	*pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n",
+			  dev_specs->umv_size);
+	*pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n",
+			  dev_specs->mc_mac_size);
 }
 
 static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 3324447..bfcfefa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -1188,7 +1188,10 @@
 	__le16 max_frm_size;
 	__le16 max_qset_num;
 	__le16 max_int_gl;
-	u8 rsv1[18];
+	u8 rsv0[2];
+	__le16 umv_size;
+	__le16 mc_mac_size;
+	u8 rsv1[12];
 };
 
 /* mac speed type defined in firmware command */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 87d96f82c..a983d01 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -1972,6 +1972,9 @@
 	}
 	mutex_unlock(&hdev->vport_lock);
 
+	pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num  : %u\n",
+			 hdev->used_mc_mac_num);
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index f1e46ba..a0d0fa4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -1342,8 +1342,6 @@
 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
 					 HCLGE_CFG_UMV_TBL_SPACE_M,
 					 HCLGE_CFG_UMV_TBL_SPACE_S);
-	if (!cfg->umv_space)
-		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
 
 	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
 					       HCLGE_CFG_PF_RSS_SIZE_M,
@@ -1419,6 +1417,7 @@
 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
 	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
 	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
+	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
 }
 
 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
@@ -1440,6 +1439,8 @@
 	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
+	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
+	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
 }
 
 static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@ -1460,6 +1461,8 @@
 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
 	if (!dev_specs->max_frm_size)
 		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
+	if (!dev_specs->umv_size)
+		dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
 }
 
 static int hclge_query_dev_specs(struct hclge_dev *hdev)
@@ -1549,7 +1552,10 @@
 	hdev->tm_info.num_pg = 1;
 	hdev->tc_max = cfg.tc_num;
 	hdev->tm_info.hw_pfc_map = 0;
-	hdev->wanted_umv_size = cfg.umv_space;
+	if (cfg.umv_space)
+		hdev->wanted_umv_size = cfg.umv_space;
+	else
+		hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
 	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
 	hdev->gro_en = true;
 	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
@@ -8478,6 +8484,9 @@
 	hdev->share_umv_size = hdev->priv_umv_size +
 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
 
+	if (hdev->ae_dev->dev_specs.mc_mac_size)
+		set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
+
 	return 0;
 }
 
@@ -8495,6 +8504,8 @@
 	hdev->share_umv_size = hdev->priv_umv_size +
 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
 	mutex_unlock(&hdev->vport_lock);
+
+	hdev->used_mc_mac_num = 0;
 }
 
 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
@@ -8756,6 +8767,7 @@
 	struct hclge_dev *hdev = vport->back;
 	struct hclge_mac_vlan_tbl_entry_cmd req;
 	struct hclge_desc desc[3];
+	bool is_new_addr = false;
 	int status;
 
 	/* mac addr check */
@@ -8769,6 +8781,13 @@
 	hclge_prepare_mac_addr(&req, addr, true);
 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
 	if (status) {
+		if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
+		    hdev->used_mc_mac_num >=
+		    hdev->ae_dev->dev_specs.mc_mac_size)
+			goto err_no_space;
+
+		is_new_addr = true;
+
 		/* This mac addr do not exist, add new entry for it */
 		memset(desc[0].data, 0, sizeof(desc[0].data));
 		memset(desc[1].data, 0, sizeof(desc[0].data));
@@ -8778,12 +8797,18 @@
 	if (status)
 		return status;
 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
-	/* if already overflow, not to print each time */
-	if (status == -ENOSPC &&
-	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
-		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
+	if (status == -ENOSPC)
+		goto err_no_space;
+	else if (!status && is_new_addr)
+		hdev->used_mc_mac_num++;
 
 	return status;
+
+err_no_space:
+	/* if already overflow, not to print each time */
+	if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
+		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
+	return -ENOSPC;
 }
 
 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
@@ -8820,12 +8845,15 @@
 		if (status)
 			return status;
 
-		if (hclge_is_all_function_id_zero(desc))
+		if (hclge_is_all_function_id_zero(desc)) {
 			/* All the vfid is zero, so need to delete this entry */
 			status = hclge_remove_mac_vlan_tbl(vport, &req);
-		else
+			if (!status)
+				hdev->used_mc_mac_num--;
+		} else {
 			/* Not all the vfid is zero, update the vfid */
 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
+		}
 	} else if (status == -ENOENT) {
 		status = 0;
 	}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index de6afbc..ca25e2e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -938,6 +938,8 @@
 	u16 priv_umv_size;
 	/* unicast mac vlan space shared by PF and its VFs */
 	u16 share_umv_size;
+	/* multicast mac address number used by PF and its VFs */
+	u16 used_mc_mac_num;
 
 	DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
 		      HCLGE_MAC_TNL_LOG_SIZE);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index ae707e3..9965e8d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -1379,10 +1379,8 @@
 {
 	int err = pci_enable_device(pdev);
 
-	if (err) {
-		dev_err(&pdev->dev, "Failed to enable PCI device\n");
-		return err;
-	}
+	if (err)
+		return dev_err_probe(&pdev->dev, err, "Failed to enable PCI device\n");
 
 	err = pci_request_regions(pdev, HINIC_DRV_NAME);
 	if (err) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index a4579b3..8f17096 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -108,6 +108,8 @@
 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
 					 struct ibmvnic_sub_crq_queue *tx_scrq);
+static void free_long_term_buff(struct ibmvnic_adapter *adapter,
+				struct ibmvnic_long_term_buff *ltb);
 
 struct ibmvnic_stat {
 	char name[ETH_GSTRING_LEN];
@@ -214,22 +216,77 @@
 	return -ETIMEDOUT;
 }
 
+/**
+ * reuse_ltb() - Check if a long term buffer can be reused
+ * @ltb:  The long term buffer to be checked
+ * @size: The size of the long term buffer.
+ *
+ * An LTB can be reused unless its size has changed.
+ *
+ * Return: Return true if the LTB can be reused, false otherwise.
+ */
+static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
+{
+	return (ltb->buff && ltb->size == size);
+}
+
+/**
+ * alloc_long_term_buff() - Allocate a long term buffer (LTB)
+ *
+ * @adapter: ibmvnic adapter associated to the LTB
+ * @ltb:     container object for the LTB
+ * @size:    size of the LTB
+ *
+ * Allocate an LTB of the specified size and notify VIOS.
+ *
+ * If the given @ltb already has the correct size, reuse it. Otherwise, if
+ * it is non-NULL, free it. Then allocate a new one of the correct size.
+ * Notify the VIOS either way since we may now be working with a new VIOS.
+ *
+ * Allocating larger chunks of memory during resets, especially during LPM
+ * or in low memory situations, can cause resets to fail/timeout and the
+ * LPAR to lose connectivity. So hold onto the LTB even if we fail to
+ * communicate with the VIOS and reuse it on the next open. Free the LTB
+ * when the adapter is closed.
+ *
+ * Return: 0 if we were able to allocate the LTB and notify the VIOS and
+ *	   a negative value otherwise.
+ */
 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 				struct ibmvnic_long_term_buff *ltb, int size)
 {
 	struct device *dev = &adapter->vdev->dev;
 	int rc;
 
-	ltb->size = size;
-	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
-				       GFP_KERNEL);
-
-	if (!ltb->buff) {
-		dev_err(dev, "Couldn't alloc long term buffer\n");
-		return -ENOMEM;
+	if (!reuse_ltb(ltb, size)) {
+		dev_dbg(dev,
+			"LTB size changed from 0x%llx to 0x%x, reallocating\n",
+			 ltb->size, size);
+		free_long_term_buff(adapter, ltb);
 	}
-	ltb->map_id = adapter->map_id;
-	adapter->map_id++;
+
+	if (ltb->buff) {
+		dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
+			ltb->map_id, ltb->size);
+	} else {
+		ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
+					       GFP_KERNEL);
+		if (!ltb->buff) {
+			dev_err(dev, "Couldn't alloc long term buffer\n");
+			return -ENOMEM;
+		}
+		ltb->size = size;
+
+		ltb->map_id = find_first_zero_bit(adapter->map_ids,
+						  MAX_MAP_ID);
+		bitmap_set(adapter->map_ids, ltb->map_id, 1);
+
+		dev_dbg(dev,
+			"Allocated new LTB [map %d, size 0x%llx]\n",
+			 ltb->map_id, ltb->size);
+	}
+
+	/* Ensure ltb is zeroed - specially when reusing it. */
+	memset(ltb->buff, 0, ltb->size);
 
 	mutex_lock(&adapter->fw_lock);
 	adapter->fw_done_rc = 0;
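
The hunk above replaces the old monotonically increasing adapter->map_id
counter with a bitmap of map ids (see the MAX_MAP_ID / map_ids additions to
ibmvnic.h later in this diff). A minimal sketch of the allocate/free pairing,
with invented helper names:

static u8 example_alloc_map_id(struct ibmvnic_adapter *adapter)
{
        /* map id 0 is reserved at probe time, so ids are handed out from 1 */
        u8 map_id = find_first_zero_bit(adapter->map_ids, MAX_MAP_ID);

        bitmap_set(adapter->map_ids, map_id, 1);
        return map_id;
}

static void example_free_map_id(struct ibmvnic_adapter *adapter, u8 map_id)
{
        /* mirrors free_long_term_buff(): make the id available for reuse */
        bitmap_clear(adapter->map_ids, map_id, 1);
}
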
@@ -243,24 +300,20 @@
 
 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
 	if (rc) {
-		dev_err(dev,
-			"Long term map request aborted or timed out,rc = %d\n",
+		dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
 			rc);
 		goto out;
 	}
 
 	if (adapter->fw_done_rc) {
-		dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
+		dev_err(dev, "Couldn't map LTB, rc = %d\n",
 			adapter->fw_done_rc);
 		rc = -1;
 		goto out;
 	}
 	rc = 0;
 out:
-	if (rc) {
-		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
-		ltb->buff = NULL;
-	}
+	/* don't free LTB on communication error - see function header */
 	mutex_unlock(&adapter->fw_lock);
 	return rc;
 }
@@ -281,48 +334,15 @@
 	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
 	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
 		send_request_unmap(adapter, ltb->map_id);
+
 	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+
 	ltb->buff = NULL;
+	/* mark this map_id free */
+	bitmap_clear(adapter->map_ids, ltb->map_id, 1);
 	ltb->map_id = 0;
 }
 
-static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
-				struct ibmvnic_long_term_buff *ltb)
-{
-	struct device *dev = &adapter->vdev->dev;
-	int rc;
-
-	memset(ltb->buff, 0, ltb->size);
-
-	mutex_lock(&adapter->fw_lock);
-	adapter->fw_done_rc = 0;
-
-	reinit_completion(&adapter->fw_done);
-	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
-	if (rc) {
-		mutex_unlock(&adapter->fw_lock);
-		return rc;
-	}
-
-	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
-	if (rc) {
-		dev_info(dev,
-			 "Reset failed, long term map request timed out or aborted\n");
-		mutex_unlock(&adapter->fw_lock);
-		return rc;
-	}
-
-	if (adapter->fw_done_rc) {
-		dev_info(dev,
-			 "Reset failed, attempting to free and reallocate buffer\n");
-		free_long_term_buff(adapter, ltb);
-		mutex_unlock(&adapter->fw_lock);
-		return alloc_long_term_buff(adapter, ltb, ltb->size);
-	}
-	mutex_unlock(&adapter->fw_lock);
-	return 0;
-}
-
 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
 {
 	int i;
@@ -363,31 +383,41 @@
 	 * be 0.
 	 */
 	for (i = ind_bufp->index; i < count; ++i) {
-		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
-		if (!skb) {
-			dev_err(dev, "Couldn't replenish rx buff\n");
-			adapter->replenish_no_mem++;
-			break;
-		}
-
 		index = pool->free_map[pool->next_free];
 
-		if (pool->rx_buff[index].skb)
-			dev_err(dev, "Inconsistent free_map!\n");
+		/* We may be reusing the skb from earlier resets. Allocate
+		 * only if necessary. But since the LTB may have changed
+		 * during reset (see init_rx_pools()), update LTB below
+		 * even if reusing skb.
+		 */
+		skb = pool->rx_buff[index].skb;
+		if (!skb) {
+			skb = netdev_alloc_skb(adapter->netdev,
+					       pool->buff_size);
+			if (!skb) {
+				dev_err(dev, "Couldn't replenish rx buff\n");
+				adapter->replenish_no_mem++;
+				break;
+			}
+		}
+
+		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
+		pool->next_free = (pool->next_free + 1) % pool->size;
 
 		/* Copy the skb to the long term mapped DMA buffer */
 		offset = index * pool->buff_size;
 		dst = pool->long_term_buff.buff + offset;
 		memset(dst, 0, pool->buff_size);
 		dma_addr = pool->long_term_buff.addr + offset;
-		pool->rx_buff[index].data = dst;
 
-		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
+		/* add the skb to an rx_buff in the pool */
+		pool->rx_buff[index].data = dst;
 		pool->rx_buff[index].dma = dma_addr;
 		pool->rx_buff[index].skb = skb;
 		pool->rx_buff[index].pool_index = pool->index;
 		pool->rx_buff[index].size = pool->buff_size;
 
+		/* queue the rx_buff for the next send_subcrq_indirect */
 		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
 		memset(sub_crq, 0, sizeof(*sub_crq));
 		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
@@ -405,7 +435,8 @@
 		shift = 8;
 #endif
 		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
-		pool->next_free = (pool->next_free + 1) % pool->size;
+
+		/* if send_subcrq_indirect queue is full, flush to VIOS */
 		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
 		    i == count - 1) {
 			lpar_rc =
@@ -523,53 +554,12 @@
 	return 0;
 }
 
-static int reset_rx_pools(struct ibmvnic_adapter *adapter)
-{
-	struct ibmvnic_rx_pool *rx_pool;
-	u64 buff_size;
-	int rx_scrqs;
-	int i, j, rc;
-
-	if (!adapter->rx_pool)
-		return -1;
-
-	buff_size = adapter->cur_rx_buf_sz;
-	rx_scrqs = adapter->num_active_rx_pools;
-	for (i = 0; i < rx_scrqs; i++) {
-		rx_pool = &adapter->rx_pool[i];
-
-		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
-
-		if (rx_pool->buff_size != buff_size) {
-			free_long_term_buff(adapter, &rx_pool->long_term_buff);
-			rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
-			rc = alloc_long_term_buff(adapter,
-						  &rx_pool->long_term_buff,
-						  rx_pool->size *
-						  rx_pool->buff_size);
-		} else {
-			rc = reset_long_term_buff(adapter,
-						  &rx_pool->long_term_buff);
-		}
-
-		if (rc)
-			return rc;
-
-		for (j = 0; j < rx_pool->size; j++)
-			rx_pool->free_map[j] = j;
-
-		memset(rx_pool->rx_buff, 0,
-		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));
-
-		atomic_set(&rx_pool->available, 0);
-		rx_pool->next_alloc = 0;
-		rx_pool->next_free = 0;
-		rx_pool->active = 1;
-	}
-
-	return 0;
-}
-
+/**
+ * release_rx_pools() - Release any rx pools attached to @adapter.
+ * @adapter: ibmvnic adapter
+ *
+ * Safe to call this multiple times - even if no pools are attached.
+ */
 static void release_rx_pools(struct ibmvnic_adapter *adapter)
 {
 	struct ibmvnic_rx_pool *rx_pool;
@@ -584,6 +574,7 @@
 		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
 
 		kfree(rx_pool->free_map);
+
 		free_long_term_buff(adapter, &rx_pool->long_term_buff);
 
 		if (!rx_pool->rx_buff)
@@ -602,21 +593,91 @@
 	kfree(adapter->rx_pool);
 	adapter->rx_pool = NULL;
 	adapter->num_active_rx_pools = 0;
+	adapter->prev_rx_pool_size = 0;
 }
 
+/**
+ * reuse_rx_pools() - Check if the existing rx pools can be reused.
+ * @adapter: ibmvnic adapter
+ *
+ * Check if the existing rx pools in the adapter can be reused. The
+ * pools can be reused if the pool parameters (number of pools,
+ * number of buffers in the pool and size of each buffer) have not
+ * changed.
+ *
+ * NOTE: This assumes that all pools have the same number of buffers
+ *       which is the case currently. If that changes, we must fix this.
+ *
+ * Return: true if the rx pools can be reused, false otherwise.
+ */
+static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
+{
+	u64 old_num_pools, new_num_pools;
+	u64 old_pool_size, new_pool_size;
+	u64 old_buff_size, new_buff_size;
+
+	if (!adapter->rx_pool)
+		return false;
+
+	old_num_pools = adapter->num_active_rx_pools;
+	new_num_pools = adapter->req_rx_queues;
+
+	old_pool_size = adapter->prev_rx_pool_size;
+	new_pool_size = adapter->req_rx_add_entries_per_subcrq;
+
+	old_buff_size = adapter->prev_rx_buf_sz;
+	new_buff_size = adapter->cur_rx_buf_sz;
+
+	/* Require the buff size to be exactly the same for now */
+	if (old_buff_size != new_buff_size)
+		return false;
+
+	if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
+		return true;
+
+	if (old_num_pools < adapter->min_rx_queues ||
+	    old_num_pools > adapter->max_rx_queues ||
+	    old_pool_size < adapter->min_rx_add_entries_per_subcrq ||
+	    old_pool_size > adapter->max_rx_add_entries_per_subcrq)
+		return false;
+
+	return true;
+}
+
+/**
+ * init_rx_pools(): Initialize the set of receiver pools in the adapter.
+ * @netdev: net device associated with the vnic interface
+ *
+ * Initialize the set of receiver pools in the ibmvnic adapter associated
+ * with the net_device @netdev. If possible, reuse the existing rx pools.
+ * Otherwise free any existing pools and allocate a new set of pools
+ * before initializing them.
+ *
+ * Return: 0 on success and negative value on error.
+ */
 static int init_rx_pools(struct net_device *netdev)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 	struct device *dev = &adapter->vdev->dev;
 	struct ibmvnic_rx_pool *rx_pool;
-	int rxadd_subcrqs;
+	u64 num_pools;
+	u64 pool_size;		/* # of buffers in one pool */
 	u64 buff_size;
 	int i, j;
 
-	rxadd_subcrqs = adapter->num_active_rx_scrqs;
+	pool_size = adapter->req_rx_add_entries_per_subcrq;
+	num_pools = adapter->req_rx_queues;
 	buff_size = adapter->cur_rx_buf_sz;
 
-	adapter->rx_pool = kcalloc(rxadd_subcrqs,
+	if (reuse_rx_pools(adapter)) {
+		dev_dbg(dev, "Reusing rx pools\n");
+		goto update_ltb;
+	}
+
+	/* Allocate/populate the pools. */
+	release_rx_pools(adapter);
+
+	adapter->rx_pool = kcalloc(num_pools,
 				   sizeof(struct ibmvnic_rx_pool),
 				   GFP_KERNEL);
 	if (!adapter->rx_pool) {
@@ -624,26 +685,27 @@
 		return -1;
 	}
 
-	adapter->num_active_rx_pools = rxadd_subcrqs;
+	/* Set num_active_rx_pools early. If we fail below after partial
+	 * allocation, release_rx_pools() will know how many to look for.
+	 */
+	adapter->num_active_rx_pools = num_pools;
 
-	for (i = 0; i < rxadd_subcrqs; i++) {
+	for (i = 0; i < num_pools; i++) {
 		rx_pool = &adapter->rx_pool[i];
 
 		netdev_dbg(adapter->netdev,
 			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
-			   i, adapter->req_rx_add_entries_per_subcrq,
-			   buff_size);
+			   i, pool_size, buff_size);
 
-		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
+		rx_pool->size = pool_size;
 		rx_pool->index = i;
 		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
-		rx_pool->active = 1;
 
 		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
 					    GFP_KERNEL);
 		if (!rx_pool->free_map) {
-			release_rx_pools(adapter);
-			return -1;
+			dev_err(dev, "Couldn't alloc free_map %d\n", i);
+			goto out_release;
 		}
 
 		rx_pool->rx_buff = kcalloc(rx_pool->size,
@@ -651,69 +713,58 @@
 					   GFP_KERNEL);
 		if (!rx_pool->rx_buff) {
 			dev_err(dev, "Couldn't alloc rx buffers\n");
-			release_rx_pools(adapter);
-			return -1;
+			goto out_release;
 		}
+	}
+
+	adapter->prev_rx_pool_size = pool_size;
+	adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;
+
+update_ltb:
+	for (i = 0; i < num_pools; i++) {
+		rx_pool = &adapter->rx_pool[i];
+		dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
+			i, rx_pool->size, rx_pool->buff_size);
 
 		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
-					 rx_pool->size * rx_pool->buff_size)) {
-			release_rx_pools(adapter);
-			return -1;
-		}
+					 rx_pool->size * rx_pool->buff_size))
+			goto out;
 
-		for (j = 0; j < rx_pool->size; ++j)
+		for (j = 0; j < rx_pool->size; ++j) {
+			struct ibmvnic_rx_buff *rx_buff;
+
 			rx_pool->free_map[j] = j;
 
+			/* NOTE: Don't clear rx_buff->skb here - will leak
+			 * memory! replenish_rx_pool() will reuse skbs or
+			 * allocate as necessary.
+			 */
+			rx_buff = &rx_pool->rx_buff[j];
+			rx_buff->dma = 0;
+			rx_buff->data = 0;
+			rx_buff->size = 0;
+			rx_buff->pool_index = 0;
+		}
+
+		/* Mark pool "empty" so replenish_rx_pools() will
+		 * update the LTB info for each buffer
+		 */
 		atomic_set(&rx_pool->available, 0);
 		rx_pool->next_alloc = 0;
 		rx_pool->next_free = 0;
+		/* replenish_rx_pool() may have called deactivate_rx_pools()
+		 * on failover. Ensure pool is active now.
+		 */
+		rx_pool->active = 1;
 	}
-
 	return 0;
-}
-
-static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
-			     struct ibmvnic_tx_pool *tx_pool)
-{
-	int rc, i;
-
-	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
-	if (rc)
-		return rc;
-
-	memset(tx_pool->tx_buff, 0,
-	       tx_pool->num_buffers *
-	       sizeof(struct ibmvnic_tx_buff));
-
-	for (i = 0; i < tx_pool->num_buffers; i++)
-		tx_pool->free_map[i] = i;
-
-	tx_pool->consumer_index = 0;
-	tx_pool->producer_index = 0;
-
-	return 0;
-}
-
-static int reset_tx_pools(struct ibmvnic_adapter *adapter)
-{
-	int tx_scrqs;
-	int i, rc;
-
-	if (!adapter->tx_pool)
-		return -1;
-
-	tx_scrqs = adapter->num_active_tx_pools;
-	for (i = 0; i < tx_scrqs; i++) {
-		ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
-		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
-		if (rc)
-			return rc;
-		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
-		if (rc)
-			return rc;
-	}
-
-	return 0;
+out_release:
+	release_rx_pools(adapter);
+out:
+	/* We failed to allocate one or more LTBs or map them on the VIOS.
+	 * Hold onto the pools and any LTBs that we did allocate/map.
+	 */
+	return -1;
 }
 
 static void release_vpd_data(struct ibmvnic_adapter *adapter)
@@ -735,10 +786,19 @@
 	free_long_term_buff(adapter, &tx_pool->long_term_buff);
 }
 
+/**
+ * release_tx_pools() - Release any tx pools attached to @adapter.
+ * @adapter: ibmvnic adapter
+ *
+ * Safe to call this multiple times - even if no pools are attached.
+ */
 static void release_tx_pools(struct ibmvnic_adapter *adapter)
 {
 	int i;
 
+	/* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
+	 * both NULL or both non-NULL. So we only need to check one.
+	 */
 	if (!adapter->tx_pool)
 		return;
 
@@ -752,84 +812,218 @@
 	kfree(adapter->tso_pool);
 	adapter->tso_pool = NULL;
 	adapter->num_active_tx_pools = 0;
+	adapter->prev_tx_pool_size = 0;
 }
 
 static int init_one_tx_pool(struct net_device *netdev,
 			    struct ibmvnic_tx_pool *tx_pool,
-			    int num_entries, int buf_size)
+			    int pool_size, int buf_size)
 {
-	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 	int i;
 
-	tx_pool->tx_buff = kcalloc(num_entries,
+	tx_pool->tx_buff = kcalloc(pool_size,
 				   sizeof(struct ibmvnic_tx_buff),
 				   GFP_KERNEL);
 	if (!tx_pool->tx_buff)
 		return -1;
 
-	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
-				 num_entries * buf_size))
+	tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
+	if (!tx_pool->free_map) {
+		kfree(tx_pool->tx_buff);
+		tx_pool->tx_buff = NULL;
 		return -1;
+	}
 
-	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
-	if (!tx_pool->free_map)
-		return -1;
-
-	for (i = 0; i < num_entries; i++)
+	for (i = 0; i < pool_size; i++)
 		tx_pool->free_map[i] = i;
 
 	tx_pool->consumer_index = 0;
 	tx_pool->producer_index = 0;
-	tx_pool->num_buffers = num_entries;
+	tx_pool->num_buffers = pool_size;
 	tx_pool->buf_size = buf_size;
 
 	return 0;
 }
 
+/**
+ * reuse_tx_pools() - Check if the existing tx pools can be reused.
+ * @adapter: ibmvnic adapter
+ *
+ * Check if the existing tx pools in the adapter can be reused. The
+ * pools can be reused if the pool parameters (number of pools,
+ * number of buffers in the pool and mtu) have not changed.
+ *
+ * NOTE: This assumes that all pools have the same number of buffers
+ *       which is the case currently. If that changes, we must fix this.
+ *
+ * Return: true if the tx pools can be reused, false otherwise.
+ */
+static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
+{
+	u64 old_num_pools, new_num_pools;
+	u64 old_pool_size, new_pool_size;
+	u64 old_mtu, new_mtu;
+
+	if (!adapter->tx_pool)
+		return false;
+
+	old_num_pools = adapter->num_active_tx_pools;
+	new_num_pools = adapter->num_active_tx_scrqs;
+	old_pool_size = adapter->prev_tx_pool_size;
+	new_pool_size = adapter->req_tx_entries_per_subcrq;
+	old_mtu = adapter->prev_mtu;
+	new_mtu = adapter->req_mtu;
+
+	/* Require the MTU to be exactly the same to reuse pools for now */
+	if (old_mtu != new_mtu)
+		return false;
+
+	if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
+		return true;
+
+	if (old_num_pools < adapter->min_tx_queues ||
+	    old_num_pools > adapter->max_tx_queues ||
+	    old_pool_size < adapter->min_tx_entries_per_subcrq ||
+	    old_pool_size > adapter->max_tx_entries_per_subcrq)
+		return false;
+
+	return true;
+}
+
+/**
+ * init_tx_pools(): Initialize the set of transmit pools in the adapter.
+ * @netdev: net device associated with the vnic interface
+ *
+ * Initialize the set of transmit pools in the ibmvnic adapter associated
+ * with the net_device @netdev. If possible, reuse the existing tx pools.
+ * Otherwise free any existing pools and allocate a new set of pools
+ * before initializing them.
+ *
+ * Return: 0 on success and negative value on error.
+ */
 static int init_tx_pools(struct net_device *netdev)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-	int tx_subcrqs;
+	struct device *dev = &adapter->vdev->dev;
+	int num_pools;
+	u64 pool_size;		/* # of buffers in pool */
 	u64 buff_size;
-	int i, rc;
+	int i, j, rc;
 
-	tx_subcrqs = adapter->num_active_tx_scrqs;
-	adapter->tx_pool = kcalloc(tx_subcrqs,
+	num_pools = adapter->req_tx_queues;
+
+	/* We must notify the VIOS about the LTB on all resets - but we only
+	 * need to alloc/populate pools if either the number of buffers or
+	 * size of each buffer in the pool has changed.
+	 */
+	if (reuse_tx_pools(adapter)) {
+		netdev_dbg(netdev, "Reusing tx pools\n");
+		goto update_ltb;
+	}
+
+	/* Allocate/populate the pools. */
+	release_tx_pools(adapter);
+
+	pool_size = adapter->req_tx_entries_per_subcrq;
+	num_pools = adapter->num_active_tx_scrqs;
+
+	adapter->tx_pool = kcalloc(num_pools,
 				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
 	if (!adapter->tx_pool)
 		return -1;
 
-	adapter->tso_pool = kcalloc(tx_subcrqs,
+	adapter->tso_pool = kcalloc(num_pools,
 				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
+	/* To simplify release_tx_pools() ensure that ->tx_pool and
+	 * ->tso_pool are either both NULL or both non-NULL.
+	 */
 	if (!adapter->tso_pool) {
 		kfree(adapter->tx_pool);
 		adapter->tx_pool = NULL;
 		return -1;
 	}
 
-	adapter->num_active_tx_pools = tx_subcrqs;
+	/* Set num_active_tx_pools early. If we fail below after partial
+	 * allocation, release_tx_pools() will know how many to look for.
+	 */
+	adapter->num_active_tx_pools = num_pools;
 
-	for (i = 0; i < tx_subcrqs; i++) {
-		buff_size = adapter->req_mtu + VLAN_HLEN;
-		buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
+	buff_size = adapter->req_mtu + VLAN_HLEN;
+	buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
+
+	for (i = 0; i < num_pools; i++) {
+		dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
+			i, adapter->req_tx_entries_per_subcrq, buff_size);
+
 		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
-				      adapter->req_tx_entries_per_subcrq,
-				      buff_size);
-		if (rc) {
-			release_tx_pools(adapter);
-			return rc;
-		}
+				      pool_size, buff_size);
+		if (rc)
+			goto out_release;
 
 		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
 				      IBMVNIC_TSO_BUFS,
 				      IBMVNIC_TSO_BUF_SZ);
-		if (rc) {
-			release_tx_pools(adapter);
-			return rc;
-		}
+		if (rc)
+			goto out_release;
+	}
+
+	adapter->prev_tx_pool_size = pool_size;
+	adapter->prev_mtu = adapter->req_mtu;
+
+update_ltb:
+	/* NOTE: All tx_pools have the same number of buffers (which is
+	 *       the same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
+	 *       buffers (see the calls to init_one_tx_pool() for these).
+	 *       For consistency, we use tx_pool->num_buffers and
+	 *       tso_pool->num_buffers below.
+	 */
+	rc = -1;
+	for (i = 0; i < num_pools; i++) {
+		struct ibmvnic_tx_pool *tso_pool;
+		struct ibmvnic_tx_pool *tx_pool;
+		u32 ltb_size;
+
+		tx_pool = &adapter->tx_pool[i];
+		ltb_size = tx_pool->num_buffers * tx_pool->buf_size;
+		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
+					 ltb_size))
+			goto out;
+
+		dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n",
+			i, tx_pool->long_term_buff.buff,
+			tx_pool->num_buffers, tx_pool->buf_size);
+
+		tx_pool->consumer_index = 0;
+		tx_pool->producer_index = 0;
+
+		for (j = 0; j < tx_pool->num_buffers; j++)
+			tx_pool->free_map[j] = j;
+
+		tso_pool = &adapter->tso_pool[i];
+		ltb_size = tso_pool->num_buffers * tso_pool->buf_size;
+		if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff,
+					 ltb_size))
+			goto out;
+
+		dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n",
+			i, tso_pool->long_term_buff.buff,
+			tso_pool->num_buffers, tso_pool->buf_size);
+
+		tso_pool->consumer_index = 0;
+		tso_pool->producer_index = 0;
+
+		for (j = 0; j < tso_pool->num_buffers; j++)
+			tso_pool->free_map[j] = j;
 	}
 
 	return 0;
+out_release:
+	release_tx_pools(adapter);
+out:
+	/* We failed to allocate one or more LTBs or map them on the VIOS.
+	 * Hold onto the pools and any LTBs that we did allocate/map.
+	 */
+	return rc;
 }
 
 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
@@ -1020,9 +1214,6 @@
 {
 	release_vpd_data(adapter);
 
-	release_tx_pools(adapter);
-	release_rx_pools(adapter);
-
 	release_napi(adapter);
 	release_login_buffer(adapter);
 	release_login_rsp_buffer(adapter);
@@ -1198,8 +1389,6 @@
 		return rc;
 	}
 
-	adapter->map_id = 1;
-
 	rc = init_napi(adapter);
 	if (rc)
 		return rc;
@@ -1296,6 +1485,8 @@
 		if (rc) {
 			netdev_err(netdev, "failed to initialize resources\n");
 			release_resources(adapter);
+			release_rx_pools(adapter);
+			release_tx_pools(adapter);
 			goto out;
 		}
 	}
@@ -1424,9 +1615,6 @@
 
 	ibmvnic_napi_disable(adapter);
 	ibmvnic_disable_irqs(adapter);
-
-	clean_rx_pools(adapter);
-	clean_tx_pools(adapter);
 }
 
 static int __ibmvnic_close(struct net_device *netdev)
@@ -1460,6 +1648,8 @@
 
 	rc = __ibmvnic_close(netdev);
 	ibmvnic_cleanup(netdev);
+	clean_rx_pools(adapter);
+	clean_tx_pools(adapter);
 
 	return rc;
 }
@@ -2036,9 +2226,9 @@
 static int do_reset(struct ibmvnic_adapter *adapter,
 		    struct ibmvnic_rwi *rwi, u32 reset_state)
 {
+	struct net_device *netdev = adapter->netdev;
 	u64 old_num_rx_queues, old_num_tx_queues;
 	u64 old_num_rx_slots, old_num_tx_slots;
-	struct net_device *netdev = adapter->netdev;
 	int rc;
 
 	netdev_dbg(adapter->netdev,
@@ -2188,8 +2378,6 @@
 		    !adapter->rx_pool ||
 		    !adapter->tso_pool ||
 		    !adapter->tx_pool) {
-			release_rx_pools(adapter);
-			release_tx_pools(adapter);
 			release_napi(adapter);
 			release_vpd_data(adapter);
 
@@ -2198,16 +2386,18 @@
 				goto out;
 
 		} else {
-			rc = reset_tx_pools(adapter);
+			rc = init_tx_pools(netdev);
 			if (rc) {
-				netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
+				netdev_dbg(netdev,
+					   "init tx pools failed (%d)\n",
 					   rc);
 				goto out;
 			}
 
-			rc = reset_rx_pools(adapter);
+			rc = init_rx_pools(netdev);
 			if (rc) {
-				netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
+				netdev_dbg(netdev,
+					   "init rx pools failed (%d)\n",
 					   rc);
 				goto out;
 			}
@@ -4786,9 +4976,10 @@
 		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
 		return;
 	}
-	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
-		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
-		   crq->query_map_rsp.free_pages);
+	netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
+		   crq->query_map_rsp.page_size,
+		   __be32_to_cpu(crq->query_map_rsp.tot_pages),
+		   __be32_to_cpu(crq->query_map_rsp.free_pages));
 }
 
 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
@@ -5535,6 +5726,9 @@
 	adapter->vdev = dev;
 	adapter->netdev = netdev;
 	adapter->login_pending = false;
+	memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
+	/* map_ids start at 1, so ensure map_id 0 is always "in-use" */
+	bitmap_set(adapter->map_ids, 0, 1);
 
 	ether_addr_copy(adapter->mac_addr, mac_addr_p);
 	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
@@ -5555,6 +5749,8 @@
 	init_completion(&adapter->reset_done);
 	init_completion(&adapter->stats_done);
 	clear_bit(0, &adapter->resetting);
+	adapter->prev_rx_buf_sz = 0;
+	adapter->prev_mtu = 0;
 
 	init_success = false;
 	do {
@@ -5655,6 +5851,8 @@
 	unregister_netdevice(netdev);
 
 	release_resources(adapter);
+	release_rx_pools(adapter);
+	release_tx_pools(adapter);
 	release_sub_crqs(adapter, 1);
 	release_crq_queue(adapter);
 
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 22df602..b8e42f6 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -827,7 +827,7 @@
 
 struct ibmvnic_rx_pool {
 	struct ibmvnic_rx_buff *rx_buff;
-	int size;
+	int size;			/* # of buffers in the pool */
 	int index;
 	int buff_size;
 	atomic_t available;
@@ -967,6 +967,7 @@
 	u64 min_mtu;
 	u64 max_mtu;
 	u64 req_mtu;
+	u64 prev_mtu;
 	u64 max_multicast_filters;
 	u64 vlan_header_insertion;
 	u64 rx_vlan_header_insertion;
@@ -979,13 +980,18 @@
 	u64 opt_tx_entries_per_subcrq;
 	u64 opt_rxba_entries_per_subcrq;
 	__be64 tx_rx_desc_req;
-	u8 map_id;
+#define MAX_MAP_ID	255
+	DECLARE_BITMAP(map_ids, MAX_MAP_ID);
 	u32 num_active_rx_scrqs;
 	u32 num_active_rx_pools;
 	u32 num_active_rx_napi;
 	u32 num_active_tx_scrqs;
 	u32 num_active_tx_pools;
+
+	u32 prev_rx_pool_size;
+	u32 prev_tx_pool_size;
 	u32 cur_rx_buf_sz;
+	u32 prev_rx_buf_sz;
 
 	struct tasklet_struct tasklet;
 	enum vnic_state state;
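
The prev_mtu, prev_rx_buf_sz and prev_*_pool_size fields added above let the driver decide whether existing pools and their mapped LTBs can survive a reset. A rough sketch of the reuse test these fields make possible (assumed logic for illustration; the actual check lives in the init_tx_pools()/init_rx_pools() paths of ibmvnic.c, only part of which appears in this diff):

/* Assumes ibmvnic.h is included; returns true when the tx pools from the
 * previous adapter state can be kept as-is across a reset.
 */
static bool example_reuse_tx_pools(struct ibmvnic_adapter *adapter)
{
	if (!adapter->tx_pool)
		return false;

	return adapter->prev_mtu == adapter->req_mtu &&
	       adapter->num_active_tx_pools == adapter->num_active_tx_scrqs &&
	       adapter->prev_tx_pool_size ==
			adapter->req_tx_entries_per_subcrq;
}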
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 62f8c52..2258e3f 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -96,6 +96,9 @@
 	struct ltq_etop_chan ch[MAX_DMA_CHAN];
 	int tx_free[MAX_DMA_CHAN >> 1];
 
+	int tx_burst_len;
+	int rx_burst_len;
+
 	spinlock_t lock;
 };
 
@@ -259,7 +262,7 @@
 	/* enable crc generation */
 	ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);
 
-	ltq_dma_init_port(DMA_PORT_ETOP);
+	ltq_dma_init_port(DMA_PORT_ETOP, priv->tx_burst_len, priv->rx_burst_len);
 
 	for (i = 0; i < MAX_DMA_CHAN; i++) {
 		int irq = LTQ_DMA_CH0_INT + i;
@@ -472,8 +475,8 @@
 		return NETDEV_TX_BUSY;
 	}
 
-	/* dma needs to start on a 16 byte aligned address */
-	byte_offset = CPHYSADDR(skb->data) % 16;
+	/* dma needs to start on a burst length value aligned address */
+	byte_offset = CPHYSADDR(skb->data) % (priv->tx_burst_len * 4);
 	ch->skb[ch->dma.desc] = skb;
 
 	netif_trans_update(dev);
@@ -667,6 +670,18 @@
 	spin_lock_init(&priv->lock);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
+	err = device_property_read_u32(&pdev->dev, "lantiq,tx-burst-length", &priv->tx_burst_len);
+	if (err < 0) {
+		dev_err(&pdev->dev, "unable to read tx-burst-length property\n");
+		return err;
+	}
+
+	err = device_property_read_u32(&pdev->dev, "lantiq,rx-burst-length", &priv->rx_burst_len);
+	if (err < 0) {
+		dev_err(&pdev->dev, "unable to read rx-burst-length property\n");
+		return err;
+	}
+
 	for (i = 0; i < MAX_DMA_CHAN; i++) {
 		if (IS_TX(i))
 			netif_napi_add(dev, &priv->ch[i].napi,
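
Both Lantiq drivers replace the hard-coded 16-byte DMA alignment with one derived from the configured burst length: a burst of tx_burst_len 32-bit words has to start on a tx_burst_len * 4 byte boundary. A tiny self-contained illustration of the offset math used in the xmit paths (values are arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t phys = 0x2000000a;	/* example buffer physical address */
	int tx_burst_len = 8;		/* burst length in 32-bit words */
	uint32_t byte_offset;

	/* dma needs to start on a burst length value aligned address */
	byte_offset = phys % (tx_burst_len * 4);

	/* the descriptor points at the aligned address; the driver carries
	 * byte_offset separately when filling the descriptor
	 */
	printf("desc addr = 0x%08x, byte offset = %u\n",
	       phys - byte_offset, byte_offset);
	return 0;
}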
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index fb78f17..5d96248 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -71,6 +71,9 @@
 	struct net_device *net_dev;
 	struct device *dev;
 
+	int tx_burst_len;
+	int rx_burst_len;
+
 	__iomem void *pmac_reg;
 };
 
@@ -316,8 +319,8 @@
 	if (unlikely(dma_mapping_error(priv->dev, mapping)))
 		goto err_drop;
 
-	/* dma needs to start on a 16 byte aligned address */
-	byte_offset = mapping % 16;
+	/* dma needs to start on a burst length value aligned address */
+	byte_offset = mapping % (priv->tx_burst_len * 4);
 
 	desc->addr = mapping - byte_offset;
 	/* Make sure the address is written before we give it to HW */
@@ -369,7 +372,7 @@
 	int ret = 0;
 	int i;
 
-	ltq_dma_init_port(DMA_PORT_ETOP);
+	ltq_dma_init_port(DMA_PORT_ETOP, priv->tx_burst_len, priv->rx_burst_len);
 
 	ch_rx->dma.nr = XRX200_DMA_RX;
 	ch_rx->dma.dev = priv->dev;
@@ -478,6 +481,18 @@
 	if (err)
 		eth_hw_addr_random(net_dev);
 
+	err = device_property_read_u32(dev, "lantiq,tx-burst-length", &priv->tx_burst_len);
+	if (err < 0) {
+		dev_err(dev, "unable to read tx-burst-length property\n");
+		return err;
+	}
+
+	err = device_property_read_u32(dev, "lantiq,rx-burst-length", &priv->rx_burst_len);
+	if (err < 0) {
+		dev_err(dev, "unable to read rx-burst-length property\n");
+		return err;
+	}
+
 	/* bring up the dma engine and IP core */
 	err = xrx200_dma_init(priv);
 	if (err)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index d9bea13..8931864 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -191,6 +191,7 @@
 #define NIX_CHAN_SDP_CH_START          (0x700ull)
 #define NIX_CHAN_SDP_CHX(a)            (NIX_CHAN_SDP_CH_START + (a))
 #define NIX_CHAN_SDP_NUM_CHANS		256
+#define NIX_CHAN_CPT_CH_START          (0x800ull)
 
 /* The mask is to extract lower 10-bits of channel number
  * which CPT will pass to X2P.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 1548777..f77f745 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -186,6 +186,8 @@
 M(CPT_LF_FREE,		0xA01, cpt_lf_free, msg_req, msg_rsp)		\
 M(CPT_RD_WR_REGISTER,	0xA02, cpt_rd_wr_register,  cpt_rd_wr_reg_msg,	\
 			       cpt_rd_wr_reg_msg)			\
+M(CPT_INLINE_IPSEC_CFG,	0xA04, cpt_inline_ipsec_cfg,			\
+			       cpt_inline_ipsec_cfg_msg, msg_rsp)	\
 M(CPT_STATS,            0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp)	\
 M(CPT_RXC_TIME_CFG,     0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req,  \
 			       msg_rsp)                                 \
@@ -270,6 +272,10 @@
 				nix_bp_cfg_rsp)	\
 M(NIX_BP_DISABLE,	0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
 M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
+M(NIX_INLINE_IPSEC_CFG, 0x8019, nix_inline_ipsec_cfg,			\
+				nix_inline_ipsec_cfg, msg_rsp)		\
+M(NIX_INLINE_IPSEC_LF_CFG, 0x801a, nix_inline_ipsec_lf_cfg,		\
+				nix_inline_ipsec_lf_cfg, msg_rsp)	\
 M(NIX_CN10K_AQ_ENQ,	0x801b, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \
 				nix_cn10k_aq_enq_rsp)			\
 M(NIX_GET_HW_INFO,	0x801c, nix_get_hw_info, msg_req, nix_hw_info)	\
@@ -1065,6 +1071,40 @@
 	u8	chan_cnt; /* Number of channel for which bpids are assigned */
 };
 
+/* Global NIX inline IPSec configuration */
+struct nix_inline_ipsec_cfg {
+	struct mbox_msghdr hdr;
+	u32 cpt_credit;
+	struct {
+		u8 egrp;
+		u8 opcode;
+		u16 param1;
+		u16 param2;
+	} gen_cfg;
+	struct {
+		u16 cpt_pf_func;
+		u8 cpt_slot;
+	} inst_qsel;
+	u8 enable;
+};
+
+/* Per NIX LF inline IPSec configuration */
+struct nix_inline_ipsec_lf_cfg {
+	struct mbox_msghdr hdr;
+	u64 sa_base_addr;
+	struct {
+		u32 tag_const;
+		u16 lenm1_max;
+		u8 sa_pow2_size;
+		u8 tt;
+	} ipsec_cfg0;
+	struct {
+		u32 sa_idx_max;
+		u8 sa_idx_w;
+	} ipsec_cfg1;
+	u8 enable;
+};
+
 struct nix_hw_info {
 	struct mbox_msghdr hdr;
 	u16 rsvs16;
@@ -1399,7 +1439,9 @@
 	CPT_AF_ERR_LF_INVALID		= -903,
 	CPT_AF_ERR_ACCESS_DENIED	= -904,
 	CPT_AF_ERR_SSO_PF_FUNC_INVALID	= -905,
-	CPT_AF_ERR_NIX_PF_FUNC_INVALID	= -906
+	CPT_AF_ERR_NIX_PF_FUNC_INVALID	= -906,
+	CPT_AF_ERR_INLINE_IPSEC_INB_ENA	= -907,
+	CPT_AF_ERR_INLINE_IPSEC_OUT_ENA	= -908
 };
 
 /* CPT mbox message formats */
@@ -1420,6 +1462,22 @@
 	int blkaddr;
 };
 
+#define CPT_INLINE_INBOUND      0
+#define CPT_INLINE_OUTBOUND     1
+
+/* Mailbox message request format for CPT IPsec
+ * inline inbound and outbound configuration.
+ */
+struct cpt_inline_ipsec_cfg_msg {
+	struct mbox_msghdr hdr;
+	u8 enable;
+	u8 slot;
+	u8 dir;
+	u8 sso_pf_func_ovrd;
+	u16 sso_pf_func; /* inbound path SSO_PF_FUNC */
+	u16 nix_pf_func; /* outbound path NIX_PF_FUNC */
+};
+
 /* Mailbox message request and response format for CPT stats. */
 struct cpt_sts_req {
 	struct mbox_msghdr hdr;
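
For context, a PF/VF driver enabling the inline inbound path on its first CPT LF would fill the cpt_inline_ipsec_cfg_msg request defined above roughly as below (sketch only; the mailbox allocation and dispatch helpers of the otx2 drivers are deliberately omitted, and the SSO PF_FUNC value is a placeholder):

/* Sketch: populate the request defined above for the inbound direction. */
static void example_fill_inline_inbound(struct cpt_inline_ipsec_cfg_msg *req,
					u16 sso_pf_func)
{
	req->enable = 1;
	req->slot = 0;			/* global CPT LF slot */
	req->dir = CPT_INLINE_INBOUND;
	req->sso_pf_func = sso_pf_func;	/* inbound path SSO_PF_FUNC */
	req->sso_pf_func_ovrd = 0;
	req->nix_pf_func = 0;		/* only used for the outbound path */
}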
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 3583690..3bba8bc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -1287,6 +1287,60 @@
 	return (val & 0xFFF);
 }
 
+int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
+			      u16 global_slot, u16 *slot_in_block)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+	int numlfs, total_lfs = 0, nr_blocks = 0;
+	int i, num_blkaddr[BLK_COUNT] = { 0 };
+	struct rvu_block *block;
+	int blkaddr = -ENODEV;
+	u16 start_slot;
+
+	if (!is_blktype_attached(pfvf, blktype))
+		return -ENODEV;
+
+	/* Get all the block addresses from which LFs are attached to
+	 * the given pcifunc in num_blkaddr[].
+	 */
+	for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) {
+		block = &rvu->hw->block[blkaddr];
+		if (block->type != blktype)
+			continue;
+		if (!is_block_implemented(rvu->hw, blkaddr))
+			continue;
+
+		numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr);
+		if (numlfs) {
+			total_lfs += numlfs;
+			num_blkaddr[nr_blocks] = blkaddr;
+			nr_blocks++;
+		}
+	}
+
+	if (global_slot >= total_lfs)
+		return -ENODEV;
+
+	/* Based on the given global slot number retrieve the
+	 * correct block address out of all attached block
+	 * addresses and slot number in that block.
+	 */
+	total_lfs = 0;
+	blkaddr = -ENODEV;
+	for (i = 0; i < nr_blocks; i++) {
+		numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]);
+		total_lfs += numlfs;
+		if (global_slot < total_lfs) {
+			blkaddr = num_blkaddr[i];
+			start_slot = total_lfs - numlfs;
+			*slot_in_block = global_slot - start_slot;
+			break;
+		}
+	}
+
+	return blkaddr;
+}
+
 static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
 {
 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 1d94112..f59169c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -656,6 +656,8 @@
 int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
 int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
 int rvu_get_num_lbk_chans(void);
+int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
+			      u16 global_slot, u16 *slot_in_block);
 
 /* RVU HW reg validation */
 enum regmap_block {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 46a41cf..7dbbc11 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -334,8 +334,8 @@
 	/* Out of 4096 channels start CPT from 2048 so
 	 * that MSB for CPT channels is always set
 	 */
-	if (cpt_chan_base <= 0x800) {
-		hw->cpt_chan_base = 0x800;
+	if (cpt_chan_base <= NIX_CHAN_CPT_CH_START) {
+		hw->cpt_chan_base = NIX_CHAN_CPT_CH_START;
 	} else {
 		dev_err(rvu->dev,
 			"CPT channels could not fit in the range 2048-4095\n");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 1f90a74..267d092 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -197,6 +197,141 @@
 	return ret;
 }
 
+static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+					struct cpt_inline_ipsec_cfg_msg *req)
+{
+	u16 sso_pf_func = req->sso_pf_func;
+	u8 nix_sel;
+	u64 val;
+
+	val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+	if (req->enable && (val & BIT_ULL(16))) {
+		/* IPSec inline outbound path is already enabled for a given
+		 * CPT LF. The HRM states that inline inbound & outbound paths
+		 * must not be enabled at the same time for a given CPT LF.
+		 */
+		return CPT_AF_ERR_INLINE_IPSEC_INB_ENA;
+	}
+	/* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
+	if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO))
+		return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
+
+	nix_sel = (blkaddr == BLKADDR_CPT1) ? 1 : 0;
+	/* Enable CPT LF for IPsec inline inbound operations */
+	if (req->enable)
+		val |= BIT_ULL(9);
+	else
+		val &= ~BIT_ULL(9);
+
+	val |= (u64)nix_sel << 8;
+	rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+	if (sso_pf_func) {
+		/* Set SSO_PF_FUNC */
+		val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+		val |= (u64)sso_pf_func << 32;
+		val |= (u64)req->nix_pf_func << 48;
+		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+	}
+	if (req->sso_pf_func_ovrd)
+		/* Set SSO_PF_FUNC_OVRD for inline IPSec */
+		rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1);
+
+	/* Configure the X2P Link register with the cpt base channel number and
+	 * range of channels it should propagate to X2P
+	 */
+	if (!is_rvu_otx2(rvu)) {
+		val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
+		val |= rvu->hw->cpt_chan_base;
+
+		rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
+		rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
+	}
+
+	return 0;
+}
+
+static int cpt_inline_ipsec_cfg_outbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+					 struct cpt_inline_ipsec_cfg_msg *req)
+{
+	u16 nix_pf_func = req->nix_pf_func;
+	int nix_blkaddr;
+	u8 nix_sel;
+	u64 val;
+
+	val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+	if (req->enable && (val & BIT_ULL(9))) {
+		/* IPSec inline inbound path is already enabled for a given
+		 * CPT LF, HRM states that inline inbound & outbound paths
+		 * must not be enabled at the same time for a given CPT LF
+		 */
+		return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA;
+	}
+
+	/* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
+	if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX))
+		return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
+
+	/* Enable CPT LF for IPsec inline outbound operations */
+	if (req->enable)
+		val |= BIT_ULL(16);
+	else
+		val &= ~BIT_ULL(16);
+	rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+	if (nix_pf_func) {
+		/* Set NIX_PF_FUNC */
+		val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+		val |= (u64)nix_pf_func << 48;
+		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+
+		nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, nix_pf_func);
+		nix_sel = (nix_blkaddr == BLKADDR_NIX0) ? 0 : 1;
+
+		val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+		val |= (u64)nix_sel << 8;
+		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+	}
+
+	return 0;
+}
+
+int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
+					  struct cpt_inline_ipsec_cfg_msg *req,
+					  struct msg_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	struct rvu_block *block;
+	int cptlf, blkaddr, ret;
+	u16 actual_slot;
+
+	blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
+					    req->slot, &actual_slot);
+	if (blkaddr < 0)
+		return CPT_AF_ERR_LF_INVALID;
+
+	block = &rvu->hw->block[blkaddr];
+
+	cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
+	if (cptlf < 0)
+		return CPT_AF_ERR_LF_INVALID;
+
+	switch (req->dir) {
+	case CPT_INLINE_INBOUND:
+		ret = cpt_inline_ipsec_cfg_inbound(rvu, blkaddr, cptlf, req);
+		break;
+
+	case CPT_INLINE_OUTBOUND:
+		ret = cpt_inline_ipsec_cfg_outbound(rvu, blkaddr, cptlf, req);
+		break;
+
+	default:
+		return CPT_AF_ERR_PARAM;
+	}
+
+	return ret;
+}
+
 static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
 {
 	u64 offset = req->reg_offset;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 9ef4e94..ea3e03f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -4579,6 +4579,118 @@
 	return 0;
 }
 
+#define IPSEC_GEN_CFG_EGRP    GENMASK_ULL(50, 48)
+#define IPSEC_GEN_CFG_OPCODE  GENMASK_ULL(47, 32)
+#define IPSEC_GEN_CFG_PARAM1  GENMASK_ULL(31, 16)
+#define IPSEC_GEN_CFG_PARAM2  GENMASK_ULL(15, 0)
+
+#define CPT_INST_QSEL_BLOCK   GENMASK_ULL(28, 24)
+#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
+#define CPT_INST_QSEL_SLOT    GENMASK_ULL(7, 0)
+
+static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
+				 int blkaddr)
+{
+	u8 cpt_idx, cpt_blkaddr;
+	u64 val = 0;
+
+	cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
+	if (req->enable) {
+		/* Enable context prefetching */
+		if (!is_rvu_otx2(rvu))
+			val = BIT_ULL(51);
+
+		/* Set OPCODE and EGRP */
+		val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
+		val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
+		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
+		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
+
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
+
+		/* Set CPT queue for inline IPSec */
+		val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
+		val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
+				  req->inst_qsel.cpt_pf_func);
+
+		if (!is_rvu_otx2(rvu)) {
+			cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
+						       BLKADDR_CPT1;
+			val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
+		}
+
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
+			    val);
+
+		/* Set CPT credit */
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+			    req->cpt_credit);
+	} else {
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
+			    0x0);
+		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+			    0x3FFFFF);
+	}
+}
+
+int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
+					  struct nix_inline_ipsec_cfg *req,
+					  struct msg_rsp *rsp)
+{
+	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+		return 0;
+
+	nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
+	if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
+		nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
+
+	return 0;
+}
+
+int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
+					     struct nix_inline_ipsec_lf_cfg *req,
+					     struct msg_rsp *rsp)
+{
+	int lf, blkaddr, err;
+	u64 val;
+
+	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+		return 0;
+
+	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
+	if (err)
+		return err;
+
+	if (req->enable) {
+		/* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
+		val = (u64)req->ipsec_cfg0.tt << 44 |
+		      (u64)req->ipsec_cfg0.tag_const << 20 |
+		      (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
+		      req->ipsec_cfg0.lenm1_max;
+
+		if (blkaddr == BLKADDR_NIX1)
+			val |= BIT_ULL(46);
+
+		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
+
+		/* Set SA_IDX_W and SA_IDX_MAX */
+		val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
+		      req->ipsec_cfg1.sa_idx_max;
+		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
+
+		/* Set SA base address */
+		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+			    req->sa_base_addr);
+	} else {
+		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
+		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
+		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+			    0x0);
+	}
+
+	return 0;
+}
 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
 {
 	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
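
nix_inline_ipsec_cfg() above composes NIX_AF_RX_IPSEC_GEN_CFG from GENMASK_ULL()/FIELD_PREP() field definitions rather than open-coded shifts. The same composition in isolation, using the masks defined in the patch (the helper name and the BIT_ULL(51) prefetch bit mirror the code above; inputs are arbitrary):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define IPSEC_GEN_CFG_EGRP    GENMASK_ULL(50, 48)
#define IPSEC_GEN_CFG_OPCODE  GENMASK_ULL(47, 32)
#define IPSEC_GEN_CFG_PARAM1  GENMASK_ULL(31, 16)
#define IPSEC_GEN_CFG_PARAM2  GENMASK_ULL(15, 0)

static u64 example_ipsec_gen_cfg(u8 egrp, u16 opcode, u16 param1, u16 param2)
{
	u64 val = BIT_ULL(51);	/* context prefetching (CN10K only) */

	val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, egrp);
	val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, opcode);
	val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, param1);
	val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, param2);

	return val;
}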
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 21f1ed4..dbaeb10 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -236,6 +236,8 @@
 #define NIX_AF_RX_DEF_OIP6_DSCP		(0x02F8)
 #define NIX_AF_RX_IPSEC_GEN_CFG		(0x0300)
 #define NIX_AF_RX_CPTX_INST_ADDR	(0x0310)
+#define NIX_AF_RX_CPTX_INST_QSEL(a)	(0x0320ull | (uint64_t)(a) << 3)
+#define NIX_AF_RX_CPTX_CREDIT(a)	(0x0360ull | (uint64_t)(a) << 3)
 #define NIX_AF_NDC_TX_SYNC		(0x03F0)
 #define NIX_AF_AQ_CFG			(0x0400)
 #define NIX_AF_AQ_BASE			(0x0410)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index a51ecd7..8e51a1d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -223,6 +223,7 @@
 #define HW_TSO			0
 #define CN10K_MBOX		1
 #define CN10K_LMTST		2
+#define CN10K_RPM		3
 	unsigned long		cap_flag;
 
 #define LMT_LINE_SIZE		128
@@ -452,6 +453,7 @@
 	if (!is_dev_otx2(pfvf->pdev)) {
 		__set_bit(CN10K_MBOX, &hw->cap_flag);
 		__set_bit(CN10K_LMTST, &hw->cap_flag);
+		__set_bit(CN10K_RPM, &hw->cap_flag);
 	}
 }
 
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index dbfa3bc..38e5924 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -121,14 +121,16 @@
 
 	otx2_get_qset_strings(pfvf, &data, 0);
 
-	for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
-		sprintf(data, "cgx_rxstat%d: ", stats);
-		data += ETH_GSTRING_LEN;
-	}
+	if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
+		for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
+			sprintf(data, "cgx_rxstat%d: ", stats);
+			data += ETH_GSTRING_LEN;
+		}
 
-	for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
-		sprintf(data, "cgx_txstat%d: ", stats);
-		data += ETH_GSTRING_LEN;
+		for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
+			sprintf(data, "cgx_txstat%d: ", stats);
+			data += ETH_GSTRING_LEN;
+		}
 	}
 
 	strcpy(data, "reset_count");
@@ -205,11 +207,15 @@
 						[otx2_drv_stats[stat].index]);
 
 	otx2_get_qset_stats(pfvf, stats, &data);
-	otx2_update_lmac_stats(pfvf);
-	for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
-		*(data++) = pfvf->hw.cgx_rx_stats[stat];
-	for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
-		*(data++) = pfvf->hw.cgx_tx_stats[stat];
+
+	if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
+		otx2_update_lmac_stats(pfvf);
+		for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
+			*(data++) = pfvf->hw.cgx_rx_stats[stat];
+		for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
+			*(data++) = pfvf->hw.cgx_tx_stats[stat];
+	}
+
 	*(data++) = pfvf->reset_count;
 
 	fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
@@ -242,18 +248,19 @@
 static int otx2_get_sset_count(struct net_device *netdev, int sset)
 {
 	struct otx2_nic *pfvf = netdev_priv(netdev);
-	int qstats_count;
+	int qstats_count, mac_stats = 0;
 
 	if (sset != ETH_SS_STATS)
 		return -EINVAL;
 
 	qstats_count = otx2_n_queue_stats *
 		       (pfvf->hw.rx_queues + pfvf->hw.tx_queues);
+	if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag))
+		mac_stats = CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT;
 	otx2_update_lmac_fec_stats(pfvf);
 
 	return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
-	       CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + OTX2_FEC_STATS_CNT
-	       + 1;
+	       mac_stats + OTX2_FEC_STATS_CNT + 1;
 }
 
 /* Get no of queues device supports and current queue count */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index dcf9f27..7d56a92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -625,7 +625,6 @@
 	devlink_param_driverinit_value_set(devlink,
 					   DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
 					   value);
-	devlink_param_publish(devlink, &enable_eth_param);
 	return 0;
 }
 
@@ -636,7 +635,6 @@
 	if (!mlx5_eth_supported(dev))
 		return;
 
-	devlink_param_unpublish(devlink, &enable_eth_param);
 	devlink_param_unregister(devlink, &enable_eth_param);
 }
 
@@ -672,7 +670,6 @@
 	devlink_param_driverinit_value_set(devlink,
 					   DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
 					   value);
-	devlink_param_publish(devlink, &enable_rdma_param);
 	return 0;
 }
 
@@ -681,7 +678,6 @@
 	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
 		return;
 
-	devlink_param_unpublish(devlink, &enable_rdma_param);
 	devlink_param_unregister(devlink, &enable_rdma_param);
 }
 
@@ -706,7 +702,6 @@
 	devlink_param_driverinit_value_set(devlink,
 					   DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
 					   value);
-	devlink_param_publish(devlink, &enable_rdma_param);
 	return 0;
 }
 
@@ -717,7 +712,6 @@
 	if (!mlx5_vnet_supported(dev))
 		return;
 
-	devlink_param_unpublish(devlink, &enable_vnet_param);
 	devlink_param_unregister(devlink, &enable_vnet_param);
 }
 
@@ -808,7 +802,6 @@
 	if (err)
 		goto params_reg_err;
 	mlx5_devlink_set_params_init_values(devlink);
-	devlink_params_publish(devlink);
 
 	err = mlx5_devlink_auxdev_params_register(devlink);
 	if (err)
@@ -818,6 +811,7 @@
 	if (err)
 		goto traps_reg_err;
 
+	devlink_params_publish(devlink);
 	return 0;
 
 traps_reg_err:
@@ -832,9 +826,9 @@
 
 void mlx5_devlink_unregister(struct devlink *devlink)
 {
+	devlink_params_unpublish(devlink);
 	mlx5_devlink_traps_unregister(devlink);
 	mlx5_devlink_auxdev_params_unregister(devlink);
-	devlink_params_unpublish(devlink);
 	devlink_params_unregister(devlink, mlx5_devlink_params,
 				  ARRAY_SIZE(mlx5_devlink_params));
 	devlink_unregister(devlink);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f080fab..8c63497 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -90,7 +90,6 @@
 		struct devlink_health_reporter *fw_fatal;
 	} health;
 	struct mlxsw_env *env;
-	bool is_initialized; /* Denotes if core was already initialized. */
 	unsigned long driver_priv[];
 	/* driver_priv has to be always the last item */
 };
@@ -1995,12 +1994,6 @@
 	if (err)
 		goto err_health_init;
 
-	if (mlxsw_driver->init) {
-		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
-		if (err)
-			goto err_driver_init;
-	}
-
 	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
 	if (err)
 		goto err_hwmon_init;
@@ -2014,7 +2007,12 @@
 	if (err)
 		goto err_env_init;
 
-	mlxsw_core->is_initialized = true;
+	if (mlxsw_driver->init) {
+		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
+		if (err)
+			goto err_driver_init;
+	}
+
 	devlink_params_publish(devlink);
 
 	if (!reload)
@@ -2022,14 +2020,13 @@
 
 	return 0;
 
+err_driver_init:
+	mlxsw_env_fini(mlxsw_core->env);
 err_env_init:
 	mlxsw_thermal_fini(mlxsw_core->thermal);
 err_thermal_init:
 	mlxsw_hwmon_fini(mlxsw_core->hwmon);
 err_hwmon_init:
-	if (mlxsw_core->driver->fini)
-		mlxsw_core->driver->fini(mlxsw_core);
-err_driver_init:
 	mlxsw_core_health_fini(mlxsw_core);
 err_health_init:
 err_fw_rev_validate:
@@ -2100,12 +2097,11 @@
 	}
 
 	devlink_params_unpublish(devlink);
-	mlxsw_core->is_initialized = false;
+	if (mlxsw_core->driver->fini)
+		mlxsw_core->driver->fini(mlxsw_core);
 	mlxsw_env_fini(mlxsw_core->env);
 	mlxsw_thermal_fini(mlxsw_core->thermal);
 	mlxsw_hwmon_fini(mlxsw_core->hwmon);
-	if (mlxsw_core->driver->fini)
-		mlxsw_core->driver->fini(mlxsw_core);
 	mlxsw_core_health_fini(mlxsw_core);
 	if (!reload)
 		mlxsw_core_params_unregister(mlxsw_core);
@@ -2939,49 +2935,6 @@
 	return mlxsw_core->env;
 }
 
-bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core)
-{
-	return mlxsw_core->is_initialized;
-}
-
-int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module)
-{
-	enum mlxsw_reg_pmtm_module_type module_type;
-	char pmtm_pl[MLXSW_REG_PMTM_LEN];
-	int err;
-
-	mlxsw_reg_pmtm_pack(pmtm_pl, module);
-	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
-	if (err)
-		return err;
-	mlxsw_reg_pmtm_unpack(pmtm_pl, &module_type);
-
-	/* Here we need to get the module width according to the module type. */
-
-	switch (module_type) {
-	case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X:
-	case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD:
-	case MLXSW_REG_PMTM_MODULE_TYPE_OSFP:
-		return 8;
-	case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X:
-	case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X:
-	case MLXSW_REG_PMTM_MODULE_TYPE_QSFP:
-		return 4;
-	case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X:
-	case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X:
-	case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD:
-	case MLXSW_REG_PMTM_MODULE_TYPE_DSFP:
-		return 2;
-	case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X:
-	case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X:
-	case MLXSW_REG_PMTM_MODULE_TYPE_SFP:
-		return 1;
-	default:
-		return -EINVAL;
-	}
-}
-EXPORT_SYMBOL(mlxsw_core_module_max_width);
-
 static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
 				    const char *buf, size_t size)
 {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 80712dc..12023a5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -249,8 +249,6 @@
 				 u8 local_port);
 bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port);
 struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core);
-bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core);
-int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module);
 
 int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
 bool mlxsw_core_schedule_work(struct work_struct *work);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 3713c45..9e36717 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -5,6 +5,7 @@
 #include <linux/err.h>
 #include <linux/ethtool.h>
 #include <linux/sfp.h>
+#include <linux/mutex.h>
 
 #include "core.h"
 #include "core_env.h"
@@ -14,12 +15,14 @@
 struct mlxsw_env_module_info {
 	u64 module_overheat_counter;
 	bool is_overheat;
+	int num_ports_mapped;
+	int num_ports_up;
 };
 
 struct mlxsw_env {
 	struct mlxsw_core *core;
 	u8 module_count;
-	spinlock_t module_info_lock; /* Protects 'module_info'. */
+	struct mutex module_info_lock; /* Protects 'module_info'. */
 	struct mlxsw_env_module_info module_info[];
 };
 
@@ -389,6 +392,59 @@
 }
 EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page);
 
+static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 module)
+{
+	char pmaos_pl[MLXSW_REG_PMAOS_LEN];
+
+	mlxsw_reg_pmaos_pack(pmaos_pl, module);
+	mlxsw_reg_pmaos_rst_set(pmaos_pl, true);
+
+	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
+}
+
+int mlxsw_env_reset_module(struct net_device *netdev,
+			   struct mlxsw_core *mlxsw_core, u8 module, u32 *flags)
+{
+	struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+	u32 req = *flags;
+	int err;
+
+	if (!(req & ETH_RESET_PHY) &&
+	    !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT)))
+		return 0;
+
+	if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+		return -EINVAL;
+
+	mutex_lock(&mlxsw_env->module_info_lock);
+
+	if (mlxsw_env->module_info[module].num_ports_up) {
+		netdev_err(netdev, "Cannot reset module when ports using it are administratively up\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (mlxsw_env->module_info[module].num_ports_mapped > 1 &&
+	    !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) {
+		netdev_err(netdev, "Cannot reset module without \"phy-shared\" flag when shared by multiple ports\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	err = mlxsw_env_module_reset(mlxsw_core, module);
+	if (err) {
+		netdev_err(netdev, "Failed to reset module\n");
+		goto out;
+	}
+
+	*flags &= ~(ETH_RESET_PHY | (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT));
+
+out:
+	mutex_unlock(&mlxsw_env->module_info_lock);
+	return err;
+}
+EXPORT_SYMBOL(mlxsw_env_reset_module);
+
 static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
 					    u8 module,
 					    bool *p_has_temp_sensor)
@@ -482,22 +538,32 @@
 	return 0;
 }
 
-static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg,
-				      char *mtwe_pl, void *priv)
+struct mlxsw_env_module_temp_warn_event {
+	struct mlxsw_env *mlxsw_env;
+	char mtwe_pl[MLXSW_REG_MTWE_LEN];
+	struct work_struct work;
+};
+
+static void mlxsw_env_mtwe_event_work(struct work_struct *work)
 {
-	struct mlxsw_env *mlxsw_env = priv;
+	struct mlxsw_env_module_temp_warn_event *event;
+	struct mlxsw_env *mlxsw_env;
 	int i, sensor_warning;
 	bool is_overheat;
 
+	event = container_of(work, struct mlxsw_env_module_temp_warn_event,
+			     work);
+	mlxsw_env = event->mlxsw_env;
+
 	for (i = 0; i < mlxsw_env->module_count; i++) {
 		/* 64-127 of sensor_index are mapped to the port modules
 		 * sequentially (module 0 is mapped to sensor_index 64,
 		 * module 1 to sensor_index 65 and so on)
 		 */
 		sensor_warning =
-			mlxsw_reg_mtwe_sensor_warning_get(mtwe_pl,
+			mlxsw_reg_mtwe_sensor_warning_get(event->mtwe_pl,
 							  i + MLXSW_REG_MTMP_MODULE_INDEX_MIN);
-		spin_lock(&mlxsw_env->module_info_lock);
+		mutex_lock(&mlxsw_env->module_info_lock);
 		is_overheat =
 			mlxsw_env->module_info[i].is_overheat;
 
@@ -507,13 +573,13 @@
 			 * warning OR current state in "no warning" and MTWE
 			 * does not report warning.
 			 */
-			spin_unlock(&mlxsw_env->module_info_lock);
+			mutex_unlock(&mlxsw_env->module_info_lock);
 			continue;
 		} else if (is_overheat && !sensor_warning) {
 			/* MTWE reports "no warning", turn is_overheat off.
 			 */
 			mlxsw_env->module_info[i].is_overheat = false;
-			spin_unlock(&mlxsw_env->module_info_lock);
+			mutex_unlock(&mlxsw_env->module_info_lock);
 		} else {
 			/* Current state is "no warning" and MTWE reports
 			 * "warning", increase the counter and turn is_overheat
@@ -521,13 +587,32 @@
 			 */
 			mlxsw_env->module_info[i].is_overheat = true;
 			mlxsw_env->module_info[i].module_overheat_counter++;
-			spin_unlock(&mlxsw_env->module_info_lock);
+			mutex_unlock(&mlxsw_env->module_info_lock);
 		}
 	}
+
+	kfree(event);
+}
+
+static void
+mlxsw_env_mtwe_listener_func(const struct mlxsw_reg_info *reg, char *mtwe_pl,
+			     void *priv)
+{
+	struct mlxsw_env_module_temp_warn_event *event;
+	struct mlxsw_env *mlxsw_env = priv;
+
+	event = kmalloc(sizeof(*event), GFP_ATOMIC);
+	if (!event)
+		return;
+
+	event->mlxsw_env = mlxsw_env;
+	memcpy(event->mtwe_pl, mtwe_pl, MLXSW_REG_MTWE_LEN);
+	INIT_WORK(&event->work, mlxsw_env_mtwe_event_work);
+	mlxsw_core_schedule_work(&event->work);
 }
 
 static const struct mlxsw_listener mlxsw_env_temp_warn_listener =
-	MLXSW_EVENTL(mlxsw_env_mtwe_event_func, MTWE, MTWE);
+	MLXSW_EVENTL(mlxsw_env_mtwe_listener_func, MTWE, MTWE);
 
 static int mlxsw_env_temp_warn_event_register(struct mlxsw_core *mlxsw_core)
 {
@@ -568,9 +653,9 @@
 			     work);
 	mlxsw_env = event->mlxsw_env;
 
-	spin_lock_bh(&mlxsw_env->module_info_lock);
+	mutex_lock(&mlxsw_env->module_info_lock);
 	mlxsw_env->module_info[event->module].is_overheat = false;
-	spin_unlock_bh(&mlxsw_env->module_info_lock);
+	mutex_unlock(&mlxsw_env->module_info_lock);
 
 	err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core, event->module,
 					       &has_temp_sensor);
@@ -652,8 +737,10 @@
 	for (i = 0; i < module_count; i++) {
 		char pmaos_pl[MLXSW_REG_PMAOS_LEN];
 
-		mlxsw_reg_pmaos_pack(pmaos_pl, i,
-				     MLXSW_REG_PMAOS_E_GENERATE_EVENT);
+		mlxsw_reg_pmaos_pack(pmaos_pl, i);
+		mlxsw_reg_pmaos_e_set(pmaos_pl,
+				      MLXSW_REG_PMAOS_E_GENERATE_EVENT);
+		mlxsw_reg_pmaos_ee_set(pmaos_pl, true);
 		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
 		if (err)
 			return err;
@@ -667,23 +754,71 @@
 {
 	struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
 
-	/* Prevent switch driver from accessing uninitialized data. */
-	if (!mlxsw_core_is_initialized(mlxsw_core)) {
-		*p_counter = 0;
-		return 0;
-	}
-
 	if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
 		return -EINVAL;
 
-	spin_lock_bh(&mlxsw_env->module_info_lock);
+	mutex_lock(&mlxsw_env->module_info_lock);
 	*p_counter = mlxsw_env->module_info[module].module_overheat_counter;
-	spin_unlock_bh(&mlxsw_env->module_info_lock);
+	mutex_unlock(&mlxsw_env->module_info_lock);
 
 	return 0;
 }
 EXPORT_SYMBOL(mlxsw_env_module_overheat_counter_get);
 
+void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module)
+{
+	struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+	if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+		return;
+
+	mutex_lock(&mlxsw_env->module_info_lock);
+	mlxsw_env->module_info[module].num_ports_mapped++;
+	mutex_unlock(&mlxsw_env->module_info_lock);
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_map);
+
+void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module)
+{
+	struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+	if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+		return;
+
+	mutex_lock(&mlxsw_env->module_info_lock);
+	mlxsw_env->module_info[module].num_ports_mapped--;
+	mutex_unlock(&mlxsw_env->module_info_lock);
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_unmap);
+
+int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module)
+{
+	struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+	if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+		return -EINVAL;
+
+	mutex_lock(&mlxsw_env->module_info_lock);
+	mlxsw_env->module_info[module].num_ports_up++;
+	mutex_unlock(&mlxsw_env->module_info_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_up);
+
+void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module)
+{
+	struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+	if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+		return;
+
+	mutex_lock(&mlxsw_env->module_info_lock);
+	mlxsw_env->module_info[module].num_ports_up--;
+	mutex_unlock(&mlxsw_env->module_info_lock);
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_down);
+
 int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
 {
 	char mgpir_pl[MLXSW_REG_MGPIR_LEN];
@@ -702,7 +837,7 @@
 	if (!env)
 		return -ENOMEM;
 
-	spin_lock_init(&env->module_info_lock);
+	mutex_init(&env->module_info_lock);
 	env->core = mlxsw_core;
 	env->module_count = module_count;
 	*p_env = env;
@@ -732,6 +867,7 @@
 err_module_plug_event_register:
 	mlxsw_env_temp_warn_event_unregister(env);
 err_temp_warn_event_register:
+	mutex_destroy(&env->module_info_lock);
 	kfree(env);
 	return err;
 }
@@ -742,5 +878,6 @@
 	/* Make sure there is no more event work scheduled. */
 	mlxsw_core_flush_owq();
 	mlxsw_env_temp_warn_event_unregister(env);
+	mutex_destroy(&env->module_info_lock);
 	kfree(env);
 }
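
With module_info_lock converted from a spinlock to a mutex, the MTWE trap handler can no longer touch module_info directly from atomic context; it now copies the register payload into a GFP_ATOMIC work item and does the real processing in process context, where sleeping locks are allowed. The same pattern reduced to its essentials (names are illustrative, not mlxsw API):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>

struct example_event {
	struct work_struct work;
	char payload[64];	/* copy of the register payload */
};

static void example_event_work(struct work_struct *work)
{
	struct example_event *event =
		container_of(work, struct example_event, work);

	/* safe to take the mutex and do the slow processing here */

	kfree(event);
}

/* called from atomic (trap) context: copy the payload and defer the work */
static void example_event_func(const char *payload)
{
	struct example_event *event;

	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;

	memcpy(event->payload, payload, sizeof(event->payload));
	INIT_WORK(&event->work, example_event_work);
	schedule_work(&event->work);
}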
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index 0bf5bd0..c486397 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -24,9 +24,22 @@
 				    const struct ethtool_module_eeprom *page,
 				    struct netlink_ext_ack *extack);
 
+int mlxsw_env_reset_module(struct net_device *netdev,
+			   struct mlxsw_core *mlxsw_core, u8 module,
+			   u32 *flags);
+
 int
 mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
 				      u64 *p_counter);
+
+void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module);
+
+void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module);
+
+int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module);
+
+void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module);
+
 int mlxsw_env_init(struct mlxsw_core *core, struct mlxsw_env **p_env);
 void mlxsw_env_fini(struct mlxsw_env *env);
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index d9d56c4..9644e9c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -54,8 +54,20 @@
 	return 0;
 }
 
-static int mlxsw_m_port_dummy_open_stop(struct net_device *dev)
+static int mlxsw_m_port_open(struct net_device *dev)
 {
+	struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
+	struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
+
+	return mlxsw_env_module_port_up(mlxsw_m->core, mlxsw_m_port->module);
+}
+
+static int mlxsw_m_port_stop(struct net_device *dev)
+{
+	struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
+	struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
+
+	mlxsw_env_module_port_down(mlxsw_m->core, mlxsw_m_port->module);
 	return 0;
 }
 
@@ -70,8 +82,8 @@
 }
 
 static const struct net_device_ops mlxsw_m_port_netdev_ops = {
-	.ndo_open		= mlxsw_m_port_dummy_open_stop,
-	.ndo_stop		= mlxsw_m_port_dummy_open_stop,
+	.ndo_open		= mlxsw_m_port_open,
+	.ndo_stop		= mlxsw_m_port_stop,
 	.ndo_get_devlink_port	= mlxsw_m_port_get_devlink_port,
 };
 
@@ -124,11 +136,21 @@
 						   page, extack);
 }
 
+static int mlxsw_m_reset(struct net_device *netdev, u32 *flags)
+{
+	struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+	struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+	return mlxsw_env_reset_module(netdev, core, mlxsw_m_port->module,
+				      flags);
+}
+
 static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
 	.get_drvinfo		= mlxsw_m_module_get_drvinfo,
 	.get_module_info	= mlxsw_m_get_module_info,
 	.get_module_eeprom	= mlxsw_m_get_module_eeprom,
 	.get_module_eeprom_by_page = mlxsw_m_get_module_eeprom_by_page,
+	.reset			= mlxsw_m_reset,
 };
 
 static int
@@ -266,6 +288,7 @@
 
 	if (WARN_ON_ONCE(module >= max_ports))
 		return -EINVAL;
+	mlxsw_env_module_port_map(mlxsw_m->core, module);
 	mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports;
 
 	return 0;
@@ -274,6 +297,7 @@
 static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module)
 {
 	mlxsw_m->module_to_port[module] = -1;
+	mlxsw_env_module_port_unmap(mlxsw_m->core, module);
 }
 
 static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 6fbda6e..6c96e12 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -5681,6 +5681,14 @@
 
 MLXSW_REG_DEFINE(pmaos, MLXSW_REG_PMAOS_ID, MLXSW_REG_PMAOS_LEN);
 
+/* reg_pmaos_rst
+ * Module reset toggle.
+ * Note: Setting reset while module is plugged-in will result in transition to
+ * "initializing" operational state.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, pmaos, rst, 0x00, 31, 1);
+
 /* reg_pmaos_slot_index
  * Slot index.
  * Access: Index
@@ -5693,6 +5701,24 @@
  */
 MLXSW_ITEM32(reg, pmaos, module, 0x00, 16, 8);
 
+enum mlxsw_reg_pmaos_admin_status {
+	MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED = 1,
+	MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED = 2,
+	/* If the module is active and then unplugged, or experienced an error
+	 * event, the operational status should go to "disabled" and can only
+	 * be enabled upon explicit enable command.
+	 */
+	MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED_ONCE = 3,
+};
+
+/* reg_pmaos_admin_status
+ * Module administrative state (the desired state of the module).
+ * Note: To disable a module, all ports associated with the port must be
+ * administatively down first.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmaos, admin_status, 0x00, 8, 4);
+
 /* reg_pmaos_ase
  * Admin state update enable.
  * If this bit is set, admin state will be updated based on admin_state field.
@@ -5721,13 +5747,10 @@
  */
 MLXSW_ITEM32(reg, pmaos, e, 0x04, 0, 2);
 
-static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module,
-					enum mlxsw_reg_pmaos_e e)
+static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module)
 {
 	MLXSW_REG_ZERO(pmaos, payload);
 	mlxsw_reg_pmaos_module_set(payload, module);
-	mlxsw_reg_pmaos_e_set(payload, e);
-	mlxsw_reg_pmaos_ee_set(payload, true);
 }
 
 /* PPLR - Port Physical Loopback Register
@@ -5766,6 +5789,69 @@
 				 MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL : 0);
 }
 
+/* PMTDB - Port Module To local DataBase Register
+ * ----------------------------------------------
+ * The PMTDB register allows to query the possible module<->local port
+ * mapping than can be used in PMLP. It does not represent the actual/current
+ * mapping of the local to module. Actual mapping is only defined by PMLP.
+ */
+#define MLXSW_REG_PMTDB_ID 0x501A
+#define MLXSW_REG_PMTDB_LEN 0x40
+
+MLXSW_REG_DEFINE(pmtdb, MLXSW_REG_PMTDB_ID, MLXSW_REG_PMTDB_LEN);
+
+/* reg_pmtdb_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, slot_index, 0x00, 24, 4);
+
+/* reg_pmtdb_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, module, 0x00, 16, 8);
+
+/* reg_pmtdb_ports_width
+ * Port's width
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, ports_width, 0x00, 12, 4);
+
+/* reg_pmtdb_num_ports
+ * Number of ports in a single module (split/breakout)
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, num_ports, 0x00, 8, 4);
+
+enum mlxsw_reg_pmtdb_status {
+	MLXSW_REG_PMTDB_STATUS_SUCCESS,
+};
+
+/* reg_pmtdb_status
+ * Status
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtdb, status, 0x00, 0, 4);
+
+/* reg_pmtdb_port_num
+ * The local_port value which can be assigned to the module.
+ * In case of more than one port, port<x> represent the /<x> port of
+ * the module.
+ * Access: RO
+ */
+MLXSW_ITEM16_INDEXED(reg, pmtdb, port_num, 0x04, 0, 8, 0x02, 0x00, false);
+
+static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module,
+					u8 ports_width, u8 num_ports)
+{
+	MLXSW_REG_ZERO(pmtdb, payload);
+	mlxsw_reg_pmtdb_slot_index_set(payload, slot_index);
+	mlxsw_reg_pmtdb_module_set(payload, module);
+	mlxsw_reg_pmtdb_ports_width_set(payload, ports_width);
+	mlxsw_reg_pmtdb_num_ports_set(payload, num_ports);
+}
+
 /* PMPE - Port Module Plug/Unplug Event Register
  * ---------------------------------------------
  * This register reports any operational status change of a module.
@@ -5860,67 +5946,51 @@
 	mlxsw_reg_pddr_page_select_set(payload, page_select);
 }
 
-/* PMTM - Port Module Type Mapping Register
- * ----------------------------------------
- * The PMTM allows query or configuration of module types.
+/* PLLP - Port Local port to Label Port mapping Register
+ * -----------------------------------------------------
+ * The PLLP register returns the mapping from Local Port into Label Port.
  */
-#define MLXSW_REG_PMTM_ID 0x5067
-#define MLXSW_REG_PMTM_LEN 0x10
+#define MLXSW_REG_PLLP_ID 0x504A
+#define MLXSW_REG_PLLP_LEN 0x10
 
-MLXSW_REG_DEFINE(pmtm, MLXSW_REG_PMTM_ID, MLXSW_REG_PMTM_LEN);
+MLXSW_REG_DEFINE(pllp, MLXSW_REG_PLLP_ID, MLXSW_REG_PLLP_LEN);
 
-/* reg_pmtm_module
- * Module number.
+/* reg_pllp_local_port
+ * Local port number.
  * Access: Index
  */
-MLXSW_ITEM32(reg, pmtm, module, 0x00, 16, 8);
+MLXSW_ITEM32(reg, pllp, local_port, 0x00, 16, 8);
 
-enum mlxsw_reg_pmtm_module_type {
-	/* Backplane with 4 lanes */
-	MLXSW_REG_PMTM_MODULE_TYPE_BP_4X,
-	/* QSFP */
-	MLXSW_REG_PMTM_MODULE_TYPE_QSFP,
-	/* SFP */
-	MLXSW_REG_PMTM_MODULE_TYPE_SFP,
-	/* Backplane with single lane */
-	MLXSW_REG_PMTM_MODULE_TYPE_BP_1X = 4,
-	/* Backplane with two lane */
-	MLXSW_REG_PMTM_MODULE_TYPE_BP_2X = 8,
-	/* Chip2Chip4x */
-	MLXSW_REG_PMTM_MODULE_TYPE_C2C4X = 10,
-	/* Chip2Chip2x */
-	MLXSW_REG_PMTM_MODULE_TYPE_C2C2X,
-	/* Chip2Chip1x */
-	MLXSW_REG_PMTM_MODULE_TYPE_C2C1X,
-	/* QSFP-DD */
-	MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD = 14,
-	/* OSFP */
-	MLXSW_REG_PMTM_MODULE_TYPE_OSFP,
-	/* SFP-DD */
-	MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD,
-	/* DSFP */
-	MLXSW_REG_PMTM_MODULE_TYPE_DSFP,
-	/* Chip2Chip8x */
-	MLXSW_REG_PMTM_MODULE_TYPE_C2C8X,
-};
-
-/* reg_pmtm_module_type
- * Module type.
- * Access: RW
+/* reg_pllp_label_port
+ * Front panel label of the port.
+ * Access: RO
  */
-MLXSW_ITEM32(reg, pmtm, module_type, 0x04, 0, 4);
+MLXSW_ITEM32(reg, pllp, label_port, 0x00, 0, 8);
 
-static inline void mlxsw_reg_pmtm_pack(char *payload, u8 module)
+/* reg_pllp_split_num
+ * Label split mapping for local_port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pllp, split_num, 0x04, 0, 4);
+
+/* reg_pllp_slot_index
+ * Slot index (0: Main board).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pllp, slot_index, 0x08, 0, 4);
+
+static inline void mlxsw_reg_pllp_pack(char *payload, u8 local_port)
 {
-	MLXSW_REG_ZERO(pmtm, payload);
-	mlxsw_reg_pmtm_module_set(payload, module);
+	MLXSW_REG_ZERO(pllp, payload);
+	mlxsw_reg_pllp_local_port_set(payload, local_port);
 }
 
-static inline void
-mlxsw_reg_pmtm_unpack(char *payload,
-		      enum mlxsw_reg_pmtm_module_type *module_type)
+static inline void mlxsw_reg_pllp_unpack(char *payload, u8 *label_port,
+					 u8 *split_num, u8 *slot_index)
 {
-	*module_type = mlxsw_reg_pmtm_module_type_get(payload);
+	*label_port = mlxsw_reg_pllp_label_port_get(payload);
+	*split_num = mlxsw_reg_pllp_split_num_get(payload);
+	*slot_index = mlxsw_reg_pllp_slot_index_get(payload);
 }
 
 /* HTGT - Host Trap Group Table
@@ -12200,9 +12270,10 @@
 	MLXSW_REG(pspa),
 	MLXSW_REG(pmaos),
 	MLXSW_REG(pplr),
+	MLXSW_REG(pmtdb),
 	MLXSW_REG(pmpe),
 	MLXSW_REG(pddr),
-	MLXSW_REG(pmtm),
+	MLXSW_REG(pllp),
 	MLXSW_REG(htgt),
 	MLXSW_REG(hpkt),
 	MLXSW_REG(rgcr),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index a56c9e1..a1512be 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -25,9 +25,6 @@
 	MLXSW_RES_ID_MAX_SYSTEM_PORT,
 	MLXSW_RES_ID_MAX_LAG,
 	MLXSW_RES_ID_MAX_LAG_MEMBERS,
-	MLXSW_RES_ID_LOCAL_PORTS_IN_1X,
-	MLXSW_RES_ID_LOCAL_PORTS_IN_2X,
-	MLXSW_RES_ID_LOCAL_PORTS_IN_4X,
 	MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER,
 	MLXSW_RES_ID_CELL_SIZE,
 	MLXSW_RES_ID_MAX_HEADROOM_SIZE,
@@ -84,9 +81,6 @@
 	[MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502,
 	[MLXSW_RES_ID_MAX_LAG] = 0x2520,
 	[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
-	[MLXSW_RES_ID_LOCAL_PORTS_IN_1X] = 0x2610,
-	[MLXSW_RES_ID_LOCAL_PORTS_IN_2X] = 0x2611,
-	[MLXSW_RES_ID_LOCAL_PORTS_IN_4X] = 0x2612,
 	[MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805,	/* Bytes */
 	[MLXSW_RES_ID_CELL_SIZE] = 0x2803,	/* Bytes */
 	[MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811,	/* Bytes */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 250c5a2..0e81ae7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -47,7 +47,7 @@
 
 #define MLXSW_SP1_FWREV_MAJOR 13
 #define MLXSW_SP1_FWREV_MINOR 2008
-#define MLXSW_SP1_FWREV_SUBMINOR 2406
+#define MLXSW_SP1_FWREV_SUBMINOR 3326
 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
 
 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -64,7 +64,7 @@
 
 #define MLXSW_SP2_FWREV_MAJOR 29
 #define MLXSW_SP2_FWREV_MINOR 2008
-#define MLXSW_SP2_FWREV_SUBMINOR 2406
+#define MLXSW_SP2_FWREV_SUBMINOR 3326
 
 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
 	.major = MLXSW_SP2_FWREV_MAJOR,
@@ -79,7 +79,7 @@
 
 #define MLXSW_SP3_FWREV_MAJOR 30
 #define MLXSW_SP3_FWREV_MINOR 2008
-#define MLXSW_SP3_FWREV_SUBMINOR 2406
+#define MLXSW_SP3_FWREV_SUBMINOR 3326
 
 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
 	.major = MLXSW_SP3_FWREV_MAJOR,
@@ -351,12 +351,12 @@
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
 }
 
-static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
+				  u8 local_port, u8 swid)
 {
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	char pspa_pl[MLXSW_REG_PSPA_LEN];
 
-	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
 }
 
@@ -529,55 +529,80 @@
 
 	port_mapping->module = module;
 	port_mapping->width = width;
+	port_mapping->module_width = width;
 	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
 	return 0;
 }
 
-static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
+static int
+mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+			 const struct mlxsw_sp_port_mapping *port_mapping)
 {
-	struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	char pmlp_pl[MLXSW_REG_PMLP_LEN];
-	int i;
+	int i, err;
 
-	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
+	mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->module);
+
+	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
 	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
 	for (i = 0; i < port_mapping->width; i++) {
 		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
 		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
 	}
 
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+	if (err)
+		goto err_pmlp_write;
+	return 0;
+
+err_pmlp_write:
+	mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->module);
+	return err;
 }
 
-static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
+static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+				       u8 module)
 {
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	char pmlp_pl[MLXSW_REG_PMLP_LEN];
 
-	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
+	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
 	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+	mlxsw_env_module_port_unmap(mlxsw_sp->core, module);
 }
 
 static int mlxsw_sp_port_open(struct net_device *dev)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	int err;
 
-	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+	err = mlxsw_env_module_port_up(mlxsw_sp->core,
+				       mlxsw_sp_port->mapping.module);
 	if (err)
 		return err;
+	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+	if (err)
+		goto err_port_admin_status_set;
 	netif_start_queue(dev);
 	return 0;
+
+err_port_admin_status_set:
+	mlxsw_env_module_port_down(mlxsw_sp->core,
+				   mlxsw_sp_port->mapping.module);
+	return err;
 }
 
 static int mlxsw_sp_port_stop(struct net_device *dev)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 
 	netif_stop_queue(dev);
-	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+	mlxsw_env_module_port_down(mlxsw_sp->core,
+				   mlxsw_sp_port->mapping.module);
+	return 0;
 }
 
 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
@@ -1442,29 +1467,68 @@
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
 }
 
+static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
+					u8 local_port, u8 *port_number,
+					u8 *split_port_subnumber,
+					u8 *slot_index)
+{
+	char pllp_pl[MLXSW_REG_PLLP_LEN];
+	int err;
+
+	mlxsw_reg_pllp_pack(pllp_pl, local_port);
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
+	if (err)
+		return err;
+	mlxsw_reg_pllp_unpack(pllp_pl, port_number,
+			      split_port_subnumber, slot_index);
+	return 0;
+}
+
 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-				u8 split_base_local_port,
+				bool split,
 				struct mlxsw_sp_port_mapping *port_mapping)
 {
 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
-	bool split = !!split_base_local_port;
 	struct mlxsw_sp_port *mlxsw_sp_port;
 	u32 lanes = port_mapping->width;
+	u8 split_port_subnumber;
 	struct net_device *dev;
+	u8 port_number;
+	u8 slot_index;
 	bool splittable;
 	int err;
 
+	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
+			local_port);
+		return err;
+	}
+
+	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+			local_port);
+		goto err_port_swid_set;
+	}
+
+	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
+					   &split_port_subnumber, &slot_index);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
+			local_port);
+		goto err_port_label_info_get;
+	}
+
 	splittable = lanes > 1 && !split;
 	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
-				   port_mapping->module + 1, split,
-				   port_mapping->lane / lanes,
-				   splittable, lanes,
-				   mlxsw_sp->base_mac,
+				   port_number, split, split_port_subnumber,
+				   splittable, lanes, mlxsw_sp->base_mac,
 				   sizeof(mlxsw_sp->base_mac));
 	if (err) {
 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
 			local_port);
-		return err;
+		goto err_core_port_init;
 	}
 
 	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
@@ -1480,7 +1544,6 @@
 	mlxsw_sp_port->local_port = local_port;
 	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
 	mlxsw_sp_port->split = split;
-	mlxsw_sp_port->split_base_local_port = split_base_local_port;
 	mlxsw_sp_port->mapping = *port_mapping;
 	mlxsw_sp_port->link.autoneg = 1;
 	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
@@ -1498,20 +1561,6 @@
 	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
 	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
 
-	err = mlxsw_sp_port_module_map(mlxsw_sp_port);
-	if (err) {
-		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
-			mlxsw_sp_port->local_port);
-		goto err_port_module_map;
-	}
-
-	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
-	if (err) {
-		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
-			mlxsw_sp_port->local_port);
-		goto err_port_swid_set;
-	}
-
 	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
 	if (err) {
 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
@@ -1712,21 +1761,24 @@
 err_port_speed_by_width_set:
 err_port_system_port_mapping_set:
 err_dev_addr_init:
-	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
-err_port_swid_set:
-	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
-err_port_module_map:
 	free_percpu(mlxsw_sp_port->pcpu_stats);
 err_alloc_stats:
 	free_netdev(dev);
 err_alloc_etherdev:
 	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
+err_core_port_init:
+err_port_label_info_get:
+	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
+			       MLXSW_PORT_SWID_DISABLED_PORT);
+err_port_swid_set:
+	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, port_mapping->module);
 	return err;
 }
 
 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+	u8 module = mlxsw_sp_port->mapping.module;
 
 	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
 	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
@@ -1742,12 +1794,13 @@
 	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
 	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
 	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
-	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
-	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
 	free_percpu(mlxsw_sp_port->pcpu_stats);
 	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
 	free_netdev(mlxsw_sp_port->dev);
 	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
+	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
+			       MLXSW_PORT_SWID_DISABLED_PORT);
+	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, module);
 }
 
 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
@@ -1789,8 +1842,15 @@
 	kfree(mlxsw_sp_port);
 }
 
+static bool mlxsw_sp_local_port_valid(u8 local_port)
+{
+	return local_port != MLXSW_PORT_CPU_PORT;
+}
+
 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 {
+	if (!mlxsw_sp_local_port_valid(local_port))
+		return false;
 	return mlxsw_sp->ports[local_port] != NULL;
 }
 
@@ -1827,7 +1887,7 @@
 		port_mapping = mlxsw_sp->port_mapping[i];
 		if (!port_mapping)
 			continue;
-		err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
+		err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
 		if (err)
 			goto err_port_create;
 	}
@@ -1894,17 +1954,10 @@
 	kfree(mlxsw_sp->port_mapping);
 }
 
-static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
-{
-	u8 offset = (local_port - 1) % max_width;
-
-	return local_port - offset;
-}
-
 static int
-mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
 			   struct mlxsw_sp_port_mapping *port_mapping,
-			   unsigned int count, u8 offset)
+			   unsigned int count, const char *pmtdb_pl)
 {
 	struct mlxsw_sp_port_mapping split_port_mapping;
 	int err, i;
@@ -1912,8 +1965,13 @@
 	split_port_mapping = *port_mapping;
 	split_port_mapping.width /= count;
 	for (i = 0; i < count; i++) {
-		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
-					   base_port, &split_port_mapping);
+		u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+
+		if (!mlxsw_sp_local_port_valid(s_local_port))
+			continue;
+
+		err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
+					   true, &split_port_mapping);
 		if (err)
 			goto err_port_create;
 		split_port_mapping.lane += split_port_mapping.width;
@@ -1922,49 +1980,34 @@
 	return 0;
 
 err_port_create:
-	for (i--; i >= 0; i--)
-		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
-			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
+	for (i--; i >= 0; i--) {
+		u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+
+		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
+			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
+	}
 	return err;
 }
 
 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
-					 u8 base_port,
-					 unsigned int count, u8 offset)
+					 unsigned int count,
+					 const char *pmtdb_pl)
 {
 	struct mlxsw_sp_port_mapping *port_mapping;
 	int i;
 
 	/* Go over original unsplit ports in the gap and recreate them. */
-	for (i = 0; i < count * offset; i++) {
-		port_mapping = mlxsw_sp->port_mapping[base_port + i];
-		if (!port_mapping)
+	for (i = 0; i < count; i++) {
+		u8 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+
+		port_mapping = mlxsw_sp->port_mapping[local_port];
+		if (!port_mapping || !mlxsw_sp_local_port_valid(local_port))
 			continue;
-		mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
+		mlxsw_sp_port_create(mlxsw_sp, local_port,
+				     false, port_mapping);
 	}
 }
 
-static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
-				       unsigned int count,
-				       unsigned int max_width)
-{
-	enum mlxsw_res_id local_ports_in_x_res_id;
-	int split_width = max_width / count;
-
-	if (split_width == 1)
-		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
-	else if (split_width == 2)
-		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
-	else if (split_width == 4)
-		local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
-	else
-		return -EINVAL;
-
-	if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
-		return -EINVAL;
-	return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
-}
-
 static struct mlxsw_sp_port *
 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 {
@@ -1980,9 +2023,8 @@
 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
 	struct mlxsw_sp_port_mapping port_mapping;
 	struct mlxsw_sp_port *mlxsw_sp_port;
-	int max_width;
-	u8 base_port;
-	int offset;
+	enum mlxsw_reg_pmtdb_status status;
+	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
 	int i;
 	int err;
 
@@ -1994,57 +2036,37 @@
 		return -EINVAL;
 	}
 
-	max_width = mlxsw_core_module_max_width(mlxsw_core,
-						mlxsw_sp_port->mapping.module);
-	if (max_width < 0) {
-		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
-		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
-		return max_width;
-	}
-
-	/* Split port with non-max cannot be split. */
-	if (mlxsw_sp_port->mapping.width != max_width) {
-		netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
-		NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
+	if (mlxsw_sp_port->split) {
+		NL_SET_ERR_MSG_MOD(extack, "Port is already split");
 		return -EINVAL;
 	}
 
-	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
-	if (offset < 0) {
-		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
-		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
-		return -EINVAL;
+	mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module,
+			     mlxsw_sp_port->mapping.module_width / count,
+			     count);
+	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
+		return err;
 	}
 
-	/* Only in case max split is being done, the local port and
-	 * base port may differ.
-	 */
-	base_port = count == max_width ?
-		    mlxsw_sp_cluster_base_port_get(local_port, max_width) :
-		    local_port;
-
-	for (i = 0; i < count * offset; i++) {
-		/* Expect base port to exist and also the one in the middle in
-		 * case of maximal split count.
-		 */
-		if (i == 0 || (count == max_width && i == count / 2))
-			continue;
-
-		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
-			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
-			NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
-			return -EINVAL;
-		}
+	status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
+	if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
+		NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
+		return -EINVAL;
 	}
 
 	port_mapping = mlxsw_sp_port->mapping;
 
-	for (i = 0; i < count; i++)
-		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
-			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
+	for (i = 0; i < count; i++) {
+		u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
 
-	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
-					 count, offset);
+		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
+			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
+	}
+
+	err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
+					 count, pmtdb_pl);
 	if (err) {
 		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
 		goto err_port_split_create;
@@ -2053,7 +2075,7 @@
 	return 0;
 
 err_port_split_create:
-	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
+	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
 	return err;
 }
 
@@ -2062,11 +2084,10 @@
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
 	struct mlxsw_sp_port *mlxsw_sp_port;
+	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
 	unsigned int count;
-	int max_width;
-	u8 base_port;
-	int offset;
 	int i;
+	int err;
 
 	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
 	if (!mlxsw_sp_port) {
@@ -2077,35 +2098,30 @@
 	}
 
 	if (!mlxsw_sp_port->split) {
-		netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
 		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
 		return -EINVAL;
 	}
 
-	max_width = mlxsw_core_module_max_width(mlxsw_core,
-						mlxsw_sp_port->mapping.module);
-	if (max_width < 0) {
-		netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
-		NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
-		return max_width;
+	count = mlxsw_sp_port->mapping.module_width /
+		mlxsw_sp_port->mapping.width;
+
+	mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module,
+			     mlxsw_sp_port->mapping.module_width / count,
+			     count);
+	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
+		return err;
 	}
 
-	count = max_width / mlxsw_sp_port->mapping.width;
+	for (i = 0; i < count; i++) {
+		u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
 
-	offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
-	if (WARN_ON(offset < 0)) {
-		netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
-		NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
-		return -EINVAL;
+		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
+			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
 	}
 
-	base_port = mlxsw_sp_port->split_base_local_port;
-
-	for (i = 0; i < count; i++)
-		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
-			mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
-
-	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
+	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3a43cba..83ab1ea 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -144,7 +144,8 @@
 
 struct mlxsw_sp_port_mapping {
 	u8 module;
-	u8 width;
+	u8 width; /* Number of lanes used by the port */
+	u8 module_width; /* Number of lanes in the module (static) */
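+	/* e.g. a 4-lane module split into two 2-lane ports has
+	 * module_width = 4 and width = 2 on each split port
+	 */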
 	u8 lane;
 };
 
@@ -345,7 +346,6 @@
 		u16 egr_types;
 		struct mlxsw_sp_ptp_port_stats stats;
 	} ptp;
-	u8 split_base_local_port;
 	int max_mtu;
 	u32 max_speed;
 	struct mlxsw_sp_hdroom *hdroom;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 267590a..06f1645 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -1197,6 +1197,15 @@
 	*ranges = mlxsw_rmon_ranges;
 }
 
+static int mlxsw_sp_reset(struct net_device *dev, u32 *flags)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u8 module = mlxsw_sp_port->mapping.module;
+
+	return mlxsw_env_reset_module(dev, mlxsw_sp->core, module, flags);
+}
+
 const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
 	.cap_link_lanes_supported	= true,
 	.get_drvinfo			= mlxsw_sp_port_get_drvinfo,
@@ -1218,6 +1227,7 @@
 	.get_eth_mac_stats		= mlxsw_sp_get_eth_mac_stats,
 	.get_eth_ctrl_stats		= mlxsw_sp_get_eth_ctrl_stats,
 	.get_rmon_stats			= mlxsw_sp_get_rmon_stats,
+	.reset				= mlxsw_sp_reset,
 };
 
 struct mlxsw_sp1_port_link_mode {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 3b8e675..369f6ae7 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -499,8 +499,7 @@
 {
 	struct nfp_reprs *reprs;
 
-	reprs = kzalloc(sizeof(*reprs) +
-			num_reprs * sizeof(struct net_device *), GFP_KERNEL);
+	reprs = kzalloc(struct_size(reprs, reprs, num_reprs), GFP_KERNEL);
 	if (!reprs)
 		return NULL;
 	reprs->num_reprs = num_reprs;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index dfaf10e..ba8c7a3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -2763,25 +2763,6 @@
 	return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
 }
 
-static int qed_configure_filter(struct qed_dev *cdev,
-				struct qed_filter_params *params)
-{
-	enum qed_filter_rx_mode_type accept_flags;
-
-	switch (params->type) {
-	case QED_FILTER_TYPE_UCAST:
-		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
-	case QED_FILTER_TYPE_MCAST:
-		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
-	case QED_FILTER_TYPE_RX_MODE:
-		accept_flags = params->filter.accept_flags;
-		return qed_configure_filter_rx_mode(cdev, accept_flags);
-	default:
-		DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
-		return -EINVAL;
-	}
-}
-
 static int qed_configure_arfs_searcher(struct qed_dev *cdev,
 				       enum qed_filter_config_mode mode)
 {
@@ -2904,7 +2885,9 @@
 	.q_rx_stop = &qed_stop_rxq,
 	.q_tx_start = &qed_start_txq,
 	.q_tx_stop = &qed_stop_txq,
-	.filter_config = &qed_configure_filter,
+	.filter_config_rx_mode = &qed_configure_filter_rx_mode,
+	.filter_config_ucast = &qed_configure_filter_ucast,
+	.filter_config_mcast = &qed_configure_filter_mcast,
 	.fastpath_stop = &qed_fastpath_stop,
 	.eth_cqe_completion = &qed_fp_cqe_completion,
 	.get_vport_stats = &qed_get_vport_stats,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index a2e4dfb..f99b085 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -619,30 +619,28 @@
 				 enum qed_filter_xcast_params_type opcode,
 				 unsigned char mac[ETH_ALEN])
 {
-	struct qed_filter_params filter_cmd;
+	struct qed_filter_ucast_params ucast;
 
-	memset(&filter_cmd, 0, sizeof(filter_cmd));
-	filter_cmd.type = QED_FILTER_TYPE_UCAST;
-	filter_cmd.filter.ucast.type = opcode;
-	filter_cmd.filter.ucast.mac_valid = 1;
-	ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
+	memset(&ucast, 0, sizeof(ucast));
+	ucast.type = opcode;
+	ucast.mac_valid = 1;
+	ether_addr_copy(ucast.mac, mac);
 
-	return edev->ops->filter_config(edev->cdev, &filter_cmd);
+	return edev->ops->filter_config_ucast(edev->cdev, &ucast);
 }
 
 static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
 				  enum qed_filter_xcast_params_type opcode,
 				  u16 vid)
 {
-	struct qed_filter_params filter_cmd;
+	struct qed_filter_ucast_params ucast;
 
-	memset(&filter_cmd, 0, sizeof(filter_cmd));
-	filter_cmd.type = QED_FILTER_TYPE_UCAST;
-	filter_cmd.filter.ucast.type = opcode;
-	filter_cmd.filter.ucast.vlan_valid = 1;
-	filter_cmd.filter.ucast.vlan = vid;
+	memset(&ucast, 0, sizeof(ucast));
+	ucast.type = opcode;
+	ucast.vlan_valid = 1;
+	ucast.vlan = vid;
 
-	return edev->ops->filter_config(edev->cdev, &filter_cmd);
+	return edev->ops->filter_config_ucast(edev->cdev, &ucast);
 }
 
 static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
@@ -1057,18 +1055,17 @@
 				 enum qed_filter_xcast_params_type opcode,
 				 unsigned char *mac, int num_macs)
 {
-	struct qed_filter_params filter_cmd;
+	struct qed_filter_mcast_params mcast;
 	int i;
 
-	memset(&filter_cmd, 0, sizeof(filter_cmd));
-	filter_cmd.type = QED_FILTER_TYPE_MCAST;
-	filter_cmd.filter.mcast.type = opcode;
-	filter_cmd.filter.mcast.num = num_macs;
+	memset(&mcast, 0, sizeof(mcast));
+	mcast.type = opcode;
+	mcast.num = num_macs;
 
 	for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
-		ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
+		ether_addr_copy(mcast.mac[i], mac);
 
-	return edev->ops->filter_config(edev->cdev, &filter_cmd);
+	return edev->ops->filter_config_mcast(edev->cdev, &mcast);
 }
 
 int qede_set_mac_addr(struct net_device *ndev, void *p)
@@ -1194,7 +1191,6 @@
 {
 	enum qed_filter_rx_mode_type accept_flags;
 	struct qede_dev *edev = netdev_priv(ndev);
-	struct qed_filter_params rx_mode;
 	unsigned char *uc_macs, *temp;
 	struct netdev_hw_addr *ha;
 	int rc, uc_count;
@@ -1220,10 +1216,6 @@
 
 	netif_addr_unlock_bh(ndev);
 
-	/* Configure the struct for the Rx mode */
-	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
-	rx_mode.type = QED_FILTER_TYPE_RX_MODE;
-
 	/* Remove all previous unicast secondary macs and multicast macs
 	 * (configure / leave the primary mac)
 	 */
@@ -1271,8 +1263,7 @@
 		qede_config_accept_any_vlan(edev, false);
 	}
 
-	rx_mode.filter.accept_flags = accept_flags;
-	edev->ops->filter_config(edev->cdev, &rx_mode);
+	edev->ops->filter_config_rx_mode(edev->cdev, accept_flags);
 out:
 	kfree(uc_macs);
 }
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 2728df4..8da4b66 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -37,7 +37,7 @@
 	RTL_GIGA_MAC_VER_24,
 	RTL_GIGA_MAC_VER_25,
 	RTL_GIGA_MAC_VER_26,
-	RTL_GIGA_MAC_VER_27,
+	/* support for RTL_GIGA_MAC_VER_27 has been removed */
 	RTL_GIGA_MAC_VER_28,
 	RTL_GIGA_MAC_VER_29,
 	RTL_GIGA_MAC_VER_30,
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 46a6ff9..0199914 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -118,7 +118,6 @@
 	[RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp"			},
 	[RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d",	FIRMWARE_8168D_1},
 	[RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d",	FIRMWARE_8168D_2},
-	[RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp"			},
 	[RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp"			},
 	[RTL_GIGA_MAC_VER_29] = {"RTL8105e",		FIRMWARE_8105E_1},
 	[RTL_GIGA_MAC_VER_30] = {"RTL8105e",		FIRMWARE_8105E_1},
@@ -985,33 +984,6 @@
 	return RTL_R32(tp, OCPAR) & OCPAR_FLAG;
 }
 
-static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
-{
-	RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
-	RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD);
-	RTL_W32(tp, EPHY_RXER_NUM, 0);
-
-	rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
-}
-
-static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
-{
-	r8168dp_1_mdio_access(tp, reg,
-			      OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
-}
-
-static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
-{
-	r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
-
-	mdelay(1);
-	RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD);
-	RTL_W32(tp, EPHY_RXER_NUM, 0);
-
-	return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
-		RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT;
-}
-
 #define R8168DP_1_MDIO_ACCESS_BIT	0x00020000
 
 static void r8168dp_2_mdio_start(struct rtl8169_private *tp)
@@ -1053,9 +1025,6 @@
 static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
 {
 	switch (tp->mac_version) {
-	case RTL_GIGA_MAC_VER_27:
-		r8168dp_1_mdio_write(tp, location, val);
-		break;
 	case RTL_GIGA_MAC_VER_28:
 	case RTL_GIGA_MAC_VER_31:
 		r8168dp_2_mdio_write(tp, location, val);
@@ -1072,8 +1041,6 @@
 static int rtl_readphy(struct rtl8169_private *tp, int location)
 {
 	switch (tp->mac_version) {
-	case RTL_GIGA_MAC_VER_27:
-		return r8168dp_1_mdio_read(tp, location);
 	case RTL_GIGA_MAC_VER_28:
 	case RTL_GIGA_MAC_VER_31:
 		return r8168dp_2_mdio_read(tp, location);
@@ -1235,7 +1202,6 @@
 static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
 {
 	switch (tp->mac_version) {
-	case RTL_GIGA_MAC_VER_27:
 	case RTL_GIGA_MAC_VER_28:
 	case RTL_GIGA_MAC_VER_31:
 		return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
@@ -2040,8 +2006,7 @@
 
 		/* 8168DP family. */
 		/* It seems this early RTL8168dp version never made it to
-		 * the wild. Let's see whether somebody complains, if not
-		 * we'll remove support for this chip version completely.
+		 * the wild. Support has been removed.
 		 * { 0x7cf, 0x288,      RTL_GIGA_MAC_VER_27 },
 		 */
 		{ 0x7cf, 0x28a,	RTL_GIGA_MAC_VER_28 },
@@ -2371,7 +2336,7 @@
 			r8168c_hw_jumbo_disable(tp);
 		}
 		break;
-	case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
+	case RTL_GIGA_MAC_VER_28:
 		if (jumbo)
 			r8168dp_hw_jumbo_enable(tp);
 		else
@@ -3719,7 +3684,6 @@
 		[RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3,
 		[RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d,
 		[RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d,
-		[RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d,
 		[RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4,
 		[RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1,
 		[RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2,
@@ -3982,7 +3946,6 @@
 		goto no_reset;
 
 	switch (tp->mac_version) {
-	case RTL_GIGA_MAC_VER_27:
 	case RTL_GIGA_MAC_VER_28:
 	case RTL_GIGA_MAC_VER_31:
 		rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000);
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index 50f0f62..f7ad548 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -548,64 +548,6 @@
 	rtl8168d_apply_firmware_cond(tp, phydev, 0xb300);
 }
 
-static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp,
-				     struct phy_device *phydev)
-{
-	static const struct phy_reg phy_reg_init[] = {
-		{ 0x1f, 0x0002 },
-		{ 0x10, 0x0008 },
-		{ 0x0d, 0x006c },
-
-		{ 0x1f, 0x0000 },
-		{ 0x0d, 0xf880 },
-
-		{ 0x1f, 0x0001 },
-		{ 0x17, 0x0cc0 },
-
-		{ 0x1f, 0x0001 },
-		{ 0x0b, 0xa4d8 },
-		{ 0x09, 0x281c },
-		{ 0x07, 0x2883 },
-		{ 0x0a, 0x6b35 },
-		{ 0x1d, 0x3da4 },
-		{ 0x1c, 0xeffd },
-		{ 0x14, 0x7f52 },
-		{ 0x18, 0x7fc6 },
-		{ 0x08, 0x0601 },
-		{ 0x06, 0x4063 },
-		{ 0x10, 0xf074 },
-		{ 0x1f, 0x0003 },
-		{ 0x13, 0x0789 },
-		{ 0x12, 0xf4bd },
-		{ 0x1a, 0x04fd },
-		{ 0x14, 0x84b0 },
-		{ 0x1f, 0x0000 },
-		{ 0x00, 0x9200 },
-
-		{ 0x1f, 0x0005 },
-		{ 0x01, 0x0340 },
-		{ 0x1f, 0x0001 },
-		{ 0x04, 0x4000 },
-		{ 0x03, 0x1d21 },
-		{ 0x02, 0x0c32 },
-		{ 0x01, 0x0200 },
-		{ 0x00, 0x5554 },
-		{ 0x04, 0x4800 },
-		{ 0x04, 0x4000 },
-		{ 0x04, 0xf000 },
-		{ 0x03, 0xdf01 },
-		{ 0x02, 0xdf20 },
-		{ 0x01, 0x101a },
-		{ 0x00, 0xa0ff },
-		{ 0x04, 0xf800 },
-		{ 0x04, 0xf000 },
-		{ 0x1f, 0x0000 },
-	};
-
-	rtl_writephy_batch(phydev, phy_reg_init);
-	r8168d_modify_extpage(phydev, 0x0023, 0x16, 0xffff, 0x0000);
-}
-
 static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp,
 				     struct phy_device *phydev)
 {
@@ -1332,7 +1274,6 @@
 		[RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config,
 		[RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config,
 		[RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config,
-		[RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config,
 		[RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config,
 		[RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config,
 		[RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config,
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index b03a051..0ab6a40 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -81,6 +81,30 @@
 	return 0;
 }
 
+static void
+nsim_get_channels(struct net_device *dev, struct ethtool_channels *ch)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	ch->max_combined = ns->nsim_bus_dev->num_queues;
+	ch->combined_count = ns->ethtool.channels;
+}
+
+static int
+nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+	int err;
+
+	err = netif_set_real_num_queues(dev, ch->combined_count,
+					ch->combined_count);
+	if (err)
+		return err;
+
+	ns->ethtool.channels = ch->combined_count;
+	return 0;
+}
+
 static int
 nsim_get_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam)
 {
@@ -118,6 +142,8 @@
 	.get_coalesce			= nsim_get_coalesce,
 	.get_ringparam			= nsim_get_ringparam,
 	.set_ringparam			= nsim_set_ringparam,
+	.get_channels			= nsim_get_channels,
+	.set_channels			= nsim_set_channels,
 	.get_fecparam			= nsim_get_fecparam,
 	.set_fecparam			= nsim_set_fecparam,
 };
@@ -141,6 +167,8 @@
 	ns->ethtool.fec.fec = ETHTOOL_FEC_NONE;
 	ns->ethtool.fec.active_fec = ETHTOOL_FEC_NONE;
 
+	ns->ethtool.channels = ns->nsim_bus_dev->num_queues;
+
 	ethtool = debugfs_create_dir("ethtool", ns->nsim_dev_port->ddir);
 
 	debugfs_create_u32("get_err", 0600, ethtool, &ns->ethtool.get_err);
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 793c86d..d42eec0 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -62,6 +62,7 @@
 struct nsim_ethtool {
 	u32 get_err;
 	u32 set_err;
+	u32 channels;
 	struct nsim_ethtool_pauseparam pauseparam;
 	struct ethtool_coalesce coalesce;
 	struct ethtool_ringparam ring;
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index bdac087..719860a 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -1420,6 +1420,19 @@
 	.get_sset_count = at803x_get_sset_count,
 	.get_strings = at803x_get_strings,
 	.get_stats = at803x_get_stats,
+}, {
+	/* QCA8327 */
+	.phy_id = QCA8327_PHY_ID,
+	.phy_id_mask = QCA8K_PHY_ID_MASK,
+	.name = "QCA PHY 8327",
+	/* PHY_GBIT_FEATURES */
+	.probe = at803x_probe,
+	.flags = PHY_IS_INTERNAL,
+	.config_init = qca83xx_config_init,
+	.soft_reset = genphy_soft_reset,
+	.get_sset_count = at803x_get_sset_count,
+	.get_strings = at803x_get_strings,
+	.get_stats = at803x_get_stats,
 }, };
 
 module_phy_driver(at803x_driver);
@@ -1430,6 +1443,8 @@
 	{ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
 	{ PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
 	{ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
+	{ PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
+	{ PHY_ID_MATCH_EXACT(QCA8327_PHY_ID) },
 	{ }
 };
 
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index 051c43a..f78670b 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -335,7 +335,6 @@
 		return r;
 	}
 
-	dev_dbg(dev, "I2C driver loaded\n");
 	return 0;
 }
 
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index 8edf761..00689e1 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -26,10 +26,8 @@
 	pr_info("Probing NFC microread\n");
 
 	phy = nfc_mei_phy_alloc(cldev);
-	if (!phy) {
-		pr_err("Cannot allocate memory for microread mei phy.\n");
+	if (!phy)
 		return -ENOMEM;
-	}
 
 	r = microread_probe(phy, &mei_phy_ops, LLC_NOP_NAME,
 			    MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD,
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index edac56b..e83f655 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -76,10 +76,8 @@
 	struct nci_data_hdr *hdr;
 
 	skb = nci_skb_alloc(priv->ndev, (NCI_DATA_HDR_SIZE + plen), GFP_KERNEL);
-	if (!skb) {
-		pr_err("no memory for data\n");
+	if (!skb)
 		return NULL;
-	}
 
 	hdr = skb_put(skb, NCI_DATA_HDR_SIZE);
 	hdr->conn_id = NCI_CORE_LC_CONNID_PROP_FW_DL;
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index e6bf8cf..f5610b6 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -128,7 +128,6 @@
 static irqreturn_t pn533_i2c_irq_thread_fn(int irq, void *data)
 {
 	struct pn533_i2c_phy *phy = data;
-	struct i2c_client *client;
 	struct sk_buff *skb = NULL;
 	int r;
 
@@ -137,9 +136,6 @@
 		return IRQ_NONE;
 	}
 
-	client = phy->i2c_dev;
-	dev_dbg(&client->dev, "IRQ\n");
-
 	if (phy->hard_fault != 0)
 		return IRQ_HANDLED;
 
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index 2f3f3fe..da18033 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -1235,8 +1235,6 @@
 {
 	struct pn533 *dev = from_timer(dev, t, listen_timer);
 
-	dev_dbg(dev->dev, "Listen mode timeout\n");
-
 	dev->cancel_listen = 1;
 
 	pn533_poll_next_mod(dev);
@@ -2173,7 +2171,7 @@
 	}
 
 	if (skb == NULL) {
-		pr_err("NULL Frame -> link is dead\n");
+		dev_err(dev->dev, "NULL Frame -> link is dead\n");
 		goto sched_wq;
 	}
 
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index 5c10aac..c493f2d 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -22,13 +22,9 @@
 	struct nfc_mei_phy *phy;
 	int r;
 
-	pr_info("Probing NFC pn544\n");
-
 	phy = nfc_mei_phy_alloc(cldev);
-	if (!phy) {
-		pr_err("Cannot allocate memory for pn544 mei phy.\n");
+	if (!phy)
 		return -ENOMEM;
-	}
 
 	r = pn544_hci_probe(phy, &mei_phy_ops, LLC_NOP_NAME,
 			    MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD,
@@ -46,8 +42,6 @@
 {
 	struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev);
 
-	pr_info("Removing pn544\n");
-
 	pn544_hci_remove(phy->hdev);
 
 	nfc_mei_phy_free(phy);
diff --git a/drivers/ptp/idt8a340_reg.h b/drivers/ptp/idt8a340_reg.h
index ac524cf..1c52101 100644
--- a/drivers/ptp/idt8a340_reg.h
+++ b/drivers/ptp/idt8a340_reg.h
@@ -5,7 +5,7 @@
  * https://github.com/richardcochran/regen
  *
  * Hand modified to include some HW registers.
- * Based on 4.8.0, SCSR rev C commit a03c7ae5
+ * Based on 5.2.0, Family Programming Guide (Sept 30, 2020)
  */
 #ifndef HAVE_IDT8A340_REG
 #define HAVE_IDT8A340_REG
@@ -100,6 +100,7 @@
 
 #define RESET_CTRL                        0xc000
 #define SM_RESET                          0x0012
+#define SM_RESET_V520                     0x0013
 #define SM_RESET_CMD                      0x5A
 
 #define GENERAL_STATUS                    0xc014
@@ -130,6 +131,8 @@
 #define GPIO_USER_CONTROL                 0xc160
 #define GPIO0_TO_7_OUT                    0x0000
 #define GPIO8_TO_15_OUT                   0x0001
+#define GPIO0_TO_7_OUT_V520               0x0002
+#define GPIO8_TO_15_OUT_V520              0x0003
 
 #define STICKY_STATUS_CLEAR               0xc164
 
@@ -216,22 +219,27 @@
 #define DPLL_REF_MODE                     0x0035
 #define DPLL_PHASE_MEASUREMENT_CFG        0x0036
 #define DPLL_MODE                         0x0037
+#define DPLL_MODE_V520                    0x003B
 
 #define DPLL_1                            0xc400
 
 #define DPLL_2                            0xc438
+#define DPLL_2_V520                       0xc43c
 
 #define DPLL_3                            0xc480
 
 #define DPLL_4                            0xc4b8
+#define DPLL_4_V520                       0xc4bc
 
 #define DPLL_5                            0xc500
 
 #define DPLL_6                            0xc538
+#define DPLL_6_V520                       0xc53c
 
 #define DPLL_7                            0xc580
 
 #define SYS_DPLL                          0xc5b8
+#define SYS_DPLL_V520                     0xc5bc
 
 #define DPLL_CTRL_0                       0xc600
 #define DPLL_CTRL_DPLL_MANU_REF_CFG       0x0001
@@ -331,6 +339,7 @@
 #define GPIO_ALERT_OUT_CFG                0x000e
 #define GPIO_TOD_NOTIFICATION_CFG         0x000f
 #define GPIO_CTRL                         0x0010
+#define GPIO_CTRL_V520                    0x0011
 
 #define GPIO_1                            0xc8d4
 
@@ -365,6 +374,7 @@
 #define OUT_DIV_MUX                       0xca12
 
 #define OUTPUT_0                          0xca14
+#define OUTPUT_0_V520                     0xca20
 /* FOD frequency output divider value */
 #define OUT_DIV                           0x0000
 #define OUT_DUTY_CYCLE_HIGH               0x0004
@@ -374,28 +384,40 @@
 #define OUT_PHASE_ADJ                     0x000c
 
 #define OUTPUT_1                          0xca24
+#define OUTPUT_1_V520                     0xca30
 
 #define OUTPUT_2                          0xca34
+#define OUTPUT_2_V520                     0xca40
 
 #define OUTPUT_3                          0xca44
+#define OUTPUT_3_V520                     0xca50
 
 #define OUTPUT_4                          0xca54
+#define OUTPUT_4_V520                     0xca60
 
 #define OUTPUT_5                          0xca64
+#define OUTPUT_5_V520                     0xca80
 
 #define OUTPUT_6                          0xca80
+#define OUTPUT_6_V520                     0xca90
 
 #define OUTPUT_7                          0xca90
+#define OUTPUT_7_V520                     0xcaa0
 
 #define OUTPUT_8                          0xcaa0
+#define OUTPUT_8_V520                     0xcab0
 
 #define OUTPUT_9                          0xcab0
+#define OUTPUT_9_V520                     0xcac0
 
 #define OUTPUT_10                         0xcac0
+#define OUTPUT_10_V520                    0xcad0
 
 #define OUTPUT_11                         0xcad0
+#define OUTPUT_11_V520                    0xcae0
 
 #define SERIAL                            0xcae0
+#define SERIAL_V520                       0xcaf0
 
 #define PWM_ENCODER_0                     0xcb00
 
@@ -416,50 +438,72 @@
 #define PWM_DECODER_0                     0xcb40
 
 #define PWM_DECODER_1                     0xcb48
+#define PWM_DECODER_1_V520                0xcb4a
 
 #define PWM_DECODER_2                     0xcb50
+#define PWM_DECODER_2_V520                0xcb54
 
 #define PWM_DECODER_3                     0xcb58
+#define PWM_DECODER_3_V520                0xcb5e
 
 #define PWM_DECODER_4                     0xcb60
+#define PWM_DECODER_4_V520                0xcb68
 
 #define PWM_DECODER_5                     0xcb68
+#define PWM_DECODER_5_V520                0xcb80
 
 #define PWM_DECODER_6                     0xcb70
+#define PWM_DECODER_6_V520                0xcb8a
 
 #define PWM_DECODER_7                     0xcb80
+#define PWM_DECODER_7_V520                0xcb94
 
 #define PWM_DECODER_8                     0xcb88
+#define PWM_DECODER_8_V520                0xcb9e
 
 #define PWM_DECODER_9                     0xcb90
+#define PWM_DECODER_9_V520                0xcba8
 
 #define PWM_DECODER_10                    0xcb98
+#define PWM_DECODER_10_V520               0xcbb2
 
 #define PWM_DECODER_11                    0xcba0
+#define PWM_DECODER_11_V520               0xcbbc
 
 #define PWM_DECODER_12                    0xcba8
+#define PWM_DECODER_12_V520               0xcbc6
 
 #define PWM_DECODER_13                    0xcbb0
+#define PWM_DECODER_13_V520               0xcbd0
 
 #define PWM_DECODER_14                    0xcbb8
+#define PWM_DECODER_14_V520               0xcbda
 
 #define PWM_DECODER_15                    0xcbc0
+#define PWM_DECODER_15_V520               0xcbe4
 
 #define PWM_USER_DATA                     0xcbc8
+#define PWM_USER_DATA_V520                0xcbf0
 
 #define TOD_0                             0xcbcc
+#define TOD_0_V520                        0xcc00
 
 /* Enable TOD counter, output channel sync and even-PPS mode */
 #define TOD_CFG                           0x0000
+#define TOD_CFG_V520                      0x0001
 
 #define TOD_1                             0xcbce
+#define TOD_1_V520                        0xcc02
 
 #define TOD_2                             0xcbd0
+#define TOD_2_V520                        0xcc04
 
 #define TOD_3                             0xcbd2
+#define TOD_3_V520                        0xcc06
 
 
 #define TOD_WRITE_0                       0xcc00
+#define TOD_WRITE_0_V520                  0xcc10
 /* 8-bit subns, 32-bit ns, 48-bit seconds */
 #define TOD_WRITE                         0x0000
 /* Counter increments after TOD write is completed */
@@ -470,12 +514,16 @@
 #define TOD_WRITE_CMD                     0x000f
 
 #define TOD_WRITE_1                       0xcc10
+#define TOD_WRITE_1_V520                  0xcc20
 
 #define TOD_WRITE_2                       0xcc20
+#define TOD_WRITE_2_V520                  0xcc30
 
 #define TOD_WRITE_3                       0xcc30
+#define TOD_WRITE_3_V520                  0xcc40
 
 #define TOD_READ_PRIMARY_0                0xcc40
+#define TOD_READ_PRIMARY_0_V520           0xcc50
 /* 8-bit subns, 32-bit ns, 48-bit seconds */
 #define TOD_READ_PRIMARY                  0x0000
 /* Counter increments after TOD write is completed */
@@ -484,22 +532,31 @@
 #define TOD_READ_PRIMARY_SEL_CFG_0        0x000c
 /* Read trigger selection */
 #define TOD_READ_PRIMARY_CMD              0x000e
+#define TOD_READ_PRIMARY_CMD_V520         0x000f
 
 #define TOD_READ_PRIMARY_1                0xcc50
+#define TOD_READ_PRIMARY_1_V520           0xcc60
 
 #define TOD_READ_PRIMARY_2                0xcc60
+#define TOD_READ_PRIMARY_2_V520           0xcc80
 
 #define TOD_READ_PRIMARY_3                0xcc80
+#define TOD_READ_PRIMARY_3_V520           0xcc90
 
 #define TOD_READ_SECONDARY_0              0xcc90
+#define TOD_READ_SECONDARY_0_V520         0xcca0
 
 #define TOD_READ_SECONDARY_1              0xcca0
+#define TOD_READ_SECONDARY_1_V520         0xccb0
 
 #define TOD_READ_SECONDARY_2              0xccb0
+#define TOD_READ_SECONDARY_2_V520         0xccc0
 
 #define TOD_READ_SECONDARY_3              0xccc0
+#define TOD_READ_SECONDARY_3_V520         0xccd0
 
 #define OUTPUT_TDC_CFG                    0xccd0
+#define OUTPUT_TDC_CFG_V520               0xcce0
 
 #define OUTPUT_TDC_0                      0xcd00
 
@@ -512,8 +569,10 @@
 #define INPUT_TDC                         0xcd20
 
 #define SCRATCH                           0xcf50
+#define SCRATCH_V520                      0xcf4c
 
 #define EEPROM                            0xcf68
+#define EEPROM_V520                       0xcf64
 
 #define OTP                               0xcf70
 
@@ -576,6 +635,10 @@
 #define STATE_MODE_SHIFT                  (0)
 #define STATE_MODE_MASK                   (0x7)
 
+/* Bit definitions for the DPLL_MANU_REF_CFG register */
+#define MANUAL_REFERENCE_SHIFT            (0)
+#define MANUAL_REFERENCE_MASK             (0x1f)
+
 /* Bit definitions for the GPIO_CFG_GBL register */
 #define SUPPLY_MODE_SHIFT                 (0)
 #define SUPPLY_MODE_MASK                  (0x3)
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index fa63695..1a2e3c2 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -33,15 +33,23 @@
 
 #define SETTIME_CORRECTION (0)
 
-static int contains_full_configuration(const struct firmware *fw)
+static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm);
+
+static int contains_full_configuration(struct idtcm *idtcm,
+				       const struct firmware *fw)
 {
-	s32 full_count = FULL_FW_CFG_BYTES - FULL_FW_CFG_SKIPPED_BYTES;
 	struct idtcm_fwrc *rec = (struct idtcm_fwrc *)fw->data;
+	u16 scratch = IDTCM_FW_REG(idtcm->fw_ver, V520, SCRATCH);
+	s32 full_count;
 	s32 count = 0;
 	u16 regaddr;
 	u8 loaddr;
 	s32 len;
 
+	/* 4 bytes skipped every 0x80 */
+	full_count = (scratch - GPIO_USER_CONTROL) -
+		     ((scratch >> 7) - (GPIO_USER_CONTROL >> 7)) * 4;
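+	/* For example, with the pre-5.2.0 SCRATCH of 0xcf50 and
+	 * GPIO_USER_CONTROL at 0xc160:
+	 * full_count = 0xdf0 - (0x19e - 0x182) * 4 = 3568 - 112 = 3456
+	 */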
+
 	/* If the firmware contains 'full configuration' SM_RESET can be used
 	 * to ensure proper configuration.
 	 *
@@ -57,7 +65,7 @@
 		rec++;
 
 		/* Top (status registers) and bottom are read-only */
-		if (regaddr < GPIO_USER_CONTROL || regaddr >= SCRATCH)
+		if (regaddr < GPIO_USER_CONTROL || regaddr >= scratch)
 			continue;
 
 		/* Page size 128, last 4 bytes of page skipped */
@@ -152,6 +160,19 @@
 	return 0;
 }
 
+static enum fw_version idtcm_fw_version(const char *version)
+{
+	enum fw_version ver = V_DEFAULT;
+
+	if (idtcm_strverscmp(version, "4.8.7") >= 0)
+		ver = V487;
+
+	if (idtcm_strverscmp(version, "5.2.0") >= 0)
+		ver = V520;
+
+	return ver;
+}
+
 static int idtcm_xfer_read(struct idtcm *idtcm,
 			   u8 regaddr,
 			   u8 *buf,
@@ -388,13 +409,14 @@
 			  struct timespec64 *ts)
 {
 	struct idtcm *idtcm = channel->idtcm;
+	u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_PRIMARY_CMD);
 	u8 buf[TOD_BYTE_COUNT];
 	u8 timeout = 10;
 	u8 trigger;
 	int err;
 
 	err = idtcm_read(idtcm, channel->tod_read_primary,
-			 TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger));
+			 tod_read_cmd, &trigger, sizeof(trigger));
 	if (err)
 		return err;
 
@@ -403,7 +425,7 @@
 	trigger &= ~TOD_READ_TRIGGER_MODE; /* single shot */
 
 	err = idtcm_write(idtcm, channel->tod_read_primary,
-			  TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger));
+			  tod_read_cmd, &trigger, sizeof(trigger));
 	if (err)
 		return err;
 
@@ -413,7 +435,7 @@
 			idtcm->start_time = ktime_get_raw();
 
 		err = idtcm_read(idtcm, channel->tod_read_primary,
-				 TOD_READ_PRIMARY_CMD, &trigger,
+				 tod_read_cmd, &trigger,
 				 sizeof(trigger));
 		if (err)
 			return err;
@@ -559,35 +581,10 @@
 	return err;
 }
 
-static int sync_source_dpll_tod_pps(u16 tod_addr, u8 *sync_src)
-{
-	int err = 0;
-
-	switch (tod_addr) {
-	case TOD_0:
-		*sync_src = SYNC_SOURCE_DPLL0_TOD_PPS;
-		break;
-	case TOD_1:
-		*sync_src = SYNC_SOURCE_DPLL1_TOD_PPS;
-		break;
-	case TOD_2:
-		*sync_src = SYNC_SOURCE_DPLL2_TOD_PPS;
-		break;
-	case TOD_3:
-		*sync_src = SYNC_SOURCE_DPLL3_TOD_PPS;
-		break;
-	default:
-		err = -EINVAL;
-	}
-
-	return err;
-}
-
 static int idtcm_sync_pps_output(struct idtcm_channel *channel)
 {
 	struct idtcm *idtcm = channel->idtcm;
 	u8 pll;
-	u8 sync_src;
 	u8 qn;
 	u8 qn_plus_1;
 	int err = 0;
@@ -596,10 +593,6 @@
 	u8 temp;
 	u16 output_mask = channel->output_mask;
 
-	err = sync_source_dpll_tod_pps(channel->tod_n, &sync_src);
-	if (err)
-		return err;
-
 	err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE,
 			 &temp, sizeof(temp));
 	if (err)
@@ -655,8 +648,8 @@
 		}
 
 		if (qn != 0 || qn_plus_1 != 0)
-			err = _sync_pll_output(idtcm, pll, sync_src, qn,
-					       qn_plus_1);
+			err = _sync_pll_output(idtcm, pll, channel->sync_src,
+					       qn, qn_plus_1);
 
 		if (err)
 			return err;
@@ -666,8 +659,8 @@
 }
 
 static int _idtcm_set_dpll_hw_tod(struct idtcm_channel *channel,
-			       struct timespec64 const *ts,
-			       enum hw_tod_write_trig_sel wr_trig)
+				  struct timespec64 const *ts,
+				  enum hw_tod_write_trig_sel wr_trig)
 {
 	struct idtcm *idtcm = channel->idtcm;
 	u8 buf[TOD_BYTE_COUNT];
@@ -793,46 +786,46 @@
 	return 0;
 }
 
-static int get_output_base_addr(u8 outn)
+static int get_output_base_addr(enum fw_version ver, u8 outn)
 {
 	int base;
 
 	switch (outn) {
 	case 0:
-		base = OUTPUT_0;
+		base = IDTCM_FW_REG(ver, V520, OUTPUT_0);
 		break;
 	case 1:
-		base = OUTPUT_1;
+		base = IDTCM_FW_REG(ver, V520, OUTPUT_1);
 		break;
 	case 2:
-		base = OUTPUT_2;
+		base = IDTCM_FW_REG(ver, V520, OUTPUT_2);
 		break;
 	case 3:
-		base = OUTPUT_3;
+		base = IDTCM_FW_REG(ver, V520, OUTPUT_3);
 		break;
 	case 4:
-		base = OUTPUT_4;
+		base = IDTCM_FW_REG(ver, V520, OUTPUT_4);
 		break;
 	case 5:
-		base = OUTPUT_5;
+		base = IDTCM_FW_REG(ver, V520, OUTPUT_5);
 		break;
 	case 6:
-		base = OUTPUT_6;
+		base = IDTCM_FW_REG(ver, V520, OUTPUT_6);
 		break;
 	case 7:
-		base = OUTPUT_7;
+		base = IDTCM_FW_REG(ver, V520, OUTPUT_7);
 		break;
 	case 8:
-		base = OUTPUT_8;
+		base = IDTCM_FW_REG(ver, V520, OUTPUT_8);
 		break;
 	case 9:
-		base = OUTPUT_9;
+		base = IDTCM_FW_REG(ver, V520, OUTPUT_9);
 		break;
 	case 10:
-		base = OUTPUT_10;
+		base = IDTCM_FW_REG(ver, V520, OUTPUT_10);
 		break;
 	case 11:
-		base = OUTPUT_11;
+		base = IDTCM_FW_REG(ver, V520, OUTPUT_11);
 		break;
 	default:
 		base = -EINVAL;
@@ -929,9 +922,9 @@
 	return err;
 }
 
-static int idtcm_do_phase_pull_in(struct idtcm_channel *channel,
-				  s32 offset_ns,
-				  u32 max_ffo_ppb)
+static int do_phase_pull_in_fw(struct idtcm_channel *channel,
+			       s32 offset_ns,
+			       u32 max_ffo_ppb)
 {
 	int err;
 
@@ -1000,7 +993,7 @@
 	s64 now;
 
 	if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS_DEPRECATED) {
-		err = idtcm_do_phase_pull_in(channel, delta, 0);
+		err = channel->do_phase_pull_in(channel, delta, 0);
 	} else {
 		idtcm->calculate_overhead_flag = 1;
 
@@ -1032,7 +1025,9 @@
 
 	clear_boot_status(idtcm);
 
-	err = idtcm_write(idtcm, RESET_CTRL, SM_RESET, &byte, sizeof(byte));
+	err = idtcm_write(idtcm, RESET_CTRL,
+			  IDTCM_FW_REG(idtcm->fw_ver, V520, SM_RESET),
+			  &byte, sizeof(byte));
 
 	if (!err) {
 		for (i = 0; i < 30; i++) {
@@ -1214,6 +1209,7 @@
 static int idtcm_load_firmware(struct idtcm *idtcm,
 			       struct device *dev)
 {
+	u16 scratch = IDTCM_FW_REG(idtcm->fw_ver, V520, SCRATCH);
 	char fname[128] = FW_FILENAME;
 	const struct firmware *fw;
 	struct idtcm_fwrc *rec;
@@ -1226,7 +1222,7 @@
 	if (firmware) /* module parameter */
 		snprintf(fname, sizeof(fname), "%s", firmware);
 
-	dev_dbg(&idtcm->client->dev, "requesting firmware '%s'", fname);
+	dev_info(&idtcm->client->dev, "firmware '%s'", fname);
 
 	err = request_firmware(&fw, fname, dev);
 	if (err) {
@@ -1239,7 +1235,7 @@
 
 	rec = (struct idtcm_fwrc *) fw->data;
 
-	if (contains_full_configuration(fw))
+	if (contains_full_configuration(idtcm, fw))
 		idtcm_state_machine_reset(idtcm);
 
 	for (len = fw->size; len > 0; len -= sizeof(*rec)) {
@@ -1263,7 +1259,7 @@
 			err = 0;
 
 			/* Top (status registers) and bottom are read-only */
-			if (regaddr < GPIO_USER_CONTROL || regaddr >= SCRATCH)
+			if (regaddr < GPIO_USER_CONTROL || regaddr >= scratch)
 				continue;
 
 			/* Page size 128, last 4 bytes of page skipped */
@@ -1292,7 +1288,7 @@
 	int err;
 	u8 val;
 
-	base = get_output_base_addr(outn);
+	base = get_output_base_addr(idtcm->fw_ver, outn);
 
 	if (!(base > 0)) {
 		dev_err(&idtcm->client->dev,
@@ -1360,53 +1356,331 @@
 }
 
 static int idtcm_get_pll_mode(struct idtcm_channel *channel,
-			      enum pll_mode *pll_mode)
+			      enum pll_mode *mode)
 {
 	struct idtcm *idtcm = channel->idtcm;
 	int err;
 	u8 dpll_mode;
 
-	err = idtcm_read(idtcm, channel->dpll_n, DPLL_MODE,
+	err = idtcm_read(idtcm, channel->dpll_n,
+			 IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_MODE),
 			 &dpll_mode, sizeof(dpll_mode));
 	if (err)
 		return err;
 
-	*pll_mode = (dpll_mode >> PLL_MODE_SHIFT) & PLL_MODE_MASK;
+	*mode = (dpll_mode >> PLL_MODE_SHIFT) & PLL_MODE_MASK;
 
 	return 0;
 }
 
 static int idtcm_set_pll_mode(struct idtcm_channel *channel,
-			      enum pll_mode pll_mode)
+			      enum pll_mode mode)
 {
 	struct idtcm *idtcm = channel->idtcm;
 	int err;
 	u8 dpll_mode;
 
-	err = idtcm_read(idtcm, channel->dpll_n, DPLL_MODE,
+	err = idtcm_read(idtcm, channel->dpll_n,
+			 IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_MODE),
 			 &dpll_mode, sizeof(dpll_mode));
 	if (err)
 		return err;
 
 	dpll_mode &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
 
-	dpll_mode |= (pll_mode << PLL_MODE_SHIFT);
+	dpll_mode |= (mode << PLL_MODE_SHIFT);
 
-	channel->pll_mode = pll_mode;
-
-	err = idtcm_write(idtcm, channel->dpll_n, DPLL_MODE,
+	err = idtcm_write(idtcm, channel->dpll_n,
+			  IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_MODE),
 			  &dpll_mode, sizeof(dpll_mode));
+	return err;
+}
+
+static int idtcm_get_manual_reference(struct idtcm_channel *channel,
+				      enum manual_reference *ref)
+{
+	struct idtcm *idtcm = channel->idtcm;
+	u8 dpll_manu_ref_cfg;
+	int err;
+
+	err = idtcm_read(idtcm, channel->dpll_ctrl_n,
+			 DPLL_CTRL_DPLL_MANU_REF_CFG,
+			 &dpll_manu_ref_cfg, sizeof(dpll_manu_ref_cfg));
 	if (err)
 		return err;
 
+	dpll_manu_ref_cfg &= (MANUAL_REFERENCE_MASK << MANUAL_REFERENCE_SHIFT);
+
+	*ref = dpll_manu_ref_cfg >> MANUAL_REFERENCE_SHIFT;
+
 	return 0;
 }
 
+static int idtcm_set_manual_reference(struct idtcm_channel *channel,
+				      enum manual_reference ref)
+{
+	struct idtcm *idtcm = channel->idtcm;
+	u8 dpll_manu_ref_cfg;
+	int err;
+
+	err = idtcm_read(idtcm, channel->dpll_ctrl_n,
+			 DPLL_CTRL_DPLL_MANU_REF_CFG,
+			 &dpll_manu_ref_cfg, sizeof(dpll_manu_ref_cfg));
+	if (err)
+		return err;
+
+	dpll_manu_ref_cfg &= ~(MANUAL_REFERENCE_MASK << MANUAL_REFERENCE_SHIFT);
+
+	dpll_manu_ref_cfg |= (ref << MANUAL_REFERENCE_SHIFT);
+
+	err = idtcm_write(idtcm, channel->dpll_ctrl_n,
+			  DPLL_CTRL_DPLL_MANU_REF_CFG,
+			  &dpll_manu_ref_cfg, sizeof(dpll_manu_ref_cfg));
+
+	return err;
+}
+
+static int configure_dpll_mode_write_frequency(struct idtcm_channel *channel)
+{
+	struct idtcm *idtcm = channel->idtcm;
+	int err;
+
+	err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
+
+	if (err)
+		dev_err(&idtcm->client->dev, "Failed to set pll mode to write frequency");
+	else
+		channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
+
+	return err;
+}
+
+static int configure_dpll_mode_write_phase(struct idtcm_channel *channel)
+{
+	struct idtcm *idtcm = channel->idtcm;
+	int err;
+
+	err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_PHASE);
+
+	if (err)
+		dev_err(&idtcm->client->dev, "Failed to set pll mode to write phase");
+	else
+		channel->mode = PTP_PLL_MODE_WRITE_PHASE;
+
+	return err;
+}
+
+static int configure_manual_reference_write_frequency(struct idtcm_channel *channel)
+{
+	struct idtcm *idtcm = channel->idtcm;
+	int err;
+
+	err = idtcm_set_manual_reference(channel, MANU_REF_WRITE_FREQUENCY);
+
+	if (err)
+		dev_err(&idtcm->client->dev, "Failed to set manual reference to write frequency");
+	else
+		channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
+
+	return err;
+}
+
+static int configure_manual_reference_write_phase(struct idtcm_channel *channel)
+{
+	struct idtcm *idtcm = channel->idtcm;
+	int err;
+
+	err = idtcm_set_manual_reference(channel, MANU_REF_WRITE_PHASE);
+
+	if (err)
+		dev_err(&idtcm->client->dev, "Failed to set manual reference to write phase");
+	else
+		channel->mode = PTP_PLL_MODE_WRITE_PHASE;
+
+	return err;
+}
+
+static int idtcm_stop_phase_pull_in(struct idtcm_channel *channel)
+{
+	int err;
+
+	err = _idtcm_adjfine(channel, channel->current_freq_scaled_ppm);
+	if (err)
+		return err;
+
+	channel->phase_pull_in = false;
+
+	return 0;
+}
+
+static long idtcm_work_handler(struct ptp_clock_info *ptp)
+{
+	struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
+	struct idtcm *idtcm = channel->idtcm;
+
+	mutex_lock(&idtcm->reg_lock);
+
+	(void)idtcm_stop_phase_pull_in(channel);
+
+	mutex_unlock(&idtcm->reg_lock);
+
+	/* Return a negative value here to not reschedule */
+	return -1;
+}
+
+static s32 phase_pull_in_scaled_ppm(s32 current_ppm, s32 phase_pull_in_ppb)
+{
+	/* ppb = scaled_ppm * 125 / 2^13 */
+	/* scaled_ppm = ppb * 2^13 / 125 */
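+	/* e.g. a 1000 ppb pull-in adds 1000 * 8192 / 125 = 65536 scaled ppm */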
+
+	s64 max_scaled_ppm = (PHASE_PULL_IN_MAX_PPB << 13) / 125;
+	s64 scaled_ppm = (phase_pull_in_ppb << 13) / 125;
+
+	current_ppm += scaled_ppm;
+
+	if (current_ppm > max_scaled_ppm)
+		current_ppm = max_scaled_ppm;
+	else if (current_ppm < -max_scaled_ppm)
+		current_ppm = -max_scaled_ppm;
+
+	return current_ppm;
+}
+
+static int do_phase_pull_in_sw(struct idtcm_channel *channel,
+			       s32 delta_ns,
+			       u32 max_ffo_ppb)
+{
+	s32 current_ppm = channel->current_freq_scaled_ppm;
+	u32 duration_ms = MSEC_PER_SEC;
+	s32 delta_ppm;
+	s32 ppb;
+	int err;
+
+	/* If the ToD correction is less than PHASE_PULL_IN_MIN_THRESHOLD_NS,
+	 * skip. The error introduced by the ToD adjustment procedure would
+	 * be bigger than the required ToD correction
+	 */
+	if (abs(delta_ns) < PHASE_PULL_IN_MIN_THRESHOLD_NS)
+		return 0;
+
+	if (max_ffo_ppb == 0)
+		max_ffo_ppb = PHASE_PULL_IN_MAX_PPB;
+
+	/* For most cases, keep the phase pull-in duration at 1 second */
+	ppb = delta_ns;
+	while (abs(ppb) > max_ffo_ppb) {
+		duration_ms *= 2;
+		ppb /= 2;
+	}
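+	/* Example with illustrative values: delta_ns = 8000 and
+	 * max_ffo_ppb = 1000 leave the loop with ppb = 1000 and
+	 * duration_ms = 8000, i.e. an 8 second pull-in at 1000 ppb.
+	 */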
+
+	delta_ppm = phase_pull_in_scaled_ppm(current_ppm, ppb);
+
+	err = _idtcm_adjfine(channel, delta_ppm);
+
+	if (err)
+		return err;
+
+	/* schedule the worker to cancel phase pull-in */
+	ptp_schedule_worker(channel->ptp_clock,
+			    msecs_to_jiffies(duration_ms) - 1);
+
+	channel->phase_pull_in = true;
+
+	return 0;
+}
+
+static int initialize_operating_mode_with_manual_reference(struct idtcm_channel *channel,
+							   enum manual_reference ref)
+{
+	struct idtcm *idtcm = channel->idtcm;
+
+	channel->mode = PTP_PLL_MODE_UNSUPPORTED;
+	channel->configure_write_frequency = configure_manual_reference_write_frequency;
+	channel->configure_write_phase = configure_manual_reference_write_phase;
+	channel->do_phase_pull_in = do_phase_pull_in_sw;
+
+	switch (ref) {
+	case MANU_REF_WRITE_PHASE:
+		channel->mode = PTP_PLL_MODE_WRITE_PHASE;
+		break;
+	case MANU_REF_WRITE_FREQUENCY:
+		channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
+		break;
+	default:
+		dev_warn(&idtcm->client->dev,
+			 "Unsupported MANUAL_REFERENCE: 0x%02x", ref);
+	}
+
+	return 0;
+}
+
+static int initialize_operating_mode_with_pll_mode(struct idtcm_channel *channel,
+						   enum pll_mode mode)
+{
+	struct idtcm *idtcm = channel->idtcm;
+	int err = 0;
+
+	channel->mode = PTP_PLL_MODE_UNSUPPORTED;
+	channel->configure_write_frequency = configure_dpll_mode_write_frequency;
+	channel->configure_write_phase = configure_dpll_mode_write_phase;
+	channel->do_phase_pull_in = do_phase_pull_in_fw;
+
+	switch (mode) {
+	case  PLL_MODE_WRITE_PHASE:
+		channel->mode = PTP_PLL_MODE_WRITE_PHASE;
+		break;
+	case PLL_MODE_WRITE_FREQUENCY:
+		channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
+		break;
+	default:
+		dev_err(&idtcm->client->dev,
+			"Unsupported PLL_MODE: 0x%02x", mode);
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+static int initialize_dco_operating_mode(struct idtcm_channel *channel)
+{
+	enum manual_reference ref = MANU_REF_XO_DPLL;
+	enum pll_mode mode = PLL_MODE_DISABLED;
+	struct idtcm *idtcm = channel->idtcm;
+	int err;
+
+	channel->mode = PTP_PLL_MODE_UNSUPPORTED;
+
+	err = idtcm_get_pll_mode(channel, &mode);
+	if (err) {
+		dev_err(&idtcm->client->dev, "Unable to read pll mode!");
+		return err;
+	}
+
+	if (mode == PLL_MODE_PLL) {
+		err = idtcm_get_manual_reference(channel, &ref);
+		if (err) {
+			dev_err(&idtcm->client->dev, "Unable to read manual reference!");
+			return err;
+		}
+		err = initialize_operating_mode_with_manual_reference(channel, ref);
+	} else {
+		err = initialize_operating_mode_with_pll_mode(channel, mode);
+	}
+
+	if (channel->mode == PTP_PLL_MODE_WRITE_PHASE)
+		channel->configure_write_frequency(channel);
+
+	return err;
+}
+
 /* PTP Hardware Clock interface */
 
-/*
+/**
  * Maximum absolute value for write phase offset in picoseconds
  *
+ * @channel:  channel
+ * @delta_ns: delta in nanoseconds
+ *
  * Destination signed register is 32-bit register in resolution of 50ps
  *
  * 0x7fffffff * 50 =  2147483647 * 50 = 107374182350
@@ -1420,8 +1694,8 @@
 	s32 phase_50ps;
 	s64 offset_ps;
 
-	if (channel->pll_mode != PLL_MODE_WRITE_PHASE) {
-		err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_PHASE);
+	if (channel->mode != PTP_PLL_MODE_WRITE_PHASE) {
+		err = channel->configure_write_phase(channel);
 		if (err)
 			return err;
 	}
@@ -1459,8 +1733,8 @@
 	u8 buf[6] = {0};
 	s64 fcw;
 
-	if (channel->pll_mode  != PLL_MODE_WRITE_FREQUENCY) {
-		err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
+	if (channel->mode != PTP_PLL_MODE_WRITE_FREQUENCY) {
+		err = channel->configure_write_frequency(channel);
 		if (err)
 			return err;
 	}
@@ -1577,29 +1851,29 @@
 	enum scsr_tod_write_type_sel type;
 	int err;
 
-	if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) {
-		err = idtcm_do_phase_pull_in(channel, delta, 0);
-		if (err)
-			dev_err(&idtcm->client->dev,
-				"Failed at line %d in %s!", __LINE__, __func__);
-		return err;
-	}
-
-	if (delta >= 0) {
-		ts = ns_to_timespec64(delta);
-		type = SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS;
-	} else {
-		ts = ns_to_timespec64(-delta);
-		type = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS;
-	}
+	if (channel->phase_pull_in == true)
+		return 0;
 
 	mutex_lock(&idtcm->reg_lock);
 
-	err = _idtcm_settime(channel, &ts, type);
-	if (err)
-		dev_err(&idtcm->client->dev,
-			"Failed at line %d in %s!", __LINE__, __func__);
-
+	if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) {
+		err = channel->do_phase_pull_in(channel, delta, 0);
+		if (err)
+			dev_err(&idtcm->client->dev,
+				"Failed at line %d in %s!", __LINE__, __func__);
+	} else {
+		if (delta >= 0) {
+			ts = ns_to_timespec64(delta);
+			type = SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS;
+		} else {
+			ts = ns_to_timespec64(-delta);
+			type = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS;
+		}
+		err = _idtcm_settime(channel, &ts, type);
+		if (err)
+			dev_err(&idtcm->client->dev,
+				"Failed at line %d in %s!", __LINE__, __func__);
+	}
 	mutex_unlock(&idtcm->reg_lock);
 
 	return err;
@@ -1629,15 +1903,21 @@
 	struct idtcm *idtcm = channel->idtcm;
 	int err;
 
+	if (channel->phase_pull_in == true)
+		return 0;
+
+	if (scaled_ppm == channel->current_freq_scaled_ppm)
+		return 0;
+
 	mutex_lock(&idtcm->reg_lock);
 
 	err = _idtcm_adjfine(channel, scaled_ppm);
-	if (err)
-		dev_err(&idtcm->client->dev,
-			"Failed at line %d in %s!", __LINE__, __func__);
 
 	mutex_unlock(&idtcm->reg_lock);
 
+	if (!err)
+		channel->current_freq_scaled_ppm = scaled_ppm;
+
 	return err;
 }
 
@@ -1675,243 +1955,35 @@
 	return -EOPNOTSUPP;
 }
 
-static int _enable_pll_tod_sync(struct idtcm *idtcm,
-				u8 pll,
-				u8 sync_src,
-				u8 qn,
-				u8 qn_plus_1)
-{
-	int err;
-	u8 val;
-	u16 dpll;
-	u16 out0 = 0, out1 = 0;
-
-	if (qn == 0 && qn_plus_1 == 0)
-		return 0;
-
-	switch (pll) {
-	case 0:
-		dpll = DPLL_0;
-		if (qn)
-			out0 = OUTPUT_0;
-		if (qn_plus_1)
-			out1 = OUTPUT_1;
-		break;
-	case 1:
-		dpll = DPLL_1;
-		if (qn)
-			out0 = OUTPUT_2;
-		if (qn_plus_1)
-			out1 = OUTPUT_3;
-		break;
-	case 2:
-		dpll = DPLL_2;
-		if (qn)
-			out0 = OUTPUT_4;
-		if (qn_plus_1)
-			out1 = OUTPUT_5;
-		break;
-	case 3:
-		dpll = DPLL_3;
-		if (qn)
-			out0 = OUTPUT_6;
-		if (qn_plus_1)
-			out1 = OUTPUT_7;
-		break;
-	case 4:
-		dpll = DPLL_4;
-		if (qn)
-			out0 = OUTPUT_8;
-		break;
-	case 5:
-		dpll = DPLL_5;
-		if (qn)
-			out0 = OUTPUT_9;
-		if (qn_plus_1)
-			out1 = OUTPUT_8;
-		break;
-	case 6:
-		dpll = DPLL_6;
-		if (qn)
-			out0 = OUTPUT_10;
-		if (qn_plus_1)
-			out1 = OUTPUT_11;
-		break;
-	case 7:
-		dpll = DPLL_7;
-		if (qn)
-			out0 = OUTPUT_11;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/*
-	 * Enable OUTPUT OUT_SYNC.
-	 */
-	if (out0) {
-		err = idtcm_read(idtcm, out0, OUT_CTRL_1, &val, sizeof(val));
-		if (err)
-			return err;
-
-		val &= ~OUT_SYNC_DISABLE;
-
-		err = idtcm_write(idtcm, out0, OUT_CTRL_1, &val, sizeof(val));
-		if (err)
-			return err;
-	}
-
-	if (out1) {
-		err = idtcm_read(idtcm, out1, OUT_CTRL_1, &val, sizeof(val));
-		if (err)
-			return err;
-
-		val &= ~OUT_SYNC_DISABLE;
-
-		err = idtcm_write(idtcm, out1, OUT_CTRL_1, &val, sizeof(val));
-		if (err)
-			return err;
-	}
-
-	/* enable dpll sync tod pps, must be set before dpll_mode */
-	err = idtcm_read(idtcm, dpll, DPLL_TOD_SYNC_CFG, &val, sizeof(val));
-	if (err)
-		return err;
-
-	val &= ~(TOD_SYNC_SOURCE_MASK << TOD_SYNC_SOURCE_SHIFT);
-	val |= (sync_src << TOD_SYNC_SOURCE_SHIFT);
-	val |= TOD_SYNC_EN;
-
-	return idtcm_write(idtcm, dpll, DPLL_TOD_SYNC_CFG, &val, sizeof(val));
-}
-
-static int idtcm_enable_tod_sync(struct idtcm_channel *channel)
-{
-	struct idtcm *idtcm = channel->idtcm;
-	u8 pll;
-	u8 sync_src;
-	u8 qn;
-	u8 qn_plus_1;
-	u8 cfg;
-	int err = 0;
-	u16 output_mask = channel->output_mask;
-	u8 out8_mux = 0;
-	u8 out11_mux = 0;
-	u8 temp;
-
-	/*
-	 * set tod_out_sync_enable to 0.
-	 */
-	err = idtcm_read(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
-	if (err)
-		return err;
-
-	cfg &= ~TOD_OUT_SYNC_ENABLE;
-
-	err = idtcm_write(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
-	if (err)
-		return err;
-
-	switch (channel->tod_n) {
-	case TOD_0:
-		sync_src = 0;
-		break;
-	case TOD_1:
-		sync_src = 1;
-		break;
-	case TOD_2:
-		sync_src = 2;
-		break;
-	case TOD_3:
-		sync_src = 3;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE, &temp, sizeof(temp));
-	if (err)
-		return err;
-
-	if ((temp & Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) ==
-	    Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK)
-		out8_mux = 1;
-
-	err = idtcm_read(idtcm, 0, HW_Q11_CTRL_SPARE, &temp, sizeof(temp));
-	if (err)
-		return err;
-
-	if ((temp & Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) ==
-	    Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK)
-		out11_mux = 1;
-
-	for (pll = 0; pll < 8; pll++) {
-		qn = 0;
-		qn_plus_1 = 0;
-
-		if (pll < 4) {
-			/* First 4 pll has 2 outputs */
-			qn = output_mask & 0x1;
-			output_mask = output_mask >> 1;
-			qn_plus_1 = output_mask & 0x1;
-			output_mask = output_mask >> 1;
-		} else if (pll == 4) {
-			if (out8_mux == 0) {
-				qn = output_mask & 0x1;
-				output_mask = output_mask >> 1;
-			}
-		} else if (pll == 5) {
-			if (out8_mux) {
-				qn_plus_1 = output_mask & 0x1;
-				output_mask = output_mask >> 1;
-			}
-			qn = output_mask & 0x1;
-			output_mask = output_mask >> 1;
-		} else if (pll == 6) {
-			qn = output_mask & 0x1;
-			output_mask = output_mask >> 1;
-			if (out11_mux) {
-				qn_plus_1 = output_mask & 0x1;
-				output_mask = output_mask >> 1;
-			}
-		} else if (pll == 7) {
-			if (out11_mux == 0) {
-				qn = output_mask & 0x1;
-				output_mask = output_mask >> 1;
-			}
-		}
-
-		if (qn != 0 || qn_plus_1 != 0)
-			err = _enable_pll_tod_sync(idtcm, pll, sync_src, qn,
-					       qn_plus_1);
-		if (err)
-			return err;
-	}
-
-	return err;
-}
-
 static int idtcm_enable_tod(struct idtcm_channel *channel)
 {
 	struct idtcm *idtcm = channel->idtcm;
 	struct timespec64 ts = {0, 0};
+	u16 tod_cfg = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_CFG);
 	u8 cfg;
 	int err;
 
+	/* STEELAI-366 - Temporary workaround for ts2phc compatibility */
+	if (0) {
+		err = idtcm_output_mask_enable(channel, false);
+		if (err)
+			return err;
+	}
+
 	/*
 	 * Start the TOD clock ticking.
 	 */
-	err = idtcm_read(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+	err = idtcm_read(idtcm, channel->tod_n, tod_cfg, &cfg, sizeof(cfg));
 	if (err)
 		return err;
 
 	cfg |= TOD_ENABLE;
 
-	err = idtcm_write(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+	err = idtcm_write(idtcm, channel->tod_n, tod_cfg, &cfg, sizeof(cfg));
 	if (err)
 		return err;
 
-	if (idtcm->deprecated)
+	if (idtcm->fw_ver < V487)
 		return _idtcm_settime_deprecated(channel, &ts);
 	else
 		return _idtcm_settime(channel, &ts,
@@ -1939,10 +2011,7 @@
 	snprintf(idtcm->version, sizeof(idtcm->version), "%u.%u.%u",
 		 major, minor, hotfix);
 
-	if (idtcm_strverscmp(idtcm->version, "4.8.7") >= 0)
-		idtcm->deprecated = 0;
-	else
-		idtcm->deprecated = 1;
+	idtcm->fw_ver = idtcm_fw_version(idtcm->version);
 
 	dev_info(&idtcm->client->dev,
 		 "%d.%d.%d, Id: 0x%04x  HW Rev: %d  OTP Config Select: %d",
@@ -1960,6 +2029,7 @@
 	.gettime64	= &idtcm_gettime,
 	.settime64	= &idtcm_settime,
 	.enable		= &idtcm_enable,
+	.do_aux_work	= &idtcm_work_handler,
 };
 
 static const struct ptp_clock_info idtcm_caps_deprecated = {
@@ -1972,10 +2042,12 @@
 	.gettime64	= &idtcm_gettime,
 	.settime64	= &idtcm_settime_deprecated,
 	.enable		= &idtcm_enable,
+	.do_aux_work	= &idtcm_work_handler,
 };
 
 static int configure_channel_pll(struct idtcm_channel *channel)
 {
+	struct idtcm *idtcm = channel->idtcm;
 	int err = 0;
 
 	switch (channel->pll) {
@@ -1997,7 +2069,7 @@
 		break;
 	case 2:
 		channel->dpll_freq = DPLL_FREQ_2;
-		channel->dpll_n = DPLL_2;
+		channel->dpll_n = IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_2);
 		channel->hw_dpll_n = HW_DPLL_2;
 		channel->dpll_phase = DPLL_PHASE_2;
 		channel->dpll_ctrl_n = DPLL_CTRL_2;
@@ -2013,7 +2085,7 @@
 		break;
 	case 4:
 		channel->dpll_freq = DPLL_FREQ_4;
-		channel->dpll_n = DPLL_4;
+		channel->dpll_n = IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_4);
 		channel->hw_dpll_n = HW_DPLL_4;
 		channel->dpll_phase = DPLL_PHASE_4;
 		channel->dpll_ctrl_n = DPLL_CTRL_4;
@@ -2029,7 +2101,7 @@
 		break;
 	case 6:
 		channel->dpll_freq = DPLL_FREQ_6;
-		channel->dpll_n = DPLL_6;
+		channel->dpll_n = IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_6);
 		channel->hw_dpll_n = HW_DPLL_6;
 		channel->dpll_phase = DPLL_PHASE_6;
 		channel->dpll_ctrl_n = DPLL_CTRL_6;
@@ -2052,6 +2124,7 @@
 
 static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
 {
+	enum fw_version fw_ver = idtcm->fw_ver;
 	struct idtcm_channel *channel;
 	int err;
 
@@ -2060,6 +2133,9 @@
 
 	channel = &idtcm->channel[index];
 
+	channel->idtcm = idtcm;
+	channel->current_freq_scaled_ppm = 0;
+
 	/* Set pll addresses */
 	err = configure_channel_pll(channel);
 	if (err)
@@ -2068,32 +2144,34 @@
 	/* Set tod addresses */
 	switch (index) {
 	case 0:
-		channel->tod_read_primary = TOD_READ_PRIMARY_0;
-		channel->tod_write = TOD_WRITE_0;
-		channel->tod_n = TOD_0;
+		channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_0);
+		channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_0);
+		channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_0);
+		channel->sync_src = SYNC_SOURCE_DPLL0_TOD_PPS;
 		break;
 	case 1:
-		channel->tod_read_primary = TOD_READ_PRIMARY_1;
-		channel->tod_write = TOD_WRITE_1;
-		channel->tod_n = TOD_1;
+		channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_1);
+		channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_1);
+		channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_1);
+		channel->sync_src = SYNC_SOURCE_DPLL1_TOD_PPS;
 		break;
 	case 2:
-		channel->tod_read_primary = TOD_READ_PRIMARY_2;
-		channel->tod_write = TOD_WRITE_2;
-		channel->tod_n = TOD_2;
+		channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_2);
+		channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_2);
+		channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_2);
+		channel->sync_src = SYNC_SOURCE_DPLL2_TOD_PPS;
 		break;
 	case 3:
-		channel->tod_read_primary = TOD_READ_PRIMARY_3;
-		channel->tod_write = TOD_WRITE_3;
-		channel->tod_n = TOD_3;
+		channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_3);
+		channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_3);
+		channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_3);
+		channel->sync_src = SYNC_SOURCE_DPLL3_TOD_PPS;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	channel->idtcm = idtcm;
-
-	if (idtcm->deprecated)
+	if (idtcm->fw_ver < V487)
 		channel->caps = idtcm_caps_deprecated;
 	else
 		channel->caps = idtcm_caps;
@@ -2101,22 +2179,9 @@
 	snprintf(channel->caps.name, sizeof(channel->caps.name),
 		 "IDT CM TOD%u", index);
 
-	if (!idtcm->deprecated) {
-		err = idtcm_enable_tod_sync(channel);
-		if (err) {
-			dev_err(&idtcm->client->dev,
-				"Failed at line %d in %s!", __LINE__, __func__);
-			return err;
-		}
-	}
-
-	/* Sync pll mode with hardware */
-	err = idtcm_get_pll_mode(channel, &channel->pll_mode);
-	if (err) {
-		dev_err(&idtcm->client->dev,
-			"Error: %s - Unable to read pll mode", __func__);
+	err = initialize_dco_operating_mode(channel);
+	if (err)
 		return err;
-	}
 
 	err = idtcm_enable_tod(channel);
 	if (err) {
@@ -2149,7 +2214,6 @@
 
 	for (i = 0; i < MAX_TOD; i++) {
 		channel = &idtcm->channel[i];
-
 		if (channel->ptp_clock)
 			ptp_clock_unregister(channel->ptp_clock);
 	}
diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h
index fb32327..833e590 100644
--- a/drivers/ptp/ptp_clockmatrix.h
+++ b/drivers/ptp/ptp_clockmatrix.h
@@ -57,15 +57,27 @@
 
 #define IDTCM_MAX_WRITE_COUNT		(512)
 
-#define FULL_FW_CFG_BYTES		(SCRATCH - GPIO_USER_CONTROL)
-#define FULL_FW_CFG_SKIPPED_BYTES	(((SCRATCH >> 7) \
-					  - (GPIO_USER_CONTROL >> 7)) \
-					 * 4) /* 4 bytes skipped every 0x80 */
+#define PHASE_PULL_IN_MAX_PPB		(144000)
+#define PHASE_PULL_IN_MIN_THRESHOLD_NS	(2)
+
+/*
+ * Return the register address based on the passed-in firmware version
+ */
+#define IDTCM_FW_REG(FW, VER, REG)	(((FW) < (VER)) ? (REG) : (REG##_##VER))
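
IDTCM_FW_REG() relies on token pasting: for firmware at or above VER it
appends _VER to the register name, otherwise it keeps the legacy offset. A
hypothetical expansion, assuming a TOD_CFG_V520 definition exists alongside
TOD_CFG in the register header (not shown in this hunk):

	u16 tod_cfg = IDTCM_FW_REG(fw_ver, V520, TOD_CFG);
	/* expands to: ((fw_ver) < (V520)) ? (TOD_CFG) : (TOD_CFG_V520) */
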
+
+/* PTP PLL Mode */
+enum ptp_pll_mode {
+	PTP_PLL_MODE_MIN = 0,
+	PTP_PLL_MODE_WRITE_FREQUENCY = PTP_PLL_MODE_MIN,
+	PTP_PLL_MODE_WRITE_PHASE,
+	PTP_PLL_MODE_UNSUPPORTED,
+	PTP_PLL_MODE_MAX = PTP_PLL_MODE_UNSUPPORTED,
+};
 
 /* Values of DPLL_N.DPLL_MODE.PLL_MODE */
 enum pll_mode {
 	PLL_MODE_MIN = 0,
-	PLL_MODE_NORMAL = PLL_MODE_MIN,
+	PLL_MODE_PLL = PLL_MODE_MIN,
 	PLL_MODE_WRITE_PHASE = 1,
 	PLL_MODE_WRITE_FREQUENCY = 2,
 	PLL_MODE_GPIO_INC_DEC = 3,
@@ -75,6 +87,31 @@
 	PLL_MODE_MAX = PLL_MODE_DISABLED,
 };
 
+/* Values of DPLL_CTRL_n.DPLL_MANU_REF_CFG.MANUAL_REFERENCE */
+enum manual_reference {
+	MANU_REF_MIN = 0,
+	MANU_REF_CLK0 = MANU_REF_MIN,
+	MANU_REF_CLK1,
+	MANU_REF_CLK2,
+	MANU_REF_CLK3,
+	MANU_REF_CLK4,
+	MANU_REF_CLK5,
+	MANU_REF_CLK6,
+	MANU_REF_CLK7,
+	MANU_REF_CLK8,
+	MANU_REF_CLK9,
+	MANU_REF_CLK10,
+	MANU_REF_CLK11,
+	MANU_REF_CLK12,
+	MANU_REF_CLK13,
+	MANU_REF_CLK14,
+	MANU_REF_CLK15,
+	MANU_REF_WRITE_PHASE,
+	MANU_REF_WRITE_FREQUENCY,
+	MANU_REF_XO_DPLL,
+	MANU_REF_MAX = MANU_REF_XO_DPLL,
+};
+
 enum hw_tod_write_trig_sel {
 	HW_TOD_WR_TRIG_SEL_MIN = 0,
 	HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN,
@@ -119,6 +156,12 @@
 	DPLL_STATE_MAX = DPLL_STATE_OPEN_LOOP,
 };
 
+enum fw_version {
+	V_DEFAULT = 0,
+	V487 = 1,
+	V520 = 2,
+};
+
 struct idtcm;
 
 struct idtcm_channel {
@@ -134,7 +177,14 @@
 	u16			tod_write;
 	u16			tod_n;
 	u16			hw_dpll_n;
-	enum pll_mode		pll_mode;
+	u8			sync_src;
+	enum ptp_pll_mode	mode;
+	int			(*configure_write_frequency)(struct idtcm_channel *channel);
+	int			(*configure_write_phase)(struct idtcm_channel *channel);
+	int			(*do_phase_pull_in)(struct idtcm_channel *channel,
+						    s32 offset_ns, u32 max_ffo_ppb);
+	s32			current_freq_scaled_ppm;
+	bool			phase_pull_in;
 	u8			pll;
 	u16			output_mask;
 };
@@ -145,7 +195,7 @@
 	u8			page_offset;
 	u8			tod_mask;
 	char			version[16];
-	u8			deprecated;
+	enum fw_version		fw_ver;
 
 	/* Overhead calculation for adjtime */
 	u8			calculate_overhead_flag;
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index caf9b37..844b140 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -4,6 +4,7 @@
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/debugfs.h>
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/serial_8250.h>
@@ -72,7 +73,7 @@
 	u32	status;
 	u32	uart_polarity;
 	u32	version;
-	u32	correction_sec;
+	u32	adj_sec;
 	u32	__pad0[3];
 	u32	uart_baud;
 	u32	__pad1[3];
@@ -124,6 +125,55 @@
 	u32	version;
 };
 
+struct gpio_reg {
+	u32	gpio1;
+	u32	__pad0;
+	u32	gpio2;
+	u32	__pad1;
+};
+
+struct irig_master_reg {
+	u32	ctrl;
+	u32	status;
+	u32	__pad0;
+	u32	version;
+	u32	adj_sec;
+	u32	mode_ctrl;
+};
+
+#define IRIG_M_CTRL_ENABLE	BIT(0)
+
+struct irig_slave_reg {
+	u32	ctrl;
+	u32	status;
+	u32	__pad0;
+	u32	version;
+	u32	adj_sec;
+	u32	mode_ctrl;
+};
+
+#define IRIG_S_CTRL_ENABLE	BIT(0)
+
+struct dcf_master_reg {
+	u32	ctrl;
+	u32	status;
+	u32	__pad0;
+	u32	version;
+	u32	adj_sec;
+};
+
+#define DCF_M_CTRL_ENABLE	BIT(0)
+
+struct dcf_slave_reg {
+	u32	ctrl;
+	u32	status;
+	u32	__pad0;
+	u32	version;
+	u32	adj_sec;
+};
+
+#define DCF_S_CTRL_ENABLE	BIT(0)
+
 struct ptp_ocp_flash_info {
 	const char *name;
 	int pci_offset;
@@ -131,11 +181,17 @@
 	void *data;
 };
 
-struct ptp_ocp_ext_info {
+struct ptp_ocp_i2c_info {
 	const char *name;
+	unsigned long fixed_rate;
+	size_t data_size;
+	void *data;
+};
+
+struct ptp_ocp_ext_info {
 	int index;
 	irqreturn_t (*irq_fcn)(int irq, void *priv);
-	int (*enable)(void *priv, bool enable);
+	int (*enable)(void *priv, u32 req, bool enable);
 };
 
 struct ptp_ocp_ext_src {
@@ -153,9 +209,17 @@
 	struct tod_reg __iomem	*tod;
 	struct pps_reg __iomem	*pps_to_ext;
 	struct pps_reg __iomem	*pps_to_clk;
+	struct gpio_reg __iomem	*pps_select;
+	struct gpio_reg __iomem	*sma;
+	struct irig_master_reg	__iomem *irig_out;
+	struct irig_slave_reg	__iomem *irig_in;
+	struct dcf_master_reg	__iomem *dcf_out;
+	struct dcf_slave_reg	__iomem *dcf_in;
+	struct tod_reg		__iomem *nmea_out;
 	struct ptp_ocp_ext_src	*pps;
 	struct ptp_ocp_ext_src	*ts0;
 	struct ptp_ocp_ext_src	*ts1;
+	struct ptp_ocp_ext_src	*ts2;
 	struct img_reg __iomem	*image;
 	struct ptp_clock	*ptp;
 	struct ptp_clock_info	ptp_info;
@@ -163,16 +227,25 @@
 	struct platform_device	*spi_flash;
 	struct clk_hw		*i2c_clk;
 	struct timer_list	watchdog;
+	struct dentry		*debug_root;
 	time64_t		gnss_lost;
 	int			id;
 	int			n_irqs;
 	int			gnss_port;
+	int			gnss2_port;
 	int			mac_port;	/* miniature atomic clock */
+	int			nmea_port;
 	u8			serial[6];
-	int			flash_start;
 	bool			has_serial;
+	u32			pps_req_map;
+	int			flash_start;
+	u32			utc_tai_offset;
+	u32			ts_window_adjust;
 };
 
+#define OCP_REQ_TIMESTAMP	BIT(0)
+#define OCP_REQ_PPS		BIT(1)
+
 struct ocp_resource {
 	unsigned long offset;
 	int size;
@@ -180,6 +253,7 @@
 	int (*setup)(struct ptp_ocp *bp, struct ocp_resource *r);
 	void *extra;
 	unsigned long bp_offset;
+	const char * const name;
 };
 
 static int ptp_ocp_register_mem(struct ptp_ocp *bp, struct ocp_resource *r);
@@ -189,7 +263,7 @@
 static int ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r);
 static int ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r);
 static irqreturn_t ptp_ocp_ts_irq(int irq, void *priv);
-static int ptp_ocp_ts_enable(void *priv, bool enable);
+static int ptp_ocp_ts_enable(void *priv, u32 req, bool enable);
 
 #define bp_assign_entry(bp, res, val) ({				\
 	uintptr_t addr = (uintptr_t)(bp) + (res)->bp_offset;		\
@@ -197,7 +271,7 @@
 })
 
 #define OCP_RES_LOCATION(member) \
-	.bp_offset = offsetof(struct ptp_ocp, member)
+	.name = #member, .bp_offset = offsetof(struct ptp_ocp, member)
 
 #define OCP_MEM_RESOURCE(member) \
 	OCP_RES_LOCATION(member), .setup = ptp_ocp_register_mem
@@ -215,16 +289,17 @@
 	OCP_RES_LOCATION(member), .setup = ptp_ocp_register_ext
 
 /* This is the MSI vector mapping used.
- * 0: N/C
+ * 0: TS3 (and PPS)
  * 1: TS0
  * 2: TS1
- * 3: GPS
- * 4: GPS2 (n/c)
+ * 3: GNSS
+ * 4: GNSS2
  * 5: MAC
- * 6: SPI IMU (inertial measurement unit)
- * 7: I2C oscillator
- * 8: HWICAP
+ * 6: TS2
+ * 7: I2C controller
+ * 8: HWICAP (not used)
  * 9: SPI Flash
+ * 10: NMEA
  */
 
 static struct ocp_resource ocp_fb_resource[] = {
@@ -236,7 +311,7 @@
 		OCP_EXT_RESOURCE(ts0),
 		.offset = 0x01010000, .size = 0x10000, .irq_vec = 1,
 		.extra = &(struct ptp_ocp_ext_info) {
-			.name = "ts0", .index = 0,
+			.index = 0,
 			.irq_fcn = ptp_ocp_ts_irq,
 			.enable = ptp_ocp_ts_enable,
 		},
@@ -245,7 +320,25 @@
 		OCP_EXT_RESOURCE(ts1),
 		.offset = 0x01020000, .size = 0x10000, .irq_vec = 2,
 		.extra = &(struct ptp_ocp_ext_info) {
-			.name = "ts1", .index = 1,
+			.index = 1,
+			.irq_fcn = ptp_ocp_ts_irq,
+			.enable = ptp_ocp_ts_enable,
+		},
+	},
+	{
+		OCP_EXT_RESOURCE(ts2),
+		.offset = 0x01060000, .size = 0x10000, .irq_vec = 6,
+		.extra = &(struct ptp_ocp_ext_info) {
+			.index = 2,
+			.irq_fcn = ptp_ocp_ts_irq,
+			.enable = ptp_ocp_ts_enable,
+		},
+	},
+	{
+		OCP_EXT_RESOURCE(pps),
+		.offset = 0x010C0000, .size = 0x10000, .irq_vec = 0,
+		.extra = &(struct ptp_ocp_ext_info) {
+			.index = 3,
 			.irq_fcn = ptp_ocp_ts_irq,
 			.enable = ptp_ocp_ts_enable,
 		},
@@ -263,22 +356,62 @@
 		.offset = 0x01050000, .size = 0x10000,
 	},
 	{
+		OCP_MEM_RESOURCE(irig_in),
+		.offset = 0x01070000, .size = 0x10000,
+	},
+	{
+		OCP_MEM_RESOURCE(irig_out),
+		.offset = 0x01080000, .size = 0x10000,
+	},
+	{
+		OCP_MEM_RESOURCE(dcf_in),
+		.offset = 0x01090000, .size = 0x10000,
+	},
+	{
+		OCP_MEM_RESOURCE(dcf_out),
+		.offset = 0x010A0000, .size = 0x10000,
+	},
+	{
+		OCP_MEM_RESOURCE(nmea_out),
+		.offset = 0x010B0000, .size = 0x10000,
+	},
+	{
 		OCP_MEM_RESOURCE(image),
 		.offset = 0x00020000, .size = 0x1000,
 	},
 	{
+		OCP_MEM_RESOURCE(pps_select),
+		.offset = 0x00130000, .size = 0x1000,
+	},
+	{
+		OCP_MEM_RESOURCE(sma),
+		.offset = 0x00140000, .size = 0x1000,
+	},
+	{
 		OCP_I2C_RESOURCE(i2c_ctrl),
 		.offset = 0x00150000, .size = 0x10000, .irq_vec = 7,
+		.extra = &(struct ptp_ocp_i2c_info) {
+			.name = "xiic-i2c",
+			.fixed_rate = 50000000,
+		},
 	},
 	{
 		OCP_SERIAL_RESOURCE(gnss_port),
 		.offset = 0x00160000 + 0x1000, .irq_vec = 3,
 	},
 	{
+		OCP_SERIAL_RESOURCE(gnss2_port),
+		.offset = 0x00170000 + 0x1000, .irq_vec = 4,
+	},
+	{
 		OCP_SERIAL_RESOURCE(mac_port),
 		.offset = 0x00180000 + 0x1000, .irq_vec = 5,
 	},
 	{
+		OCP_SERIAL_RESOURCE(nmea_port),
+		.offset = 0x00190000 + 0x1000, .irq_vec = 10,
+	},
+	{
 		OCP_SPI_RESOURCE(spi_flash),
 		.offset = 0x00310000, .size = 0x10000, .irq_vec = 9,
 		.extra = &(struct ptp_ocp_flash_info) {
@@ -309,10 +442,12 @@
 static DEFINE_MUTEX(ptp_ocp_lock);
 static DEFINE_IDR(ptp_ocp_idr);
 
-static struct {
+struct ocp_selector {
 	const char *name;
 	int value;
-} ptp_ocp_clock[] = {
+};
+
+static struct ocp_selector ptp_ocp_clock[] = {
 	{ .name = "NONE",	.value = 0 },
 	{ .name = "TOD",	.value = 1 },
 	{ .name = "IRIG",	.value = 2 },
@@ -322,33 +457,71 @@
 	{ .name = "DCF",	.value = 6 },
 	{ .name = "REGS",	.value = 0xfe },
 	{ .name = "EXT",	.value = 0xff },
+	{ }
+};
+
+static struct ocp_selector ptp_ocp_sma_in[] = {
+	{ .name = "10Mhz",	.value = 0x00 },
+	{ .name = "PPS1",	.value = 0x01 },
+	{ .name = "PPS2",	.value = 0x02 },
+	{ .name = "TS1",	.value = 0x04 },
+	{ .name = "TS2",	.value = 0x08 },
+	{ .name = "IRIG",	.value = 0x10 },
+	{ .name = "DCF",	.value = 0x20 },
+	{ }
+};
+
+static struct ocp_selector ptp_ocp_sma_out[] = {
+	{ .name = "10Mhz",	.value = 0x00 },
+	{ .name = "PHC",	.value = 0x01 },
+	{ .name = "MAC",	.value = 0x02 },
+	{ .name = "GNSS",	.value = 0x04 },
+	{ .name = "GNSS2",	.value = 0x08 },
+	{ .name = "IRIG",	.value = 0x10 },
+	{ .name = "DCF",	.value = 0x20 },
+	{ }
 };
 
 static const char *
-ptp_ocp_clock_name_from_val(int val)
+ptp_ocp_select_name_from_val(struct ocp_selector *tbl, int val)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(ptp_ocp_clock); i++)
-		if (ptp_ocp_clock[i].value == val)
-			return ptp_ocp_clock[i].name;
+	for (i = 0; tbl[i].name; i++)
+		if (tbl[i].value == val)
+			return tbl[i].name;
 	return NULL;
 }
 
 static int
-ptp_ocp_clock_val_from_name(const char *name)
+ptp_ocp_select_val_from_name(struct ocp_selector *tbl, const char *name)
 {
-	const char *clk;
+	const char *select;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(ptp_ocp_clock); i++) {
-		clk = ptp_ocp_clock[i].name;
-		if (!strncasecmp(name, clk, strlen(clk)))
-			return ptp_ocp_clock[i].value;
+	for (i = 0; tbl[i].name; i++) {
+		select = tbl[i].name;
+		if (!strncasecmp(name, select, strlen(select)))
+			return tbl[i].value;
 	}
 	return -EINVAL;
 }
 
+static ssize_t
+ptp_ocp_select_table_show(struct ocp_selector *tbl, char *buf)
+{
+	ssize_t count;
+	int i;
+
+	count = 0;
+	for (i = 0; tbl[i].name; i++)
+		count += sysfs_emit_at(buf, count, "%s ", tbl[i].name);
+	if (count)
+		count--;
+	count += sysfs_emit_at(buf, count, "\n");
+	return count;
+}
+
 static int
 __ptp_ocp_gettime_locked(struct ptp_ocp *bp, struct timespec64 *ts,
 			 struct ptp_system_timestamp *sts)
@@ -356,10 +529,9 @@
 	u32 ctrl, time_sec, time_ns;
 	int i;
 
-	ctrl = ioread32(&bp->reg->ctrl);
-	ctrl |= OCP_CTRL_READ_TIME_REQ;
-
 	ptp_read_system_prets(sts);
+
+	ctrl = OCP_CTRL_READ_TIME_REQ | OCP_CTRL_ENABLE;
 	iowrite32(ctrl, &bp->reg->ctrl);
 
 	for (i = 0; i < 100; i++) {
@@ -369,6 +541,12 @@
 	}
 	ptp_read_system_postts(sts);
 
+	if (sts && bp->ts_window_adjust) {
+		s64 ns = timespec64_to_ns(&sts->post_ts);
+
+		sts->post_ts = ns_to_timespec64(ns - bp->ts_window_adjust);
+	}
+
 	time_ns = ioread32(&bp->reg->time_ns);
 	time_sec = ioread32(&bp->reg->time_sec);
 
@@ -408,8 +586,7 @@
 	iowrite32(time_ns, &bp->reg->adjust_ns);
 	iowrite32(time_sec, &bp->reg->adjust_sec);
 
-	ctrl = ioread32(&bp->reg->ctrl);
-	ctrl |= OCP_CTRL_ADJUST_TIME;
+	ctrl = OCP_CTRL_ADJUST_TIME | OCP_CTRL_ENABLE;
 	iowrite32(ctrl, &bp->reg->ctrl);
 
 	/* restore clock selection */
@@ -422,9 +599,6 @@
 	struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info);
 	unsigned long flags;
 
-	if (ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC)
-		return 0;
-
 	spin_lock_irqsave(&bp->lock, flags);
 	__ptp_ocp_settime_locked(bp, ts);
 	spin_unlock_irqrestore(&bp->lock, flags);
@@ -432,26 +606,39 @@
 	return 0;
 }
 
+static void
+__ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val)
+{
+	u32 select, ctrl;
+
+	select = ioread32(&bp->reg->select);
+	iowrite32(OCP_SELECT_CLK_REG, &bp->reg->select);
+
+	iowrite32(adj_val, &bp->reg->offset_ns);
+	iowrite32(adj_val & 0x7f, &bp->reg->offset_window_ns);
+
+	ctrl = OCP_CTRL_ADJUST_OFFSET | OCP_CTRL_ENABLE;
+	iowrite32(ctrl, &bp->reg->ctrl);
+
+	/* restore clock selection */
+	iowrite32(select >> 16, &bp->reg->select);
+}
+
 static int
 ptp_ocp_adjtime(struct ptp_clock_info *ptp_info, s64 delta_ns)
 {
 	struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info);
-	struct timespec64 ts;
 	unsigned long flags;
-	int err;
+	u32 adj_ns, sign;
 
-	if (ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC)
-		return 0;
+	sign = delta_ns < 0 ? BIT(31) : 0;
+	adj_ns = sign ? -delta_ns : delta_ns;
 
 	spin_lock_irqsave(&bp->lock, flags);
-	err = __ptp_ocp_gettime_locked(bp, &ts, NULL);
-	if (likely(!err)) {
-		timespec64_add_ns(&ts, delta_ns);
-		__ptp_ocp_settime_locked(bp, &ts);
-	}
+	__ptp_ocp_adjtime_locked(bp, sign | adj_ns);
 	spin_unlock_irqrestore(&bp->lock, flags);
 
-	return err;
+	return 0;
 }
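
The adjustment register is sign-magnitude rather than two's complement: bit 31
carries the direction and the low bits carry |delta_ns|, which
__ptp_ocp_adjtime_locked() then writes to offset_ns. A small illustration of
the encoding (assumed helper, not driver code):

static u32 example_encode_adj(s64 delta_ns)
{
	u32 sign = delta_ns < 0 ? BIT(31) : 0;

	return sign | (u32)(sign ? -delta_ns : delta_ns);
	/* e.g. -250 ns -> 0x800000fa, +250 ns -> 0x000000fa */
}
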
 
 static int
@@ -464,7 +651,7 @@
 }
 
 static int
-ptp_ocp_adjphase(struct ptp_clock_info *ptp_info, s32 phase_ns)
+ptp_ocp_null_adjphase(struct ptp_clock_info *ptp_info, s32 phase_ns)
 {
 	return -EOPNOTSUPP;
 }
@@ -475,10 +662,12 @@
 {
 	struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info);
 	struct ptp_ocp_ext_src *ext = NULL;
+	u32 req;
 	int err;
 
 	switch (rq->type) {
 	case PTP_CLK_REQ_EXTTS:
+		req = OCP_REQ_TIMESTAMP;
 		switch (rq->extts.index) {
 		case 0:
 			ext = bp->ts0;
@@ -486,18 +675,33 @@
 		case 1:
 			ext = bp->ts1;
 			break;
+		case 2:
+			ext = bp->ts2;
+			break;
+		case 3:
+			ext = bp->pps;
+			break;
 		}
 		break;
 	case PTP_CLK_REQ_PPS:
+		req = OCP_REQ_PPS;
 		ext = bp->pps;
 		break;
+	case PTP_CLK_REQ_PEROUT:
+		if (on &&
+		    (rq->perout.period.sec != 1 || rq->perout.period.nsec != 0))
+			return -EINVAL;
+		/* This is a request for 1PPS on an output SMA.
+		 * Allow, but assume manual configuration.
+		 */
+		return 0;
 	default:
 		return -EOPNOTSUPP;
 	}
 
 	err = -ENXIO;
 	if (ext)
-		err = ext->info->enable(ext, on);
+		err = ext->info->enable(ext, req, on);
 
 	return err;
 }
@@ -510,10 +714,11 @@
 	.settime64	= ptp_ocp_settime,
 	.adjtime	= ptp_ocp_adjtime,
 	.adjfine	= ptp_ocp_null_adjfine,
-	.adjphase	= ptp_ocp_adjphase,
+	.adjphase	= ptp_ocp_null_adjphase,
 	.enable		= ptp_ocp_enable,
 	.pps		= true,
-	.n_ext_ts	= 2,
+	.n_ext_ts	= 4,
+	.n_per_out	= 1,
 };
 
 static void
@@ -526,8 +731,7 @@
 
 	iowrite32(0, &bp->reg->drift_ns);
 
-	ctrl = ioread32(&bp->reg->ctrl);
-	ctrl |= OCP_CTRL_ADJUST_DRIFT;
+	ctrl = OCP_CTRL_ADJUST_DRIFT | OCP_CTRL_ENABLE;
 	iowrite32(ctrl, &bp->reg->ctrl);
 
 	/* restore clock selection */
@@ -559,6 +763,28 @@
 	mod_timer(&bp->watchdog, jiffies + HZ);
 }
 
+static void
+ptp_ocp_estimate_pci_timing(struct ptp_ocp *bp)
+{
+	ktime_t start, end;
+	ktime_t delay;
+	u32 ctrl;
+
+	ctrl = ioread32(&bp->reg->ctrl);
+	ctrl = OCP_CTRL_READ_TIME_REQ | OCP_CTRL_ENABLE;
+
+	iowrite32(ctrl, &bp->reg->ctrl);
+
+	start = ktime_get_ns();
+
+	ctrl = ioread32(&bp->reg->ctrl);
+
+	end = ktime_get_ns();
+
+	delay = end - start;
+	bp->ts_window_adjust = (delay >> 5) * 3;
+}
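
ptp_ocp_estimate_pci_timing() times one posted register write plus one
register read and keeps 3/32 of that round trip as ts_window_adjust;
__ptp_ocp_gettime_locked() later subtracts it from post_ts to tighten the
reported timestamp window. The arithmetic, with a hypothetical 2 us round
trip:

	u32 ts_window_adjust = (2000 >> 5) * 3;	/* 186 ns shaved off post_ts */
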
+
 static int
 ptp_ocp_init_clock(struct ptp_ocp *bp)
 {
@@ -566,9 +792,7 @@
 	bool sync;
 	u32 ctrl;
 
-	/* make sure clock is enabled */
-	ctrl = ioread32(&bp->reg->ctrl);
-	ctrl |= OCP_CTRL_ENABLE;
+	ctrl = OCP_CTRL_ENABLE;
 	iowrite32(ctrl, &bp->reg->ctrl);
 
 	/* NO DRIFT Correction */
@@ -587,23 +811,58 @@
 		return -ENODEV;
 	}
 
+	ptp_ocp_estimate_pci_timing(bp);
+
 	sync = ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC;
 	if (!sync) {
-		ktime_get_real_ts64(&ts);
+		ktime_get_clocktai_ts64(&ts);
 		ptp_ocp_settime(&bp->ptp_info, &ts);
 	}
-	if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, NULL))
-		dev_info(&bp->pdev->dev, "Time: %lld.%ld, %s\n",
-			 ts.tv_sec, ts.tv_nsec,
-			 sync ? "in-sync" : "UNSYNCED");
 
-	timer_setup(&bp->watchdog, ptp_ocp_watchdog, 0);
-	mod_timer(&bp->watchdog, jiffies + HZ);
+	/* If there is a clock supervisor, then enable the watchdog */
+	if (bp->pps_to_clk) {
+		timer_setup(&bp->watchdog, ptp_ocp_watchdog, 0);
+		mod_timer(&bp->watchdog, jiffies + HZ);
+	}
 
 	return 0;
 }
 
 static void
+ptp_ocp_utc_distribute(struct ptp_ocp *bp, u32 val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&bp->lock, flags);
+
+	bp->utc_tai_offset = val;
+
+	if (bp->irig_out)
+		iowrite32(val, &bp->irig_out->adj_sec);
+	if (bp->dcf_out)
+		iowrite32(val, &bp->dcf_out->adj_sec);
+	if (bp->nmea_out)
+		iowrite32(val, &bp->nmea_out->adj_sec);
+
+	spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static void
+ptp_ocp_tod_init(struct ptp_ocp *bp)
+{
+	u32 ctrl, reg;
+
+	ctrl = ioread32(&bp->tod->ctrl);
+	ctrl |= TOD_CTRL_PROTOCOL | TOD_CTRL_ENABLE;
+	ctrl &= ~(TOD_CTRL_DISABLE_FMT_A | TOD_CTRL_DISABLE_FMT_B);
+	iowrite32(ctrl, &bp->tod->ctrl);
+
+	reg = ioread32(&bp->tod->utc_status);
+	if (reg & TOD_STATUS_UTC_VALID)
+		ptp_ocp_utc_distribute(bp, reg & TOD_STATUS_UTC_MASK);
+}
+
+static void
 ptp_ocp_tod_info(struct ptp_ocp *bp)
 {
 	static const char * const proto_name[] = {
@@ -621,11 +880,6 @@
 		 version >> 24, (version >> 16) & 0xff, version & 0xffff);
 
 	ctrl = ioread32(&bp->tod->ctrl);
-	ctrl |= TOD_CTRL_PROTOCOL | TOD_CTRL_ENABLE;
-	ctrl &= ~(TOD_CTRL_DISABLE_FMT_A | TOD_CTRL_DISABLE_FMT_B);
-	iowrite32(ctrl, &bp->tod->ctrl);
-
-	ctrl = ioread32(&bp->tod->ctrl);
 	idx = ctrl & TOD_CTRL_PROTOCOL ? 4 : 0;
 	idx += (ctrl >> 16) & 3;
 	dev_info(&bp->pdev->dev, "control: %x\n", ctrl);
@@ -639,7 +893,7 @@
 	reg = ioread32(&bp->tod->status);
 	dev_info(&bp->pdev->dev, "status: %x\n", reg);
 
-	reg = ioread32(&bp->tod->correction_sec);
+	reg = ioread32(&bp->tod->adj_sec);
 	dev_info(&bp->pdev->dev, "correction: %d\n", reg);
 
 	reg = ioread32(&bp->tod->utc_status);
@@ -695,6 +949,9 @@
 	struct device *dev;
 	int err;
 
+	if (!bp->i2c_ctrl)
+		return;
+
 	dev = device_find_child(&bp->i2c_ctrl->dev, NULL, ptp_ocp_firstchild);
 	if (!dev) {
 		dev_err(&bp->pdev->dev, "Can't find I2C adapter\n");
@@ -720,21 +977,6 @@
 	put_device(dev);
 }
 
-static void
-ptp_ocp_info(struct ptp_ocp *bp)
-{
-	u32 version, select;
-
-	version = ioread32(&bp->reg->version);
-	select = ioread32(&bp->reg->select);
-	dev_info(&bp->pdev->dev, "Version %d.%d.%d, clock %s, device ptp%d\n",
-		 version >> 24, (version >> 16) & 0xff, version & 0xffff,
-		 ptp_ocp_clock_name_from_val(select >> 16),
-		 ptp_clock_index(bp->ptp));
-
-	ptp_ocp_tod_info(bp);
-}
-
 static struct device *
 ptp_ocp_find_flash(struct ptp_ocp *bp)
 {
@@ -910,18 +1152,6 @@
 	unsigned long start;
 	int id;
 
-	/* XXX hack to work around old FPGA */
-	if (bp->n_irqs < 10) {
-		dev_err(&bp->pdev->dev, "FPGA does not have SPI devices\n");
-		return 0;
-	}
-
-	if (r->irq_vec > bp->n_irqs) {
-		dev_err(&bp->pdev->dev, "spi device irq %d out of range\n",
-			r->irq_vec);
-		return 0;
-	}
-
 	start = pci_resource_start(pdev, 0) + r->offset;
 	ptp_ocp_set_mem_resource(&res[0], start, r->size);
 	ptp_ocp_set_irq_resource(&res[1], pci_irq_vector(pdev, r->irq_vec));
@@ -944,41 +1174,41 @@
 static struct platform_device *
 ptp_ocp_i2c_bus(struct pci_dev *pdev, struct ocp_resource *r, int id)
 {
+	struct ptp_ocp_i2c_info *info;
 	struct resource res[2];
 	unsigned long start;
 
+	info = r->extra;
 	start = pci_resource_start(pdev, 0) + r->offset;
 	ptp_ocp_set_mem_resource(&res[0], start, r->size);
 	ptp_ocp_set_irq_resource(&res[1], pci_irq_vector(pdev, r->irq_vec));
 
-	return platform_device_register_resndata(&pdev->dev, "xiic-i2c",
-						 id, res, 2, NULL, 0);
+	return platform_device_register_resndata(&pdev->dev, info->name,
+						 id, res, 2,
+						 info->data, info->data_size);
 }
 
 static int
 ptp_ocp_register_i2c(struct ptp_ocp *bp, struct ocp_resource *r)
 {
 	struct pci_dev *pdev = bp->pdev;
+	struct ptp_ocp_i2c_info *info;
 	struct platform_device *p;
 	struct clk_hw *clk;
 	char buf[32];
 	int id;
 
-	if (r->irq_vec > bp->n_irqs) {
-		dev_err(&bp->pdev->dev, "i2c device irq %d out of range\n",
-			r->irq_vec);
-		return 0;
-	}
-
+	info = r->extra;
 	id = pci_dev_id(bp->pdev);
 
 	sprintf(buf, "AXI.%d", id);
-	clk = clk_hw_register_fixed_rate(&pdev->dev, buf, NULL, 0, 50000000);
+	clk = clk_hw_register_fixed_rate(&pdev->dev, buf, NULL, 0,
+					 info->fixed_rate);
 	if (IS_ERR(clk))
 		return PTR_ERR(clk);
 	bp->i2c_clk = clk;
 
-	sprintf(buf, "xiic-i2c.%d", id);
+	sprintf(buf, "%s.%d", info->name, id);
 	devm_clk_hw_register_clkdev(&pdev->dev, clk, NULL, buf);
 	p = ptp_ocp_i2c_bus(bp->pdev, r, id);
 	if (IS_ERR(p))
@@ -997,26 +1227,51 @@
 	struct ptp_clock_event ev;
 	u32 sec, nsec;
 
+	if (ext == ext->bp->pps) {
+		if (ext->bp->pps_req_map & OCP_REQ_PPS) {
+			ev.type = PTP_CLOCK_PPS;
+			ptp_clock_event(ext->bp->ptp, &ev);
+		}
+
+		if ((ext->bp->pps_req_map & ~OCP_REQ_PPS) == 0)
+			goto out;
+	}
+
 	/* XXX should fix API - this converts s/ns -> ts -> s/ns */
 	sec = ioread32(&reg->time_sec);
 	nsec = ioread32(&reg->time_ns);
 
 	ev.type = PTP_CLOCK_EXTTS;
 	ev.index = ext->info->index;
-	ev.timestamp = sec * 1000000000ULL + nsec;
+	ev.timestamp = sec * NSEC_PER_SEC + nsec;
 
 	ptp_clock_event(ext->bp->ptp, &ev);
 
+out:
 	iowrite32(1, &reg->intr);	/* write 1 to ack */
 
 	return IRQ_HANDLED;
 }
 
 static int
-ptp_ocp_ts_enable(void *priv, bool enable)
+ptp_ocp_ts_enable(void *priv, u32 req, bool enable)
 {
 	struct ptp_ocp_ext_src *ext = priv;
 	struct ts_reg __iomem *reg = ext->mem;
+	struct ptp_ocp *bp = ext->bp;
+
+	if (ext == bp->pps) {
+		u32 old_map = bp->pps_req_map;
+
+		if (enable)
+			bp->pps_req_map |= req;
+		else
+			bp->pps_req_map &= ~req;
+
+		/* if no state change, just return */
+		if ((!!old_map ^ !!bp->pps_req_map) == 0)
+			return 0;
+	}
 
 	if (enable) {
 		iowrite32(1, &reg->enable);
@@ -1033,7 +1288,7 @@
 static void
 ptp_ocp_unregister_ext(struct ptp_ocp_ext_src *ext)
 {
-	ext->info->enable(ext, false);
+	ext->info->enable(ext, ~0, false);
 	pci_free_irq(ext->bp->pdev, ext->irq_vec, ext);
 	kfree(ext);
 }
@@ -1059,7 +1314,7 @@
 	ext->irq_vec = r->irq_vec;
 
 	err = pci_request_irq(pdev, r->irq_vec, ext->info->irq_fcn, NULL,
-			      ext, "ocp%d.%s", bp->id, ext->info->name);
+			      ext, "ocp%d.%s", bp->id, r->name);
 	if (err) {
 		dev_err(&pdev->dev, "Could not get irq %d\n", r->irq_vec);
 		goto out;
@@ -1101,12 +1356,6 @@
 {
 	int port;
 
-	if (r->irq_vec > bp->n_irqs) {
-		dev_err(&bp->pdev->dev, "serial device irq %d out of range\n",
-			r->irq_vec);
-		return 0;
-	}
-
 	port = ptp_ocp_serial_line(bp, r);
 	if (port < 0)
 		return port;
@@ -1130,15 +1379,40 @@
 	return 0;
 }
 
+static void
+ptp_ocp_nmea_out_init(struct ptp_ocp *bp)
+{
+	if (!bp->nmea_out)
+		return;
+
+	iowrite32(0, &bp->nmea_out->ctrl);		/* disable */
+	iowrite32(7, &bp->nmea_out->uart_baud);		/* 115200 */
+	iowrite32(1, &bp->nmea_out->ctrl);		/* enable */
+}
+
 /* FB specific board initializers; last "resource" registered. */
 static int
 ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
 {
 	bp->flash_start = 1024 * 4096;
 
+	ptp_ocp_tod_init(bp);
+	ptp_ocp_nmea_out_init(bp);
+
 	return ptp_ocp_init_clock(bp);
 }
 
+static bool
+ptp_ocp_allow_irq(struct ptp_ocp *bp, struct ocp_resource *r)
+{
+	bool allow = !r->irq_vec || r->irq_vec < bp->n_irqs;
+
+	if (!allow)
+		dev_err(&bp->pdev->dev, "irq %d out of range, skipping %s\n",
+			r->irq_vec, r->name);
+	return allow;
+}
+
 static int
 ptp_ocp_register_resources(struct ptp_ocp *bp, kernel_ulong_t driver_data)
 {
@@ -1147,13 +1421,373 @@
 
 	table = (struct ocp_resource *)driver_data;
 	for (r = table; r->setup; r++) {
+		if (!ptp_ocp_allow_irq(bp, r))
+			continue;
 		err = r->setup(bp, r);
-		if (err)
+		if (err) {
+			dev_err(&bp->pdev->dev,
+				"Could not register %s: err %d\n",
+				r->name, err);
 			break;
+		}
 	}
 	return err;
 }
 
+static void
+ptp_ocp_enable_fpga(u32 __iomem *reg, u32 bit, bool enable)
+{
+	u32 ctrl;
+	bool on;
+
+	ctrl = ioread32(reg);
+	on = ctrl & bit;
+	if (on ^ enable) {
+		ctrl &= ~bit;
+		ctrl |= enable ? bit : 0;
+		iowrite32(ctrl, reg);
+	}
+}
+
+static void
+ptp_ocp_irig_out(struct ptp_ocp *bp, bool enable)
+{
+	return ptp_ocp_enable_fpga(&bp->irig_out->ctrl,
+				   IRIG_M_CTRL_ENABLE, enable);
+}
+
+static void
+ptp_ocp_irig_in(struct ptp_ocp *bp, bool enable)
+{
+	return ptp_ocp_enable_fpga(&bp->irig_in->ctrl,
+				   IRIG_S_CTRL_ENABLE, enable);
+}
+
+static void
+ptp_ocp_dcf_out(struct ptp_ocp *bp, bool enable)
+{
+	return ptp_ocp_enable_fpga(&bp->dcf_out->ctrl,
+				   DCF_M_CTRL_ENABLE, enable);
+}
+
+static void
+ptp_ocp_dcf_in(struct ptp_ocp *bp, bool enable)
+{
+	return ptp_ocp_enable_fpga(&bp->dcf_in->ctrl,
+				   DCF_S_CTRL_ENABLE, enable);
+}
+
+static void
+__handle_signal_outputs(struct ptp_ocp *bp, u32 val)
+{
+	ptp_ocp_irig_out(bp, val & 0x00100010);
+	ptp_ocp_dcf_out(bp, val & 0x00200020);
+}
+
+static void
+__handle_signal_inputs(struct ptp_ocp *bp, u32 val)
+{
+	ptp_ocp_irig_in(bp, val & 0x00100010);
+	ptp_ocp_dcf_in(bp, val & 0x00200020);
+}
+
+/*
+ * ANT0 == gps	(in)
+ * ANT1 == sma1 (in)
+ * ANT2 == sma2 (in)
+ * ANT3 == sma3 (out)
+ * ANT4 == sma4 (out)
+ */
+
+enum ptp_ocp_sma_mode {
+	SMA_MODE_IN,
+	SMA_MODE_OUT,
+};
+
+static struct ptp_ocp_sma_connector {
+	enum	ptp_ocp_sma_mode mode;
+	bool	fixed_mode;
+	u16	default_out_idx;
+} ptp_ocp_sma_map[4] = {
+	{
+		.mode = SMA_MODE_IN,
+		.fixed_mode = true,
+	},
+	{
+		.mode = SMA_MODE_IN,
+		.fixed_mode = true,
+	},
+	{
+		.mode = SMA_MODE_OUT,
+		.fixed_mode = true,
+		.default_out_idx = 0,		/* 10Mhz */
+	},
+	{
+		.mode = SMA_MODE_OUT,
+		.fixed_mode = true,
+		.default_out_idx = 1,		/* PHC */
+	},
+};
+
+static ssize_t
+ptp_ocp_show_output(u32 val, char *buf, int default_idx)
+{
+	const char *name;
+	ssize_t count;
+
+	count = sysfs_emit(buf, "OUT: ");
+	name = ptp_ocp_select_name_from_val(ptp_ocp_sma_out, val);
+	if (!name)
+		name = ptp_ocp_sma_out[default_idx].name;
+	count += sysfs_emit_at(buf, count, "%s\n", name);
+	return count;
+}
+
+static ssize_t
+ptp_ocp_show_inputs(u32 val, char *buf, const char *zero_in)
+{
+	const char *name;
+	ssize_t count;
+	int i;
+
+	count = sysfs_emit(buf, "IN: ");
+	for (i = 0; i < ARRAY_SIZE(ptp_ocp_sma_in); i++) {
+		if (val & ptp_ocp_sma_in[i].value) {
+			name = ptp_ocp_sma_in[i].name;
+			count += sysfs_emit_at(buf, count, "%s ", name);
+		}
+	}
+	if (!val && zero_in)
+		count += sysfs_emit_at(buf, count, "%s ", zero_in);
+	if (count)
+		count--;
+	count += sysfs_emit_at(buf, count, "\n");
+	return count;
+}
+
+static int
+sma_parse_inputs(const char *buf, enum ptp_ocp_sma_mode *mode)
+{
+	struct ocp_selector *tbl[] = { ptp_ocp_sma_in, ptp_ocp_sma_out };
+	int idx, count, dir;
+	char **argv;
+	int ret;
+
+	argv = argv_split(GFP_KERNEL, buf, &count);
+	if (!argv)
+		return -ENOMEM;
+
+	ret = -EINVAL;
+	if (!count)
+		goto out;
+
+	idx = 0;
+	dir = *mode == SMA_MODE_IN ? 0 : 1;
+	if (!strcasecmp("IN:", argv[idx])) {
+		dir = 0;
+		idx++;
+	}
+	if (!strcasecmp("OUT:", argv[0])) {
+		dir = 1;
+		idx++;
+	}
+	*mode = dir == 0 ? SMA_MODE_IN : SMA_MODE_OUT;
+
+	ret = 0;
+	for (; idx < count; idx++)
+		ret |= ptp_ocp_select_val_from_name(tbl[dir], argv[idx]);
+	if (ret < 0)
+		ret = -EINVAL;
+
+out:
+	argv_free(argv);
+	return ret;
+}
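
sma_parse_inputs() accepts an optional "IN:"/"OUT:" prefix and then ORs
together the selector value of every signal name that follows, so one SMA
input can feed several sinks at once. A hypothetical call (not driver code):

	enum ptp_ocp_sma_mode mode = SMA_MODE_IN;
	int val = sma_parse_inputs("IN: PPS1 TS1", &mode);
	/* val == 0x01 | 0x04 == 0x05; ptp_ocp_sma_store_inputs() then
	 * shifts this into the selected half of the sma gpio register.
	 */
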
+
+static ssize_t
+ptp_ocp_sma_show(struct ptp_ocp *bp, int sma_nr, u32 val, char *buf,
+		 const char *zero_in)
+{
+	struct ptp_ocp_sma_connector *sma = &ptp_ocp_sma_map[sma_nr - 1];
+
+	if (sma->mode == SMA_MODE_IN)
+		return ptp_ocp_show_inputs(val, buf, zero_in);
+
+	return ptp_ocp_show_output(val, buf, sma->default_out_idx);
+}
+
+static ssize_t
+sma1_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct ptp_ocp *bp = dev_get_drvdata(dev);
+	u32 val;
+
+	val = ioread32(&bp->sma->gpio1) & 0x3f;
+	return ptp_ocp_sma_show(bp, 1, val, buf, ptp_ocp_sma_in[0].name);
+}
+
+static ssize_t
+sma2_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct ptp_ocp *bp = dev_get_drvdata(dev);
+	u32 val;
+
+	val = (ioread32(&bp->sma->gpio1) >> 16) & 0x3f;
+	return ptp_ocp_sma_show(bp, 2, val, buf, NULL);
+}
+
+static ssize_t
+sma3_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct ptp_ocp *bp = dev_get_drvdata(dev);
+	u32 val;
+
+	val = ioread32(&bp->sma->gpio2) & 0x3f;
+	return ptp_ocp_sma_show(bp, 3, val, buf, NULL);
+}
+
+static ssize_t
+sma4_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct ptp_ocp *bp = dev_get_drvdata(dev);
+	u32 val;
+
+	val = (ioread32(&bp->sma->gpio2) >> 16) & 0x3f;
+	return ptp_ocp_sma_show(bp, 4, val, buf, NULL);
+}
+
+static void
+ptp_ocp_sma_store_output(struct ptp_ocp *bp, u32 val, u32 shift)
+{
+	unsigned long flags;
+	u32 gpio, mask;
+
+	mask = 0xffff << (16 - shift);
+
+	spin_lock_irqsave(&bp->lock, flags);
+
+	gpio = ioread32(&bp->sma->gpio2);
+	gpio = (gpio & mask) | (val << shift);
+
+	__handle_signal_outputs(bp, gpio);
+
+	iowrite32(gpio, &bp->sma->gpio2);
+
+	spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static void
+ptp_ocp_sma_store_inputs(struct ptp_ocp *bp, u32 val, u32 shift)
+{
+	unsigned long flags;
+	u32 gpio, mask;
+
+	mask = 0xffff << (16 - shift);
+
+	spin_lock_irqsave(&bp->lock, flags);
+
+	gpio = ioread32(&bp->sma->gpio1);
+	gpio = (gpio & mask) | (val << shift);
+
+	__handle_signal_inputs(bp, gpio);
+
+	iowrite32(gpio, &bp->sma->gpio1);
+
+	spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static ssize_t
+ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr, u32 shift)
+{
+	struct ptp_ocp_sma_connector *sma = &ptp_ocp_sma_map[sma_nr - 1];
+	enum ptp_ocp_sma_mode mode;
+	int val;
+
+	mode = sma->mode;
+	val = sma_parse_inputs(buf, &mode);
+	if (val < 0)
+		return val;
+
+	if (mode != sma->mode && sma->fixed_mode)
+		return -EOPNOTSUPP;
+
+	if (mode != sma->mode) {
+		pr_err("Mode changes not supported yet.\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (sma->mode == SMA_MODE_IN)
+		ptp_ocp_sma_store_inputs(bp, val, shift);
+	else
+		ptp_ocp_sma_store_output(bp, val, shift);
+
+	return 0;
+}
+
+static ssize_t
+sma1_store(struct device *dev, struct device_attribute *attr,
+	   const char *buf, size_t count)
+{
+	struct ptp_ocp *bp = dev_get_drvdata(dev);
+	int err;
+
+	err = ptp_ocp_sma_store(bp, buf, 1, 0);
+	return err ? err : count;
+}
+
+static ssize_t
+sma2_store(struct device *dev, struct device_attribute *attr,
+	   const char *buf, size_t count)
+{
+	struct ptp_ocp *bp = dev_get_drvdata(dev);
+	int err;
+
+	err = ptp_ocp_sma_store(bp, buf, 2, 16);
+	return err ? err : count;
+}
+
+static ssize_t
+sma3_store(struct device *dev, struct device_attribute *attr,
+	   const char *buf, size_t count)
+{
+	struct ptp_ocp *bp = dev_get_drvdata(dev);
+	int err;
+
+	err = ptp_ocp_sma_store(bp, buf, 3, 0);
+	return err ? err : count;
+}
+
+static ssize_t
+sma4_store(struct device *dev, struct device_attribute *attr,
+	   const char *buf, size_t count)
+{
+	struct ptp_ocp *bp = dev_get_drvdata(dev);
+	int err;
+
+	err = ptp_ocp_sma_store(bp, buf, 4, 16);
+	return err ? err : count;
+}
+static DEVICE_ATTR_RW(sma1);
+static DEVICE_ATTR_RW(sma2);
+static DEVICE_ATTR_RW(sma3);
+static DEVICE_ATTR_RW(sma4);
+
+static ssize_t
+available_sma_inputs_show(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	return ptp_ocp_select_table_show(ptp_ocp_sma_in, buf);
+}
+static DEVICE_ATTR_RO(available_sma_inputs);
+
+static ssize_t
+available_sma_outputs_show(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	return ptp_ocp_select_table_show(ptp_ocp_sma_out, buf);
+}
+static DEVICE_ATTR_RO(available_sma_outputs);
+
 static ssize_t
 serialnum_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -1182,6 +1816,102 @@
 static DEVICE_ATTR_RO(gnss_sync);
 
 static ssize_t
+utc_tai_offset_show(struct device *dev,
+		    struct device_attribute *attr, char *buf)
+{
+	struct ptp_ocp *bp = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "%d\n", bp->utc_tai_offset);
+}
+
+static ssize_t
+utc_tai_offset_store(struct device *dev,
+		     struct device_attribute *attr,
+		     const char *buf, size_t count)
+{
+	struct ptp_ocp *bp = dev_get_drvdata(dev);
+	int err;
+	u32 val;
+
+	err = kstrtou32(buf, 0, &val);
+	if (err)
+		return err;
+
+	ptp_ocp_utc_distribute(bp, val);
+
+	return count;
+}
+static DEVICE_ATTR_RW(utc_tai_offset);
+
+static ssize_t
+ts_window_adjust_show(struct device *dev,
+		      struct device_attribute *attr, char *buf)
+{
+	struct ptp_ocp *bp = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "%d\n", bp->ts_window_adjust);
+}
+
+static ssize_t
+ts_window_adjust_store(struct device *dev,
+		       struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	struct ptp_ocp *bp = dev_get_drvdata(dev);
+	int err;
+	u32 val;
+
+	err = kstrtou32(buf, 0, &val);
+	if (err)
+		return err;
+
+	bp->ts_window_adjust = val;
+
+	return count;
+}
+static DEVICE_ATTR_RW(ts_window_adjust);
+
+static ssize_t
+irig_b_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct ptp_ocp *bp = dev_get_drvdata(dev);
+	u32 val;
+
+	val = ioread32(&bp->irig_out->ctrl);
+	val = (val >> 16) & 0x07;
+	return sysfs_emit(buf, "%d\n", val);
+}
+
+static ssize_t
+irig_b_mode_store(struct device *dev,
+		  struct device_attribute *attr,
+		  const char *buf, size_t count)
+{
+	struct ptp_ocp *bp = dev_get_drvdata(dev);
+	unsigned long flags;
+	int err;
+	u32 reg;
+	u8 val;
+
+	err = kstrtou8(buf, 0, &val);
+	if (err)
+		return err;
+	if (val > 7)
+		return -EINVAL;
+
+	reg = ((val & 0x7) << 16);
+
+	spin_lock_irqsave(&bp->lock, flags);
+	iowrite32(0, &bp->irig_out->ctrl);		/* disable */
+	iowrite32(reg, &bp->irig_out->ctrl);		/* change mode */
+	iowrite32(reg | IRIG_M_CTRL_ENABLE, &bp->irig_out->ctrl);
+	spin_unlock_irqrestore(&bp->lock, flags);
+
+	return count;
+}
+static DEVICE_ATTR_RW(irig_b_mode);
+
+static ssize_t
 clock_source_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct ptp_ocp *bp = dev_get_drvdata(dev);
@@ -1189,7 +1919,7 @@
 	u32 select;
 
 	select = ioread32(&bp->reg->select);
-	p = ptp_ocp_clock_name_from_val(select >> 16);
+	p = ptp_ocp_select_name_from_val(ptp_ocp_clock, select >> 16);
 
 	return sysfs_emit(buf, "%s\n", p);
 }
@@ -1202,7 +1932,7 @@
 	unsigned long flags;
 	int val;
 
-	val = ptp_ocp_clock_val_from_name(buf);
+	val = ptp_ocp_select_val_from_name(ptp_ocp_clock, buf);
 	if (val < 0)
 		return val;
 
@@ -1218,19 +1948,7 @@
 available_clock_sources_show(struct device *dev,
 			     struct device_attribute *attr, char *buf)
 {
-	const char *clk;
-	ssize_t count;
-	int i;
-
-	count = 0;
-	for (i = 0; i < ARRAY_SIZE(ptp_ocp_clock); i++) {
-		clk = ptp_ocp_clock[i].name;
-		count += sysfs_emit_at(buf, count, "%s ", clk);
-	}
-	if (count)
-		count--;
-	count += sysfs_emit_at(buf, count, "\n");
-	return count;
+	return ptp_ocp_select_table_show(ptp_ocp_clock, buf);
 }
 static DEVICE_ATTR_RO(available_clock_sources);
 
@@ -1239,10 +1957,258 @@
 	&dev_attr_gnss_sync.attr,
 	&dev_attr_clock_source.attr,
 	&dev_attr_available_clock_sources.attr,
+	&dev_attr_sma1.attr,
+	&dev_attr_sma2.attr,
+	&dev_attr_sma3.attr,
+	&dev_attr_sma4.attr,
+	&dev_attr_available_sma_inputs.attr,
+	&dev_attr_available_sma_outputs.attr,
+	&dev_attr_irig_b_mode.attr,
+	&dev_attr_utc_tai_offset.attr,
+	&dev_attr_ts_window_adjust.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(timecard);
 
+static const char *
+gpio_map(u32 gpio, u32 bit, const char *pri, const char *sec, const char *def)
+{
+	const char *ans;
+
+	if (gpio & (1 << bit))
+		ans = pri;
+	else if (gpio & (1 << (bit + 16)))
+		ans = sec;
+	else
+		ans = def;
+	return ans;
+}
+
+static void
+gpio_multi_map(char *buf, u32 gpio, u32 bit,
+	       const char *pri, const char *sec, const char *def)
+{
+	char *ans = buf;
+
+	strcpy(ans, def);
+	if (gpio & (1 << bit))
+		ans += sprintf(ans, "%s ", pri);
+	if (gpio & (1 << (bit + 16)))
+		ans += sprintf(ans, "%s ", sec);
+}
+
+static int
+ptp_ocp_summary_show(struct seq_file *s, void *data)
+{
+	struct device *dev = s->private;
+	struct ptp_system_timestamp sts;
+	u32 sma_in, sma_out, ctrl, val;
+	struct ts_reg __iomem *ts_reg;
+	struct timespec64 ts;
+	struct ptp_ocp *bp;
+	const char *src;
+	bool on, map;
+	char *buf;
+
+	buf = (char *)__get_free_page(GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	bp = dev_get_drvdata(dev);
+	sma_in = ioread32(&bp->sma->gpio1);
+	sma_out = ioread32(&bp->sma->gpio2);
+
+	seq_printf(s, "%7s: /dev/ptp%d\n", "PTP", ptp_clock_index(bp->ptp));
+
+	sma1_show(dev, NULL, buf);
+	seq_printf(s, "   sma1: %s", buf);
+
+	sma2_show(dev, NULL, buf);
+	seq_printf(s, "   sma2: %s", buf);
+
+	sma3_show(dev, NULL, buf);
+	seq_printf(s, "   sma3: %s", buf);
+
+	sma4_show(dev, NULL, buf);
+	seq_printf(s, "   sma4: %s", buf);
+
+	if (bp->ts0) {
+		ts_reg = bp->ts0->mem;
+		on = ioread32(&ts_reg->enable);
+		src = "GNSS";
+		seq_printf(s, "%7s: %s, src: %s\n", "TS0",
+			   on ? " ON" : "OFF", src);
+	}
+
+	if (bp->ts1) {
+		ts_reg = bp->ts1->mem;
+		on = ioread32(&ts_reg->enable);
+		src = gpio_map(sma_in, 2, "sma1", "sma2", "----");
+		seq_printf(s, "%7s: %s, src: %s\n", "TS1",
+			   on ? " ON" : "OFF", src);
+	}
+
+	if (bp->ts2) {
+		ts_reg = bp->ts2->mem;
+		on = ioread32(&ts_reg->enable);
+		src = gpio_map(sma_in, 3, "sma1", "sma2", "----");
+		seq_printf(s, "%7s: %s, src: %s\n", "TS2",
+			   on ? " ON" : "OFF", src);
+	}
+
+	if (bp->pps) {
+		ts_reg = bp->pps->mem;
+		src = "PHC";
+		on = ioread32(&ts_reg->enable);
+		map = !!(bp->pps_req_map & OCP_REQ_TIMESTAMP);
+		seq_printf(s, "%7s: %s, src: %s\n", "TS3",
+			   on & map ? " ON" : "OFF", src);
+
+		map = !!(bp->pps_req_map & OCP_REQ_PPS);
+		seq_printf(s, "%7s: %s, src: %s\n", "PPS",
+			   on & map ? " ON" : "OFF", src);
+	}
+
+	if (bp->irig_out) {
+		ctrl = ioread32(&bp->irig_out->ctrl);
+		on = ctrl & IRIG_M_CTRL_ENABLE;
+		val = ioread32(&bp->irig_out->status);
+		gpio_multi_map(buf, sma_out, 4, "sma3", "sma4", "----");
+		seq_printf(s, "%7s: %s, error: %d, mode %d, out: %s\n", "IRIG",
+			   on ? " ON" : "OFF", val, (ctrl >> 16), buf);
+	}
+
+	if (bp->irig_in) {
+		on = ioread32(&bp->irig_in->ctrl) & IRIG_S_CTRL_ENABLE;
+		val = ioread32(&bp->irig_in->status);
+		src = gpio_map(sma_in, 4, "sma1", "sma2", "----");
+		seq_printf(s, "%7s: %s, error: %d, src: %s\n", "IRIG in",
+			   on ? " ON" : "OFF", val, src);
+	}
+
+	if (bp->dcf_out) {
+		on = ioread32(&bp->dcf_out->ctrl) & DCF_M_CTRL_ENABLE;
+		val = ioread32(&bp->dcf_out->status);
+		gpio_multi_map(buf, sma_out, 5, "sma3", "sma4", "----");
+		seq_printf(s, "%7s: %s, error: %d, out: %s\n", "DCF",
+			   on ? " ON" : "OFF", val, buf);
+	}
+
+	if (bp->dcf_in) {
+		on = ioread32(&bp->dcf_in->ctrl) & DCF_S_CTRL_ENABLE;
+		val = ioread32(&bp->dcf_in->status);
+		src = gpio_map(sma_in, 5, "sma1", "sma2", "----");
+		seq_printf(s, "%7s: %s, error: %d, src: %s\n", "DCF in",
+			   on ? " ON" : "OFF", val, src);
+	}
+
+	if (bp->nmea_out) {
+		on = ioread32(&bp->nmea_out->ctrl) & 1;
+		val = ioread32(&bp->nmea_out->status);
+		seq_printf(s, "%7s: %s, error: %d\n", "NMEA",
+			   on ? " ON" : "OFF", val);
+	}
+
+	/* compute src for PPS1, used below. */
+	if (bp->pps_select) {
+		val = ioread32(&bp->pps_select->gpio1);
+		if (val & 0x01)
+			src = gpio_map(sma_in, 0, "sma1", "sma2", "----");
+		else if (val & 0x02)
+			src = "MAC";
+		else if (val & 0x04)
+			src = "GNSS";
+		else
+			src = "----";
+	} else {
+		src = "?";
+	}
+
+	/* assumes automatic switchover/selection */
+	val = ioread32(&bp->reg->select);
+	switch (val >> 16) {
+	case 0:
+		sprintf(buf, "----");
+		break;
+	case 2:
+		sprintf(buf, "IRIG");
+		break;
+	case 3:
+		sprintf(buf, "%s via PPS1", src);
+		break;
+	case 6:
+		sprintf(buf, "DCF");
+		break;
+	default:
+		strcpy(buf, "unknown");
+		break;
+	}
+	val = ioread32(&bp->reg->status);
+	seq_printf(s, "%7s: %s, state: %s\n", "PHC src", buf,
+		   val & OCP_STATUS_IN_SYNC ? "sync" : "unsynced");
+
+	/* reuses PPS1 src from earlier */
+	seq_printf(s, "MAC PPS1 src: %s\n", src);
+
+	src = gpio_map(sma_in, 1, "sma1", "sma2", "GNSS2");
+	seq_printf(s, "MAC PPS2 src: %s\n", src);
+
+	if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, &sts)) {
+		struct timespec64 sys_ts;
+		s64 pre_ns, post_ns, ns;
+
+		pre_ns = timespec64_to_ns(&sts.pre_ts);
+		post_ns = timespec64_to_ns(&sts.post_ts);
+		ns = (pre_ns + post_ns) / 2;
+		ns += (s64)bp->utc_tai_offset * NSEC_PER_SEC;
+		sys_ts = ns_to_timespec64(ns);
+
+		seq_printf(s, "%7s: %lld.%ld == %ptT TAI\n", "PHC",
+			   ts.tv_sec, ts.tv_nsec, &ts);
+		seq_printf(s, "%7s: %lld.%ld == %ptT UTC offset %d\n", "SYS",
+			   sys_ts.tv_sec, sys_ts.tv_nsec, &sys_ts,
+			   bp->utc_tai_offset);
+		seq_printf(s, "%7s: PHC:SYS offset: %lld  window: %lld\n", "",
+			   timespec64_to_ns(&ts) - ns,
+			   post_ns - pre_ns);
+	}
+
+	free_page((unsigned long)buf);
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ptp_ocp_summary);
+
+static struct dentry *ptp_ocp_debugfs_root;
+
+static void
+ptp_ocp_debugfs_add_device(struct ptp_ocp *bp)
+{
+	struct dentry *d;
+
+	d = debugfs_create_dir(dev_name(&bp->dev), ptp_ocp_debugfs_root);
+	bp->debug_root = d;
+	debugfs_create_file("summary", 0444, bp->debug_root,
+			    &bp->dev, &ptp_ocp_summary_fops);
+}
+
+static void
+ptp_ocp_debugfs_remove_device(struct ptp_ocp *bp)
+{
+	debugfs_remove_recursive(bp->debug_root);
+}
+
+static void
+ptp_ocp_debugfs_init(void)
+{
+	ptp_ocp_debugfs_root = debugfs_create_dir("timecard", NULL);
+}
+
+static void
+ptp_ocp_debugfs_fini(void)
+{
+	debugfs_remove_recursive(ptp_ocp_debugfs_root);
+}
+
 static void
 ptp_ocp_dev_release(struct device *dev)
 {
@@ -1270,7 +2236,9 @@
 	bp->ptp_info = ptp_ocp_clock_info;
 	spin_lock_init(&bp->lock);
 	bp->gnss_port = -1;
+	bp->gnss2_port = -1;
 	bp->mac_port = -1;
+	bp->nmea_port = -1;
 	bp->pdev = pdev;
 
 	device_initialize(&bp->dev);
@@ -1332,10 +2300,18 @@
 		sprintf(buf, "ttyS%d", bp->gnss_port);
 		ptp_ocp_link_child(bp, buf, "ttyGNSS");
 	}
+	if (bp->gnss2_port != -1) {
+		sprintf(buf, "ttyS%d", bp->gnss2_port);
+		ptp_ocp_link_child(bp, buf, "ttyGNSS2");
+	}
 	if (bp->mac_port != -1) {
 		sprintf(buf, "ttyS%d", bp->mac_port);
 		ptp_ocp_link_child(bp, buf, "ttyMAC");
 	}
+	if (bp->nmea_port != -1) {
+		sprintf(buf, "ttyS%d", bp->nmea_port);
+		ptp_ocp_link_child(bp, buf, "ttyNMEA");
+	}
 	sprintf(buf, "ptp%d", ptp_clock_index(bp->ptp));
 	ptp_ocp_link_child(bp, buf, "ptp");
 
@@ -1346,13 +2322,53 @@
 	if (device_add_groups(&bp->dev, timecard_groups))
 		pr_err("device add groups failed\n");
 
+	ptp_ocp_debugfs_add_device(bp);
+
 	return 0;
 }
 
 static void
-ptp_ocp_resource_summary(struct ptp_ocp *bp)
+ptp_ocp_phc_info(struct ptp_ocp *bp)
 {
+	struct timespec64 ts;
+	u32 version, select;
+	bool sync;
+
+	version = ioread32(&bp->reg->version);
+	select = ioread32(&bp->reg->select);
+	dev_info(&bp->pdev->dev, "Version %d.%d.%d, clock %s, device ptp%d\n",
+		 version >> 24, (version >> 16) & 0xff, version & 0xffff,
+		 ptp_ocp_select_name_from_val(ptp_ocp_clock, select >> 16),
+		 ptp_clock_index(bp->ptp));
+
+	sync = ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC;
+	if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, NULL))
+		dev_info(&bp->pdev->dev, "Time: %lld.%ld, %s\n",
+			 ts.tv_sec, ts.tv_nsec,
+			 sync ? "in-sync" : "UNSYNCED");
+}
+
+static void
+ptp_ocp_serial_info(struct device *dev, const char *name, int port, int baud)
+{
+	if (port != -1)
+		dev_info(dev, "%5s: /dev/ttyS%-2d @ %6d\n", name, port, baud);
+}
+
+static void
+ptp_ocp_info(struct ptp_ocp *bp)
+{
+	static int nmea_baud[] = {
+		1200, 2400, 4800, 9600, 19200, 38400,
+		57600, 115200, 230400, 460800, 921600,
+		1000000, 2000000
+	};
 	struct device *dev = &bp->pdev->dev;
+	u32 reg;
+
+	ptp_ocp_phc_info(bp);
+	if (bp->tod)
+		ptp_ocp_tod_info(bp);
 
 	if (bp->image) {
 		u32 ver = ioread32(&bp->image->version);
@@ -1365,10 +2381,17 @@
 			dev_info(dev, "golden image, version %d\n",
 				 ver >> 16);
 	}
-	if (bp->gnss_port != -1)
-		dev_info(dev, "GNSS @ /dev/ttyS%d 115200\n", bp->gnss_port);
-	if (bp->mac_port != -1)
-		dev_info(dev, "MAC @ /dev/ttyS%d   57600\n", bp->mac_port);
+	ptp_ocp_serial_info(dev, "GNSS", bp->gnss_port, 115200);
+	ptp_ocp_serial_info(dev, "GNSS2", bp->gnss2_port, 115200);
+	ptp_ocp_serial_info(dev, "MAC", bp->mac_port, 57600);
+	if (bp->nmea_out && bp->nmea_port != -1) {
+		int baud = -1;
+
+		reg = ioread32(&bp->nmea_out->uart_baud);
+		if (reg < ARRAY_SIZE(nmea_baud))
+			baud = nmea_baud[reg];
+		ptp_ocp_serial_info(dev, "NMEA", bp->nmea_port, baud);
+	}
 }
 
 static void
@@ -1386,6 +2409,7 @@
 static void
 ptp_ocp_detach(struct ptp_ocp *bp)
 {
+	ptp_ocp_debugfs_remove_device(bp);
 	ptp_ocp_detach_sysfs(bp);
 	if (timer_pending(&bp->watchdog))
 		del_timer_sync(&bp->watchdog);
@@ -1393,12 +2417,18 @@
 		ptp_ocp_unregister_ext(bp->ts0);
 	if (bp->ts1)
 		ptp_ocp_unregister_ext(bp->ts1);
+	if (bp->ts2)
+		ptp_ocp_unregister_ext(bp->ts2);
 	if (bp->pps)
 		ptp_ocp_unregister_ext(bp->pps);
 	if (bp->gnss_port != -1)
 		serial8250_unregister_port(bp->gnss_port);
+	if (bp->gnss2_port != -1)
+		serial8250_unregister_port(bp->gnss2_port);
 	if (bp->mac_port != -1)
 		serial8250_unregister_port(bp->mac_port);
+	if (bp->nmea_port != -1)
+		serial8250_unregister_port(bp->nmea_port);
 	if (bp->spi_flash)
 		platform_device_unregister(bp->spi_flash);
 	if (bp->i2c_ctrl)
@@ -1445,7 +2475,7 @@
 	 * allow this - if not all of the IRQ's are returned, skip the
 	 * extra devices and just register the clock.
 	 */
-	err = pci_alloc_irq_vectors(pdev, 1, 10, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+	err = pci_alloc_irq_vectors(pdev, 1, 11, PCI_IRQ_MSI | PCI_IRQ_MSIX);
 	if (err < 0) {
 		dev_err(&pdev->dev, "alloc_irq_vectors err: %d\n", err);
 		goto out;
@@ -1470,7 +2500,6 @@
 		goto out;
 
 	ptp_ocp_info(bp);
-	ptp_ocp_resource_summary(bp);
 
 	return 0;
 
@@ -1554,6 +2583,8 @@
 	const char *what;
 	int err;
 
+	ptp_ocp_debugfs_init();
+
 	what = "timecard class";
 	err = class_register(&timecard_class);
 	if (err)
@@ -1576,6 +2607,7 @@
 out_notifier:
 	class_unregister(&timecard_class);
 out:
+	ptp_ocp_debugfs_fini();
 	pr_err(KBUILD_MODNAME ": failed to register %s: %d\n", what, err);
 	return err;
 }
@@ -1586,6 +2618,7 @@
 	bus_unregister_notifier(&i2c_bus_type, &ptp_ocp_i2c_notifier);
 	pci_unregister_driver(&ptp_ocp_driver);
 	class_unregister(&timecard_class);
+	ptp_ocp_debugfs_fini();
 }
 
 module_init(ptp_ocp_init);
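
The debugfs additions above expose a per-card "summary" file next to the existing sysfs attributes. A tiny userspace sketch for dumping it follows; the ocp0 index and the /sys/kernel/debug mount point are assumptions for illustration, not something the patch guarantees.

/* Hypothetical helper: print the new timecard debugfs summary.
 * Assumes debugfs is mounted at /sys/kernel/debug and the first
 * registered card shows up as ocp0.
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/timecard/ocp0/summary", "r");

	if (!f) {
		perror("summary");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
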
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 06281a0..de2423c 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -182,7 +182,7 @@
 static void ctcmpc_chx_resend(fsm_instance *, int, void *);
 static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
 
-/**
+/*
  * Check return code of a preceding ccw_device call, halt_IO etc...
  *
  * ch	:	The channel, the error belongs to.
@@ -223,7 +223,7 @@
 	}
 }
 
-/**
+/*
  * NOP action for statemachines
  */
 static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
@@ -234,7 +234,7 @@
  * Actions for channel - statemachines.
  */
 
-/**
+/*
  * Normal data has been sent. Free the corresponding
  * skb (it's in io_queue), reset dev->tbusy and
  * revert to idle state.
@@ -322,7 +322,7 @@
 	ctcm_clear_busy_do(dev);
 }
 
-/**
+/*
  * Initial data is sent.
  * Notify device statemachine that we are up and
  * running.
@@ -344,7 +344,7 @@
 	fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
 }
 
-/**
+/*
  * Got normal data, check for sanity, queue it up, allocate new buffer
  * trigger bottom half, and initiate next read.
  *
@@ -421,7 +421,7 @@
 		ctcm_ccw_check_rc(ch, rc, "normal RX");
 }
 
-/**
+/*
  * Initialize connection by sending a __u16 of value 0.
  *
  * fi		An instance of a channel statemachine.
@@ -497,7 +497,7 @@
 	}
 }
 
-/**
+/*
  * Got initial data, check it. If OK,
  * notify device statemachine that we are up and
  * running.
@@ -538,7 +538,7 @@
 	}
 }
 
-/**
+/*
  * Set channel into extended mode.
  *
  * fi		An instance of a channel statemachine.
@@ -578,7 +578,7 @@
 		ch->retry = 0;
 }
 
-/**
+/*
  * Setup channel.
  *
  * fi		An instance of a channel statemachine.
@@ -641,7 +641,7 @@
 	}
 }
 
-/**
+/*
  * Shutdown a channel.
  *
  * fi		An instance of a channel statemachine.
@@ -682,7 +682,7 @@
 	}
 }
 
-/**
+/*
  * Cleanup helper for chx_fail and chx_stopped
  * cleanup channels queue and notify interface statemachine.
  *
@@ -728,7 +728,7 @@
 	}
 }
 
-/**
+/*
  * A channel has successfully been halted.
  * Clean up its queue and notify interface statemachine.
  *
@@ -741,7 +741,7 @@
 	ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
 }
 
-/**
+/*
  * A stop command from device statemachine arrived and we are in
  * not operational mode. Set state to stopped.
  *
@@ -754,7 +754,7 @@
 	fsm_newstate(fi, CTC_STATE_STOPPED);
 }
 
-/**
+/*
  * A machine check for no path, not operational status or gone device has
  * happened.
  * Cleanup queue and notify interface statemachine.
@@ -768,7 +768,7 @@
 	ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
 }
 
-/**
+/*
  * Handle error during setup of channel.
  *
  * fi		An instance of a channel statemachine.
@@ -817,7 +817,7 @@
 	}
 }
 
-/**
+/*
  * Restart a channel after an error.
  *
  * fi		An instance of a channel statemachine.
@@ -858,7 +858,7 @@
 	}
 }
 
-/**
+/*
  * Handle error during RX initial handshake (exchange of
  * 0-length block header)
  *
@@ -893,7 +893,7 @@
 	}
 }
 
-/**
+/*
  * Notify device statemachine if we gave up initialization
  * of RX channel.
  *
@@ -914,7 +914,7 @@
 	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
 }
 
-/**
+/*
  * Handle RX Unit check remote reset (remote disconnected)
  *
  * fi		An instance of a channel statemachine.
@@ -946,7 +946,7 @@
 	ccw_device_halt(ch2->cdev, 0);
 }
 
-/**
+/*
  * Handle error during TX channel initialization.
  *
  * fi		An instance of a channel statemachine.
@@ -978,7 +978,7 @@
 	}
 }
 
-/**
+/*
  * Handle TX timeout by retrying operation.
  *
  * fi		An instance of a channel statemachine.
@@ -1050,7 +1050,7 @@
 	return;
 }
 
-/**
+/*
  * Handle fatal errors during an I/O command.
  *
  * fi		An instance of a channel statemachine.
@@ -1198,7 +1198,7 @@
  * Actions for mpc channel statemachine.
  */
 
-/**
+/*
  * Normal data has been sent. Free the corresponding
  * skb (it's in io_queue), reset dev->tbusy and
  * revert to idle state.
@@ -1361,7 +1361,7 @@
 	return;
 }
 
-/**
+/*
  * Got normal data, check for sanity, queue it up, allocate new buffer
  * trigger bottom half, and initiate next read.
  *
@@ -1464,7 +1464,7 @@
 
 }
 
-/**
+/*
  * Initialize connection by sending a __u16 of value 0.
  *
  * fi		An instance of a channel statemachine.
@@ -1516,7 +1516,7 @@
 	return;
 }
 
-/**
+/*
  * Got initial data, check it. If OK,
  * notify device statemachine that we are up and
  * running.
@@ -2043,7 +2043,7 @@
  * Actions for interface - statemachine.
  */
 
-/**
+/*
  * Startup channels by sending CTC_EVENT_START to each channel.
  *
  * fi		An instance of an interface statemachine.
@@ -2068,7 +2068,7 @@
 	}
 }
 
-/**
+/*
  * Shutdown channels by sending CTC_EVENT_STOP to each channel.
  *
  * fi		An instance of an interface statemachine.
@@ -2122,7 +2122,7 @@
 			DEV_EVENT_START, dev);
 }
 
-/**
+/*
  * Called from channel statemachine
  * when a channel is up and running.
  *
@@ -2183,7 +2183,7 @@
 	}
 }
 
-/**
+/*
  * Called from device statemachine
  * when a channel has been shutdown.
  *
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index fd70542..5ea7eeb 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -55,7 +55,7 @@
 
 /* Some common global variables */
 
-/**
+/*
  * The root device for ctcm group devices
  */
 static struct device *ctcm_root_dev;
@@ -65,7 +65,7 @@
  */
 struct channel *channels;
 
-/**
+/*
  * Unpack a just received skb and hand it over to
  * upper layers.
  *
@@ -180,7 +180,7 @@
 	}
 }
 
-/**
+/*
  * Release a specific channel in the channel list.
  *
  *  ch		Pointer to channel struct to be released.
@@ -192,7 +192,7 @@
 	fsm_newstate(ch->fsm, CTC_STATE_IDLE);
 }
 
-/**
+/*
  * Remove a specific channel in the channel list.
  *
  *  ch		Pointer to channel struct to be released.
@@ -240,7 +240,7 @@
 			chid, ok ? "OK" : "failed");
 }
 
-/**
+/*
  * Get a specific channel from the channel list.
  *
  *  type	Type of channel we are interested in.
@@ -300,7 +300,7 @@
 }
 
 
-/**
+/*
  * Check sense of a unit check.
  *
  *  ch		The channel, the sense code belongs to.
@@ -414,7 +414,7 @@
  * Interface API for upper network layers
  */
 
-/**
+/*
  * Open an interface.
  * Called from generic network layer when ifconfig up is run.
  *
@@ -432,7 +432,7 @@
 	return 0;
 }
 
-/**
+/*
  * Close an interface.
  * Called from generic network layer when ifconfig down is run.
  *
@@ -451,7 +451,7 @@
 }
 
 
-/**
+/*
  * Transmit a packet.
  * This is a helper function for ctcm_tx().
  *
@@ -822,7 +822,7 @@
 	return rc;
 }
 
-/**
+/*
  * Start transmission of a packet.
  * Called from generic network device layer.
  *
@@ -975,7 +975,7 @@
 }
 
 
-/**
+/*
  * Sets MTU of an interface.
  *
  *  dev		Pointer to interface struct.
@@ -1007,7 +1007,7 @@
 	return 0;
 }
 
-/**
+/*
  * Returns interface statistics of a device.
  *
  *  dev		Pointer to interface struct.
@@ -1144,7 +1144,7 @@
 	return dev;
 }
 
-/**
+/*
  * Main IRQ handler.
  *
  *  cdev	The ccw_device the interrupt is for.
@@ -1257,7 +1257,7 @@
 	.groups = ctcm_attr_groups,
 };
 
-/**
+/*
  * Add ctcm specific attributes.
  * Add ctcm private data.
  *
@@ -1293,7 +1293,7 @@
 	return 0;
 }
 
-/**
+/*
  * Add a new channel to the list of channels.
  * Keeps the channel list sorted.
  *
@@ -1343,7 +1343,7 @@
 	snprintf(ch->id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev->dev));
 	ch->type = type;
 
-	/**
+	/*
 	 * "static" ccws are used in the following way:
 	 *
 	 * ccw[0..2] (Channel program for generic I/O):
@@ -1471,7 +1471,7 @@
 	return type;
 }
 
-/**
+/*
  *
  * Setup an interface.
  *
@@ -1595,7 +1595,7 @@
 	return result;
 }
 
-/**
+/*
  * Shutdown an interface.
  *
  *  cgdev	Device to be shut down.
@@ -1738,7 +1738,7 @@
 	pr_info("CTCM driver initialized\n");
 }
 
-/**
+/*
  * Initialize module.
  * This is called just after the module is loaded.
  *
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index f0436f5..88abfb5 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -1016,7 +1016,7 @@
 	CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
 }
 
-/**
+/*
  * Unpack a just received skb and hand it over to
  * upper layers.
  * special MPC version of unpack_skb.
@@ -1211,7 +1211,7 @@
 			__func__, dev->name, ch, ch->id);
 }
 
-/**
+/*
  * tasklet helper for mpc's skb unpacking.
  *
  * ch		The channel to work on.
@@ -1320,7 +1320,7 @@
  * CTCM_PROTO_MPC only
  */
 
-/**
+/*
  * NOP action for statemachines
  */
 static void mpc_action_nop(fsm_instance *fi, int event, void *arg)
@@ -1426,7 +1426,7 @@
 	}
 }
 
-/**
+/*
  * Handle mpc group  action timeout.
  * MPC Group Station FSM action
  * CTCM_PROTO_MPC only
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index eb07862..98c4864 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * A generic FSM based on fsm used in isdn4linux
  *
  */
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 26cc943..5f7e28d 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -555,7 +555,7 @@
 	if (ret)
 		goto err_disable;
 
-	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 	if (ret)
 		goto err_resource;
 
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 440219b..c18fd48 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -40,18 +40,18 @@
 #error Cannot compile lcs.c without some net devices switched on.
 #endif
 
-/**
+/*
  * initialization string for output
  */
 
 static char version[] __initdata = "LCS driver";
 
-/**
+/*
   * the root device for lcs group devices
   */
 static struct device *lcs_root_dev;
 
-/**
+/*
  * Some prototypes.
  */
 static void lcs_tasklet(unsigned long);
@@ -62,14 +62,14 @@
 #endif /* CONFIG_IP_MULTICAST */
 static int lcs_recovery(void *ptr);
 
-/**
+/*
  * Debug Facility Stuff
  */
 static char debug_buffer[255];
 static debug_info_t *lcs_dbf_setup;
 static debug_info_t *lcs_dbf_trace;
 
-/**
+/*
  *  LCS Debug Facility functions
  */
 static void
@@ -96,7 +96,7 @@
 	return 0;
 }
 
-/**
+/*
  * Allocate io buffers.
  */
 static int
@@ -123,7 +123,7 @@
 	return 0;
 }
 
-/**
+/*
  * Free io buffers.
  */
 static void
@@ -151,7 +151,7 @@
 	lcs_free_channel(channel);
 }
 
-/**
+/*
  * LCS free memory for card and channels.
  */
 static void
@@ -162,7 +162,7 @@
 	kfree(card);
 }
 
-/**
+/*
  * LCS alloc memory for card and channels
  */
 static struct lcs_card *
@@ -402,7 +402,7 @@
         return rc;
 }
 
-/**
+/*
  * Initialize channels,card and state machines.
  */
 static void
@@ -451,7 +451,8 @@
 	spin_unlock_irqrestore(&card->ipm_lock, flags);
 #endif
 }
-/**
+
+/*
  * Cleanup channels,card and state machines.
  */
 static void
@@ -468,7 +469,7 @@
 	lcs_cleanup_channel(&card->read);
 }
 
-/**
+/*
  * Start channel.
  */
 static int
@@ -517,7 +518,7 @@
 }
 
 
-/**
+/*
  * Stop channel.
  */
 static int
@@ -545,7 +546,7 @@
 	return 0;
 }
 
-/**
+/*
  * start read and write channel
  */
 static int
@@ -565,7 +566,7 @@
 	return rc;
 }
 
-/**
+/*
  * stop read and write channel
  */
 static int
@@ -577,7 +578,7 @@
 	return 0;
 }
 
-/**
+/*
  * Get empty buffer.
  */
 static struct lcs_buffer *
@@ -610,7 +611,7 @@
 	return buffer;
 }
 
-/**
+/*
  * Resume channel program if the channel is suspended.
  */
 static int
@@ -636,7 +637,7 @@
 
 }
 
-/**
+/*
  * Make a buffer ready for processing.
  */
 static void __lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
@@ -678,7 +679,7 @@
 	return rc;
 }
 
-/**
+/*
  * Mark the buffer as processed. Take care of the suspend bit
  * of the previous buffer. This function is called from
  * interrupt context, so the lock must not be taken.
@@ -712,7 +713,7 @@
 	return __lcs_resume_channel(channel);
 }
 
-/**
+/*
  * Put a processed buffer back to state empty.
  */
 static void
@@ -728,7 +729,7 @@
 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
 }
 
-/**
+/*
  * Get buffer for a lan command.
  */
 static struct lcs_buffer *
@@ -785,7 +786,7 @@
 	return reply;
 }
 
-/**
+/*
  * Notifier function for lancmd replies. Called from read irq.
  */
 static void
@@ -813,7 +814,7 @@
 	spin_unlock(&card->lock);
 }
 
-/**
+/*
  * Emit buffer of a lan command.
  */
 static void
@@ -877,7 +878,7 @@
 	return rc ? -EIO : 0;
 }
 
-/**
+/*
  * LCS startup command
  */
 static int
@@ -895,7 +896,7 @@
 	return lcs_send_lancmd(card, buffer, NULL);
 }
 
-/**
+/*
  * LCS shutdown command
  */
 static int
@@ -912,7 +913,7 @@
 	return lcs_send_lancmd(card, buffer, NULL);
 }
 
-/**
+/*
  * LCS lanstat command
  */
 static void
@@ -939,7 +940,7 @@
 	return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb);
 }
 
-/**
+/*
  * send stoplan command
  */
 static int
@@ -958,7 +959,7 @@
 	return lcs_send_lancmd(card, buffer, NULL);
 }
 
-/**
+/*
  * send startlan command
  */
 static void
@@ -986,7 +987,7 @@
 }
 
 #ifdef CONFIG_IP_MULTICAST
-/**
+/*
  * send setipm command (Multicast)
  */
 static int
@@ -1010,7 +1011,7 @@
 	return lcs_send_lancmd(card, buffer, NULL);
 }
 
-/**
+/*
  * send delipm command (Multicast)
  */
 static int
@@ -1034,7 +1035,7 @@
 	return lcs_send_lancmd(card, buffer, NULL);
 }
 
-/**
+/*
  * check if multicast is supported by LCS
  */
 static void
@@ -1074,7 +1075,7 @@
 	return -EOPNOTSUPP;
 }
 
-/**
+/*
  * set or del multicast address on LCS card
  */
 static void
@@ -1129,7 +1130,7 @@
 	spin_unlock_irqrestore(&card->ipm_lock, flags);
 }
 
-/**
+/*
  * get mac address for the relevant Multicast address
  */
 static void
@@ -1139,7 +1140,7 @@
 	ip_eth_mc_map(ipm, mac);
 }
 
-/**
+/*
  * function called by net device to handle multicast address relevant things
  */
 static void lcs_remove_mc_addresses(struct lcs_card *card,
@@ -1260,7 +1261,7 @@
 }
 #endif /* CONFIG_IP_MULTICAST */
 
-/**
+/*
  * function called by net device to
  * handle multicast address relevant things
  */
@@ -1355,7 +1356,7 @@
 		schedule_work(&card->kernel_thread_starter);
 }
 
-/**
+/*
  * IRQ Handler for LCS channels
  */
 static void
@@ -1439,7 +1440,7 @@
 	tasklet_schedule(&channel->irq_tasklet);
 }
 
-/**
+/*
  * Tasklet for IRQ handler
  */
 static void
@@ -1476,7 +1477,7 @@
 	wake_up(&channel->wait_q);
 }
 
-/**
+/*
  * Finish current tx buffer and make it ready for transmit.
  */
 static void
@@ -1490,7 +1491,7 @@
 	card->tx_emitted++;
 }
 
-/**
+/*
  * Callback for finished tx buffers.
  */
 static void
@@ -1515,7 +1516,7 @@
 	spin_unlock(&card->lock);
 }
 
-/**
+/*
  * Packet transmit function called by network stack
  */
 static int
@@ -1593,7 +1594,7 @@
 	return rc;
 }
 
-/**
+/*
  * send startlan and lanstat command to make LCS device ready
  */
 static int
@@ -1648,7 +1649,7 @@
 	return rc;
 }
 
-/**
+/*
  * LCS detect function
  * setup channels and make them I/O ready
  */
@@ -1680,7 +1681,7 @@
 	return rc;
 }
 
-/**
+/*
  * LCS Stop card
  */
 static int
@@ -1705,7 +1706,7 @@
 	return rc;
 }
 
-/**
+/*
  * Kernel Thread helper functions for LGW initiated commands
  */
 static void
@@ -1721,7 +1722,7 @@
 #endif
 }
 
-/**
+/*
  * Process control frames.
  */
 static void
@@ -1748,7 +1749,7 @@
 		lcs_notify_lancmd_waiters(card, cmd);
 }
 
-/**
+/*
  * Unpack network packet.
  */
 static void
@@ -1779,7 +1780,7 @@
 	netif_rx(skb);
 }
 
-/**
+/*
  * LCS main routine to get packets and lancmd replies from the buffers
  */
 static void
@@ -1829,7 +1830,7 @@
 	lcs_ready_buffer(&card->read, buffer);
 }
 
-/**
+/*
  * get network statistics for ifconfig and other user programs
  */
 static struct net_device_stats *
@@ -1842,7 +1843,7 @@
 	return &card->stats;
 }
 
-/**
+/*
  * stop lcs device
  * This function will be called by user doing ifconfig xxx down
  */
@@ -1866,7 +1867,7 @@
 	return rc;
 }
 
-/**
+/*
  * start lcs device and make it runnable
  * This function will be called by user doing ifconfig xxx up
  */
@@ -1892,7 +1893,7 @@
 	return rc;
 }
 
-/**
+/*
  * show function for portno called by cat or similar things
  */
 static ssize_t
@@ -1908,7 +1909,7 @@
         return sprintf(buf, "%d\n", card->portno);
 }
 
-/**
+/*
  * store the value which is piped to file portno
  */
 static ssize_t
@@ -2033,7 +2034,7 @@
 	.groups = lcs_attr_groups,
 };
 
-/**
+/*
  * lcs_probe_device is called on establishing a new ccwgroup_device.
  */
 static int
@@ -2077,7 +2078,7 @@
 	return register_netdev(card->dev);
 }
 
-/**
+/*
  * lcs_new_device will be called by setting the group device online.
  */
 static const struct net_device_ops lcs_netdev_ops = {
@@ -2199,7 +2200,7 @@
 	return -ENODEV;
 }
 
-/**
+/*
  * lcs_shutdown_device, called when setting the group device offline.
  */
 static int
@@ -2240,7 +2241,7 @@
 	return __lcs_shutdown_device(ccwgdev, 0);
 }
 
-/**
+/*
  * drive lcs recovery after startup and startlan initiated by Lan Gateway
  */
 static int
@@ -2271,7 +2272,7 @@
 	return 0;
 }
 
-/**
+/*
  * lcs_remove_device, free buffers and card
  */
 static void
@@ -2315,7 +2316,7 @@
 	.int_class = IRQIO_LCS,
 };
 
-/**
+/*
  * LCS ccwgroup driver registration
  */
 static struct ccwgroup_driver lcs_group_driver = {
@@ -2351,7 +2352,7 @@
 	NULL,
 };
 
-/**
+/*
  *  LCS Module/Kernel initialization function
  */
 static int
@@ -2389,7 +2390,7 @@
 }
 
 
-/**
+/*
  *  LCS module cleanup function
  */
 static void
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 5a0c2f0..981e7b1 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -58,7 +58,7 @@
     ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
 MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
 
-/**
+/*
  * Debug Facility stuff
  */
 #define IUCV_DBF_SETUP_NAME "iucv_setup"
@@ -107,7 +107,7 @@
 		debug_sprintf_event(iucv_dbf_trace, level, text ); \
 	} while (0)
 
-/**
+/*
  * some more debug stuff
  */
 #define PRINTK_HEADER " iucv: "       /* for debugging */
@@ -118,7 +118,7 @@
 	.bus  = &iucv_bus,
 };
 
-/**
+/*
  * Per connection profiling data
  */
 struct connection_profile {
@@ -133,7 +133,7 @@
 	unsigned long tx_max_pending;
 };
 
-/**
+/*
  * Representation of one iucv connection
  */
 struct iucv_connection {
@@ -154,13 +154,13 @@
 	char			  userdata[17];
 };
 
-/**
+/*
  * Linked list of all connection structs.
  */
 static LIST_HEAD(iucv_connection_list);
 static DEFINE_RWLOCK(iucv_connection_rwlock);
 
-/**
+/*
  * Representation of event-data for the
  * connection state machine.
  */
@@ -169,7 +169,7 @@
 	void                   *data;
 };
 
-/**
+/*
  * Private part of the network device structure
  */
 struct netiucv_priv {
@@ -180,7 +180,7 @@
 	struct device           *dev;
 };
 
-/**
+/*
  * Link level header for a packet.
  */
 struct ll_header {
@@ -195,7 +195,7 @@
 #define NETIUCV_QUEUELEN_DEFAULT 50
 #define NETIUCV_TIMEOUT_5SEC     5000
 
-/**
+/*
  * Compatibility macros for busy handling
  * of network devices.
  */
@@ -223,7 +223,7 @@
 	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
 };
 
-/**
+/*
  * Convert an iucv userId to its printable
  * form (strip whitespace at end).
  *
@@ -262,7 +262,7 @@
 		return netiucv_printname(conn->userid, 8);
 }
 
-/**
+/*
  * States of the interface statemachine.
  */
 enum dev_states {
@@ -270,7 +270,7 @@
 	DEV_STATE_STARTWAIT,
 	DEV_STATE_STOPWAIT,
 	DEV_STATE_RUNNING,
-	/**
+	/*
 	 * MUST be always the last element!!
 	 */
 	NR_DEV_STATES
@@ -283,7 +283,7 @@
 	"Running",
 };
 
-/**
+/*
  * Events of the interface statemachine.
  */
 enum dev_events {
@@ -291,7 +291,7 @@
 	DEV_EVENT_STOP,
 	DEV_EVENT_CONUP,
 	DEV_EVENT_CONDOWN,
-	/**
+	/*
 	 * MUST be always the last element!!
 	 */
 	NR_DEV_EVENTS
@@ -304,11 +304,11 @@
 	"Connection down",
 };
 
-/**
+/*
  * Events of the connection statemachine
  */
 enum conn_events {
-	/**
+	/*
 	 * Events, representing callbacks from
 	 * lowlevel iucv layer)
 	 */
@@ -320,23 +320,23 @@
 	CONN_EVENT_RX,
 	CONN_EVENT_TXDONE,
 
-	/**
+	/*
 	 * Events, representing errors return codes from
 	 * calls to lowlevel iucv layer
 	 */
 
-	/**
+	/*
 	 * Event, representing timer expiry.
 	 */
 	CONN_EVENT_TIMER,
 
-	/**
+	/*
 	 * Events, representing commands from upper levels.
 	 */
 	CONN_EVENT_START,
 	CONN_EVENT_STOP,
 
-	/**
+	/*
 	 * MUST be always the last element!!
 	 */
 	NR_CONN_EVENTS,
@@ -357,55 +357,55 @@
 	"Stop",
 };
 
-/**
+/*
  * States of the connection statemachine.
  */
 enum conn_states {
-	/**
+	/*
 	 * Connection not assigned to any device,
 	 * initial state, invalid
 	 */
 	CONN_STATE_INVALID,
 
-	/**
+	/*
 	 * Userid assigned but not operating
 	 */
 	CONN_STATE_STOPPED,
 
-	/**
+	/*
 	 * Connection registered,
 	 * no connection request sent yet,
 	 * no connection request received
 	 */
 	CONN_STATE_STARTWAIT,
 
-	/**
+	/*
 	 * Connection registered and connection request sent,
 	 * no acknowledge and no connection request received yet.
 	 */
 	CONN_STATE_SETUPWAIT,
 
-	/**
+	/*
 	 * Connection up and running idle
 	 */
 	CONN_STATE_IDLE,
 
-	/**
+	/*
 	 * Data sent, awaiting CONN_EVENT_TXDONE
 	 */
 	CONN_STATE_TX,
 
-	/**
+	/*
 	 * Error during registration.
 	 */
 	CONN_STATE_REGERR,
 
-	/**
+	/*
 	 * Error during registration.
 	 */
 	CONN_STATE_CONNERR,
 
-	/**
+	/*
 	 * MUST be always the last element!!
 	 */
 	NR_CONN_STATES,
@@ -424,7 +424,7 @@
 };
 
 
-/**
+/*
  * Debug Facility Stuff
  */
 static debug_info_t *iucv_dbf_setup = NULL;
@@ -556,7 +556,7 @@
 	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
 }
 
-/**
+/*
  * NOP action for statemachines
  */
 static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
@@ -567,7 +567,7 @@
  * Actions of the connection statemachine
  */
 
-/**
+/*
  * netiucv_unpack_skb
  * @conn: The connection where this skb has been received.
  * @pskb: The received skb.
@@ -993,7 +993,7 @@
  * Actions for interface - statemachine.
  */
 
-/**
+/*
  * dev_action_start
  * @fi: An instance of an interface statemachine.
  * @event: The event, just happened.
@@ -1012,7 +1012,7 @@
 	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
 }
 
-/**
+/*
  * Shutdown connection by sending CONN_EVENT_STOP to it.
  *
  * @param fi    An instance of an interface statemachine.
@@ -1034,7 +1034,7 @@
 	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
 }
 
-/**
+/*
  * Called from connection statemachine
  * when a connection is up and running.
  *
@@ -1067,7 +1067,7 @@
 	}
 }
 
-/**
+/*
  * Called from connection statemachine
  * when a connection has been shutdown.
  *
@@ -1107,7 +1107,7 @@
 
 static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
 
-/**
+/*
  * Transmit a packet.
  * This is a helper function for netiucv_tx().
  *
@@ -1144,7 +1144,7 @@
 		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
 	} else {
 		struct sk_buff *nskb = skb;
-		/**
+		/*
 		 * Copy the skb to a new allocated skb in lowmem only if the
 		 * data is located above 2G in memory or tailroom is < 2.
 		 */
@@ -1164,7 +1164,7 @@
 			}
 			copied = 1;
 		}
-		/**
+		/*
 		 * skb now is below 2G and has enough room. Add headers.
 		 */
 		header.next = nskb->len + NETIUCV_HDRLEN;
@@ -1194,7 +1194,7 @@
 			if (copied)
 				dev_kfree_skb(nskb);
 			else {
-				/**
+				/*
 				 * Remove our headers. They get added
 				 * again on retransmit.
 				 */
@@ -1217,7 +1217,7 @@
  * Interface API for upper network layers
  */
 
-/**
+/*
  * Open an interface.
  * Called from generic network layer when ifconfig up is run.
  *
@@ -1233,7 +1233,7 @@
 	return 0;
 }
 
-/**
+/*
  * Close an interface.
  * Called from generic network layer when ifconfig down is run.
  *
@@ -1249,7 +1249,7 @@
 	return 0;
 }
 
-/**
+/*
  * Start transmission of a packet.
  * Called from generic network device layer.
  *
@@ -1266,7 +1266,7 @@
 	int rc;
 
 	IUCV_DBF_TEXT(trace, 4, __func__);
-	/**
+	/*
 	 * Some sanity checks ...
 	 */
 	if (skb == NULL) {
@@ -1282,7 +1282,7 @@
 		return NETDEV_TX_OK;
 	}
 
-	/**
+	/*
 	 * If connection is not running, try to restart it
 	 * and throw away packet.
 	 */
@@ -1304,7 +1304,7 @@
 	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
 }
 
-/**
+/*
  * netiucv_stats
  * @dev: Pointer to interface struct.
  *
@@ -1745,7 +1745,7 @@
 	device_unregister(dev);
 }
 
-/**
+/*
  * Allocate and initialize a new connection structure.
  * Add it to the list of netiucv connections;
  */
@@ -1802,7 +1802,7 @@
 	return NULL;
 }
 
-/**
+/*
  * Release a connection structure and remove it from the
  * list of netiucv connections.
  */
@@ -1826,7 +1826,7 @@
 	kfree_skb(conn->tx_buff);
 }
 
-/**
+/*
  * Release everything of a net device.
  */
 static void netiucv_free_netdevice(struct net_device *dev)
@@ -1848,7 +1848,7 @@
 	}
 }
 
-/**
+/*
  * Initialize a net device. (Called from kernel in alloc_netdev())
  */
 static const struct net_device_ops netiucv_netdev_ops = {
@@ -1873,7 +1873,7 @@
 	dev->netdev_ops		 = &netiucv_netdev_ops;
 }
 
-/**
+/*
  * Allocate and initialize everything of a net device.
  */
 static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 812a4d7..4df0bf0 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -145,12 +145,6 @@
 	unsigned char mac[64][ETH_ALEN];
 };
 
-union qed_filter_type_params {
-	enum qed_filter_rx_mode_type accept_flags;
-	struct qed_filter_ucast_params ucast;
-	struct qed_filter_mcast_params mcast;
-};
-
 enum qed_filter_type {
 	QED_FILTER_TYPE_UCAST,
 	QED_FILTER_TYPE_MCAST,
@@ -158,11 +152,6 @@
 	QED_MAX_FILTER_TYPES,
 };
 
-struct qed_filter_params {
-	enum qed_filter_type type;
-	union qed_filter_type_params filter;
-};
-
 struct qed_tunn_params {
 	u16 vxlan_port;
 	u8 update_vxlan_port;
@@ -314,8 +303,14 @@
 
 	int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle);
 
-	int (*filter_config)(struct qed_dev *cdev,
-			     struct qed_filter_params *params);
+	int (*filter_config_rx_mode)(struct qed_dev *cdev,
+				     enum qed_filter_rx_mode_type type);
+
+	int (*filter_config_ucast)(struct qed_dev *cdev,
+				   struct qed_filter_ucast_params *params);
+
+	int (*filter_config_mcast)(struct qed_dev *cdev,
+				   struct qed_filter_mcast_params *params);
 
 	int (*fastpath_stop)(struct qed_dev *cdev);
 
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 154cf0d..cd89b2d 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -1653,10 +1653,6 @@
 			      const struct devlink_param *param);
 void devlink_params_publish(struct devlink *devlink);
 void devlink_params_unpublish(struct devlink *devlink);
-void devlink_param_publish(struct devlink *devlink,
-			   const struct devlink_param *param);
-void devlink_param_unpublish(struct devlink *devlink,
-			     const struct devlink_param *param);
 int devlink_port_params_register(struct devlink_port *devlink_port,
 				 const struct devlink_param *params,
 				 size_t params_count);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c0069ac..8c2d611 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -308,6 +308,8 @@
 					  struct netlink_ext_ack *extack);
 	void			(*attach)(struct Qdisc *sch);
 	int			(*change_tx_queue_len)(struct Qdisc *, unsigned int);
+	void			(*change_real_num_tx)(struct Qdisc *sch,
+						      unsigned int new_real_tx);
 
 	int			(*dump)(struct Qdisc *, struct sk_buff *);
 	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);
@@ -684,6 +686,8 @@
 void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
 
 int dev_qdisc_change_tx_queue_len(struct net_device *dev);
+void dev_qdisc_change_real_num_tx(struct net_device *dev,
+				  unsigned int new_real_tx);
 void dev_init_scheduler(struct net_device *dev);
 void dev_shutdown(struct net_device *dev);
 void dev_activate(struct net_device *dev);
diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h
index 0f7f87c..b175bd0 100644
--- a/include/uapi/linux/smc.h
+++ b/include/uapi/linux/smc.h
@@ -38,6 +38,9 @@
 #define SMC_GENL_FAMILY_VERSION		1
 
 #define SMC_PCI_ID_STR_LEN		16 /* Max length of pci id string */
+#define SMC_MAX_HOSTNAME_LEN		32 /* Max length of the hostname */
+#define SMC_MAX_UEID			4  /* Max number of user EIDs */
+#define SMC_MAX_EID_LEN			32 /* Max length of an EID */
 
 /* SMC_GENL_FAMILY commands */
 enum {
@@ -49,6 +52,13 @@
 	SMC_NETLINK_GET_DEV_SMCR,
 	SMC_NETLINK_GET_STATS,
 	SMC_NETLINK_GET_FBACK_STATS,
+	SMC_NETLINK_DUMP_UEID,
+	SMC_NETLINK_ADD_UEID,
+	SMC_NETLINK_REMOVE_UEID,
+	SMC_NETLINK_FLUSH_UEID,
+	SMC_NETLINK_DUMP_SEID,
+	SMC_NETLINK_ENABLE_SEID,
+	SMC_NETLINK_DISABLE_SEID,
 };
 
 /* SMC_GENL_FAMILY top level attributes */
@@ -242,4 +252,21 @@
 	__SMC_NLA_FBACK_STATS_MAX,
 	SMC_NLA_FBACK_STATS_MAX = __SMC_NLA_FBACK_STATS_MAX - 1
 };
+
+/* SMC_NETLINK_UEID attributes */
+enum {
+	SMC_NLA_EID_TABLE_UNSPEC,
+	SMC_NLA_EID_TABLE_ENTRY,	/* string */
+	__SMC_NLA_EID_TABLE_MAX,
+	SMC_NLA_EID_TABLE_MAX = __SMC_NLA_EID_TABLE_MAX - 1
+};
+
+/* SMC_NETLINK_SEID attributes */
+enum {
+	SMC_NLA_SEID_UNSPEC,
+	SMC_NLA_SEID_ENTRY,	/* string */
+	SMC_NLA_SEID_ENABLED,	/* u8 */
+	__SMC_NLA_SEID_TABLE_MAX,
+	SMC_NLA_SEID_TABLE_MAX = __SMC_NLA_SEID_TABLE_MAX - 1
+};
 #endif /* _UAPI_LINUX_SMC_H */
diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h
index 0d54bae..5f38be0 100644
--- a/include/uapi/linux/tls.h
+++ b/include/uapi/linux/tls.h
@@ -84,6 +84,20 @@
 #define TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE	16
 #define TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE	8
 
+#define TLS_CIPHER_SM4_GCM				55
+#define TLS_CIPHER_SM4_GCM_IV_SIZE			8
+#define TLS_CIPHER_SM4_GCM_KEY_SIZE		16
+#define TLS_CIPHER_SM4_GCM_SALT_SIZE		4
+#define TLS_CIPHER_SM4_GCM_TAG_SIZE		16
+#define TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE		8
+
+#define TLS_CIPHER_SM4_CCM				56
+#define TLS_CIPHER_SM4_CCM_IV_SIZE			8
+#define TLS_CIPHER_SM4_CCM_KEY_SIZE		16
+#define TLS_CIPHER_SM4_CCM_SALT_SIZE		4
+#define TLS_CIPHER_SM4_CCM_TAG_SIZE		16
+#define TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE		8
+
 #define TLS_SET_RECORD_TYPE	1
 #define TLS_GET_RECORD_TYPE	2
 
@@ -124,6 +138,22 @@
 	unsigned char rec_seq[TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE];
 };
 
+struct tls12_crypto_info_sm4_gcm {
+	struct tls_crypto_info info;
+	unsigned char iv[TLS_CIPHER_SM4_GCM_IV_SIZE];
+	unsigned char key[TLS_CIPHER_SM4_GCM_KEY_SIZE];
+	unsigned char salt[TLS_CIPHER_SM4_GCM_SALT_SIZE];
+	unsigned char rec_seq[TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE];
+};
+
+struct tls12_crypto_info_sm4_ccm {
+	struct tls_crypto_info info;
+	unsigned char iv[TLS_CIPHER_SM4_CCM_IV_SIZE];
+	unsigned char key[TLS_CIPHER_SM4_CCM_KEY_SIZE];
+	unsigned char salt[TLS_CIPHER_SM4_CCM_SALT_SIZE];
+	unsigned char rec_seq[TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE];
+};
+
 enum {
 	TLS_INFO_UNSPEC,
 	TLS_INFO_VERSION,
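
The SM4-GCM/CCM constants and structs above slot into the existing kTLS setsockopt() interface. A minimal userspace sketch follows, assuming `sk` is an established TCP connection and that the key material comes from a handshake negotiated with an SM4 cipher suite; the SOL_TLS fallback define is only needed where libc headers lack it, and error handling is elided.

#include <string.h>
#include <sys/socket.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282		/* matches include/linux/socket.h */
#endif

/* Sketch: enable kernel TLS transmit offload with the new SM4-GCM cipher. */
static int ktls_tx_enable_sm4_gcm(int sk, const unsigned char *key,
				  const unsigned char *iv,
				  const unsigned char *salt,
				  const unsigned char *rec_seq)
{
	struct tls12_crypto_info_sm4_gcm ci = {
		.info.version = TLS_1_3_VERSION,
		.info.cipher_type = TLS_CIPHER_SM4_GCM,
	};

	memcpy(ci.key, key, TLS_CIPHER_SM4_GCM_KEY_SIZE);
	memcpy(ci.iv, iv, TLS_CIPHER_SM4_GCM_IV_SIZE);
	memcpy(ci.salt, salt, TLS_CIPHER_SM4_GCM_SALT_SIZE);
	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);

	if (setsockopt(sk, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;
	return setsockopt(sk, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}
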
diff --git a/net/core/dev.c b/net/core/dev.c
index 74fd402..f930329 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2921,6 +2921,8 @@
 		if (dev->num_tc)
 			netif_setup_tc(dev, txq);
 
+		dev_qdisc_change_real_num_tx(dev, txq);
+
 		dev->real_num_tx_queues = txq;
 
 		if (disabling) {
diff --git a/net/core/devlink.c b/net/core/devlink.c
index a856ae4..f30121f 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -10122,54 +10122,6 @@
 EXPORT_SYMBOL_GPL(devlink_params_unpublish);
 
 /**
- * devlink_param_publish - publish one configuration parameter
- *
- * @devlink: devlink
- * @param: one configuration parameter
- *
- * Publish previously registered configuration parameter.
- */
-void devlink_param_publish(struct devlink *devlink,
-			   const struct devlink_param *param)
-{
-	struct devlink_param_item *param_item;
-
-	list_for_each_entry(param_item, &devlink->param_list, list) {
-		if (param_item->param != param || param_item->published)
-			continue;
-		param_item->published = true;
-		devlink_param_notify(devlink, 0, param_item,
-				     DEVLINK_CMD_PARAM_NEW);
-		break;
-	}
-}
-EXPORT_SYMBOL_GPL(devlink_param_publish);
-
-/**
- * devlink_param_unpublish - unpublish one configuration parameter
- *
- * @devlink: devlink
- * @param: one configuration parameter
- *
- * Unpublish previously registered configuration parameter.
- */
-void devlink_param_unpublish(struct devlink *devlink,
-			     const struct devlink_param *param)
-{
-	struct devlink_param_item *param_item;
-
-	list_for_each_entry(param_item, &devlink->param_list, list) {
-		if (param_item->param != param || !param_item->published)
-			continue;
-		param_item->published = false;
-		devlink_param_notify(devlink, 0, param_item,
-				     DEVLINK_CMD_PARAM_DEL);
-		break;
-	}
-}
-EXPORT_SYMBOL_GPL(devlink_param_unpublish);
-
-/**
  *	devlink_port_params_register - register port configuration parameters
  *
  *	@devlink_port: devlink port
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2170bea..7c2ab27 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -134,34 +134,31 @@
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
 
-static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
-				unsigned int align_mask)
+void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 
-	return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
-}
-
-void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
-{
 	fragsz = SKB_DATA_ALIGN(fragsz);
 
-	return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
+	return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
 }
 EXPORT_SYMBOL(__napi_alloc_frag_align);
 
 void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 {
-	struct page_frag_cache *nc;
 	void *data;
 
 	fragsz = SKB_DATA_ALIGN(fragsz);
 	if (in_hardirq() || irqs_disabled()) {
-		nc = this_cpu_ptr(&netdev_alloc_cache);
+		struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
+
 		data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
 	} else {
+		struct napi_alloc_cache *nc;
+
 		local_bh_disable();
-		data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
+		nc = this_cpu_ptr(&napi_alloc_cache);
+		data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
 		local_bh_enable();
 	}
 	return data;
diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c
index f920487..6d928ee 100644
--- a/net/dsa/tag_rtl4_a.c
+++ b/net/dsa/tag_rtl4_a.c
@@ -54,7 +54,7 @@
 	p = (__be16 *)tag;
 	*p = htons(RTL4_A_ETHERTYPE);
 
-	out = (RTL4_A_PROTOCOL_RTL8366RB << RTL4_A_PROTOCOL_SHIFT) | (2 << 8);
+	out = (RTL4_A_PROTOCOL_RTL8366RB << RTL4_A_PROTOCOL_SHIFT);
 	/* The lower bits indicate the port number */
 	out |= BIT(dp->index);
 
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index f2abc31..999e2a6 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -1537,6 +1537,10 @@
 		ret = getter(dev, &eeprom, data);
 		if (ret)
 			break;
+		if (!eeprom.len) {
+			ret = -EIO;
+			break;
+		}
 		if (copy_to_user(userbuf, data, eeprom.len)) {
 			ret = -EFAULT;
 			break;
diff --git a/net/nfc/hci/llc_shdlc.c b/net/nfc/hci/llc_shdlc.c
index aef750d..71e1034 100644
--- a/net/nfc/hci/llc_shdlc.c
+++ b/net/nfc/hci/llc_shdlc.c
@@ -201,8 +201,7 @@
 			del_timer_sync(&shdlc->t2_timer);
 			shdlc->t2_active = false;
 
-			pr_debug
-			    ("All sent frames acked. Stopped T2(retransmit)\n");
+			pr_debug("All sent frames acked. Stopped T2(retransmit)\n");
 		}
 	} else {
 		skb = skb_peek(&shdlc->ack_pending_q);
@@ -211,8 +210,7 @@
 			  msecs_to_jiffies(SHDLC_T2_VALUE_MS));
 		shdlc->t2_active = true;
 
-		pr_debug
-		    ("Start T2(retransmit) for remaining unacked sent frames\n");
+		pr_debug("Start T2(retransmit) for remaining unacked sent frames\n");
 	}
 }
 
@@ -522,12 +520,11 @@
 	unsigned long time_sent;
 
 	if (shdlc->send_q.qlen)
-		pr_debug
-		    ("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n",
-		     shdlc->send_q.qlen, shdlc->ns, shdlc->dnr,
-		     shdlc->rnr == false ? "false" : "true",
-		     shdlc->w - llc_shdlc_w_used(shdlc->ns, shdlc->dnr),
-		     shdlc->ack_pending_q.qlen);
+		pr_debug("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n",
+			 shdlc->send_q.qlen, shdlc->ns, shdlc->dnr,
+			 shdlc->rnr == false ? "false" : "true",
+			 shdlc->w - llc_shdlc_w_used(shdlc->ns, shdlc->dnr),
+			 shdlc->ack_pending_q.qlen);
 
 	while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w &&
 	       (shdlc->rnr == false)) {
@@ -649,8 +646,7 @@
 		llc_shdlc_handle_send_queue(shdlc);
 
 		if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) {
-			pr_debug
-			    ("Handle T1(send ack) elapsed (T1 now inactive)\n");
+			pr_debug("Handle T1(send ack) elapsed (T1 now inactive)\n");
 
 			shdlc->t1_active = false;
 			r = llc_shdlc_send_s_frame(shdlc, S_FRAME_RR,
@@ -660,8 +656,7 @@
 		}
 
 		if (shdlc->t2_active && timer_pending(&shdlc->t2_timer) == 0) {
-			pr_debug
-			    ("Handle T2(retransmit) elapsed (T2 inactive)\n");
+			pr_debug("Handle T2(retransmit) elapsed (T2 inactive)\n");
 
 			shdlc->t2_active = false;
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index a8dd06c..66d2fbe 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1330,6 +1330,15 @@
 	return 0;
 }
 
+void dev_qdisc_change_real_num_tx(struct net_device *dev,
+				  unsigned int new_real_tx)
+{
+	struct Qdisc *qdisc = dev->qdisc;
+
+	if (qdisc->ops->change_real_num_tx)
+		qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
+}
+
 int dev_qdisc_change_tx_queue_len(struct net_device *dev)
 {
 	bool up = dev->flags & IFF_UP;
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index e79f1af..db18d8a 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -125,6 +125,29 @@
 	priv->qdiscs = NULL;
 }
 
+static void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
+{
+#ifdef CONFIG_NET_SCHED
+	struct net_device *dev = qdisc_dev(sch);
+	struct Qdisc *qdisc;
+	unsigned int i;
+
+	for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
+		qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
+		/* Only update the default qdiscs we created,
+		 * qdiscs with handles are always hashed.
+		 */
+		if (qdisc != &noop_qdisc && !qdisc->handle)
+			qdisc_hash_del(qdisc);
+	}
+	for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
+		qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
+		if (qdisc != &noop_qdisc && !qdisc->handle)
+			qdisc_hash_add(qdisc, false);
+	}
+#endif
+}
+
 static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct net_device *dev = qdisc_dev(sch);
@@ -288,6 +311,7 @@
 	.init		= mq_init,
 	.destroy	= mq_destroy,
 	.attach		= mq_attach,
+	.change_real_num_tx = mq_change_real_num_tx,
 	.dump		= mq_dump,
 	.owner		= THIS_MODULE,
 };
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 8766ab5..7f23a92 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -306,6 +306,28 @@
 	priv->qdiscs = NULL;
 }
 
+static void mqprio_change_real_num_tx(struct Qdisc *sch,
+				      unsigned int new_real_tx)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	struct Qdisc *qdisc;
+	unsigned int i;
+
+	for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
+		qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
+		/* Only update the default qdiscs we created,
+		 * qdiscs with handles are always hashed.
+		 */
+		if (qdisc != &noop_qdisc && !qdisc->handle)
+			qdisc_hash_del(qdisc);
+	}
+	for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
+		qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
+		if (qdisc != &noop_qdisc && !qdisc->handle)
+			qdisc_hash_add(qdisc, false);
+	}
+}
+
 static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
 					     unsigned long cl)
 {
@@ -623,6 +645,7 @@
 	.init		= mqprio_init,
 	.destroy	= mqprio_destroy,
 	.attach		= mqprio_attach,
+	.change_real_num_tx = mqprio_change_real_num_tx,
 	.dump		= mqprio_dump,
 	.owner		= THIS_MODULE,
 };
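
For context on how the new op gets exercised: nothing here changes the driver-facing API. Drivers keep calling netif_set_real_num_tx_queues(), and the core now notifies the root mq/mqprio qdisc just before dev->real_num_tx_queues is updated, so default per-queue qdiscs are hashed or unhashed to match the active range. A hypothetical driver fragment, only to show the call path:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* e.g. an ethtool ->set_channels style handler in some driver "foo" */
static int foo_set_tx_channels(struct net_device *dev, unsigned int txq)
{
	ASSERT_RTNL();	/* callers of this path hold the rtnl lock */

	/* Reaches dev_qdisc_change_real_num_tx() and thus mq/mqprio
	 * ->change_real_num_tx() before real_num_tx_queues is updated,
	 * keeping `tc qdisc show` in sync with the active queue count.
	 */
	return netif_set_real_num_tx_queues(dev, txq);
}
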
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index c038efc..f69ef3f 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -829,7 +829,7 @@
 	smc_rmb_sync_sg_for_device(&smc->conn);
 
 	reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
-					   SMC_V1);
+					   SMC_V1, NULL);
 	if (reason_code)
 		goto connect_abort;
 
@@ -883,6 +883,7 @@
 			   struct smc_clc_msg_accept_confirm *aclc,
 			   struct smc_init_info *ini)
 {
+	u8 *eid = NULL;
 	int rc = 0;
 
 	ini->is_smcd = true;
@@ -918,8 +919,15 @@
 	smc_rx_init(smc);
 	smc_tx_init(smc);
 
+	if (aclc->hdr.version > SMC_V1) {
+		struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
+			(struct smc_clc_msg_accept_confirm_v2 *)aclc;
+
+		eid = clc_v2->eid;
+	}
+
 	rc = smc_clc_send_confirm(smc, ini->first_contact_local,
-				  aclc->hdr.version);
+				  aclc->hdr.version, eid);
 	if (rc)
 		goto connect_abort;
 	mutex_unlock(&smc_server_lgr_pending);
@@ -1533,9 +1541,8 @@
 	pclc_smcd = smc_get_clc_msg_smcd(pclc);
 	smc_v2_ext = smc_get_clc_v2_ext(pclc);
 	smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
-	if (!smcd_v2_ext ||
-	    !smc_v2_ext->hdr.flag.seid) { /* no system EID support for SMCD */
-		smc_find_ism_store_rc(SMC_CLC_DECL_NOSEID, ini);
+	if (!smcd_v2_ext) {
+		smc_find_ism_store_rc(SMC_CLC_DECL_NOV2DEXT, ini);
 		goto not_found;
 	}
 
@@ -1555,13 +1562,13 @@
 	}
 	mutex_unlock(&smcd_dev_list.mutex);
 
-	if (ini->ism_dev[0]) {
-		smc_ism_get_system_eid(ini->ism_dev[0], &eid);
-		if (memcmp(eid, smcd_v2_ext->system_eid, SMC_MAX_EID_LEN))
-			goto not_found;
-	} else {
+	if (!ini->ism_dev[0])
 		goto not_found;
-	}
+
+	smc_ism_get_system_eid(&eid);
+	if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext,
+			       smcd_v2_ext->system_eid, eid))
+		goto not_found;
 
 	/* separate - outside the smcd_dev_list.lock */
 	smcd_version = ini->smcd_version;
@@ -1579,6 +1586,7 @@
 	}
 	/* no V2 ISM device could be initialized */
 	ini->smcd_version = smcd_version;	/* restore original value */
+	ini->negotiated_eid[0] = 0;
 
 not_found:
 	ini->smcd_version &= ~SMC_V2;
@@ -1788,7 +1796,8 @@
 
 	/* send SMC Accept CLC message */
 	rc = smc_clc_send_accept(new_smc, ini->first_contact_local,
-				 ini->smcd_version == SMC_V2 ? SMC_V2 : SMC_V1);
+				 ini->smcd_version == SMC_V2 ? SMC_V2 : SMC_V1,
+				 ini->negotiated_eid);
 	if (rc)
 		goto out_unlock;
 
@@ -2662,6 +2671,7 @@
 	proto_unregister(&smc_proto);
 	smc_pnet_exit();
 	smc_nl_exit();
+	smc_clc_exit();
 	unregister_pernet_subsys(&smc_net_stat_ops);
 	unregister_pernet_subsys(&smc_net_ops);
 	rcu_barrier();
diff --git a/net/smc/smc.h b/net/smc/smc.h
index d65e15f..5e7def3 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -29,9 +29,6 @@
 					 * devices
 					 */
 
-#define SMC_MAX_HOSTNAME_LEN	32
-#define SMC_MAX_EID_LEN		32
-
 extern struct proto smc_proto;
 extern struct proto smc_proto6;
 
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index e286daf..4afd9e7 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -26,6 +26,7 @@
 #include "smc_clc.h"
 #include "smc_ib.h"
 #include "smc_ism.h"
+#include "smc_netlink.h"
 
 #define SMCR_CLC_ACCEPT_CONFIRM_LEN 68
 #define SMCD_CLC_ACCEPT_CONFIRM_LEN 48
@@ -39,6 +40,285 @@
 
 static u8 smc_hostname[SMC_MAX_HOSTNAME_LEN];
 
+struct smc_clc_eid_table {
+	rwlock_t lock;
+	struct list_head list;
+	u8 ueid_cnt;
+	u8 seid_enabled;
+};
+
+static struct smc_clc_eid_table smc_clc_eid_table;
+
+struct smc_clc_eid_entry {
+	struct list_head list;
+	u8 eid[SMC_MAX_EID_LEN];
+};
+
+/* The size of a user EID is 32 characters.
+ * Valid characters should be (single-byte character set) A-Z, 0-9, '.' and '-'.
+ * Blanks should only be used to pad to the expected size.
+ * First character must be alphanumeric.
+ */
+static bool smc_clc_ueid_valid(char *ueid)
+{
+	char *end = ueid + SMC_MAX_EID_LEN;
+
+	while (--end >= ueid && isspace(*end))
+		;
+	if (end < ueid)
+		return false;
+	if (!isalnum(*ueid) || islower(*ueid))
+		return false;
+	while (ueid <= end) {
+		if ((!isalnum(*ueid) || islower(*ueid)) && *ueid != '.' &&
+		    *ueid != '-')
+			return false;
+		ueid++;
+	}
+	return true;
+}
+
+static int smc_clc_ueid_add(char *ueid)
+{
+	struct smc_clc_eid_entry *new_ueid, *tmp_ueid;
+	int rc;
+
+	if (!smc_clc_ueid_valid(ueid))
+		return -EINVAL;
+
+	/* add a new ueid entry to the ueid table if there isn't one */
+	new_ueid = kzalloc(sizeof(*new_ueid), GFP_KERNEL);
+	if (!new_ueid)
+		return -ENOMEM;
+	memcpy(new_ueid->eid, ueid, SMC_MAX_EID_LEN);
+
+	write_lock(&smc_clc_eid_table.lock);
+	if (smc_clc_eid_table.ueid_cnt >= SMC_MAX_UEID) {
+		rc = -ERANGE;
+		goto err_out;
+	}
+	list_for_each_entry(tmp_ueid, &smc_clc_eid_table.list, list) {
+		if (!memcmp(tmp_ueid->eid, ueid, SMC_MAX_EID_LEN)) {
+			rc = -EEXIST;
+			goto err_out;
+		}
+	}
+	list_add_tail(&new_ueid->list, &smc_clc_eid_table.list);
+	smc_clc_eid_table.ueid_cnt++;
+	write_unlock(&smc_clc_eid_table.lock);
+	return 0;
+
+err_out:
+	write_unlock(&smc_clc_eid_table.lock);
+	kfree(new_ueid);
+	return rc;
+}
+
+int smc_nl_add_ueid(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr *nla_ueid = info->attrs[SMC_NLA_EID_TABLE_ENTRY];
+	char *ueid;
+
+	if (!nla_ueid || nla_len(nla_ueid) != SMC_MAX_EID_LEN + 1)
+		return -EINVAL;
+	ueid = (char *)nla_data(nla_ueid);
+
+	return smc_clc_ueid_add(ueid);
+}
+
+/* remove one or all ueid entries from the table */
+static int smc_clc_ueid_remove(char *ueid)
+{
+	struct smc_clc_eid_entry *lst_ueid, *tmp_ueid;
+	int rc = -ENOENT;
+
+	/* remove table entry */
+	write_lock(&smc_clc_eid_table.lock);
+	list_for_each_entry_safe(lst_ueid, tmp_ueid, &smc_clc_eid_table.list,
+				 list) {
+		if (!ueid || !memcmp(lst_ueid->eid, ueid, SMC_MAX_EID_LEN)) {
+			list_del(&lst_ueid->list);
+			smc_clc_eid_table.ueid_cnt--;
+			kfree(lst_ueid);
+			rc = 0;
+		}
+	}
+	if (!rc && !smc_clc_eid_table.ueid_cnt) {
+		smc_clc_eid_table.seid_enabled = 1;
+		rc = -EAGAIN;	/* indicate success and enabling of seid */
+	}
+	write_unlock(&smc_clc_eid_table.lock);
+	return rc;
+}
+
+int smc_nl_remove_ueid(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr *nla_ueid = info->attrs[SMC_NLA_EID_TABLE_ENTRY];
+	char *ueid;
+
+	if (!nla_ueid || nla_len(nla_ueid) != SMC_MAX_EID_LEN + 1)
+		return -EINVAL;
+	ueid = (char *)nla_data(nla_ueid);
+
+	return smc_clc_ueid_remove(ueid);
+}
+
+int smc_nl_flush_ueid(struct sk_buff *skb, struct genl_info *info)
+{
+	smc_clc_ueid_remove(NULL);
+	return 0;
+}
+
+static int smc_nl_ueid_dumpinfo(struct sk_buff *skb, u32 portid, u32 seq,
+				u32 flags, char *ueid)
+{
+	char ueid_str[SMC_MAX_EID_LEN + 1];
+	void *hdr;
+
+	hdr = genlmsg_put(skb, portid, seq, &smc_gen_nl_family,
+			  flags, SMC_NETLINK_DUMP_UEID);
+	if (!hdr)
+		return -ENOMEM;
+	snprintf(ueid_str, sizeof(ueid_str), "%s", ueid);
+	if (nla_put_string(skb, SMC_NLA_EID_TABLE_ENTRY, ueid_str)) {
+		genlmsg_cancel(skb, hdr);
+		return -EMSGSIZE;
+	}
+	genlmsg_end(skb, hdr);
+	return 0;
+}
+
+static int _smc_nl_ueid_dump(struct sk_buff *skb, u32 portid, u32 seq,
+			     int start_idx)
+{
+	struct smc_clc_eid_entry *lst_ueid;
+	int idx = 0;
+
+	read_lock(&smc_clc_eid_table.lock);
+	list_for_each_entry(lst_ueid, &smc_clc_eid_table.list, list) {
+		if (idx++ < start_idx)
+			continue;
+		if (smc_nl_ueid_dumpinfo(skb, portid, seq, NLM_F_MULTI,
+					 lst_ueid->eid)) {
+			--idx;
+			break;
+		}
+	}
+	read_unlock(&smc_clc_eid_table.lock);
+	return idx;
+}
+
+int smc_nl_dump_ueid(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
+	int idx;
+
+	idx = _smc_nl_ueid_dump(skb, NETLINK_CB(cb->skb).portid,
+				cb->nlh->nlmsg_seq, cb_ctx->pos[0]);
+
+	cb_ctx->pos[0] = idx;
+	return skb->len;
+}
+
+int smc_nl_dump_seid(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
+	char seid_str[SMC_MAX_EID_LEN + 1];
+	u8 seid_enabled;
+	void *hdr;
+	u8 *seid;
+
+	if (cb_ctx->pos[0])
+		return skb->len;
+
+	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+			  &smc_gen_nl_family, NLM_F_MULTI,
+			  SMC_NETLINK_DUMP_SEID);
+	if (!hdr)
+		return -ENOMEM;
+	if (!smc_ism_is_v2_capable())
+		goto end;
+
+	smc_ism_get_system_eid(&seid);
+	snprintf(seid_str, sizeof(seid_str), "%s", seid);
+	if (nla_put_string(skb, SMC_NLA_SEID_ENTRY, seid_str))
+		goto err;
+	read_lock(&smc_clc_eid_table.lock);
+	seid_enabled = smc_clc_eid_table.seid_enabled;
+	read_unlock(&smc_clc_eid_table.lock);
+	if (nla_put_u8(skb, SMC_NLA_SEID_ENABLED, seid_enabled))
+		goto err;
+end:
+	genlmsg_end(skb, hdr);
+	cb_ctx->pos[0]++;
+	return skb->len;
+err:
+	genlmsg_cancel(skb, hdr);
+	return -EMSGSIZE;
+}
+
+int smc_nl_enable_seid(struct sk_buff *skb, struct genl_info *info)
+{
+	write_lock(&smc_clc_eid_table.lock);
+	smc_clc_eid_table.seid_enabled = 1;
+	write_unlock(&smc_clc_eid_table.lock);
+	return 0;
+}
+
+int smc_nl_disable_seid(struct sk_buff *skb, struct genl_info *info)
+{
+	int rc = 0;
+
+	write_lock(&smc_clc_eid_table.lock);
+	if (!smc_clc_eid_table.ueid_cnt)
+		rc = -ENOENT;
+	else
+		smc_clc_eid_table.seid_enabled = 0;
+	write_unlock(&smc_clc_eid_table.lock);
+	return rc;
+}
+
+static bool _smc_clc_match_ueid(u8 *peer_ueid)
+{
+	struct smc_clc_eid_entry *tmp_ueid;
+
+	list_for_each_entry(tmp_ueid, &smc_clc_eid_table.list, list) {
+		if (!memcmp(tmp_ueid->eid, peer_ueid, SMC_MAX_EID_LEN))
+			return true;
+	}
+	return false;
+}
+
+bool smc_clc_match_eid(u8 *negotiated_eid,
+		       struct smc_clc_v2_extension *smc_v2_ext,
+		       u8 *peer_eid, u8 *local_eid)
+{
+	bool match = false;
+	int i;
+
+	negotiated_eid[0] = 0;
+	read_lock(&smc_clc_eid_table.lock);
+	if (smc_clc_eid_table.seid_enabled &&
+	    smc_v2_ext->hdr.flag.seid &&
+	    !memcmp(peer_eid, local_eid, SMC_MAX_EID_LEN)) {
+		memcpy(negotiated_eid, peer_eid, SMC_MAX_EID_LEN);
+		match = true;
+		goto out;
+	}
+
+	for (i = 0; i < smc_v2_ext->hdr.eid_cnt; i++) {
+		if (_smc_clc_match_ueid(smc_v2_ext->user_eids[i])) {
+			memcpy(negotiated_eid, smc_v2_ext->user_eids[i],
+			       SMC_MAX_EID_LEN);
+			match = true;
+			goto out;
+		}
+	}
+out:
+	read_unlock(&smc_clc_eid_table.lock);
+	return match;
+}
+
 /* check arriving CLC proposal */
 static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc)
 {
@@ -550,6 +830,7 @@
 	if (ini->smc_type_v2 == SMC_TYPE_N) {
 		pclc_smcd->v2_ext_offset = 0;
 	} else {
+		struct smc_clc_eid_entry *ueident;
 		u16 v2_ext_offset;
 		u8 *eid = NULL;
 
@@ -560,19 +841,25 @@
 						pclc_prfx->ipv6_prefixes_cnt *
 						sizeof(ipv6_prfx[0]);
 		pclc_smcd->v2_ext_offset = htons(v2_ext_offset);
-		v2_ext->hdr.eid_cnt = 0;
+
+		read_lock(&smc_clc_eid_table.lock);
+		v2_ext->hdr.eid_cnt = smc_clc_eid_table.ueid_cnt;
+		plen += smc_clc_eid_table.ueid_cnt * SMC_MAX_EID_LEN;
+		i = 0;
+		list_for_each_entry(ueident, &smc_clc_eid_table.list, list) {
+			memcpy(v2_ext->user_eids[i++], ueident->eid,
+			       sizeof(ueident->eid));
+		}
+		v2_ext->hdr.flag.seid = smc_clc_eid_table.seid_enabled;
+		read_unlock(&smc_clc_eid_table.lock);
 		v2_ext->hdr.ism_gid_cnt = ini->ism_offered_cnt;
 		v2_ext->hdr.flag.release = SMC_RELEASE;
-		v2_ext->hdr.flag.seid = 1;
 		v2_ext->hdr.smcd_v2_ext_offset = htons(sizeof(*v2_ext) -
 				offsetofend(struct smc_clnt_opts_area_hdr,
 					    smcd_v2_ext_offset) +
 				v2_ext->hdr.eid_cnt * SMC_MAX_EID_LEN);
-		if (ini->ism_dev[0])
-			smc_ism_get_system_eid(ini->ism_dev[0], &eid);
-		else
-			smc_ism_get_system_eid(ini->ism_dev[1], &eid);
-		if (eid)
+		smc_ism_get_system_eid(&eid);
+		if (eid && v2_ext->hdr.flag.seid)
 			memcpy(smcd_v2_ext->system_eid, eid, SMC_MAX_EID_LEN);
 		plen += sizeof(*v2_ext) + sizeof(*smcd_v2_ext);
 		if (ini->ism_offered_cnt) {
@@ -607,7 +894,8 @@
 	}
 	if (ini->smc_type_v2 != SMC_TYPE_N) {
 		vec[i].iov_base = v2_ext;
-		vec[i++].iov_len = sizeof(*v2_ext);
+		vec[i++].iov_len = sizeof(*v2_ext) +
+				   (v2_ext->hdr.eid_cnt * SMC_MAX_EID_LEN);
 		vec[i].iov_base = smcd_v2_ext;
 		vec[i++].iov_len = sizeof(*smcd_v2_ext);
 		if (ini->ism_offered_cnt) {
@@ -635,7 +923,8 @@
 /* build and send CLC CONFIRM / ACCEPT message */
 static int smc_clc_send_confirm_accept(struct smc_sock *smc,
 				       struct smc_clc_msg_accept_confirm_v2 *clc_v2,
-				       int first_contact, u8 version)
+				       int first_contact, u8 version,
+				       u8 *eid)
 {
 	struct smc_connection *conn = &smc->conn;
 	struct smc_clc_msg_accept_confirm *clc;
@@ -663,11 +952,8 @@
 		if (version == SMC_V1) {
 			clc->hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
 		} else {
-			u8 *eid = NULL;
-
 			clc_v2->chid = htons(smc_ism_get_chid(conn->lgr->smcd));
-			smc_ism_get_system_eid(conn->lgr->smcd, &eid);
-			if (eid)
+			if (eid[0])
 				memcpy(clc_v2->eid, eid, SMC_MAX_EID_LEN);
 			len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2;
 			if (first_contact)
@@ -732,7 +1018,7 @@
 
 /* send CLC CONFIRM message across internal TCP socket */
 int smc_clc_send_confirm(struct smc_sock *smc, bool clnt_first_contact,
-			 u8 version)
+			 u8 version, u8 *eid)
 {
 	struct smc_clc_msg_accept_confirm_v2 cclc_v2;
 	int reason_code = 0;
@@ -742,7 +1028,7 @@
 	memset(&cclc_v2, 0, sizeof(cclc_v2));
 	cclc_v2.hdr.type = SMC_CLC_CONFIRM;
 	len = smc_clc_send_confirm_accept(smc, &cclc_v2, clnt_first_contact,
-					  version);
+					  version, eid);
 	if (len < ntohs(cclc_v2.hdr.length)) {
 		if (len >= 0) {
 			reason_code = -ENETUNREACH;
@@ -757,7 +1043,7 @@
 
 /* send CLC ACCEPT message across internal TCP socket */
 int smc_clc_send_accept(struct smc_sock *new_smc, bool srv_first_contact,
-			u8 version)
+			u8 version, u8 *negotiated_eid)
 {
 	struct smc_clc_msg_accept_confirm_v2 aclc_v2;
 	int len;
@@ -765,7 +1051,7 @@
 	memset(&aclc_v2, 0, sizeof(aclc_v2));
 	aclc_v2.hdr.type = SMC_CLC_ACCEPT;
 	len = smc_clc_send_confirm_accept(new_smc, &aclc_v2, srv_first_contact,
-					  version);
+					  version, negotiated_eid);
 	if (len < ntohs(aclc_v2.hdr.length))
 		len = len >= 0 ? -EPROTO : -new_smc->clcsock->sk->sk_err;
 
@@ -785,4 +1071,14 @@
 	u = utsname();
 	memcpy(smc_hostname, u->nodename,
 	       min_t(size_t, strlen(u->nodename), sizeof(smc_hostname)));
+
+	INIT_LIST_HEAD(&smc_clc_eid_table.list);
+	rwlock_init(&smc_clc_eid_table.lock);
+	smc_clc_eid_table.ueid_cnt = 0;
+	smc_clc_eid_table.seid_enabled = 1;
+}
+
+void smc_clc_exit(void)
+{
+	smc_clc_ueid_remove(NULL);
 }
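
In smc_clc_match_eid() above, a matching system EID (when SEID is enabled locally and offered by the peer) wins over any user-defined EID; otherwise the first EID from the peer's v2 extension that is also present in the local table is taken. Below is a minimal user-space style sketch of that precedence, assuming plain arrays in place of the kernel's rwlock-protected table and SMC_MAX_EID_LEN of 32 bytes; all names in the sketch are illustrative only.

#include <stdbool.h>
#include <string.h>

#define EID_LEN 32	/* SMC_MAX_EID_LEN */

/* illustrative only: mirrors the precedence used by smc_clc_match_eid() */
static bool match_eid(char *negotiated, bool seid_enabled, bool peer_offers_seid,
		      const char *peer_seid, const char *local_seid,
		      const char proposed[][EID_LEN], int n_proposed,
		      const char table[][EID_LEN], int n_table)
{
	int i, j;

	negotiated[0] = 0;
	/* 1) system EID, if enabled on both sides and identical */
	if (seid_enabled && peer_offers_seid &&
	    !memcmp(peer_seid, local_seid, EID_LEN)) {
		memcpy(negotiated, peer_seid, EID_LEN);
		return true;
	}
	/* 2) first proposed user EID that is also configured locally */
	for (i = 0; i < n_proposed; i++)
		for (j = 0; j < n_table; j++)
			if (!memcmp(proposed[i], table[j], EID_LEN)) {
				memcpy(negotiated, proposed[i], EID_LEN);
				return true;
			}
	return false;
}
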
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index 32d37f7..974d01d 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -14,8 +14,10 @@
 #define _SMC_CLC_H
 
 #include <rdma/ib_verbs.h>
+#include <linux/smc.h>
 
 #include "smc.h"
+#include "smc_netlink.h"
 
 #define SMC_CLC_PROPOSAL	0x01
 #define SMC_CLC_ACCEPT		0x02
@@ -158,6 +160,7 @@
 } __aligned(4);
 
 #define SMC_CLC_MAX_V6_PREFIX		8
+#define SMC_CLC_MAX_UEID		8
 
 struct smc_clc_msg_proposal_area {
 	struct smc_clc_msg_proposal		pclc_base;
@@ -165,6 +168,7 @@
 	struct smc_clc_msg_proposal_prefix	pclc_prfx;
 	struct smc_clc_ipv6_prefix	pclc_prfx_ipv6[SMC_CLC_MAX_V6_PREFIX];
 	struct smc_clc_v2_extension		pclc_v2_ext;
+	u8			user_eids[SMC_CLC_MAX_UEID][SMC_MAX_EID_LEN];
 	struct smc_clc_smcd_v2_extension	pclc_smcd_v2_ext;
 	struct smc_clc_smcd_gid_chid		pclc_gidchids[SMC_MAX_ISM_DEVS];
 	struct smc_clc_msg_trail		pclc_trl;
@@ -330,10 +334,21 @@
 int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, u8 version);
 int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini);
 int smc_clc_send_confirm(struct smc_sock *smc, bool clnt_first_contact,
-			 u8 version);
+			 u8 version, u8 *eid);
 int smc_clc_send_accept(struct smc_sock *smc, bool srv_first_contact,
-			u8 version);
+			u8 version, u8 *negotiated_eid);
 void smc_clc_init(void) __init;
+void smc_clc_exit(void);
 void smc_clc_get_hostname(u8 **host);
+bool smc_clc_match_eid(u8 *negotiated_eid,
+		       struct smc_clc_v2_extension *smc_v2_ext,
+		       u8 *peer_eid, u8 *local_eid);
+int smc_nl_dump_ueid(struct sk_buff *skb, struct netlink_callback *cb);
+int smc_nl_add_ueid(struct sk_buff *skb, struct genl_info *info);
+int smc_nl_remove_ueid(struct sk_buff *skb, struct genl_info *info);
+int smc_nl_flush_ueid(struct sk_buff *skb, struct genl_info *info);
+int smc_nl_dump_seid(struct sk_buff *skb, struct netlink_callback *cb);
+int smc_nl_enable_seid(struct sk_buff *skb, struct genl_info *info);
+int smc_nl_disable_seid(struct sk_buff *skb, struct genl_info *info);
 
 #endif
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index af227b6..4d0fcd8 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -223,7 +223,6 @@
 	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
 	char hostname[SMC_MAX_HOSTNAME_LEN + 1];
 	char smc_seid[SMC_MAX_EID_LEN + 1];
-	struct smcd_dev *smcd_dev;
 	struct nlattr *attrs;
 	u8 *seid = NULL;
 	u8 *host = NULL;
@@ -252,13 +251,8 @@
 		if (nla_put_string(skb, SMC_NLA_SYS_LOCAL_HOST, hostname))
 			goto errattr;
 	}
-	mutex_lock(&smcd_dev_list.mutex);
-	smcd_dev = list_first_entry_or_null(&smcd_dev_list.list,
-					    struct smcd_dev, list);
-	if (smcd_dev)
-		smc_ism_get_system_eid(smcd_dev, &seid);
-	mutex_unlock(&smcd_dev_list.mutex);
-	if (seid && smc_ism_is_v2_capable()) {
+	if (smc_ism_is_v2_capable()) {
+		smc_ism_get_system_eid(&seid);
 		memcpy(smc_seid, seid, SMC_MAX_EID_LEN);
 		smc_seid[SMC_MAX_EID_LEN] = 0;
 		if (nla_put_string(skb, SMC_NLA_SYS_SEID, smc_seid))
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index c043ecd..83d30b0 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -310,6 +310,7 @@
 	u8			first_contact_local;
 	unsigned short		vlan_id;
 	u32			rc;
+	u8			negotiated_eid[SMC_MAX_EID_LEN];
 	/* SMC-R */
 	struct smc_clc_msg_local *ib_lcl;
 	struct smc_ib_device	*ib_dev;
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index 9cb2df2..fd28cc4 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -23,6 +23,7 @@
 };
 
 static bool smc_ism_v2_capable;
+static u8 smc_ism_v2_system_eid[SMC_MAX_EID_LEN];
 
 /* Test if an ISM communication is possible - same CPC */
 int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *smcd)
@@ -42,9 +43,12 @@
 	return rc < 0 ? rc : 0;
 }
 
-void smc_ism_get_system_eid(struct smcd_dev *smcd, u8 **eid)
+void smc_ism_get_system_eid(u8 **eid)
 {
-	smcd->ops->get_system_eid(smcd, eid);
+	if (!smc_ism_v2_capable)
+		*eid = NULL;
+	else
+		*eid = smc_ism_v2_system_eid;
 }
 
 u16 smc_ism_get_chid(struct smcd_dev *smcd)
@@ -435,9 +439,12 @@
 	if (list_empty(&smcd_dev_list.list)) {
 		u8 *system_eid = NULL;
 
-		smc_ism_get_system_eid(smcd, &system_eid);
-		if (system_eid[24] != '0' || system_eid[28] != '0')
+		smcd->ops->get_system_eid(smcd, &system_eid);
+		if (system_eid[24] != '0' || system_eid[28] != '0') {
 			smc_ism_v2_capable = true;
+			memcpy(smc_ism_v2_system_eid, system_eid,
+			       SMC_MAX_EID_LEN);
+		}
 	}
 	/* sort list: devices without pnetid before devices with pnetid */
 	if (smcd->pnetid[0])
@@ -533,4 +540,5 @@
 void __init smc_ism_init(void)
 {
 	smc_ism_v2_capable = false;
+	memset(smc_ism_v2_system_eid, 0, SMC_MAX_EID_LEN);
 }
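
smc_ism_get_system_eid() now serves the EID cached when the first ISM device registered, so callers no longer need a device reference. A hedged caller sketch follows (kernel context; the pr_debug() consumer is illustrative and not part of the patch):

	u8 *seid = NULL;

	smc_ism_get_system_eid(&seid);	/* NULL unless SMC-Dv2 capable */
	if (seid)
		pr_debug("system EID: %.*s\n", SMC_MAX_EID_LEN, seid);
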
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h
index 113efc7..004b22a 100644
--- a/net/smc/smc_ism.h
+++ b/net/smc/smc_ism.h
@@ -48,7 +48,7 @@
 int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos,
 		  void *data, size_t len);
 int smc_ism_signal_shutdown(struct smc_link_group *lgr);
-void smc_ism_get_system_eid(struct smcd_dev *dev, u8 **eid);
+void smc_ism_get_system_eid(u8 **eid);
 u16 smc_ism_get_chid(struct smcd_dev *dev);
 bool smc_ism_is_v2_capable(void);
 void smc_ism_init(void);
diff --git a/net/smc/smc_netlink.c b/net/smc/smc_netlink.c
index 6fb6f96..f13ab06 100644
--- a/net/smc/smc_netlink.c
+++ b/net/smc/smc_netlink.c
@@ -19,11 +19,19 @@
 #include "smc_core.h"
 #include "smc_ism.h"
 #include "smc_ib.h"
+#include "smc_clc.h"
 #include "smc_stats.h"
 #include "smc_netlink.h"
 
-#define SMC_CMD_MAX_ATTR 1
+const struct nla_policy
+smc_gen_ueid_policy[SMC_NLA_EID_TABLE_MAX + 1] = {
+	[SMC_NLA_EID_TABLE_UNSPEC]	= { .type = NLA_UNSPEC },
+	[SMC_NLA_EID_TABLE_ENTRY]	= { .type = NLA_STRING,
+					    .len = SMC_MAX_EID_LEN,
+					  },
+};
 
+#define SMC_CMD_MAX_ATTR 1
 /* SMC_GENL generic netlink operation definition */
 static const struct genl_ops smc_gen_nl_ops[] = {
 	{
@@ -66,6 +74,43 @@
 		/* can be retrieved by unprivileged users */
 		.dumpit = smc_nl_get_fback_stats,
 	},
+	{
+		.cmd = SMC_NETLINK_DUMP_UEID,
+		/* can be retrieved by unprivileged users */
+		.dumpit = smc_nl_dump_ueid,
+	},
+	{
+		.cmd = SMC_NETLINK_ADD_UEID,
+		.flags = GENL_ADMIN_PERM,
+		.doit = smc_nl_add_ueid,
+		.policy = smc_gen_ueid_policy,
+	},
+	{
+		.cmd = SMC_NETLINK_REMOVE_UEID,
+		.flags = GENL_ADMIN_PERM,
+		.doit = smc_nl_remove_ueid,
+		.policy = smc_gen_ueid_policy,
+	},
+	{
+		.cmd = SMC_NETLINK_FLUSH_UEID,
+		.flags = GENL_ADMIN_PERM,
+		.doit = smc_nl_flush_ueid,
+	},
+	{
+		.cmd = SMC_NETLINK_DUMP_SEID,
+		/* can be retrieved by unprivileged users */
+		.dumpit = smc_nl_dump_seid,
+	},
+	{
+		.cmd = SMC_NETLINK_ENABLE_SEID,
+		.flags = GENL_ADMIN_PERM,
+		.doit = smc_nl_enable_seid,
+	},
+	{
+		.cmd = SMC_NETLINK_DISABLE_SEID,
+		.flags = GENL_ADMIN_PERM,
+		.doit = smc_nl_disable_seid,
+	},
 };
 
 static const struct nla_policy smc_gen_nl_policy[2] = {
diff --git a/net/smc/smc_netlink.h b/net/smc/smc_netlink.h
index 5ce2c0a..e8c6c3f 100644
--- a/net/smc/smc_netlink.h
+++ b/net/smc/smc_netlink.h
@@ -17,6 +17,8 @@
 
 extern struct genl_family smc_gen_nl_family;
 
+extern const struct nla_policy smc_gen_ueid_policy[];
+
 struct smc_nl_dmp_ctx {
 	int pos[3];
 };
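
The new UEID/SEID commands are intended to be driven from user-space management tooling over the SMC generic netlink family. Below is a minimal libnl-3 sketch for SMC_NETLINK_ADD_UEID, assuming the SMC_GENL_FAMILY_NAME and SMC_GENL_FAMILY_VERSION constants from <linux/smc.h>; error handling and ACK processing are omitted.

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/smc.h>

/* sketch: register one user EID with the running kernel */
static int smc_add_ueid(const char *ueid)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family, rc;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, SMC_GENL_FAMILY_NAME);

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    SMC_NETLINK_ADD_UEID, SMC_GENL_FAMILY_VERSION);
	nla_put_string(msg, SMC_NLA_EID_TABLE_ENTRY, ueid);
	rc = nl_send_auto(sk, msg);

	nlmsg_free(msg);
	nl_socket_free(sk);
	return rc < 0 ? rc : 0;
}
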
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index fde56ff..d44399e 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -421,6 +421,46 @@
 			rc = -EFAULT;
 		break;
 	}
+	case TLS_CIPHER_SM4_GCM: {
+		struct tls12_crypto_info_sm4_gcm *sm4_gcm_info =
+			container_of(crypto_info,
+				struct tls12_crypto_info_sm4_gcm, info);
+
+		if (len != sizeof(*sm4_gcm_info)) {
+			rc = -EINVAL;
+			goto out;
+		}
+		lock_sock(sk);
+		memcpy(sm4_gcm_info->iv,
+		       cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE,
+		       TLS_CIPHER_SM4_GCM_IV_SIZE);
+		memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq,
+		       TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);
+		release_sock(sk);
+		if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info)))
+			rc = -EFAULT;
+		break;
+	}
+	case TLS_CIPHER_SM4_CCM: {
+		struct tls12_crypto_info_sm4_ccm *sm4_ccm_info =
+			container_of(crypto_info,
+				struct tls12_crypto_info_sm4_ccm, info);
+
+		if (len != sizeof(*sm4_ccm_info)) {
+			rc = -EINVAL;
+			goto out;
+		}
+		lock_sock(sk);
+		memcpy(sm4_ccm_info->iv,
+		       cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE,
+		       TLS_CIPHER_SM4_CCM_IV_SIZE);
+		memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq,
+		       TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE);
+		release_sock(sk);
+		if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info)))
+			rc = -EFAULT;
+		break;
+	}
 	default:
 		rc = -EINVAL;
 	}
@@ -524,6 +564,12 @@
 	case TLS_CIPHER_CHACHA20_POLY1305:
 		optsize = sizeof(struct tls12_crypto_info_chacha20_poly1305);
 		break;
+	case TLS_CIPHER_SM4_GCM:
+		optsize = sizeof(struct tls12_crypto_info_sm4_gcm);
+		break;
+	case TLS_CIPHER_SM4_CCM:
+		optsize = sizeof(struct tls12_crypto_info_sm4_ccm);
+		break;
 	default:
 		rc = -EINVAL;
 		goto err_crypto_info;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 4feb95e..989d142 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2424,6 +2424,40 @@
 		cipher_name = "rfc7539(chacha20,poly1305)";
 		break;
 	}
+	case TLS_CIPHER_SM4_GCM: {
+		struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;
+
+		sm4_gcm_info = (void *)crypto_info;
+		nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
+		tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
+		iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
+		iv = sm4_gcm_info->iv;
+		rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
+		rec_seq = sm4_gcm_info->rec_seq;
+		keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
+		key = sm4_gcm_info->key;
+		salt = sm4_gcm_info->salt;
+		salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
+		cipher_name = "gcm(sm4)";
+		break;
+	}
+	case TLS_CIPHER_SM4_CCM: {
+		struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;
+
+		sm4_ccm_info = (void *)crypto_info;
+		nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
+		tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
+		iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
+		iv = sm4_ccm_info->iv;
+		rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
+		rec_seq = sm4_ccm_info->rec_seq;
+		keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
+		key = sm4_ccm_info->key;
+		salt = sm4_ccm_info->salt;
+		salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
+		cipher_name = "ccm(sm4)";
+		break;
+	}
 	default:
 		rc = -EINVAL;
 		goto free_priv;
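
Together, the tls_main.c and tls_sw.c hunks let user space enable SM4-GCM or SM4-CCM kernel TLS the same way as the existing ciphers. A hedged sketch for the transmit path follows, assuming a connected TCP socket and placeholder key material (real values come from an RFC 8998 handshake); error handling is omitted.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282	/* value from the kernel's socket.h */
#endif

/* sketch: switch the TX path of 'sock' to kernel TLS with SM4-GCM */
static int enable_sm4_gcm_tx(int sock, const unsigned char *key,
			     const unsigned char *iv,
			     const unsigned char *salt,
			     const unsigned char *rec_seq)
{
	struct tls12_crypto_info_sm4_gcm ci = {
		.info.version = TLS_1_3_VERSION,
		.info.cipher_type = TLS_CIPHER_SM4_GCM,
	};

	memcpy(ci.key, key, TLS_CIPHER_SM4_GCM_KEY_SIZE);
	memcpy(ci.iv, iv, TLS_CIPHER_SM4_GCM_IV_SIZE);
	memcpy(ci.salt, salt, TLS_CIPHER_SM4_GCM_SALT_SIZE);
	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);

	if (setsockopt(sock, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;
	return setsockopt(sock, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}
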
diff --git a/tools/testing/selftests/drivers/net/netdevsim/ethtool-common.sh b/tools/testing/selftests/drivers/net/netdevsim/ethtool-common.sh
index 7ca1f03..9227440 100644
--- a/tools/testing/selftests/drivers/net/netdevsim/ethtool-common.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/ethtool-common.sh
@@ -50,7 +50,7 @@
 	modprobe netdevsim
     fi
 
-    echo $NSIM_ID > /sys/bus/netdevsim/new_device
+    echo $NSIM_ID $@ > /sys/bus/netdevsim/new_device
     # get new device name
     ls /sys/bus/netdevsim/devices/netdevsim${NSIM_ID}/net/
 }
diff --git a/tools/testing/selftests/drivers/net/netdevsim/tc-mq-visibility.sh b/tools/testing/selftests/drivers/net/netdevsim/tc-mq-visibility.sh
new file mode 100755
index 0000000..fd13c8c
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/netdevsim/tc-mq-visibility.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+
+source ethtool-common.sh
+
+set -o pipefail
+
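+# count child qdiscs under the root (all qdiscs reported, minus the root itself)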
+n_children() {
+    n=$(tc qdisc show dev $NDEV | grep '^qdisc' | wc -l)
+    echo $((n - 1))
+}
+
+tcq() {
+    tc qdisc $1 dev $NDEV ${@:2}
+}
+
+n_child_assert() {
+    n=$(n_children)
+    if [ $n -ne $1 ]; then
+	echo "ERROR ($root): ${@:2}, expected $1 have $n"
+	((num_errors++))
+    else
+	((num_passes++))
+    fi
+}
+
+
+for root in mq mqprio; do
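+    # extra make_netdev args are passed through to netdevsim's new_device (port/queue counts)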
+    NDEV=$(make_netdev 1 4)
+
+    opts=
+    [ $root == "mqprio" ] && opts='hw 0 num_tc 1 map 0 0 0 0  queues 1@0'
+
+    tcq add root handle 100: $root $opts
+    n_child_assert 4 'Init'
+
+    # All defaults
+
+    for n in 3 2 1 2 3 4 1 4; do
+	ethtool -L $NDEV combined $n
+	n_child_assert $n "Change queues to $n while down"
+    done
+
+    ip link set dev $NDEV up
+
+    for n in 3 2 1 2 3 4 1 4; do
+	ethtool -L $NDEV combined $n
+	n_child_assert $n "Change queues to $n while up"
+    done
+
+    # One real one
+    tcq replace parent 100:4 handle 204: pfifo_fast
+    n_child_assert 4 "One real queue"
+
+    ethtool -L $NDEV combined 1
+    n_child_assert 2 "One real queue, one default"
+
+    ethtool -L $NDEV combined 4
+    n_child_assert 4 "One real queue, rest default"
+
+    # Graft some
+    tcq replace parent 100:1 handle 204:
+    n_child_assert 3 "Grafted"
+
+    ethtool -L $NDEV combined 1
+    n_child_assert 1 "Grafted, one"
+
+    cleanup_nsim
+done
+
+if [ $num_errors -eq 0 ]; then
+    echo "PASSED all $((num_passes)) checks"
+    exit 0
+else
+    echo "FAILED $num_errors/$((num_errors+num_passes)) checks"
+    exit 1
+fi