
[RFC,net-next,v3,06/10] net: bridge: mrp: switchdev: Extend switchdev API to offload MRP

Message ID 20200124161828.12206-7-horatiu.vultur@microchip.com
State RFC
Delegated to: David Miller
Series net: bridge: mrp: Add support for Media Redundancy Protocol (MRP)

Commit Message

Horatiu Vultur Jan. 24, 2020, 4:18 p.m. UTC
Extend the switchdev API to add support for MRP. The HW is notified in the
following cases:

SWITCHDEV_OBJ_ID_PORT_MRP: This is used when a port is added/removed from the
  mrp ring.

SWITCHDEV_OBJ_ID_RING_ROLE_MRP: This is used when the role of the node
changes. The currently supported roles are Media Redundancy Manager and Media
  Redundancy Client.

SWITCHDEV_OBJ_ID_RING_TEST_MRP: This is used to start/stop sending
  MRP_Test frames on the mrp ring ports. This is called only on nodes that have
  the role Media Redundancy Manager.

SWITCHDEV_ATTR_ID_MRP_PORT_STATE: This is used when the port's state is
  changed. It can be in blocking/forwarding mode.

SWITCHDEV_ATTR_ID_MRP_PORT_ROLE: This is used when the port's role changes. The
  roles of the port can be primary/secondary. This is required to notify HW
  because the MRP_Test frame contains the field MRP_PortRole that contains this
  information.

SWITCHDEV_ATTR_ID_MRP_RING_STATE: This is used when the ring changes its state
  to open or closed. This is required to notify HW because the MRP_Test frame
  contains the field MRP_InState which contains this information.

Signed-off-by: Horatiu Vultur <horatiu.vultur@microchip.com>
---
 include/net/switchdev.h | 51 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)
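
To make the driver side concrete: below is a minimal, hypothetical sketch of how a switchdev driver could dispatch the three new attributes. The mydrv_* names are invented for illustration and are not part of this patch.

#include <linux/errno.h>
#include <net/switchdev.h>

static int mydrv_attr_set(struct net_device *dev,
			  const struct switchdev_attr *attr)
{
	switch (attr->id) {
#ifdef CONFIG_BRIDGE_MRP
	case SWITCHDEV_ATTR_ID_MRP_PORT_STATE:
		/* Blocking or forwarding, applied to the ring port */
		return mydrv_mrp_port_state(dev, attr->u.mrp_port_state);
	case SWITCHDEV_ATTR_ID_MRP_PORT_ROLE:
		/* Primary or secondary; ends up in the MRP_PortRole field
		 * of the MRP_Test frames generated by the HW
		 */
		return mydrv_mrp_port_role(dev, attr->u.mrp_port_role);
	case SWITCHDEV_ATTR_ID_MRP_RING_STATE:
		/* Open or closed; ends up in the MRP_InState field */
		return mydrv_mrp_ring_state(dev, attr->u.mrp_ring_state);
#endif
	default:
		return -EOPNOTSUPP;
	}
}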

Comments

Andrew Lunn Jan. 25, 2020, 4:35 p.m. UTC | #1
> SWITCHDEV_OBJ_ID_RING_TEST_MRP: This is used to start/stop sending
>   MRP_Test frames on the mrp ring ports. This is called only on nodes that have
>   the role Media Redundancy Manager.

How do you handle the 'headless chicken' scenario? User space tells
the port to start sending MRP_Test frames. It then dies. The hardware
continues sending these messages, and the neighbours think everything
is O.K., but in reality the state machine is dead, and when the ring
breaks, the daemon is not there to fix it?

And it is not just the daemon that could die. The kernel could oops or
deadlock, etc.

For a robust design, it seems like SWITCHDEV_OBJ_ID_RING_TEST_MRP
should mean: start sending MRP_Test frames for the next X seconds, and
then stop. And the request is repeated every X-1 seconds.

     Andrew
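
A rough sketch of this dead-man's-switch semantics on the driver side, assuming a hypothetical per-port structure; mrp_port, test_period_secs and mydrv_mrp_start_test() are invented names, not part of the proposed API:

#include <linux/workqueue.h>

struct mrp_port {
	struct delayed_work test_work;	/* re-arm timer */
	u32 test_period_secs;		/* X in the scheme above */
};

/* Grant the HW another 'test_period_secs' seconds of MRP_Test
 * generation, and re-arm one second before that grant expires. If the
 * daemon or the kernel dies, the work is no longer scheduled, the
 * grant runs out and the HW stops sending MRP_Test frames on its own.
 */
static void mrp_test_rearm(struct work_struct *work)
{
	struct mrp_port *p = container_of(to_delayed_work(work),
					  struct mrp_port, test_work);

	mydrv_mrp_start_test(p, p->test_period_secs);
	schedule_delayed_work(&p->test_work,
			      (p->test_period_secs - 1) * HZ);
}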
Horatiu Vultur Jan. 26, 2020, 1:22 p.m. UTC | #2
The 01/25/2020 17:35, Andrew Lunn wrote:
> > SWITCHDEV_OBJ_ID_RING_TEST_MRP: This is used to start/stop sending
> >   MRP_Test frames on the mrp ring ports. This is called only on nodes that have
> >   the role Media Redundancy Manager.
> 
> How do you handle the 'headless chicken' scenario? User space tells
> the port to start sending MRP_Test frames. It then dies. The hardware
> continues sending these messages, and the neighbours think everything
> is O.K., but in reality the state machine is dead, and when the ring
> breaks, the daemon is not there to fix it?
> 
> And it is not just the daemon that could die. The kernel could oops or
> deadlock, etc.
> 
> For a robust design, it seems like SWITCHDEV_OBJ_ID_RING_TEST_MRP
> should mean: start sending MRP_Test frames for the next X seconds, and
> then stop. And the request is repeated every X-1 seconds.

I totally missed this case, I will update this as you suggest.

> 
>      Andrew
Andrew Lunn Jan. 26, 2020, 3:59 p.m. UTC | #3
On Sun, Jan 26, 2020 at 02:22:13PM +0100, Horatiu Vultur wrote:
> The 01/25/2020 17:35, Andrew Lunn wrote:
> > > SWITCHDEV_OBJ_ID_RING_TEST_MRP: This is used to start/stop sending
> > >   MRP_Test frames on the mrp ring ports. This is called only on nodes that have
> > >   the role Media Redundancy Manager.
> > 
> > How do you handle the 'headless chicken' scenario? User space tells
> > the port to start sending MRP_Test frames. It then dies. The hardware
> > continues sending these messages, and the neighbours think everything
> > is O.K., but in reality the state machine is dead, and when the ring
> > breaks, the daemon is not there to fix it?
> > 
> > And it is not just the daemon that could die. The kernel could oops or
> > deadlock, etc.
> > 
> > For a robust design, it seems like SWITCHDEV_OBJ_ID_RING_TEST_MRP
> > should mean: start sending MRP_Test frames for the next X seconds, and
> > then stop. And the request is repeated every X-1 seconds.
> 
> I totally missed this case, I will update this as you suggest.

Hi Horatiu

What does your hardware actually provide?

Given the design of the protocol, if the hardware decides the OS etc
is dead, it should stop sending MRP_TEST frames and unblock the ports.
It then becomes a 'dumb switch', and for a short time there will be a
broadcast storm. Hopefully one of the other nodes will then take over
the role and block a port.

    Andrew
Allan W. Nielsen Jan. 27, 2020, 11:04 a.m. UTC | #4
On 26.01.2020 16:59, Andrew Lunn wrote:
>On Sun, Jan 26, 2020 at 02:22:13PM +0100, Horatiu Vultur wrote:
>> The 01/25/2020 17:35, Andrew Lunn wrote:
>> > > SWITCHDEV_OBJ_ID_RING_TEST_MRP: This is used to start/stop sending
>> > >   MRP_Test frames on the mrp ring ports. This is called only on nodes that have
>> > >   the role Media Redundancy Manager.
>> >
>> > How do you handle the 'headless chicken' scenario? User space tells
>> > the port to start sending MRP_Test frames. It then dies. The hardware
>> > continues sending these messages, and the neighbours think everything
>> > is O.K., but in reality the state machine is dead, and when the ring
>> > breaks, the daemon is not there to fix it?
I agree, we need to find a solution to this issue.

>> > And it is not just the daemon that could die. The kernel could oops or
>> > deadlock, etc.
>> >
>> > For a robust design, it seems like SWITCHDEV_OBJ_ID_RING_TEST_MRP
>> > should mean: start sending MRP_Test frames for the next X seconds, and
>> > then stop. And the request is repeated every X-1 seconds.
Sounds like a good idea to me.

>> I totally missed this case, I will update this as you suggest.
>
>What does your hardware actually provide?
>
>Given the design of the protocol, if the hardware decides the OS etc
>is dead, it should stop sending MRP_TEST frames and unblock the ports.
>It then becomes a 'dumb switch', and for a short time there will be a
>broadcast storm. Hopefully one of the other nodes will then take over
>the role and block a port.
As far as I know, the only feature HW has to prevent this is a
watchdog timer, which will reset the entire system (not a bad idea if
the kernel has deadlocked).

/Allan
Jürgen Lambrecht Jan. 27, 2020, 11:29 a.m. UTC | #5
On 1/26/20 4:59 PM, Andrew Lunn wrote:
> Given the design of the protocol, if the hardware decides the OS etc
> is dead, it should stop sending MRP_TEST frames and unblock the ports.
> It then becomes a 'dumb switch', and for a short time there will be a
> broadcast storm. Hopefully one of the other nodes will then take over
> the role and block a port.

In my experience a closed loop should never happen. It can make software crash and give other problems.
Another node should first take over before unblocking the ring ports. (If this is possible - I'm only half following this discussion)

What is your opinion?

(FYI:
I made that mistake once doing a proof-of-concept ring design: during testing, when a "broken" Ethernet cable was "fixed" I had for a short time a loop, and then it happened often that that port of the (Marvell 88E6063) switch was blocked.  (To unblock, the only solution was to bring that port down and up again, and then all "lost" packets came out in a burst.)
That problem was caused by flow control (with pause frames), and disabling flow control fixed it, but flow control is on by default as far as I know.
)

Kind regards,

Jürgen
Allan W. Nielsen Jan. 27, 2020, 12:27 p.m. UTC | #6
Hi Jürgen,

On 27.01.2020 12:29, Jürgen Lambrecht wrote:
>On 1/26/20 4:59 PM, Andrew Lunn wrote:
>> Given the design of the protocol, if the hardware decides the OS etc
>> is dead, it should stop sending MRP_TEST frames and unblock the ports.
>> It then becomes a 'dumb switch', and for a short time there will be a
>> broadcast storm. Hopefully one of the other nodes will then take over
>> the role and block a port.

>In my experience a closed loop should never happen. It can make
>software crash and give other problems.  Another node should first
>take over before unblocking the ring ports. (If this is possible - I'm
>only half following this discussion)
>
>What is your opinion?
Having loops in the network is never a good thing - but to be honest, I
think it is more important that we ensure the design can survive and
recover from loops.

With the current design, it will be really hard to avoid loops when the
network boots. MRP will actually start with the ports blocked, but they
will be unblocked in the period from when the bridge is created
until MRP is enabled. If we want to change this (which I'm not too keen
on), then we need to be able to block the ports while the bridge is
down.

And even if we do this, then we cannot guarantee to avoid loops. Let's
assume we have a small ring with just 2 nodes: an MRM and an MRC. Let's
assume the MRM boots first. It will unblock both ports as the ring is
open. Now the MRC boots, closes the ring, and creates a loop.
It will take some time (milliseconds) before the MRM notices this and
blocks one of the ports.

But while we are at this topic, we need to add some functionality to
the user-space application such that it can set the priority of the MRP
frames. We will get that fixed.

>(FYI: I made that mistake once doing a proof-of-concept ring design:
>during testing, when a "broken" Ethernet cable was "fixed" I had for a
>short time a loop, and then it happened often that that port of the
>(Marvell 88E6063) switch was blocked.  (To unblock, the only solution was
>to bring that port down and up again, and then all "lost" packets came
>out in a burst.) That problem was caused by flow control (with pause
>frames), and disabling flow control fixed it, but flow control is
>on by default as far as I know.)
I see. It could be fun to see if what we have proposed so far will work
with such a switch.

/Allan
Jürgen Lambrecht Jan. 27, 2020, 2:39 p.m. UTC | #7
On 1/27/20 1:27 PM, Allan W. Nielsen wrote:
> Hi Jürgen,
>
> On 27.01.2020 12:29, Jürgen Lambrecht wrote:
>> On 1/26/20 4:59 PM, Andrew Lunn wrote:
>>> Given the design of the protocol, if the hardware decides the OS etc
>>> is dead, it should stop sending MRP_TEST frames and unblock the ports.
>>> It then becomes a 'dumb switch', and for a short time there will be a
>>> broadcast storm. Hopefully one of the other nodes will then take over
>>> the role and block a port.
How to fall back can probably be a configuration option in the hardware.
>
>> In my experience a closed loop should never happen. It can make
>> software crash and give other problems.  Another node should first
>> take over before unblocking the ring ports. (If this is possible - I'm
>> only half following this discussion)
>>
>> What is your opinion?
> Having loops in the network is never a good thing - but to be honest, I
> think it is more important that we ensure the design can survive and
> recover from loops.
Indeed
>
> With the current design, it will be really hard to avoid loops when the
> network boots. MRP will actually start with the ports blocked, but they
> will be unblocked in the period from when the bridge is created
> until MRP is enabled. If we want to change this (which I'm not too keen
> on), then we need to be able to block the ports while the bridge is
> down.
Our ring network is part of a bigger network. Loops are really not allowed.
>
> And even if we do this, then we cannot guarantee to avoid loops. Let's
> assume we have a small ring with just 2 nodes: an MRM and an MRC. Let's
> assume the MRM boots first. It will unblock both ports as the ring is
> open. Now the MRC boots, closes the ring, and creates a loop.
> It will take some time (milliseconds) before the MRM notices this and
> blocks one of the ports.

In my view a bring-up and tear-down module is needed. I don't know if it should be part of MRP or not; probably not, so something on top of the mrp daemon.

>
> But while we are at this topic, we need to add some functionality to
> the user-space application such that it can set the priority of the MRP
> frames. We will get that fixed.

Indeed! In my old design I had to give them high priority, or else the loop was wrongly closed under high network load.

I guess you mean the priority in the VLAN header?
I seem to remember someone said the bridge code is VLAN-agnostic.

>
>> (FYI: I made that mistake once doing a proof-of-concept ring design:
>> during testing, when a "broken" Ethernet cable was "fixed" I had for a
>> short time a loop, and then it happened often that that port of the
>> (Marvell 88E6063) switch was blocked.  (To unblock, the only solution was
>> to bring that port down and up again, and then all "lost" packets came
>> out in a burst.) That problem was caused by flow control (with pause
>> frames), and disabling flow control fixed it, but flow control is
>> on by default as far as I know.)
> I see. It could be fun to see if what we have proposed so far will work
> with such a switch.

Depending on the projects I could work on it later this year (or only next year or not..)

Kind regards,

Jürgen

>
> /Allan
>
Jürgen Lambrecht Jan. 27, 2020, 2:41 p.m. UTC | #8
On 1/27/20 12:04 PM, Allan W. Nielsen wrote:
> On 26.01.2020 16:59, Andrew Lunn wrote:
>> On Sun, Jan 26, 2020 at 02:22:13PM +0100, Horatiu Vultur wrote:
>>> The 01/25/2020 17:35, Andrew Lunn wrote:
>>> > > SWITCHDEV_OBJ_ID_RING_TEST_MRP: This is used to start/stop sending
>>> > >   MRP_Test frames on the mrp ring ports. This is called only on nodes that have
>>> > >   the role Media Redundancy Manager.
>>> >
>>> > How do you handle the 'headless chicken' scenario? User space tells
>>> > the port to start sending MRP_Test frames. It then dies. The hardware
>>> > continues sending these messages, and the neighbours think everything
>>> > is O.K., but in reality the state machine is dead, and when the ring
>>> > breaks, the daemon is not there to fix it?
> I agree, we need to find a solution to this issue.
>
>>> > And it is not just the daemon that could die. The kernel could oops or
>>> > deadlock, etc.
>>> >
>>> > For a robust design, it seems like SWITCHDEV_OBJ_ID_RING_TEST_MRP
>>> > should mean: start sending MRP_Test frames for the next X seconds, and
>>> > then stop. And the request is repeated every X-1 seconds.
> Sounds like a good idea to me.

Indeed, and it should then do the same as mentioned below and "...become a 'dumb switch'", except that I propose to make the fallback configurable: either auto-recovery ('dumb switch') or a safe mode that keeps the ports blocked, and then some higher-layer protocol should fix it.

>
>>> I totally missed this case, I will update this as you suggest.
>>
>> What does your hardware actually provide?
>>
>> Given the design of the protocol, if the hardware decides the OS etc
>> is dead, it should stop sending MRP_TEST frames and unblock the ports.
>> If then becomes a 'dumb switch', and for a short time there will be a
>> broadcast storm. Hopefully one of the other nodes will then take over
>> the role and block a port.
> As far as I know, the only feature HW has to prevent this is a
> watchdog timer, which will reset the entire system (not a bad idea if
> the kernel has deadlocked).
Indeed. Our designs always have a watchdog.

And again, I propose to have two boot-up options.

I also refer here to my reply to Allan's answer to my email of 12:29 PM.

Kind regards,

Jürgen

>
> /Allan
>
Andrew Lunn Jan. 27, 2020, 3:06 p.m. UTC | #9
On Mon, Jan 27, 2020 at 03:26:38PM +0100, Jürgen Lambrecht wrote:
> On 1/27/20 12:04 PM, Allan W. Nielsen wrote:
> 
> > How do you handle the 'headless chicken' scenario? User space tells
> > the port to start sending MRP_Test frames. It then dies. The hardware
> 
> Andrew, I am a bit confused here - maybe I missed an email-thread, I'm sorry
> then.
> 
> In previous emails you and others talked about hardware support to send packets
> (inside the switch). But somebody also talked about data-plane and
> control-plane (about STP in-kernel being a bad idea), and that data-plane is
> in-kernel, and control plane is a mrp-daemon (in user space).
> And in my mind, the "hardware" you mention is a frame-injector and can be both
> real hardware and a driver in the kernel.
> 
> Do I see it right?

Hi Jürgen

It is still unclear where the MRP_Test frames should be generated,
forwarded and consumed: either in the kernel, or in user space.

The userspace RSTP daemon generates and consumes all the BPDUs in
userspace. But BPDUs are never forwarded. However MRP_Test frames are
forwarded by client nodes. Are the MRP_Test frames then part of the
data plane in a client?

What i think is clear is that the state machine is in user space.

For the rest, we are still exploring possibilities.

    Andrew
Jürgen Lambrecht Jan. 28, 2020, 9:50 a.m. UTC | #10
[sending again to vger.kernel.org, because previous was rejected]

On 1/27/20 12:04 PM, Allan W. Nielsen wrote:
>>> > How do you handle the 'headless chicken' scenario? User space tells
>>> > the port to start sending MRP_Test frames. It then dies. The hardware

Andrew, I am a bit confused here - maybe I missed an email-thread, I'm sorry then.

In previous emails you and others talked about hardware support to send packets (inside the switch). But somebody also talked about data-plane and control-plane (about STP in-kernel being a bad idea), and that data-plane is in-kernel, and control plane is a mrp-daemon (in user space).
And in my mind, the "hardware" you mention is a frame-injector and can be both real hardware and a driver in the kernel.

Do I see it right?

>>> > continues sending these messages, and the neighbours think everything
>>> > is O.K., but in reality the state machine is dead, and when the ring
>>> > breaks, the daemon is not there to fix it?
> I agree, we need to find a solution to this issue.
>
Allan W. Nielsen Jan. 28, 2020, 9:58 a.m. UTC | #11
On 27.01.2020 15:39, Jürgen Lambrecht wrote:
>On 1/27/20 1:27 PM, Allan W. Nielsen wrote:
>> Hi Jürgen,
>>
>> On 27.01.2020 12:29, Jürgen Lambrecht wrote:
>>> On 1/26/20 4:59 PM, Andrew Lunn wrote:
>>>> Given the design of the protocol, if the hardware decides the OS etc
>>>> is dead, it should stop sending MRP_TEST frames and unblock the ports.
>>>> It then becomes a 'dumb switch', and for a short time there will be a
>>>> broadcast storm. Hopefully one of the other nodes will then take over
>>>> the role and block a port.
>How to fall back can probably be a configuration option in the hardware.
>>
>>> In my experience a closed loop should never happen. It can make
>>> software crash and give other problems.  Another node should first
>>> take over before unblocking the ring ports. (If this is possible - I'm
>>> only half following this discussion)
>>>
>>> What is your opinion?
>> Having loops in the network is never a good thing - but to be honest, I
>> think it is more important that we ensure the design can survive and
>> recover from loops.
>Indeed
>>
>> With the current design, it will be really hard to avoid loops when the
>> network boots. MRP will actually start with the ports blocked, but they
>> will be unblocked in the period from when the bridge is created
>> until MRP is enabled. If we want to change this (which I'm not too keen
>> on), then we need to be able to block the ports while the bridge is
>> down.
>Our ring network is part of a bigger network. Loops are really not allowed.
That is understood, and should be avoided. But I assume that switches
which crash are not allowed either ;-)

We will consider if we somehow can block the ports before/after a
user-space protocol kicks in. I cannot promise anything, but we will
see what can be done.

>> And even if we do this, then we cannot guarantee to avoid loops. Let's
>> assume we have a small ring with just 2 nodes: an MRM and an MRC. Let's
>> assume the MRM boots first. It will unblock both ports as the ring is
>> open. Now the MRC boots, closes the ring, and creates a loop.
>> It will take some time (milliseconds) before the MRM notices this and
>> blocks one of the ports.
>In my view a bring-up and tear-down module is needed. I don't
>know if it should be part of MRP or not; probably not, so something on
>top of the mrp daemon.
If we need this kind of policy, then I agree it should be on top of or
outside the user-space MRP daemon.

>> But while we are at this topic, we need to add some functionality to
>> the user-space application such that it can set the priority of the MRP
>> frames. We will get that fixed.
>Indeed! In my old design I had to give them high priority, or else the loop
>was wrongly closed under high network load.
Yes, I'm not surprised to hear that.

>I guess you mean the priority in the VLAN header?
>I seem to remember someone said the bridge code is VLAN-agnostic.
Yes, if it has a VLAN header (which is optional). But even without the
VLAN header these frames need to be classified to a high-priority
queue.
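
(A minimal sketch of that classification for kernel-generated frames, assuming the transmit path sits in the bridge: TC_PRIO_CONTROL is what the bridge already uses for STP BPDUs, while br_mrp_send_test() and br_mrp_alloc_test_skb() are invented names for this example.)

#include <linux/netdevice.h>
#include <linux/pkt_sched.h>	/* TC_PRIO_CONTROL */
#include <linux/skbuff.h>

static void br_mrp_send_test(struct net_device *dev)
{
	struct sk_buff *skb = br_mrp_alloc_test_skb(dev);

	if (!skb)
		return;
	/* Classify as control traffic so the frame lands in a
	 * high-priority queue even without a VLAN header.
	 */
	skb->priority = TC_PRIO_CONTROL;
	dev_queue_xmit(skb);
}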

>>> (FYI: I made that mistake once doing a proof-of-concept ring design:
>>> during testing, when a "broken" Ethernet cable was "fixed" I had for a
>>> short time a loop, and then it happened often that that port of the
>>> (Marvell 88E6063) switch was blocked.  (To unblock, the only solution was
>>> to bring that port down and up again, and then all "lost" packets came
>>> out in a burst.) That problem was caused by flow control (with pause
>>> frames), and disabling flow control fixed it, but flow control is
>>> on by default as far as I know.)
>> I see. It could be fun to see if what we have proposed so far will work
>> with such a switch.
>
>Depending on the projects I could work on it later this year (or only next year or not..)
Sounds good - no hurry.

/Allan

Patch

diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index aee86a189432..b1ed170fc01d 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -40,6 +40,11 @@  enum switchdev_attr_id {
 	SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
 	SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
 	SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
+#ifdef CONFIG_BRIDGE_MRP
+	SWITCHDEV_ATTR_ID_MRP_PORT_STATE,
+	SWITCHDEV_ATTR_ID_MRP_PORT_ROLE,
+	SWITCHDEV_ATTR_ID_MRP_RING_STATE,
+#endif
 };
 
 struct switchdev_attr {
@@ -55,6 +60,11 @@  struct switchdev_attr {
 		clock_t ageing_time;			/* BRIDGE_AGEING_TIME */
 		bool vlan_filtering;			/* BRIDGE_VLAN_FILTERING */
 		bool mc_disabled;			/* MC_DISABLED */
+#ifdef CONFIG_BRIDGE_MRP
+		u8 mrp_port_state;			/* MRP_PORT_STATE */
+		u8 mrp_port_role;			/* MRP_PORT_ROLE */
+		u8 mrp_ring_state;			/* MRP_RING_STATE */
+#endif
 	} u;
 };
 
@@ -63,6 +73,11 @@  enum switchdev_obj_id {
 	SWITCHDEV_OBJ_ID_PORT_VLAN,
 	SWITCHDEV_OBJ_ID_PORT_MDB,
 	SWITCHDEV_OBJ_ID_HOST_MDB,
+#ifdef CONFIG_BRIDGE_MRP
+	SWITCHDEV_OBJ_ID_PORT_MRP,
+	SWITCHDEV_OBJ_ID_RING_TEST_MRP,
+	SWITCHDEV_OBJ_ID_RING_ROLE_MRP,
+#endif
 };
 
 struct switchdev_obj {
@@ -94,6 +109,42 @@  struct switchdev_obj_port_mdb {
 #define SWITCHDEV_OBJ_PORT_MDB(OBJ) \
 	container_of((OBJ), struct switchdev_obj_port_mdb, obj)
 
+
+#ifdef CONFIG_BRIDGE_MRP
+/* SWITCHDEV_OBJ_ID_PORT_MRP */
+struct switchdev_obj_port_mrp {
+	struct switchdev_obj obj;
+	struct net_device *port;
+	u32 ring_nr;
+};
+
+#define SWITCHDEV_OBJ_PORT_MRP(OBJ) \
+	container_of((OBJ), struct switchdev_obj_port_mrp, obj)
+
+/* SWITCHDEV_OBJ_ID_RING_TEST_MRP */
+struct switchdev_obj_ring_test_mrp {
+	struct switchdev_obj obj;
+	/* The interval is in us; a value of 0 means stop sending */
+	u32 interval;
+	u8 max_miss;
+	u32 ring_nr;
+};
+
+#define SWITCHDEV_OBJ_RING_TEST_MRP(OBJ) \
+	container_of((OBJ), struct switchdev_obj_ring_test_mrp, obj)
+
+/* SWITCHDEV_OBJ_ID_RING_ROLE_MRP */
+struct switchdev_obj_ring_role_mrp {
+	struct switchdev_obj obj;
+	u8 ring_role;
+	u32 ring_nr;
+};
+
+#define SWITCHDEV_OBJ_RING_ROLE_MRP(OBJ) \
+	container_of((OBJ), struct switchdev_obj_ring_role_mrp, obj)
+
+#endif
+
 typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);
 
 enum switchdev_notifier_type {
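
For context, a hypothetical bridge-side caller could hand the ring-test object to the existing switchdev entry point roughly as follows. Only switchdev_port_obj_add() and the structures above are real; the wrapper name and its parameters are invented for the example:

/* Ask the HW to start generating MRP_Test frames on 'dev'. Per the
 * comment in struct switchdev_obj_ring_test_mrp, interval == 0 stops
 * the generation again.
 */
static int br_mrp_switchdev_send_ring_test(struct net_device *dev,
					   u32 ring_nr, u32 interval_us,
					   u8 max_miss)
{
	struct switchdev_obj_ring_test_mrp test = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_RING_TEST_MRP,
		.interval = interval_us,
		.max_miss = max_miss,
		.ring_nr = ring_nr,
	};

	return switchdev_port_obj_add(dev, &test.obj, NULL);
}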