go.ligato.io/vpp-agent/v3@v3.5.0/tests/robot/suites/trafficIPv6/sr_proxy_traffic/dynamic_sr_proxy_trafficIPv6.robot (about)

     1  *** Settings ***
     2  
     3  Library     OperatingSystem
     4  Library     String
     5  
     6  Resource     ../../../variables/${VARIABLES}_variables.robot
     7  Resource    ../../../libraries/all_libs.robot
     8  Resource    ../../../libraries/pretty_keywords.robot
     9  
    10  Suite Setup       Testsuite Setup
    11  Suite Teardown    Testsuite Teardown
    12  Test Setup        TestSetup
    13  Test Teardown     TestTeardown
    14  
    15  *** Variables ***
    16  ${VARIABLES}=                     common
    17  ${ENV}=                           common
    18  ${WAIT_TIMEOUT}=                  20s
    19  ${SYNC_SLEEP}=                    3s
    20  ${TRACE_WAIT_TIMEOUT}=            6s
    21  ${TRACE_SYNC_SLEEP}=              1s
    22  ${PING_WAIT_TIMEOUT}=             15s
    23  ${PING_SLEEP}=                    1s
    24  
    25  @{segments}                       b::    c::
    26  @{segmentsweight}                 1    @{segments}    # segment list's weight and segments
    27  @{segmentList}                    ${segmentsweight}
    28  ${vpp1_tap_ipv6}=                 a::a
    29  ${linux_vpp1_tap_ipv6}=           a::1
    30  ${linux_vpp1_tap_ipv6_subnet}=    a::
    31  ${vpp3_tap_ipv6}=                 c::c
    32  ${linux_vpp3_tap_ipv6}=           c::1
    33  ${linux_vpp3_tap_ipv6_subnet}=    c::
    34  ${linux_vpp1_tap_ipv4_subnet}=    10.1.1.0  # 24-bit netmask, IPv4 pattern: 10.<vpp number>.x.x
    35  ${linux_vpp1_tap_ipv4}=           10.1.1.1
    36  ${vpp1_tap_ipv4}=                 10.1.1.2
    37  ${vpp1_memif2_ipv4}=              10.1.3.1
    38  ${srproxy_out_memif_ipv4}=        10.2.1.1
    39  ${srproxy_in_memif_ipv4}=         10.2.2.2
    40  ${linux_vpp3_tap_ipv4_subnet}=    10.3.1.0  # 24-bit netmask
    41  ${linux_vpp3_tap_ipv4}=           10.3.1.1
    42  ${vpp3_tap_ipv4}=                 10.3.1.2
    43  ${vpp3_memif2_ipv4}=              10.3.3.1
    44  ${service_out_memif_ipv4}=        10.4.1.2
    45  ${service_in_memif_ipv4}=         10.4.2.1
    46  ${vpp1_tap_mac}=                  11:11:11:11:11:11
    47  ${linux_vpp1_tap_mac}=            22:22:22:22:22:22
    48  ${vpp3_tap_mac}=                  33:33:33:33:33:33
    49  ${linux_vpp3_tap_mac}=            44:44:44:44:44:44
    50  ${vpp1_memif1_mac}=               02:f1:be:90:00:01
    51  ${vpp2_memif1_mac}=               02:f1:be:90:00:02
    52  ${vpp2_memif2_mac}=               02:f1:be:90:02:02
    53  ${vpp3_memif1_mac}=               02:f1:be:90:00:03
    54  ${vpp3_memif2_mac}=               02:f1:be:90:02:03
    55  ${vpp1_memif2_mac}=               02:f1:be:90:02:01
    56  ${srproxy_out_memif_mac}=         02:f1:be:90:03:02
    57  ${service_in_memif_mac}=          02:f1:be:90:00:04
    58  ${service_out_memif_mac}=         02:f1:be:90:02:04
    59  ${srproxy_in_memif_mac}=          02:f1:be:90:04:02
    60  
    61  # ethernet frame sending variables (used as values in sending python script or in validation)
    62  ${out_interface}=                 linux_vpp1_tap
    63  ${source_mac_address}=            01:02:03:04:05:06
    64  ${destination_mac_address}=       01:02:03:04:05:06
    65  ${ethernet_type}=                 88b5                                # using public ethernet type for prototype and vendor-specific protocol development to not explicitly say what to expect in payload (=general frame) (http://standards-oui.ieee.org/ethertype/eth.txt)
    66  ${payload}=                       "["*30)+"PAYLOAD"+("]"*30           # custom payload (= general frame) (inserted directly "as is" in python script)
    67  ${payload_hex_prefix}=            5b5b5b5b5b5b5b5b5b5b5b5b5b5b5b5b    # partial prefix of payload in hexadecimal form (used for validation)
    68  ${checksum}=                      1a2b3c4d                            # checksum is not controlled anywhere, but needed for correct construction of frame structure
    69  ${srproxy_out_memif_vpp_name}=    memif3/3                            # used for validation
    70  ${srproxy_in_memif_vpp_index}=    4                                   # used for validation
    71  
    72  *** Test Cases ***
    73  Common Setup Used Across All Tests
    74      [Tags]    setup
    75      # create nodes (agent_vpp_1 = node A, agent_vpp_2 = node B / SR proxy, agent_vpp_3 = node C, agent_vpp_4 = node D / SR-unaware service)
    76      Add Agent VPP Node    agent_vpp_1
    77      Add Agent VPP Node    agent_vpp_2
    78      Add Agent VPP Node    agent_vpp_3
    79      Add Agent VPP Node    agent_vpp_4
    80  
    81      # creating TAP tunnels between linux and VPP (in containers); each TAP carries both an IPv6 and an IPv4 address so the IPv6 and IPv4 tests can reuse the same tunnels
    82      Put TAPv2 Interface With 2 IPs    node=agent_vpp_1    name=vpp1_tap                  ip=${vpp1_tap_ipv6}          prefix=64    second_ip=${vpp1_tap_ipv4}    second_prefix=24    host_if_name=linux_vpp1_tap    mac=${vpp1_tap_mac}
    83      Wait Until Keyword Succeeds   ${WAIT_TIMEOUT}   ${SYNC_SLEEP}    vpp_term: Interface Is Created    node=agent_vpp_1    mac=${vpp1_tap_mac}
    84      linux: Set Host TAP Interface     node=agent_vpp_1    host_if_name=linux_vpp1_tap    ip=${linux_vpp1_tap_ipv6}    prefix=64    mac=${linux_vpp1_tap_mac}     second_ip=${linux_vpp1_tap_ipv4}    second_prefix=24
    85      Put TAPv2 Interface With 2 IPs    node=agent_vpp_3    name=vpp3_tap                  ip=${vpp3_tap_ipv6}          prefix=64    second_ip=${vpp3_tap_ipv4}    second_prefix=24    host_if_name=linux_vpp3_tap    mac=${vpp3_tap_mac}
    86      Wait Until Keyword Succeeds   ${WAIT_TIMEOUT}   ${SYNC_SLEEP}    vpp_term: Interface Is Created    node=agent_vpp_3    mac=${vpp3_tap_mac}
    87      linux: Set Host TAP Interface     node=agent_vpp_3    host_if_name=linux_vpp3_tap    ip=${linux_vpp3_tap_ipv6}    prefix=64    mac=${linux_vpp3_tap_mac}     second_ip=${linux_vpp3_tap_ipv4}    second_prefix=24
    88      # creating VPP (memif) tunnels between nodes (for IPv6 address purposes: agent_vpp_1 = node A, agent_vpp_2 = node B (SR proxy), agent_vpp_3 = node C, agent_vpp_4 = node D (SR-unaware service))
    89      Create Master vpp1_memif1 on agent_vpp_1 with IP ab::a, MAC ${vpp1_memif1_mac}, key 1 and m1.sock socket
    90      Create Slave vpp2_memif1 on agent_vpp_2 with IP ab::b, MAC ${vpp2_memif1_mac}, key 1 and m1.sock socket
    91      Create Master vpp2_memif2 on agent_vpp_2 with IP bc::b, MAC ${vpp2_memif2_mac}, key 2 and m2.sock socket
    92      Create Slave vpp3_memif1 on agent_vpp_3 with IP bc::c, MAC ${vpp3_memif1_mac}, key 2 and m2.sock socket
    93      # creating routes between nodes (using memifs)
    94      Create Route On agent_vpp_1 With IP b::/64 With Vrf Id 0 With Interface vpp1_memif1 And Next Hop ab::b
    95      Create Route On agent_vpp_2 With IP c::/64 With Vrf Id 0 With Interface vpp2_memif2 And Next Hop bc::c
    96      # configure segment routing policy (BSID a::c, segment list "b::, c::") that is common for all tests
    97      Put SRv6 Policy                 node=agent_vpp_1    bsid=a::c            installationVrfId=0         srhEncapsulation=true    sprayBehaviour=false    segmentlists=${segmentList}
    98      # preventing packet drops due to unresolved ipv6 neighbor discovery
    99      vpp_term: Set IPv6 neighbor  agent_vpp_1    memif1/1    ab::b    ${vpp2_memif1_mac}
   100      vpp_term: Set IPv6 neighbor  agent_vpp_2    memif1/1    ab::a    ${vpp1_memif1_mac}
   101      vpp_term: Set IPv6 neighbor  agent_vpp_2    memif2/2    bc::c    ${vpp3_memif1_mac}
   102      vpp_term: Set IPv6 neighbor  agent_vpp_3    memif1/2    bc::b    ${vpp2_memif2_mac}
   103  
   104  Dynamic SR Proxy with L3-IPv6 SR-unaware service
   105      # Testing IPv6 traffic(ping packet) going through SR-proxy (IPv6 segment routing) connected to IPv6 SR-unaware service.
   106      # Desired ping packet path:
   107      ## Container agent_vpp_1:                          linux environment -> linux_vpp1_tap interface -(tap tunnel to VPP1)-> vpp1_tap interface
   108      ##                                                 -> steering to segment routing (segment list="b::, c::") -(segment routing to b::)
   109      ##                                                 -> vpp1_memif1 interface (memif tunnel to VPP2)
   110      ## Container agent_vpp_2 (SR proxy node):          vpp2_memif1 interface -(SR-proxy functionality)-> srproxy_out_memif interface
   111      ## Container agent_vpp_4 (SR-unaware service node): service_in_memif interface -(just forwarding incoming packet)-> service_out_memif interface
   112      ## Container agent_vpp_2 (SR proxy node):          srproxy_in_memif interface -(segment routing to c::)-> vpp2_memif2 interface
   113      ## Container agent_vpp_3:                          vpp3_memif1 interface -(DX6 decapsulation from segment routing)-> vpp3_tap interface
   114      ##                                                 -> linux_vpp3_tap(ping reached destination) -(ping reply)-> vpp3_tap interface
   115      ##                                                 -> vpp3_memif2 interface (memif tunnel to VPP1)
   116      ## Container agent_vpp_1:                          vpp1_memif2 interface -> vpp1_tap interface -> linux_vpp1_tap interface -> linux environment
   117  
   118      # creating path for ping packet (path is already partially done in common setup)
   119      ## steering traffic from linux to TAPs tunnel leading to VPP
   120      linux: Add Route     node=agent_vpp_1    destination_ip=${linux_vpp3_tap_ipv6}    prefix=64    next_hop_ip=${vpp1_tap_ipv6}
   121      ## steering traffic to segment routing
   122      Put SRv6 L3 Steering    node=agent_vpp_1    name=toC    bsid=a::c    installationVrfId=0    prefixAddress=c::/64
   123      ## creating sr-proxy in and out interfaces (using memifs)
   124      Create Master srproxy_out_memif on agent_vpp_2 with Prefixed IP bd:1::b/32, MAC ${srproxy_out_memif_mac}, key 3 and m3.sock socket
   125      Create Slave service_in_memif on agent_vpp_4 with Prefixed IP bd:1::d/32, MAC ${service_in_memif_mac}, key 3 and m3.sock socket
   126      Create Master service_out_memif on agent_vpp_4 with Prefixed IP bd:2::d/32, MAC ${service_out_memif_mac}, key 4 and m4.sock socket
   127      Create Slave srproxy_in_memif on agent_vpp_2 with Prefixed IP bd:2::b/32, MAC ${srproxy_in_memif_mac}, key 4 and m4.sock socket
   128      ## configure SR-proxy
   129      Put Local SID With End.AD function    node=agent_vpp_2    sidAddress=b::    l3serviceaddress=bd:1::d    outinterface=srproxy_out_memif    ininterface=srproxy_in_memif
   130      ## creating service routes (Service just sends received packets back)
   131      Create Route On agent_vpp_4 With IP ${linux_vpp3_tap_ipv6_subnet}/64 With Vrf Id 0 With Interface service_out_memif And Next Hop bd:2::b
   132      ## configure exit from segment routing
   133      Put Local SID With End.DX6 function    node=agent_vpp_3    sidAddress=c::     installationVrfId=0         outinterface=vpp3_tap    nexthop=${linux_vpp3_tap_ipv6}
   134      ## path for ping packet returning back to source (no segment routing, but just plain IPv6 route):
   135      ## create route for ping echo to get back to vpp3 from linux environment in agent_vpp_3 container
   136      linux: Add Route    node=agent_vpp_3    destination_ip=${linux_vpp1_tap_ipv6}    prefix=64    next_hop_ip=${vpp3_tap_ipv6}
   137      ## creating path for ping echo from vpp3 to vpp1
   138      Create Master vpp3_memif2 on agent_vpp_3 with IP ac::c, MAC ${vpp3_memif2_mac}, key 5 and m5.sock socket
   139      Create Slave vpp1_memif2 on agent_vpp_1 with IP ac::a, MAC ${vpp1_memif2_mac}, key 5 and m5.sock socket
   140      Create Route On agent_vpp_3 With IP ${linux_vpp1_tap_ipv6_subnet}/64 With Vrf Id 0 With Interface vpp3_memif2 And Next Hop ac::a
   141  
   142      # preventing packet drops due to unresolved ipv6 neighbor discovery
   143      vpp_term: Set IPv6 neighbor  agent_vpp_1    memif2/5    ac::c                     ${vpp3_memif2_mac}
   144      vpp_term: Set IPv6 neighbor  agent_vpp_1    tap0        ${linux_vpp1_tap_ipv6}    ${linux_vpp1_tap_mac}
   145      vpp_term: Set IPv6 neighbor  agent_vpp_2    memif3/3    bd:1::d                   ${service_in_memif_mac}
   146      vpp_term: Set IPv6 neighbor  agent_vpp_2    memif4/4    bd:2::d                   ${service_out_memif_mac}
   147      vpp_term: Set IPv6 neighbor  agent_vpp_3    memif2/5    ac::a                     ${vpp1_memif2_mac}
   148      vpp_term: Set IPv6 neighbor  agent_vpp_3    tap0        ${linux_vpp3_tap_ipv6}    ${linux_vpp3_tap_mac}
   149      vpp_term: Set IPv6 neighbor  agent_vpp_4    memif1/3    bd:1::b                   ${srproxy_out_memif_mac}
   150      vpp_term: Set IPv6 neighbor  agent_vpp_4    memif2/4    bd:2::b                   ${srproxy_in_memif_mac}
   151      # add packet tracing
   152      vpp_term: Add Trace Memif       agent_vpp_2    100
   153      # ping from agent_vpp_1 to agent_vpp_3's tap interface  (despite efforts to eliminate first packet drop by setting ipv6 neighbor, sometimes it is still happening -> timeoutable pinging repeat until first ping success)
   154      Wait Until Keyword Succeeds    ${PING_WAIT_TIMEOUT}   ${PING_SLEEP}    linux: Check Ping6    node=agent_vpp_1    ip=${linux_vpp3_tap_ipv6}    count=1
   155      # check that packet is processed by SR-proxy, sent to SR-unaware service using correct interface and in the process decapsulated (checked only source and destination address and not if SR header extension is missing because that is not visible in trace)
   156      ${vpp2trace}=    vpp_term: Show Trace    agent_vpp_2
   157      Packet Trace ${vpp2trace} Should Contain One Packet Trace That Contains These Ordered Substrings srv6-ad-localsid, IP6: ${srproxy_out_memif_mac} -> ${service_in_memif_mac}, ICMP6: ${linux_vpp1_tap_ipv6} -> ${linux_vpp3_tap_ipv6}, ., .  # using only 3 substrings to match packet trace
   158      # check that packet has returned from SR-unaware service, gets rewritten by proxy (SR encapsulation) and sent to another SR segment (correct interface and correct source and destination address)
   159      Packet Trace ${vpp2trace} Should Contain One Packet Trace That Contains These Ordered Substrings ${linux_vpp1_tap_ipv6} -> ${linux_vpp3_tap_ipv6}, SRv6-AD-rewrite: src :: dst c::, IP6: ${vpp2_memif2_mac} -> ${vpp3_memif1_mac}, IPV6_ROUTE: :: -> c::, .  # using only 4 substrings to match packet trace
   160  
   161      # cleanup (for next test)
   162      Delete VPP Interface     agent_vpp_2         srproxy_in_memif
   163      Delete VPP Interface     agent_vpp_2         srproxy_out_memif
   164      Delete VPP Interface     agent_vpp_4         service_in_memif
   165      Delete VPP Interface     agent_vpp_4         service_out_memif
   166      Delete VPP Interface     agent_vpp_3         vpp3_memif2
   167      Delete VPP Interface     agent_vpp_1         vpp1_memif2
   168      Delete Local SID         node=agent_vpp_2    sidAddress=b::
   169      Delete Local SID         node=agent_vpp_3    sidAddress=c::
   170      vpp_term: Clear Trace    agent_vpp_2
   171  
   172  Dynamic SR Proxy with L3-IPv4 SR-unaware service
   173      # Testing IPv4 traffic(ping packet) going through SR-proxy (IPv6 segment routing) connected to IPv4 SR-unaware service.
   174      # Desired ping packet path is basically the same as in test for IPv6 SR-unaware service. The difference is that we
   175      # use IPv4 addresses and IPv4 routes everywhere except of IPv6 segment routing.
   176  
   177      # creating path for ping packet (path is already partially done in common setup)
   178      ## steering traffic from linux to TAPs tunnel leading to VPP
   179      linux: Add Route     node=agent_vpp_1    destination_ip=${linux_vpp3_tap_ipv4_subnet}    prefix=24    next_hop_ip=${vpp1_tap_ipv4}
   180      ## steering traffic to segment routing
   181      Put SRv6 L3 Steering    node=agent_vpp_1    name=toC    bsid=a::c    installationVrfId=0    prefixAddress=${linux_vpp3_tap_ipv4_subnet}/24
   182      ## creating sr-proxy in and out interfaces (using memifs)
   183      Create Master srproxy_out_memif on agent_vpp_2 with Prefixed IP ${srproxy_out_memif_ipv4}/24, MAC ${srproxy_out_memif_mac}, key 3 and m3.sock socket
   184      Create Slave service_in_memif on agent_vpp_4 with Prefixed IP ${service_in_memif_ipv4}/24, MAC ${service_in_memif_mac}, key 3 and m3.sock socket
   185      Create Master service_out_memif on agent_vpp_4 with Prefixed IP ${service_out_memif_ipv4}/24, MAC ${service_out_memif_mac}, key 4 and m4.sock socket
   186      Create Slave srproxy_in_memif on agent_vpp_2 with Prefixed IP ${srproxy_in_memif_ipv4}/24, MAC ${srproxy_in_memif_mac}, key 4 and m4.sock socket
   187      ## configure SR-proxy
   188      Put Local SID With End.AD function     node=agent_vpp_2    sidAddress=b::    l3serviceaddress=${service_in_memif_ipv4}    outinterface=srproxy_out_memif    ininterface=srproxy_in_memif
   189      ## creating service routes (Service just sends received packets back)
   190      Create Route On agent_vpp_4 With IP ${linux_vpp3_tap_ipv4_subnet}/24 With Vrf Id 0 With Interface service_out_memif And Next Hop ${srproxy_in_memif_ipv4}
   191      ## configure exit from segment routing
   192      Put Local SID With End.DX4 function    node=agent_vpp_3    sidAddress=c::    installationVrfId=0    outinterface=vpp3_tap    nexthop=${linux_vpp3_tap_ipv4}
   193      ## path for ping packet returning back to source (no segment routing, but just plain IPv4 route):
   194      ## create route for ping echo to get back to vpp3 from linux environment in agent_vpp_3 container
   195      linux: Add Route    node=agent_vpp_3    destination_ip=${linux_vpp1_tap_ipv4_subnet}    prefix=24    next_hop_ip=${vpp3_tap_ipv4}
   196      ## creating path for ping echo from vpp3 to vpp1
   197      Create Master vpp3_memif2 on agent_vpp_3 with IP ${vpp3_memif2_ipv4}, MAC ${vpp3_memif2_mac}, key 5 and m5.sock socket
   198      Create Slave vpp1_memif2 on agent_vpp_1 with IP ${vpp1_memif2_ipv4}, MAC ${vpp1_memif2_mac}, key 5 and m5.sock socket
   199      Create Route On agent_vpp_3 With IP ${linux_vpp1_tap_ipv4_subnet}/24 With Vrf Id 0 With Interface vpp3_memif2 And Next Hop ${vpp1_memif2_ipv4}
   200  
   201      # preventing packet drops due to unresolved ARP (some won't resolve properly, so it is not only about good traffic from first ping)
   202      vpp_term: Set ARP    agent_vpp_2    memif3/3    ${service_in_memif_ipv4}    ${service_in_memif_mac}
   203      vpp_term: Set ARP    agent_vpp_3    memif2/5    ${vpp1_memif2_ipv4}         ${vpp1_memif2_mac}
   204      vpp_term: Set ARP    agent_vpp_4    memif2/4    ${srproxy_in_memif_ipv4}    ${srproxy_in_memif_mac}
   205  
   206      # add packet tracing
   207      vpp_term: Add Trace Memif       agent_vpp_2    100
   208      # ping from agent_vpp_1 to agent_vpp_3's tap interface  (despite efforts to eliminate first packet drop by setting arp, sometimes it is still happening -> timeoutable pinging repeat until first ping success)
   209      Wait Until Keyword Succeeds    ${PING_WAIT_TIMEOUT}   ${PING_SLEEP}    linux: Check Ping    node=agent_vpp_1    ip=${linux_vpp3_tap_ipv4}    count=1
   210      # check that packet is processed by SR-proxy, sent to SR-unaware service using correct interface and in the process decapsulated (checked only source and destination address and not if SR header extension is missing because that is not visible in trace)
   211      ${vpp2trace}=    vpp_term: Show Trace    agent_vpp_2
   212      Packet Trace ${vpp2trace} Should Contain One Packet Trace That Contains These Ordered Substrings srv6-ad-localsid, IP4: ${srproxy_out_memif_mac} -> ${service_in_memif_mac}, ICMP: ${linux_vpp1_tap_ipv4} -> ${linux_vpp3_tap_ipv4}, ., .  # using only 3 substrings to match packet trace
   213      # check that packet has returned from SR-unaware service, gets rewritten by proxy (SR encapsulation) and sent to another SR segment (correct interface and correct source and destination address)
   214      Packet Trace ${vpp2trace} Should Contain One Packet Trace That Contains These Ordered Substrings ${linux_vpp1_tap_ipv4} -> ${linux_vpp3_tap_ipv4}, SRv6-AD-rewrite: src :: dst c::, IP6: ${vpp2_memif2_mac} -> ${vpp3_memif1_mac}, IPV6_ROUTE: :: -> c::, .  # using only 4 substrings to match packet trace
   215  
   216      # cleanup (for next test)
   217      Delete Local SID         node=agent_vpp_2    sidAddress=b::
   218      vpp_term: Clear Trace    agent_vpp_2
   219  
   220  Dynamic SR Proxy with L2 SR-unaware service
   221      # Testing L2 traffic(sending custom Ethernet frame) going through SR-proxy (IPv6 segment routing) connected to
   222      # L2 SR-unaware service. Desired frame path starts identically to the path in IPv6 SR-unaware service test, but
   223      # ends right after SR-proxy (at least it is not further checked). We don't do the full packet/frame path as for
   224      # IPv4/IPv6 SR-unaware services, because sending ethernet frame is not like ping that has echo and nice ping tool
   225      # in linux that tells you whether echo was received or not. For doing the same for ethernet frame, there have to be
   226      # some traffic catching tool and that is too complicated for something that is basically not the aim of test.
   227      # The aim is to check SR-proxy functionality.
   228  
   229      # creating path for frame (path is already partially done in common setup+using memifs between sr-proxy and service from ipv4 test)
   230      ## steering traffic to segment routing
   231      Put SRv6 L2 Steering    node=agent_vpp_1    name=toC    bsid=a::c    interfaceName=vpp1_tap
   232      ## configure SR-proxy
   233      Put Local SID With End.AD function    node=agent_vpp_2    sidAddress=b::    outinterface=srproxy_out_memif    ininterface=srproxy_in_memif    # L2 SR-unaware service (no l3serviceaddress given)
   234      ## creating service paths (Service just sends received frame back)
   235      Create Bridge Domain bd1 Without Autolearn On agent_vpp_4 With Interfaces service_in_memif, service_out_memif
   236      Add fib entry for 01:02:03:04:05:06 in bd1 over service_out_memif on agent_vpp_4
   237  
   238      # add packet tracing
   239      vpp_term: Add Trace Memif     agent_vpp_2    100
   240      # sending ethernet frame
   241      linux: Send Ethernet Frame    agent_vpp_1    ${out_interface}    ${source_mac_address}    ${destination_mac_address}    ${ethernet_type}    ${payload}    ${checksum}
   242      Wait Until Keyword Succeeds   ${TRACE_WAIT_TIMEOUT}   ${TRACE_SYNC_SLEEP}    Trace on agent_vpp_2 has at least 2 packets
   243      # check that packet is processed by SR-proxy, sent to SR-unaware service using correct interface and in the process decapsulated (checked only ethernet frame
   244      # type and source/destination address and not if SR header extension is missing because that is not visible in trace)
   245      ${vpp2trace}=    vpp_term: Show Trace    agent_vpp_2
   246      Packet Trace ${vpp2trace} Should Contain One Packet Trace That Contains These Ordered Substrings srv6-ad-localsid, ${srproxy_out_memif_vpp_name}, 0x${ethernet_type}: ${source_mac_address} -> ${destination_mac_address}, ., .  # using only 3 substrings to match packet trace
   247      # check that packet has returned from SR-unaware service (something received from correct interface and later checked its rewritten payload to be sure it is
   248      # the right frame), gets rewritten by proxy (SR encapsulation) and sent to another SR segment (correct interface and correct source and destination address)
   249      Packet Trace ${vpp2trace} Should Contain One Packet Trace That Contains These Ordered Substrings memif: hw_if_index ${srproxy_in_memif_vpp_index}, SRv6-AD-rewrite: src :: dst c::, ${ethernet_type}${payload_hex_prefix}, IP6: ${vpp2_memif2_mac} -> ${vpp3_memif1_mac}, IPV6_ROUTE: :: -> c::
   250  
   251  
   252  *** Keywords ***
   253  Packet Trace ${packettrace} Should Contain One Packet Trace That Contains These Ordered Substrings ${substr1}, ${substr2}, ${substr3}, ${substr4}, ${substr5}
            # Split the whole trace dump into individual packet traces (splitting on the word
            # "Packet") and assert that at least one piece matches all five substrings in the
            # given order. The substrings are embedded verbatim into a regexp, so regex
            # metacharacters are live — callers exploit this by passing "." as an always-matching
            # placeholder when fewer than five substrings are needed. Matching is case-insensitive.
   254      ${packetsplit}=       String.Split String    ${packettrace}    Packet
   255      Should Contain Match    ${packetsplit}    regexp=.*${substr1}.*${substr2}.*${substr3}.*${substr4}.*${substr5}.*    case_insensitive=True
   256  
   257  Trace on ${node} has at least 2 packets
            # Fail unless the node's packet trace output already contains a "Packet 2" marker,
            # i.e. at least two packets were captured; used with Wait Until Keyword Succeeds
            # to wait for traffic to pass through the node.
   258      ${trace}=    vpp_term: Show Trace    ${node}
   259      Should Contain    ${trace}    Packet 2
   260  
   261  TestSetup
            # snapshot the datastore state before each test (for post-mortem debugging)
   262      Make Datastore Snapshots    ${TEST_NAME}_test_setup
   263  
   264  TestTeardown
            # snapshot the datastore state after each test (for post-mortem debugging)
   265      Make Datastore Snapshots    ${TEST_NAME}_test_teardown