flow indr dev offload

When the driver loads, it calls
flow_indr_dev_register(mlx5e_rep_indr_setup_cb, rpriv)
which links a struct flow_indr_dev into the global list flow_block_indr_dev_list.

*(struct flow_indr_dev *)0xffff9230d9b07240 = {
        .list = (struct list_head){
                .next = (struct list_head *)flow_block_indr_dev_list+0x0 = 0xffffffffbdab9d10,
                .prev = (struct list_head *)flow_block_indr_dev_list+0x0 = 0xffffffffbdab9d10,
        },
        .cb = (flow_indr_block_bind_cb_t *)mlx5e_rep_indr_setup_cb+0x0 = 0xffffffffc05878a0,
        .cb_priv = (void *)0xffff9230ddbf2800,
        .refcnt = (refcount_t){
                .refs = (atomic_t){
                        .counter = (int)1,
                },
        },
}
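
For reference, here is a simplified sketch of the pre-fix flow_indr_dev_register(), modeled on net/core/flow_offload.c from this kernel generation (locking kept, some error handling trimmed, so not a verbatim copy):

/* Sketch of flow_indr_dev_register() (pre-fix, simplified). */
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
        struct flow_indr_dev *indr_dev;

        mutex_lock(&flow_indr_block_lock);
        /* Already registered? Just take another reference. */
        list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
                if (indr_dev->cb == cb && indr_dev->cb_priv == cb_priv) {
                        refcount_inc(&indr_dev->refcnt);
                        mutex_unlock(&flow_indr_block_lock);
                        return 0;
                }
        }

        indr_dev = flow_indr_dev_alloc(cb, cb_priv); /* kzalloc + refcount = 1 */
        if (!indr_dev) {
                mutex_unlock(&flow_indr_block_lock);
                return -ENOMEM;
        }

        /* This list_add produces the struct dump shown above. */
        list_add(&indr_dev->list, &flow_block_indr_dev_list);
        mutex_unlock(&flow_indr_block_lock);

        return 0;
}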

With that registration in place, creating an ingress qdisc on a vxlan device calls into:

flow_indr_dev_setup_offload        <- walks flow_block_indr_dev_list
    mlx5e_rep_indr_setup_cb
        mlx5e_rep_indr_setup_block

14.40499 24260   24260   tc              mlx5e_rep_indr_setup_block
        b'mlx5e_rep_indr_setup_block+0x1 [mlx5_core]'
        b'flow_indr_dev_setup_offload+0x6a [kernel]'
        b'tcf_block_offload_cmd.isra.0+0xf7 [kernel]'
        b'tcf_block_get_ext+0x150 [kernel]'
        b'ingress_init+0x75 [sch_ingress]'
        b'qdisc_create+0x18f [kernel]'
        b'tc_modify_qdisc+0x14d [kernel]'
        b'rtnetlink_rcv_msg+0x184 [kernel]'
        b'netlink_rcv_skb+0x55 [kernel]'
        b'rtnetlink_rcv+0x15 [kernel]'
        b'netlink_unicast+0x24f [kernel]'
        b'netlink_sendmsg+0x233 [kernel]'
        b'sock_sendmsg+0x65 [kernel]'
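
flow_indr_dev_setup_offload() itself is just a walk over flow_block_indr_dev_list; a simplified sketch of the pre-fix version from net/core/flow_offload.c:

/* Sketch: give every registered driver callback a chance to bind. */
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
                                enum tc_setup_type type, void *data,
                                struct flow_block_offload *bo,
                                void (*cleanup)(struct flow_block_cb *block_cb))
{
        struct flow_indr_dev *this;

        mutex_lock(&flow_indr_block_lock);
        list_for_each_entry(this, &flow_block_indr_dev_list, list)
                this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
        mutex_unlock(&flow_indr_block_lock);

        /* If no driver attached a flow_block_cb, the offload is unsupported. */
        return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
}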
    
Then a struct flow_block_cb is allocated:
*(struct flow_block_cb *)0xffff92295ee5fe00 = {
        .driver_list = (struct list_head){
                .next = (struct list_head *)mlx5e_block_cb_list+0x0 = 0xffffffffc062aff0,
                .prev = (struct list_head *)mlx5e_block_cb_list+0x0 = 0xffffffffc062aff0,
        },
        .list = (struct list_head){
                .next = (struct list_head *)0xffff9230ccd7b178,
                .prev = (struct list_head *)0xffff9230ccd7b178,
        },
        .cb = (flow_setup_cb_t *)mlx5e_setup_tc_block_cb+0x0 = 0xffffffffc05869d0,
        .cb_ident = (void *)0xffff9230cfbe0cc0,
        .cb_priv = (void *)0xffff9230cfbe0cc0,
        .release = (void (*)(void *))0x0,
        .indr = (struct flow_block_indr){
                .list = (struct list_head){
                        .next = (struct list_head *)0x0,
                        .prev = (struct list_head *)0x0,
                },
                .dev = (struct net_device *)0x0,
                .sch = (struct Qdisc *)0x0,
                .binder_type = (enum flow_block_binder_type)FLOW_BLOCK_BINDER_TYPE_UNSPEC,
                .data = (void *)0x0,
                .cb_priv = (void *)0x0,
                .cleanup = (void (*)(struct flow_block_cb *))0x0,
        },
        .refcnt = (unsigned int)0,
}
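
This object comes from the generic flow_block_cb_alloc() helper (the indirect path uses a wrapper around it). A sketch of the helper from net/core/flow_offload.c, plus typical driver usage:

/* Sketch of flow_block_cb_alloc(): allocate and fill a flow_block_cb. */
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
                                          void *cb_ident, void *cb_priv,
                                          void (*release)(void *cb_priv))
{
        struct flow_block_cb *block_cb;

        block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
        if (!block_cb)
                return ERR_PTR(-ENOMEM);

        block_cb->cb = cb;
        block_cb->cb_ident = cb_ident;
        block_cb->cb_priv = cb_priv;
        block_cb->release = release;

        return block_cb;
}

/* Typical driver usage (sketch; matches the names in the dump above):
 *
 *   block_cb = flow_block_cb_alloc(mlx5e_setup_tc_block_cb, priv, priv, NULL);
 *   flow_block_cb_add(block_cb, f);          // link into the block's cb_list
 *   list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);
 */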

When a flow rule is pushed down (e.g. tc filter add ... flower), mlx5e_rep_indr_setup_tc_cb / mlx5e_configure_flower get called:

5.822219 23244   23244   tc              mlx5e_rep_indr_setup_tc_cb
        b'mlx5e_rep_indr_setup_tc_cb+0x1 [mlx5_core]'
        b'fl_hw_replace_filter+0x184 [cls_flower]'
        b'fl_change+0x6ee [cls_flower]'
        b'tc_new_tfilter+0x68f [kernel]'
        b'rtnetlink_rcv_msg+0x33d [kernel]'
        b'netlink_rcv_skb+0x55 [kernel]'
        b'rtnetlink_rcv+0x15 [kernel]'
        b'netlink_unicast+0x24f [kernel]'
        b'netlink_sendmsg+0x233 [kernel]'
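
The driver callback is a thin dispatcher on the tc setup type. A simplified sketch based on the mlx5 representor code (exact helper and flag names vary across kernel versions):

/* Sketch of the driver-side dispatcher (simplified). */
static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
                                      void *type_data, void *indr_priv)
{
        unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
        struct mlx5e_rep_indr_block_priv *priv = indr_priv;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                /* Translate the flower rule into a hardware offload;
                 * this path ends up in mlx5e_configure_flower(). */
                return mlx5e_rep_indr_offload(priv->netdev, type_data, priv,
                                              flags);
        default:
                return -EOPNOTSUPP;
        }
}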
        
But this scheme has a bug: if the vxlan device (and its ingress qdisc) is created first and the driver is loaded afterwards, the driver's callback is never invoked.
Hence the following fix:

commit 74fc4f828769cca1c3be89ea92cb88feaa27ef52
Author: Eli Cohen <elic@nvidia.com>
Date:   Tue Aug 17 20:05:18 2021 +0300

    net: Fix offloading indirect devices dependency on qdisc order creation

    Currently, when creating an ingress qdisc on an indirect device before
    the driver registered for callbacks, the driver will not have a chance
    to register its filter configuration callbacks.

    To fix that, modify the code such that it keeps track of all the ingress
    qdiscs that call flow_indr_dev_setup_offload(). When a driver calls
    flow_indr_dev_register(),  go through the list of tracked ingress qdiscs
    and call the driver callback entry point so as to give it a chance to
    register its callback.

    Reviewed-by: Jiri Pirko <jiri@nvidia.com>
    Signed-off-by: Eli Cohen <elic@nvidia.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
    
This commit introduces a new list, flow_indir_dev_list, which records all the indirect devices, for example:
(char [16])"vxlan1"
(char [16])"br"
(char [16])"br"
(char [16])"ovs-system"
(char [16])"ovs-system"

With this in place, when a driver loads, flow_indr_dev_register() calls existing_qdiscs_register(), which walks flow_indir_dev_list and invokes the newly registered driver callback for every tracked qdisc, effectively replaying the earlier flow_indr_dev_setup_offload() calls (see the sketch below).
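
A simplified sketch of the mechanism added by the commit (net/core/flow_offload.c; field and function names taken from the patch, surrounding code trimmed):

/* Each flow_indr_dev_setup_offload() call is now recorded in
 * flow_indir_dev_list via a struct flow_indir_dev_info that saves the
 * dev, sch, type, binder_type and cleanup arguments.
 */
static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb,
                                     void *cb_priv)
{
        struct flow_block_offload bo;
        struct flow_indir_dev_info *cur;

        list_for_each_entry(cur, &flow_indir_dev_list, list) {
                memset(&bo, 0, sizeof(bo));
                bo.command = cur->command;
                bo.binder_type = cur->binder_type;
                INIT_LIST_HEAD(&bo.cb_list);
                /* Replay the bind for a qdisc that predates the driver. */
                cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data,
                   cur->cleanup);
                list_splice(&bo.cb_list, cur->cb_list);
        }
}

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
        ...
        list_add(&indr_dev->list, &flow_block_indr_dev_list);
        existing_qdiscs_register(cb, cb_priv);    /* new in this commit */
        mutex_unlock(&flow_indr_block_lock);

        return 0;
}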
