iqiyi / dpvs

DPVS is a high performance Layer-4 load balancer based on DPDK.

Error when running ipvsadm --add-laddr #767

Closed. leon96 closed this issue 1 year ago.

leon96 commented 2 years ago

OS: CentOS 7.2; Kernel: 3.10.0-327.el7.x86_64; DPVS version: dpvs-1.9.0; DPDK version: dpdk-stable-20.11.1; Mode: FNAT; NIC: Intel X710

Setup script:

#!/bin/sh -

# add VIP to WAN interface
./dpip addr add 10.192.6.61/32 dev dpdk0

# route for WAN/LAN access
# add routes for other network or default route if needed.
./dpip route add 10.192.6.0/23 dev dpdk0
./dpip route add 10.192.10.0/24 dev dpdk1

# add service <VIP:vport> to forwarding, scheduling mode is RR.
# use ipvsadm --help for more info.
./ipvsadm -A -t 10.192.6.61:80 -s rr

# add one RS for the service, forwarding mode is FNAT (-b)
./ipvsadm -a -t 10.192.6.61:80 -r 10.192.10.62 -b

# add at least one Local-IP (LIP) for FNAT on LAN interface
./ipvsadm --add-laddr -z 10.192.10.61:80 -t 10.192.6.61:80 -F dpdk1

The last step (--add-laddr) fails with the errors below. What have I misconfigured?

FLOW: rte_flow_validate on dpdk1 failed -- 13, Invalid IPv4 item
FLOW: netif_sapool_flow_add: adding tcp sapool flow failed: dpdk1 ip 10.192.10.61 port 0(0x0000) mask 0x0000, queue 0 lcore  1
FLOW: rte_flow_validate on dpdk1 failed -- 13, Invalid IPv4 item
FLOW: netif_sapool_flow_add: adding udp sapool flow failed: dpdk1 ip 10.192.10.61 port 0(0x0000) mask 0x0000, queue 0 lcore  1
IFA: [01] ifa_entry_add: add ifaddr 10.192.10.61 failed -- no resource

Here is the dpvs.conf configuration:

! global config
global_defs {
    log_level   DEBUG
    ! log_file    /var/log/dpvs.log
    ! log_async_mode    on
    ! pdump       off
}

! netif config
netif_defs {
    <init> pktpool_size     1048575
    <init> pktpool_cache    256
    <init> fdir_mode        perfect

    <init> device dpdk0 {
        rx {
            queue_number        8
            descriptor_number   1024
            rss                 all
        }
        tx {
            queue_number        8
            descriptor_number   1024
        }
        ! mtu                   1500
        ! promisc_mode
        kni_name                dpdk0.kni
    }

    <init> device dpdk1 {
        rx {
            queue_number        8
            descriptor_number   1024
            rss                 all
        }
        tx {
            queue_number        8
            descriptor_number   1024
        }
        ! mtu                   1500
        ! promisc_mode
        kni_name                dpdk1.kni
    }

    ! <init> bonding bond0 {
    !    mode        0
    !    slave       dpdk0
    !    slave       dpdk1
    !    primary     dpdk0
    !    kni_name    bond0.kni
    !    options     dedicated_queues=off  # for mode 4 only
    !}
}

! worker config (lcores)
worker_defs {
    <init> worker cpu0 {
        type    master
        cpu_id  0
    }

    <init> worker cpu1 {
        type    slave
        cpu_id  1
        port    dpdk0 {
            rx_queue_ids     0
            tx_queue_ids     0
            ! isol_rx_cpu_ids  9
            ! isol_rxq_ring_sz 1048576
        }
        port    dpdk1 {
            rx_queue_ids     0
            tx_queue_ids     0
            ! isol_rx_cpu_ids  9
            ! isol_rxq_ring_sz 1048576
        }
    }

}

! timer config
timer_defs {
    # cpu job loops to schedule dpdk timer management
    schedule_interval    500
}

! dpvs neighbor config
neigh_defs {
    <init> unres_queue_length  128
    timeout                    60
}

! dpvs ipv4 config
ipv4_defs {
    forwarding                 off
    <init> default_ttl         64
    fragment {
        <init> bucket_number   4096
        <init> bucket_entries  16
        <init> max_entries     4096
        <init> ttl             1
    }
}

! dpvs ipv6 config
ipv6_defs {
    disable                     off
    forwarding                  off
    route6 {
        <init> method           hlist
        recycle_time            10
    }
}

! control plane config
ctrl_defs {
    lcore_msg {
        <init> ring_size                4096
        sync_msg_timeout_us             20000
        priority_level                  low
    }
    ipc_msg {
        <init> unix_domain /var/run/dpvs_ctrl
    }
}

! ipvs config
ipvs_defs {
    conn {
        <init> conn_pool_size       2097152
        <init> conn_pool_cache      256
        conn_init_timeout           3
        ! expire_quiescent_template
        ! fast_xmit_close
        ! <init> redirect           off
    }

    udp {
        ! defence_udp_drop
        uoa_mode        opp
        uoa_max_trail   3
        timeout {
            normal      300
            last        3
        }
    }

    tcp {
        ! defence_tcp_drop
        timeout {
            none        2
            established 90
            syn_sent    3
            syn_recv    30
            fin_wait    7
            time_wait   7
            close       3
            close_wait  7
            last_ack    7
            listen      120
            synack      30
            last        2
        }
        synproxy {
            synack_options {
                mss             1452
                ttl             63
                sack
                ! wscale
                ! timestamp
            }
            ! defer_rs_syn
            rs_syn_max_retry    3
            ack_storm_thresh    10
            max_ack_saved       3
            conn_reuse_state {
                close
                time_wait
                ! fin_wait
                ! close_wait
                ! last_ack
            }
        }
    }
}

! sa_pool config
sa_pool {
    pool_hash_size  16
    flow_enable     on
}
ywc689 commented 2 years ago

Your NIC does not support the rte_flow types DPVS requires. Try a different NIC, or enable conn/redirect in dpvs.conf and turn flow_enable off.
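
For reference, a minimal sketch of the two changes this refers to, applied to the dpvs.conf posted above (redirect is the commented-out switch under ipvs_defs/conn; flow_enable sits under sa_pool). Only the affected stanzas are shown:

ipvs_defs {
    conn {
        <init> conn_pool_size       2097152
        <init> conn_pool_cache      256
        conn_init_timeout           3
        <init> redirect             on     ! uncomment and switch to on
    }
}

sa_pool {
    pool_hash_size  16
    flow_enable     off                    ! disable the sapool rte_flow rules
}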

leon96 commented 2 years ago

After enabling redirect and disabling flow_enable, the errors are gone. But I checked the FAQ: the X710 uses the i40e driver, and i40e does support the features listed there (quoted below). Does the NIC need to support anything beyond these?

The PMD of your NIC should support the following rte_flow items:
ipv4
ipv6
tcp
udp

and at least the following rte_flow actions:
queue
drop
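
One rough way to probe what a port's PMD will actually accept is DPDK's testpmd flow syntax. The two lines below are only an illustrative sketch (port 0, the LIP from this setup, a plain dst-IP-to-queue rule), not the exact masked pattern that netif_sapool_flow_add programs:

testpmd> flow validate 0 ingress pattern eth / ipv4 dst is 10.192.10.61 / tcp / end actions queue index 0 / end
testpmd> flow validate 0 ingress pattern eth / ipv4 dst is 10.192.10.61 / udp / end actions queue index 0 / end

If the driver rejects these as well, the "Invalid IPv4 item" failure is coming from the PMD itself rather than from anything in dpvs.conf.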

PS: Roughly how much performance difference is there between flow_enable on and off?

difficultwork commented 2 years ago

I also enabled redirect and disabled flow_enable, but after configuring the LIP, dpip addr show no longer reports the sa_used 0 sa_free 1032176 sa_miss 0 counters, and the service on the RS cannot be reached from the client. Could that be the cause?
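
A quick sanity check, sketched from the commands already used in this thread (the VIP below comes from the script at the top; --get-laddr is assumed to be available in the dpvs-patched ipvsadm):

./dpip addr show                           # a working LIP entry carries sa_used / sa_free / sa_miss counters
./ipvsadm --get-laddr -t 10.192.6.61:80    # confirm the LIP is actually bound to the service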

difficultwork commented 2 years ago

@difficultwork Marking this for follow-up.

xiexia4416 commented 2 years ago

Which NICs support the flow types that DPVS FNAT requires?

gerry-vip commented 1 year ago

@leon96

Bro, was this problem ever resolved? I'm running into the same issue now, also with i40e.