! This is dpvs default configuration file.
!
! The attribute "<init>" denotes a configuration item that is set at the initialization
! stage. Items of this type are configured once (one-shot) and are not reloadable. If an
! invalid value is configured in the file, dpvs uses its default value instead.
!
! Note that dpvs configuration file supports the following comment type:
! * line comment: using '#' or '!'
! * inline range comment: using '<' and '>', put comment in between
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! global config
global_defs {
log_level DEBUG
! log_file /var/log/dpvs.log
! log_async_mode on
}
! netif config
netif_defs {
<init> pktpool_size 16384
<init> pktpool_cache 256
<init> device dpdk0 {
rx {
queue_number 1
descriptor_number 1024
rss all
}
tx {
queue_number 1
descriptor_number 1024
}
fdir {
mode signature
pballoc 64k
status matched
}
! promisc_mode
kni_name dpdk0.kni
}
<init> device dpdk1 {
rx {
queue_number 1
descriptor_number 1024
rss all
}
tx {
queue_number 1
descriptor_number 1024
}
fdir {
mode signature
pballoc 64k
status matched
}
! promisc_mode
kni_name dpdk1.kni
}
}
! worker config (lcores)
worker_defs {
<init> worker cpu0 {
type master
cpu_id 0
}
<init> worker cpu1 {
type slave
cpu_id 1
port dpdk0 {
rx_queue_ids 0
tx_queue_ids 0
}
port dpdk1 {
rx_queue_ids 0
tx_queue_ids 0
}
}
}
! timer config
timer_defs {
# cpu job loops to schedule dpdk timer management
schedule_interval 500
}
! dpvs neighbor config
neigh_defs {
<init> unres_queue_length 128
timeout 60
}
! dpvs ipv4 config
ipv4_defs {
forwarding off
<init> default_ttl 64
fragment {
<init> bucket_number 4096
<init> bucket_entries 16
<init> max_entries 4096
<init> ttl 1
}
}
! dpvs ipv6 config
ipv6_defs {
disable off
forwarding off
route6 {
<init> method hlist
recycle_time 10
}
}
! control plane config
ctrl_defs {
lcore_msg {
<init> ring_size 4096
sync_msg_timeout_us 20000
priority_level low
}
ipc_msg {
<init> unix_domain /var/run/dpvs_ctrl
}
}
! ipvs config
ipvs_defs {
conn {
        ! NOTE(review): was 1097152, which is not a power of two and looks like a
        ! typo of the dpvs default 2097152 (2^21) — confirm against conf.sample.
        <init> conn_pool_size 2097152
<init> conn_pool_cache 256
conn_init_timeout 3
! expire_quiescent_template
! fast_xmit_close
<init> redirect on
}
udp {
! defence_udp_drop
uoa_mode opp
uoa_max_trail 3
timeout {
normal 300
last 3
}
}
tcp {
! defence_tcp_drop
timeout {
none 2
established 90
syn_sent 3
syn_recv 30
fin_wait 7
time_wait 7
close 3
close_wait 7
last_ack 7
listen 120
synack 30
last 2
}
synproxy {
synack_options {
mss 1452
ttl 63
sack
! wscale
! timestamp
}
! defer_rs_syn
rs_syn_max_retry 3
ack_storm_thresh 10
max_ack_saved 3
conn_reuse_state {
close
time_wait
! fin_wait
! close_wait
! last_ack
}
}
}
}
! sa_pool config
sa_pool {
pool_hash_size 16
}
# --- Full-NAT setup (shell commands, run after dpvs is started) ---
# Full-NAT (FNAT) quick setup for dpvs:
# a single VIP on the WAN interface (dpdk0), two real servers reached
# through the LAN interface (dpdk1), scheduled round-robin.
wan_ip=192.168.253.135  # NOTE(review): defined but never used below; vip carries the same address
wan_net="192.168.253.0/24"
lan_net="10.140.16.0/20"
vip=192.168.253.135
# add VIP to WAN interface
./dpip addr add ${vip}/32 dev dpdk0
# route for WAN/LAN access
# add routes for other network or default route if needed.
./dpip route add ${wan_net} dev dpdk0
./dpip route add ${lan_net} dev dpdk1
# add service <VIP:vport> to forwarding, scheduling mode is RR (round-robin).
# use ipvsadm --help for more info.
./ipvsadm -A -t ${vip}:10080 -s rr
# add two RS (real servers) for the service, forwarding mode is FNAT (-b)
./ipvsadm -a -t ${vip}:10080 -r 10.140.24.1 -b
./ipvsadm -a -t ${vip}:10080 -r 10.140.24.4 -b
# add at least one Local-IP (LIP) for FNAT on LAN interface
# NOTE(review): only one LIP is configured for two RSes; verify whether
# the deployment needs additional LIPs (commands left unchanged).
./ipvsadm --add-laddr -z 10.140.24.5 -t ${vip}:10080 -F dpdk1
环境
操作系统:CentOS 7;机器:VMware 虚拟机;虚拟网卡:e1000
配置
Fullnat配置
问题
10.140.24.1 和 10.140.24.4 上各起了一个 iperf server
通过 vip:port 用 iperf 进行压测时,当只添加一个 rs 时转发工作正常;如果添加两个 rs,则能够建立连接,但是后续的收发包无法继续进行。ipvsadm 可以看到转发情况如下: