MPD5 + DUMMYNET + PF HIGH CPU USAGE

Marcos Vinícius Buzo marcosvbuzo at gmail.com
Wed Sep 8 18:03:22 UTC 2010


Hi all.

I just started working at a small WISP, taking over from a friend who,
unfortunately, is no longer with us :(
We're running FreeBSD 8.1 (64-bit) with mpd5 for PPPoE, IPFW+dummynet for
traffic shaping, and PF for NAT and firewalling.
Our hardware is a Dell PowerEdge R210: Intel Xeon X3430, 4GB of 1066MHz RAM,
and a two-port Broadcom NetXtreme II BCM5716.
Our WAN link is 60Mbps down/up.

When we have 450+ PPPoE connections and link usage is around 30Mbps, things
get strange: CPU usage goes above 80% (I'm watching this with Cacti+SNMP),
ping latency gets high, sometimes reaching 300ms+, and sometimes mpd5 stops
serving connections altogether.

I set up another server to share the load, which solves the problem for now.
On that server I disabled the flowtable (sysctl net.inet.flowtable.enable=0),
because on the old server, when I run top -ISH, I see the following:

 22 root      44    -     0K    16K CPU2    2 236:19 100.00% flowcleaner

Is this a bug?

Are the following customizations right?
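
To keep that workaround across reboots on the new server, the same knob can
simply go into /etc/sysctl.conf (nothing beyond the standard boot-time sysctl
mechanism; the knob itself is the one shown above):

# /etc/sysctl.conf: keep the flowtable disabled after reboot
net.inet.flowtable.enable=0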

Here are our custom kernel options:

#NETGRAPH
options HZ=2000
options NETGRAPH
options NETGRAPH_PPPOE
options NETGRAPH_SOCKET

options NETGRAPH_CISCO
options NETGRAPH_ECHO
options NETGRAPH_FRAME_RELAY
options NETGRAPH_HOLE
#options NETGRAPH_KSOCKET
options NETGRAPH_LMI
options NETGRAPH_RFC1490
options NETGRAPH_TTY

options NETGRAPH_ASYNC
options NETGRAPH_BPF
options NETGRAPH_ETHER
options NETGRAPH_IFACE
options NETGRAPH_KSOCKET
options NETGRAPH_L2TP
options NETGRAPH_MPPC_ENCRYPTION
options NETGRAPH_PPP
options NETGRAPH_PPTPGRE
options NETGRAPH_TEE
options NETGRAPH_UI
options NETGRAPH_VJC

# bridge support, device polling support, other security features
#options BRIDGE
options DEVICE_POLLING
options IPSTEALTH

# support for ALTQ traffic shaping
options ALTQ
options ALTQ_CBQ
options ALTQ_RED
options ALTQ_RIO
options ALTQ_HFSC
options ALTQ_PRIQ
# options ALTQ_NOPCC


# support for pf firewall
#device mem
device pf
device pflog
device pfsync


# IPFW
options IPFIREWALL
options IPFIREWALL_VERBOSE
options IPFIREWALL_FORWARD
options IPFIREWALL_DEFAULT_TO_ACCEPT
options DUMMYNET
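
For reference, here is how I check at runtime which netgraph node types are
actually in use (a quick diagnostic only; NETGRAPH_* options whose node types
never show up here can probably be left out of the kernel or loaded as
modules on demand):

# show the netgraph nodes mpd5 has created and the node types loaded
ngctl list
ngctl types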


/boot/loader.conf:

kern.maxusers=1024
net.graph.maxdata=8192
net.graph.maxalloc=16384
# this tunable helps support more than 800 ng devices when mpd starts
kern.ipc.maxpipekva=62000000
net.inet.tcp.tcbhashsize=4096
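
To see whether the net.graph limits above are actually being hit under load,
I look at the netgraph UMA zones (the exact zone names may differ slightly
between releases):

# a non-zero failure count for the NetGraph zones would mean
# maxdata/maxalloc are still too low
vmstat -z | grep -i netgraph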

/etc/sysctl.conf:

net.inet.ip.portrange.last=65535
net.inet.ip.portrange.first=1024
#kern.maxfilesperproc=32768
net.inet.tcp.blackhole=2
net.inet.udp.blackhole=1
#compat.linux.osrelease=2.6.16
net.inet.ip.fastforwarding=1
net.inet.tcp.rfc1323=1
net.graph.maxdgram=128000
net.graph.recvspace=128000
kern.maxvnodes=100000000
kern.ipc.somaxconn=65535
kern.ipc.nmbclusters=262140
net.inet.tcp.maxtcptw=280960
net.inet.tcp.nolocaltimewait=1
net.inet.tcp.msl=1460
net.inet.icmp.icmplim=0
kern.ipc.maxsockbuf=16777216
kern.ipc.maxsockets=232000

#net.inet.tcp.recvspace=16772216
#net.inet.tcp.sendspace=16772216
net.inet.tcp.sendbuf_max=16777216
net.inet.tcp.recvbuf_max=16777216
kern.ipc.shmmax=2147483648
net.inet.tcp.fast_finwait2_recycle=1
net.inet.tcp.ecn.enable=1

kern.maxvnodes=100000000
kern.ipc.somaxconn=65535
kern.ipc.nmbclusters=262140
net.inet.tcp.maxtcptw=80960
net.inet.tcp.nolocaltimewait=1
net.inet.tcp.msl=5000
net.inet.icmp.icmplim=0
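
For what it's worth, this is how I check whether the mbuf cluster limit above
(kern.ipc.nmbclusters) is anywhere near being exhausted; it's standard
netstat output, nothing specific to this box:

# shows current/cache/total/max mbuf clusters plus any denied requests;
# "denied" staying at 0 means nmbclusters is high enough
netstat -m | grep -E 'mbuf|denied'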


Thanks in advance.

