Closed duanrui123456 closed 5 years ago
Thanks for the inputs, I am able to reproduce the error in my setup with
a. 10G dpdk 2 ports b. with a packet rate on the second interface around 1 packet per sec.
note: when sending at a high packet rate the problem does not surface. In the process of investigation, I found
TmThreadsSlotProcessPkt
was not getting invoked for the last 4 packets. You can use the patch
diff --git a/suricata-3.0/src/source-dpdkintel.c b/suricata-3.0/src/source-dpdkintel.c
old mode 100644
new mode 100755
index 730f995..6537edd
--- a/suricata-3.0/src/source-dpdkintel.c
+++ b/suricata-3.0/src/source-dpdkintel.c
@@ -70,8 +70,6 @@ static uint32_t portConfigured = 0;
dpdkFrameStats_t dpdkStats [16];
DpdkCoreConfig_t coreConfig;
-struct rte_mbuf *rbQueue[8][1024];
-
extern struct rte_ring *srb [16];
extern file_config_t file_config;
extern uint64_t coreSet;
@@ -226,7 +224,7 @@ static inline Packet *DpdkIntelProcessPacket(DpdkIntelThreadVars_t *ptv, struct
return NULL;
}
- SCLogDebug(" Suricata packet %p for bte %d", p, caplen);
+ SCLogDebug(" Suricata packet %p for byte %d", p, caplen);
PACKET_RECYCLE(p);
PKT_SET_SRC(p, PKT_SRC_WIRE);
@@ -291,34 +289,32 @@ TmEcode ReceiveDpdkLoop(ThreadVars *tv, void *data, void *slot)
ptv->slot = s->slot_next;
- SCLogDebug("RX-TX Intf Id in %d out %d\n", ptv->inIfaceId, ptv->outIfaceId);
-
- if (rte_eth_dev_start(ptv->inIfaceId) < 0) {
- SCLogError(SC_ERR_DPDKINTEL_CONFIG_FAILED, " failed RX-TX start on port %d\n", ptv->inIfaceId);
- SCReturnInt(TM_ECODE_FAILED);
- }
+ SCLogNotice("RX-TX Intf Id in %d out %d\n", ptv->inIfaceId, ptv->outIfaceId);
while(1) {
if (unlikely(suricata_ctl_flags & (SURICATA_STOP | SURICATA_KILL))) {
+ SCLogDebug(" Received Signal!");
SCReturnInt(TM_ECODE_OK);
}
+ struct rte_mbuf *rbQueue[1024] = {0};
/* invoke rte_api for getting packets*/
packet_q_len = rte_ring_dequeue_burst(srb[ptv->ringBuffId],
- (void *)&rbQueue[ptv->ringBuffId],
+ (void *)&rbQueue,
/*64*/128, &avail);
- SCLogDebug("rte dequeue ringId: %d count: %d", ptv->ringBuffId, packet_q_len);
+ SCLogDebug("dequeue ringId: %d count: %d remaining: %d", ptv->ringBuffId, packet_q_len,avail);
+
/* ToDo: update counters - phase 2 */
if (likely(packet_q_len)) {
/*printf("rte dequeue count: %d", packet_q_len);*/
for (j = 0; ((j < PREFETCH_OFFSET) && (j < packet_q_len)); j++) {
- rte_prefetch0(rte_pktmbuf_mtod(rbQueue[ptv->ringBuffId][j], void *));
+ rte_prefetch0(rte_pktmbuf_mtod(rbQueue[j], void *));
}
for (j = 0; j < (packet_q_len - PREFETCH_OFFSET); j++) {
- struct rte_mbuf *tmp = rbQueue[ptv->ringBuffId][j];
+ struct rte_mbuf *tmp = rbQueue[j];
/* Prefetch others and process prev prefetched packets */
- rte_prefetch0(rte_pktmbuf_mtod(rbQueue[ptv->ringBuffId][j + PREFETCH_OFFSET], void *));
+ rte_prefetch0(rte_pktmbuf_mtod(rbQueue[j + PREFETCH_OFFSET], void *));
SCLogDebug(" User data %"PRIx64, tmp->udata64);
@@ -331,8 +327,8 @@ TmEcode ReceiveDpdkLoop(ThreadVars *tv, void *data, void *slot)
continue;
}
- SCLogDebug("Acquired Suricata Pkt %p", p);
- SCLogDebug(" mbuff %p len %u offset %u ", tmp, tmp->pkt_len, tmp->data_off);
+ SCLogNotice("Acquired Suricata Pkt %p, mbuff %p len %u offset %u ",
+ p, tmp, tmp->pkt_len, tmp->data_off);
SET_PKT_LEN(p, tmp->pkt_len);
@@ -358,7 +354,7 @@ TmEcode ReceiveDpdkLoop(ThreadVars *tv, void *data, void *slot)
}
for (; j < packet_q_len; j++) {
- struct rte_mbuf *tmp = rbQueue[ptv->ringBuffId][j];
+ struct rte_mbuf *tmp = rbQueue[j];
SCLogDebug(" User data %"PRIx64, tmp->udata64);
@@ -388,6 +384,14 @@ TmEcode ReceiveDpdkLoop(ThreadVars *tv, void *data, void *slot)
continue;
#endif
SCLogDebug("Invoking thread slot process!!");
+ if (unlikely(TmThreadsSlotProcessPkt(ptv->tv, ptv->slot, p) != TM_ECODE_OK)) {
+ TmqhOutputPacketpool(ptv->tv, p);
+ /* update counters */
+ dpdkStats[ptv->inIfaceId].sc_fail++;
+ rte_pktmbuf_free(tmp);
+ continue;
+ }
+
}
} /* dequed frames from ring buffer */
}
@@ -464,7 +468,7 @@ TmEcode ReceiveDpdkThreadInit(ThreadVars *tv, void *initdata, void **data)
ditv->inIfaceId = intfId;
ditv->outIfaceId = atoi(dpdkconf->outIface);
- SCLogDebug(" ***** DPDK Ports In %d & Out %d", ditv->inIfaceId, ditv->outIfaceId);
+ SCLogNotice(" ***** DPDK Ports In %d & Out %d", ditv->inIfaceId, ditv->outIfaceId);
ditv->ringBuffId = dpdkconf->ringBufferId;
ditv->threads = dpdkconf->threads;
@@ -714,44 +718,17 @@ int32_t ReceiveDpdkPkts_IPS_10_100(__attribute__((unused)) void *arg)
continue;
} /* end of ring full */
- for (j = 0; ((j < PREFETCH_OFFSET) && (j < nb_rx)); j++) {
- rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j], void *));
- }
-
- for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
- struct rte_mbuf *m = pkts_burst[j];
- rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j + PREFETCH_OFFSET], void *));
-
- SCLogDebug("add frame to RB %u len %d for %p",
- RingId, m->pkt_len, m);
-
- enq = rte_ring_enqueue_burst(srb [RingId], (void *)&m, 1, &freespace);
- if (unlikely(enq != 1)) {
- dpdkStats [inPort].enq_err++;
- SCLogDebug(
- " RingEnq %d core :%u full %d",
- enq, rte_lcore_id(),
- rte_ring_full(srb [RingId]));
- rte_pktmbuf_free(m);
- continue;
- }
- }
+ SCLogDebug(" packets to enqueue %d", nb_rx);
- for (; j < nb_rx; j++) {
- struct rte_mbuf *m = pkts_burst[j];
- SCLogDebug("add frame to RB %u len %d for %p",
- RingId, m->pkt_len, m);
-
- enq = rte_ring_enqueue_burst(srb [RingId], (void *)&m, 1, &freespace);
- if (unlikely(enq != 1)) {
- dpdkStats [inPort].enq_err++;
- SCLogDebug(
- " RingEnq %d core :%u full %d",
- enq, rte_lcore_id(),
- rte_ring_full(srb [RingId]));
- rte_pktmbuf_free(m);
- continue;
- }
+ enq = rte_ring_enqueue_burst(srb [RingId], (void *)pkts_burst, nb_rx, &freespace);
+ if (unlikely(enq != nb_rx)) {
+ dpdkStats [inPort].enq_err += (nb_rx - enq);
+ SCLogDebug(
+ " RingEnq %d core :%u full %d, fail to enq %d",
+ enq, rte_lcore_id(), rte_ring_full(srb [RingId]), (nb_rx - enq));
+ for (j = enq; j < nb_rx; j++)
+ rte_pktmbuf_free(pkts_burst[j]);
+ continue;
}
/* End of enqueue */
} /* end of 1st intf*/
@@ -784,46 +761,19 @@ int32_t ReceiveDpdkPkts_IPS_10_100(__attribute__((unused)) void *arg)
continue;
} /* end of ring full */
- for (j = 0; ((j < PREFETCH_OFFSET) && (j < nb_rx)); j++) {
- rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j], void *));
- }
+ SCLogDebug(" packets to enqueue %d", nb_rx);
- for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
- struct rte_mbuf *m = pkts_burst[j];
- rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j + PREFETCH_OFFSET], void *));
-
- SCLogDebug("add frame to RB %u len %d for %p",
- RingId, m->pkt_len, m);
-
- enq = rte_ring_enqueue_burst(srb [RingId], (void *)&m, 1, &freespace);
- if (unlikely(enq != 1)) {
- dpdkStats [outPort].enq_err++;
- SCLogDebug(
- " RingEnq %d core :%u full %d",
- enq, rte_lcore_id(),
- rte_ring_full(srb [RingId]));
- rte_pktmbuf_free(m);
- continue;
- }
- }
-
- for (; j < nb_rx; j++) {
- struct rte_mbuf *m = pkts_burst[j];
-
- SCLogDebug("add frame to RB %u len %d for %p",
- RingId, m->pkt_len, m);
-
- enq = rte_ring_enqueue_burst(srb [RingId], (void *)&m, 1, &freespace);
- if (unlikely(enq != 1)) {
- dpdkStats [outPort].enq_err++;
- SCLogDebug(
- " RingEnq %d core :%u full %d",
- enq, rte_lcore_id(),
- rte_ring_full(srb [RingId]));
- rte_pktmbuf_free(m);
- continue;
- }
+ enq = rte_ring_enqueue_burst(srb [RingId], (void *)pkts_burst, nb_rx, &freespace);
+ if (unlikely(enq != nb_rx)) {
+ dpdkStats [outPort].enq_err += (nb_rx - enq);
+ SCLogDebug(
+ " RingEnq %d core :%u full %d, fail to enq %d",
+ enq, rte_lcore_id(), rte_ring_full(srb [RingId]), (nb_rx - enq));
+ for (j = enq; j < nb_rx; j++)
+ rte_pktmbuf_free(pkts_burst[j]);
+ continue;
}
+
/* End of enqueue */
} /* end of 2nd intf */
}
@@ -893,58 +843,28 @@ int32_t ReceiveDpdkPkts_IPS_1000(__attribute__((unused)) void *arg)
for (; ret < nb_rx; ret++)
rte_pktmbuf_free(pkts_burst[ret]);
}
- continue;
} /* end of totalRules */
+ else {
+ RingId = inPort; /* Ring Index same as port Index from DPDK */
- RingId = inPort; /* Ring Index same as port Index from DPDK */
-
- if (unlikely(1 == rte_ring_full(srb [RingId]))) {
- dpdkStats [inPort].ring_full++;
- for (ret = 0; ret < nb_rx; ret++)
- rte_pktmbuf_free(pkts_burst[ret]);
- continue;
- } /* end of ring full */
-
- for (j = 0; ((j < nb_rx) && (j < nb_rx)); j++) {
- rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j], void *));
- }
-
- for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
- struct rte_mbuf *m = pkts_burst[j];
- rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j + PREFETCH_OFFSET], void *));
-
- SCLogDebug("add frame to RB %u len %d for %p",
- RingId, m->pkt_len, m);
-
- enq = rte_ring_enqueue_burst(srb [RingId], (void *)&m, 1, &freespace);
- if (unlikely(enq != 1)) {
- dpdkStats [inPort].enq_err++;
- SCLogDebug(
- " RingEnq %d core :%u full %d",
- enq, rte_lcore_id(),
- rte_ring_full(srb [RingId]));
- rte_pktmbuf_free(m);
- continue;
- }
- }
-
- for (; j < nb_rx; j++) {
- struct rte_mbuf *m = pkts_burst[j];
- SCLogDebug("add frame to RB %u len %d for %p",
- RingId, m->pkt_len, m);
-
- enq = rte_ring_enqueue_burst(srb [RingId], (void *)&m, 1, &freespace);
- if (unlikely(enq != 1)) {
- dpdkStats [inPort].enq_err++;
- SCLogDebug(
- " RingEnq %d core :%u full %d",
- enq, rte_lcore_id(),
- rte_ring_full(srb [RingId]));
- rte_pktmbuf_free(m);
- continue;
+ if (unlikely(1 == rte_ring_full(srb [RingId]))) {
+ dpdkStats [inPort].ring_full++;
+ for (ret = 0; ret < nb_rx; ret++)
+ rte_pktmbuf_free(pkts_burst[ret]);
+ } /* end of ring full */
+ else {
+ enq = rte_ring_enqueue_burst(srb [RingId], (void *)&pkts_burst, nb_rx, &freespace);
+ if (unlikely(enq != nb_rx)) {
+ dpdkStats [inPort].enq_err += (nb_rx - enq);
+ SCLogDebug(
+ " RingEnq %d core :%u full %d fail to enq %d",
+ enq, rte_lcore_id(),
+ rte_ring_full(srb [RingId]), (nb_rx - enq));
+ for (j = enq; j < nb_rx; j++)
+ rte_pktmbuf_free(pkts_burst[j]);
+ } /* End of enqueue */
}
}
- /* End of enqueue */
} /* end of 1st intf*/
nb_rx = rte_eth_rx_burst(outPort, 0, pkts_burst, MAX_PKT_BURST);
@@ -963,59 +883,28 @@ int32_t ReceiveDpdkPkts_IPS_1000(__attribute__((unused)) void *arg)
for (; ret < nb_rx; ret++)
rte_pktmbuf_free(pkts_burst[ret]);
}
- continue;
} /* end of totalRules */
+ else {
+ RingId = outPort; /* Ring Index same as port Index from DPDK */
- RingId = outPort; /* Ring Index same as port Index from DPDK */
-
- if (unlikely(1 == rte_ring_full(srb [RingId]))) {
- dpdkStats [outPort].ring_full++;
- for (ret = 0; ret < nb_rx; ret++)
- rte_pktmbuf_free(pkts_burst[ret]);
- continue;
- } /* end of ring full */
-
- for (j = 0; ((j < PREFETCH_OFFSET) && (j < nb_rx)); j++) {
- rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j], void *));
- }
-
- for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
- struct rte_mbuf *m = pkts_burst[j];
- rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j + PREFETCH_OFFSET], void *));
-
- SCLogDebug("add frame to RB %u len %d for %p",
- RingId, m->pkt_len, m);
-
- enq = rte_ring_enqueue_burst(srb [RingId], (void *)&m, 1, &freespace);
- if (unlikely(enq != 1)) {
- dpdkStats [outPort].enq_err++;
- SCLogDebug(
- " RingEnq %d core :%u full %d",
- enq, rte_lcore_id(),
- rte_ring_full(srb [RingId]));
- rte_pktmbuf_free(m);
- continue;
- }
- }
-
- for (; j < nb_rx; j++) {
- struct rte_mbuf *m = pkts_burst[j];
-
- SCLogDebug("add frame to RB %u len %d for %p",
- RingId, m->pkt_len, m);
-
- enq = rte_ring_enqueue_burst(srb [RingId], (void *)&m, 1, &freespace);
- if (unlikely(enq != 1)) {
- dpdkStats [outPort].enq_err++;
- SCLogDebug(
- " RingEnq %d core :%u full %d",
- enq, rte_lcore_id(),
- rte_ring_full(srb [RingId]));
- rte_pktmbuf_free(m);
- continue;
+ if (unlikely(1 == rte_ring_full(srb [RingId]))) {
+ dpdkStats [outPort].ring_full++;
+ for (ret = 0; ret < nb_rx; ret++)
+ rte_pktmbuf_free(pkts_burst[ret]);
+ } /* end of ring full */
+ else {
+ enq = rte_ring_enqueue_burst(srb [RingId], (void *)&pkts_burst, nb_rx, &freespace);
+ if (unlikely(enq != nb_rx)) {
+ dpdkStats [outPort].enq_err += (nb_rx - enq);
+ SCLogDebug(
+ " RingEnq %d core :%u full %d fail to enq %d",
+ enq, rte_lcore_id(),
+ rte_ring_full(srb [RingId]), (nb_rx - enq));
+ for (j = enq; j < nb_rx; j++)
+ rte_pktmbuf_free(pkts_burst[j]);
+ }
}
- }
- /* End of enqueue */
+ }/* End of enqueue */
}
} /* end of while */
@@ -1079,53 +968,26 @@ int32_t ReceiveDpdkPkts_IPS_10000(__attribute__((unused)) void *arg)
} /* end of totalRules */
if (unlikely(1 == rte_ring_full(srb [ringId]))) {
+ SCLogNotice("ring %u Frames: %u is full", inPort, nb_rx);
dpdkStats [inPort].ring_full++;
for (ret = 0; ret < nb_rx; ret++)
rte_pktmbuf_free(pkts_burst[ret]);
continue;
} /* end of ring full */
- for (j = 0; ((j < PREFETCH_OFFSET) && (j < nb_rx)); j++) {
- rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j], void *));
- }
+ SCLogDebug(" Ring %d packets to enqueue %d", ringId, nb_rx);
- for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
- struct rte_mbuf *m = pkts_burst[j];
- rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[j + PREFETCH_OFFSET], void *));
-
- SCLogDebug("add frame to RB %u len %d for %p",
- ringId, m->pkt_len, m);
-
- enq = rte_ring_enqueue_burst(srb [ringId], (void *)&m, 1, &freespace);
- if (unlikely(enq != 1)) {
- dpdkStats [inPort].enq_err++;
- SCLogDebug(
- " RingEnq %d core :%u full %d",
- enq, rte_lcore_id(),
- rte_ring_full(srb [ringId]));
- rte_pktmbuf_free(m);
- continue;
- }
- }
-
- for (; j < PREFETCH_OFFSET; j++) {
- struct rte_mbuf *m = pkts_burst[j];
-
- SCLogDebug("add frame to RB %u len %d for %p",
- ringId, m->pkt_len, m);
-
- enq = rte_ring_enqueue_burst(srb [ringId], (void *)&m, 1, &freespace);
- if (unlikely(enq != 1)) {
- dpdkStats [inPort].enq_err++;
- SCLogDebug(
- " RingEnq %d core :%u full %d",
- enq, rte_lcore_id(),
- rte_ring_full(srb [ringId]));
- rte_pktmbuf_free(m);
- continue;
- }
- }
- /* End of enqueue */
+ enq = rte_ring_enqueue_burst(srb [ringId], (void *)pkts_burst, nb_rx, &freespace);
+ if (unlikely(enq != nb_rx)) {
+ dpdkStats [inPort].enq_err += (nb_rx - enq);
+ SCLogDebug(
+ " RingEnq %d core :%u full %d, fails to enq %d",
+ enq, rte_lcore_id(), rte_ring_full(srb [ringId]), (nb_rx - enq));
+
+ for (j = enq; j < nb_rx; j++)
+ rte_pktmbuf_free(pkts_burst[j]);
+ continue;
+ }
}
} /* end of while */
@@ -1374,6 +1236,11 @@ int32_t launchDpdkFrameParser(void)
{
SCLogError(SC_ERR_DPDKINTEL_CONFIG_FAILED, "Unknown speed (%u) for %u", linkSpeed.link_speed, reqCores);
}
+
+ if (rte_eth_dev_start(portMap[portIndex].inport) < 0) {
+ SCLogError(SC_ERR_DPDKINTEL_CONFIG_FAILED, " failed RX-TX start on port %d\n", portMap[portIndex].inport);
+ SCReturnInt(TM_ECODE_FAILED);
+ }
}
SCLogDebug("10-100 Mb/s %x, 1000 Mb/s %x, 10000 Mb/s %x",
@@ -1421,12 +1288,22 @@ int32_t launchDpdkFrameParser(void)
if (portIndexBmp_10000)
{
+ uint32_t portBmpSet = 0x00, ports = 0x00;
+
portIndex = 0x00;
while (portIndexBmp_10000)
{
- if (portIndexBmp_10000 & 0x01)
- rte_eal_remote_launch(ReceiveDpdkPkts_IPS_10000,
- &portMap[portIndex].inport, getCpuIndex());
+ if (portIndexBmp_10000 & 0x01) {
+ ports = (portMap[portIndex].inport << 0 )|
+ (portMap[portIndex].outport << 8);
+
+ SCLogDebug(" Ports In-Out %x", ports);
+
+ rte_eal_remote_launch(ReceiveDpdkPkts_IPS_10000, &ports, getCpuIndex());
+
+ portBmpSet = portBmpSet | ((1 << portMap[portIndex].inport) |
+ (1 << portMap[portIndex].outport));
+ }
portIndexBmp_10000 = portIndexBmp_10000 >> 1;
portIndex++;
@llyyrr are there any updates from your end?
Hi, after a long break I applied your patch, but it did not change anything. The compilation was successful, but everything remained as it was before the patch was applied. After that, I compiled the latest Suricata from the master branch (latest commit 7f72ac4). After the update, the problems were not resolved. I use a 1G configuration.
@llyyr your update is incomplete and inconsistent. I have shared the details.
So I do not know how to help you
Here are the overwhelming concerns on your request,
launched VM with
reason: I do not have 1G physical NIC so testing was not done with the same. Currently, I have emulated the same in VM.
Fix: based on the hint from @llyyr that his setup has a 1G NIC
tested on VM and added with the latest build.
./src/suricata --list-dpdkintel-ports
--- DPDK Intel Ports ---
* Overall Ports: 2
-- Port: 0
--- MTU: 1500
--- MAX RX MTU: 16128
--- Driver: net_e1000_em
--- Index: 0
--- Queues RX 1 & TX 1
--- SRIOV VF: 0
--- Offload RX: 12a0f TX: 800f
--- CPU NUMA node: 0
--- Status: Up
Led for 5 sec.......
-- Port: 1
--- MTU: 1500
--- MAX RX MTU: 16128
--- Driver: net_e1000_em
--- Index: 0
--- Queues RX 1 & TX 1
--- SRIOV VF: 0
--- Offload RX: 12a0f TX: 800f
--- CPU NUMA node: 0
--- Status: Up
Led for 5 sec.......
------------------------
8/2/2020 -- 01:39:06 - <Notice> - --- thread stats for Intf: 0 to 1 --- [291/1996]
8/2/2020 -- 01:39:06 - <Notice> - +++ ACL +++
8/2/2020 -- 01:39:06 - <Notice> - - non IP 0
8/2/2020 -- 01:39:06 - <Notice> - +++ ipv4 0 +++
8/2/2020 -- 01:39:06 - <Notice> - - lookup: success 0, fail 0
8/2/2020 -- 01:39:06 - <Notice> - - result: hit 0, miss 0
8/2/2020 -- 01:39:06 - <Notice> - +++ ipv6 0 +++
8/2/2020 -- 01:39:06 - <Notice> - - lookup: success 0, fail 0
8/2/2020 -- 01:39:06 - <Notice> - - result: hit 0, miss 0
8/2/2020 -- 01:39:06 - <Notice> - +++ ring +++
8/2/2020 -- 01:39:06 - <Notice> - ERR: full 0, enq 0, tx 0
8/2/2020 -- 01:39:06 - <Notice> - +++ port 0 +++
8/2/2020 -- 01:39:06 - <Notice> - - index 0 pkts RX 0 TX 129 MISS 0
8/2/2020 -- 01:39:06 - <Notice> - - Errors RX: 0 TX: 0 Mbuff: 0
8/2/2020 -- 01:39:06 - <Notice> - - Queue Dropped pkts: 0
8/2/2020 -- 01:39:06 - <Notice> - ----------------------------------
8/2/2020 -- 01:39:06 - <Notice> - --- thread stats for Intf: 1 to 0 ---
8/2/2020 -- 01:39:06 - <Notice> - +++ ACL +++
8/2/2020 -- 01:39:06 - <Notice> - - non IP 129
8/2/2020 -- 01:39:06 - <Notice> - +++ ipv4 0 +++
8/2/2020 -- 01:39:06 - <Notice> - - lookup: success 0, fail 0
8/2/2020 -- 01:39:06 - <Notice> - - result: hit 0, miss 0
8/2/2020 -- 01:39:06 - <Notice> - +++ ipv6 0 +++
8/2/2020 -- 01:39:06 - <Notice> - - lookup: success 0, fail 0
8/2/2020 -- 01:39:06 - <Notice> - - result: hit 0, miss 0
8/2/2020 -- 01:39:06 - <Notice> - +++ ring +++
8/2/2020 -- 01:39:06 - <Notice> - ERR: full 0, enq 0, tx 0
8/2/2020 -- 01:39:06 - <Notice> - +++ port 1 +++
8/2/2020 -- 01:39:06 - <Notice> - - index 1 pkts RX 130 TX 0 MISS 0
8/2/2020 -- 01:39:06 - <Notice> - - Errors RX: 130 TX: 0 Mbuff: 0
8/2/2020 -- 01:39:06 - <Notice> - - Queue Dropped pkts: 0
8/2/2020 -- 01:39:06 - <Notice> - ----------------------------------
@llyyrr I do not think you were using the latest build prior to the current fix. Because dpdk-suricata.ini
had to be modified for accommodating e1000 NIC.
I installed the x710 network card on the server. Now, I don’t understand how bind physical 10G ports to the suricata via net_tap driver?
./src/suricata --list-dpdkintel-ports
10/2/2020 -- 06:11:06 -
10/2/2020 -- 06:11:07 -
--- DPDK Intel Ports ---
Overall Ports: 2
-- Port: 0 --- MTU: 1500 --- MAX RX MTU: 1522 --- Driver: net_tap --- Index: 44 --- Queues RX 16 & TX 16 --- SRIOV VF: 0 --- Offload RX: 200e TX: 802e --- CPU NUMA node: 0 --- Status: Down Led for 5 sec.......
-- Port: 1 --- MTU: 1500 --- MAX RX MTU: 1522 --- Driver: net_tap --- Index: 45 --- Queues RX 16 & TX 16 --- SRIOV VF: 0 --- Offload RX: 200e TX: 802e --- CPU NUMA node: 0 --- Status: Down Led for 5 sec.......
dpdk-devbind.py -s Network devices using DPDK-compatible driver
0000:02:00.0 'Ethernet Controller X710 for 10GbE SFP+ 1572' drv=igb_uio unused=i40e,vfio-pci,uio_pci_generic 0000:02:00.1 'Ethernet Controller X710 for 10GbE SFP+ 1572' drv=igb_uio `unused=i40e,vfio-pci,uio_pci_generic```
@llyyrr after analysing your logs, it is clear
the problem is due to
solution:
README
reason:
Driver: net_tap
but it should be i40e
as the DPDK PMD. Configuration for dpdk-suricata.ini:
updated dpdk-suricata.ini
(which has been tested with igb, ixgbe, i40e, e1000) is
[EAL]
--file-prefix=suricata_1
-c=0xf
--master-lcore=3
--socket-mem=128,0
--log-level=eal,1
-w=0000:00:0a.0
-w=0000:00:09.0
Since you are using the old one
[EAL]
--lcores=10-15
--file-prefix=suricata_1
--socket-mem=2048,2048
--vdev=net_tap0,iface=nacEth1
--vdev=net_tap1,iface=nacEth2
--log-level=eal,1
--no-pci
this prevents any pci device from being probed and use vdev of type net_tap
@llyyrr for https://github.com/vipinpv85/DPDK-Suricata_3.0/issues/4#issuecomment-584085515, I am not clear with your statements. So open a new ticket only if you can populate in according to the issue template for
If you are not having the details and ssh to share, I can not help you.
Note: on checking the logs and your information it is clear you are running into configuration issue with your VM.
So please be prompt and respond ASAP, but only if you are ready.
I am deleting your comment as this is not part of the ticket too.
@llyyrr as per the live debug your issue is 'Status: Down'
@duanrui123456 there are no updates from your end
@ahubaoan there are no updates from your end. What is the progress on 4.1 suricata project??
1、tar xf dpdk-16.07.tar.xz cd dpdk-16.07 make install T=x86_64-native-linuxapp-gcc
To set the environment variables
export RTE_SDK=/home/dpdk-16.07 export RTE_TARGET=x86_64-native-linuxapp-gcc
Then to do the following test DPDK operation, compile and run normally cd /home/dpdk-16.07/examples/helloworld make cd build ./helloworld
2、 unzip DPDK-Suricata_3.0-master.zip cd DPDK-Suricata_3.0-master/suricata-3.0/ ./configure -enable-dpdkintel --with-libdpdkintel-includes=$RTE_SDK/$RTE_TARGET/include/ --with-libdpdkintel-libraries=$RTE_SDK/$RTE_TARGET/lib make -j all then In file included from /home/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mbuf.h:58:0, from /home/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ether.h:52, from dpdk-include-common.h:17, from suricata.c:202: /home/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h:155:2: error: expected specifier-qualifier-list before ‘STAILQ_ENTRY’ STAILQ_ENTRY(rte_mempool_objhdr) next; /< Next in list. / ^ /home/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h:166:1: warning: data definition has no type or storage class STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr); ^ /home/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h:166:1: warning: type defaults to ‘int’ in declaration of ‘STAILQ_HEAD’ [-Wimplicit-int] /home/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h:166:1: warning: parameter names (without types) in function declaration /home/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h:185:1: warning: data definition has no type or storage class STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr); ^ /home/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h:185:1: warning: type defaults to ‘int’ in declaration of ‘STAILQ_HEAD’ [-Wimplicit-int] /home/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h:185:1: warning: parameter names (without types) in function declaration /home/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h:191:2: warning: ‘struct rte_mempool_memhdr’ declared inside parameter list void opaque); ^ /home/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h:191:2: warning: its scope is only this definition or declaration, which is probably not what you want 
/home/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h:200:2: error: expected specifier-qualifier-list before ‘STAILQ_ENTRY’ STAILQ_ENTRY(rte_mempool_memhdr) next; /< Next in list. */ ^ /home/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h:248:33: error: field ‘elt_list’ has incomplete type struct rte_mempool_objhdr_list elt_list; /< List of objects in pool */ ^ /home/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h:250:33: error: field ‘mem_list’ has incomplete type struct rte_mempool_memhdr_list mem_list; /< List of memory chunks / ^ /home/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h: In function ‘rte_mempool_from_obj’: /home/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h:318:12: error: ‘struct rte_mempool_objhdr’ has no member named ‘mp’ return hdr->mp; ^ /home/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h: In function ‘rte_mempool_virt2phy’: /home/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h:1624:12: error: ‘const struct rte_mempool_objhdr’ has no member named ‘physaddr’ return hdr->physaddr; ^ Makefile:1639: recipe for target 'suricata.o' failed make[3]: [suricata.o] Error 1 make[3]: Leaving directory '/home/DPDK-Suricata_3.0-master/suricata-3.0/src' Makefile:1139: recipe for target 'all' failed make[2]: * [all] Error 2 make[2]: Leaving directory '/home/DPDK-Suricata_3.0-master/suricata-3.0/src' Makefile:468: recipe for target 'all-recursive' failed make[1]: [all-recursive] Error 1 make[1]: Leaving directory '/home/DPDK-Suricata_3.0-master/suricata-3.0' Makefile:398: recipe for target 'all' failed make: [all] Error 2
This is the environment in which I operate