darkstar007 opened this issue 6 years ago
Not sure if this is the correct place to raise this issue.

If I do a 2-channel, 4 MS/s collection, I get the following errors:

When I process the data I see a number of issues every so often, so I expect the errors above cause glitches in the output data stream.

This is running on an oldish i7 writing to an SSD, running Debian unstable. I can do the same capture on a LimeSDR over USB 3 on the same machine with no issues.

The script I'm running to do the capture is:

and the rx_sdr program can be found here

Thanks,
Matt
Matt, thank you for reporting the issue.
To understand the issue, let me explain how RX works. The XTRX itself has a small internal buffer in FPGA BRAM (128 KB max); all samples taken from the ADC are stored there first. Then the PCIe DMA engine takes over: the host-side DMA ring buffer (32 × 32 KB = 1 MB; the size was chosen as a tradeoff) is filled automatically whenever a packet of data is available in the FPGA buffer. This continues until all DMA buffers are full. Each call to the recv() function frees DMA buffers according to the number of samples consumed.
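Schematically, the ring behaves like this (a minimal model with illustrative names, not the actual xtrxll API):

/* Minimal model of the RX path described above: the DMA engine fills a
 * ring of BUFS fixed-size host buffers and recv() consumes them. If the
 * consumer stalls until all BUFS buffers are full, the next completed
 * buffer has nowhere to go and samples are dropped. */
#include <stdbool.h>

#define BUFS 32                  /* host DMA ring: 32 buffers */

struct rx_ring {
    unsigned wr;                 /* buffers completed by the DMA engine */
    unsigned rd;                 /* buffers freed by recv()             */
};

/* Hardware side: called once per completed buffer. */
static bool ring_push(struct rx_ring *r)
{
    if (r->wr - r->rd == BUFS)
        return false;            /* ring full -> overrun, samples lost */
    r->wr++;
    return true;
}

/* recv() side: frees one buffer for the hardware to reuse. */
static void ring_pop(struct rx_ring *r)
{
    if (r->rd != r->wr)
        r->rd++;
}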
In your application, a write to the SSD is made between recv() calls, and during this write the DMA ring buffer gets overrun.
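As a hypothetical capture loop (a sketch, not your actual application code; recv_samples is a stand-in wrapper around recv()):

/* While write() blocks on the SSD, the DMA engine keeps filling ring
 * buffers; once all of them are full, new samples are dropped. */
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>

extern size_t recv_samples(int16_t *dst, size_t max_samples);

static void capture_loop(int fd, int16_t *buf, size_t n)
{
    for (;;) {
        size_t got = recv_samples(buf, n);  /* frees consumed DMA buffers */

        /* This write can stall for tens of milliseconds on the SSD;
         * meanwhile the DMA ring keeps filling and eventually overruns. */
        if (write(fd, buf, got * sizeof(*buf)) < 0)
            break;
    }
}

Draining the ring from a dedicated thread, with the disk write done in another, would decouple recv() latency from SSD latency, independent of the buffer-size patches below.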
First, check whether increasing the DMA buffer improves things. The patches below double it from 1 MB to 2 MB; increasing it further would require an updated FPGA image.
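For scale, here is a back-of-the-envelope estimate of how long a stall the host ring can absorb (assuming 16-bit I + 16-bit Q per channel; the exact wire format may differ):

/* Stall tolerance of the host DMA ring at the reported settings. */
#include <stdio.h>

int main(void)
{
    const double rate_sps  = 4e6;           /* 4 MS/s, as in the report  */
    const double channels  = 2;
    const double bytes_per = 4;             /* 16-bit I + 16-bit Q       */
    const double ring      = 32.0 * 32768;  /* 32 x 32 KB = 1 MB         */

    double fill_rate = rate_sps * channels * bytes_per;      /* ~32 MB/s */
    printf("stall tolerance: %.1f ms\n", 1e3 * ring / fill_rate);
    return 0;
}

That comes out to roughly 33 ms for the stock 1 MB ring, and about 66 ms once doubled; a single slow SSD flush can easily exceed that, which matches the overruns in the log below.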
Apply the following to xtrxll:
diff --git a/mod_pcie/xtrxll_pcie_linux.c b/mod_pcie/xtrxll_pcie_linux.c
index 09cb6f9..c33af50 100644
--- a/mod_pcie/xtrxll_pcie_linux.c
+++ b/mod_pcie/xtrxll_pcie_linux.c
@@ -384,16 +384,16 @@ static int xtrxllpciev0_dma_rx_init(struct xtrxll_base_dev* bdev, int chan,
int err;
if (chan !=0)
return -EINVAL;
- if (buf_szs % 16 || buf_szs > RXDMA_MMAP_BUFF) {
+ if (buf_szs % 16 || buf_szs > 2 * RXDMA_MMAP_BUFF) {
XTRXLLS_LOG("PCIE", XTRXLL_ERROR, "Wire RX pkt size is %d, should be rounded to 128 bit and less %d\n",
- buf_szs, RXDMA_MMAP_BUFF);
+ buf_szs, 2 * RXDMA_MMAP_BUFF);
return -EINVAL;
} else if (buf_szs == 0) {
- buf_szs = RXDMA_MMAP_BUFF;
+ buf_szs = 2 * RXDMA_MMAP_BUFF;
}
if (dev->mmap_rx_kernel_buf == 0) {
- void* m = mmap(0, RXDMA_MMAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
+ void* m = mmap(0, 2*RXDMA_MMAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
dev->fd, XTRX_MMAP_RX_OFF);
if (m == MAP_FAILED) {
err = errno;
@@ -426,7 +426,7 @@ static int xtrxllpciev0_dma_rx_deinit(struct xtrxll_base_dev* bdev, int chan)
int err;
if (chan !=0)
return -EINVAL;
- err = munmap(dev->mmap_rx_kernel_buf, RXDMA_MMAP_SIZE);
+ err = munmap(dev->mmap_rx_kernel_buf, 2 * RXDMA_MMAP_SIZE);
if (err) {
err = errno;
XTRXLLS_LOG("PCIE", XTRXLL_DEBUG, "%s: DMA RX unmmap error: %s\n",
@@ -513,7 +513,7 @@ static int xtrxllpciev0_dma_rx_getnext(struct xtrxll_base_dev* bdev, int chan,
}
}
- *addr = (void*)((char*)dev->mmap_rx_kernel_buf + RXDMA_MMAP_BUFF * bn);
+ *addr = (void*)((char*)dev->mmap_rx_kernel_buf + 2 * RXDMA_MMAP_BUFF * bn);
return 0;
}
@@ -524,7 +524,7 @@ static int xtrxllpciev0_dma_rx_release(struct xtrxll_base_dev* bdev, int chan,
if (chan != 0)
return -EINVAL;
- unsigned bufno = ((char*)addr - (char*)dev->mmap_rx_kernel_buf) / RXDMA_MMAP_BUFF;
+ unsigned bufno = ((char*)addr - (char*)dev->mmap_rx_kernel_buf) / (2 * RXDMA_MMAP_BUFF);
XTRXLLS_LOG("PCIE", XTRXLL_DEBUG, "%s: RX DMA RELEASE %d\n", dev->base.id, bufno);
if (bufno > 0x1f)
And this patch to the kernel driver:
diff --git a/xtrx.c b/xtrx.c
index c7e35cf..cf33478 100644
--- a/xtrx.c
+++ b/xtrx.c
@@ -73,7 +73,11 @@ enum xtrx_uart_types {
#define BUFS 32
-#define BUF_SIZE 32768
+#define D_BUF_SIZE 32768
+
+#define RX_BUF_SIZE (D_BUF_SIZE*2)
+#define TX_BUF_SIZE D_BUF_SIZE
+
#define UART_PORT_OPEN 1
@@ -503,36 +507,36 @@ static void xtrx_update_rxdma_len(struct xtrx_dev *d, struct xtrx_dmabuf_nfo *pb
iowrite32(cpu_to_be32(reg), (void __iomem *)(addr));
//xtrx_writel(d, config_off + 4 * i, reg);
- memset(pbufs[i].virt, i+1, BUF_SIZE);
+ memset(pbufs[i].virt, i+1, len_qw * 16);
}
}
-static int xtrx_allocdma(struct xtrx_dev *d, struct xtrx_dmabuf_nfo *pbufs, unsigned config_off)
+static int xtrx_allocdma(struct xtrx_dev *d, struct xtrx_dmabuf_nfo *pbufs, unsigned buf_sz, unsigned config_off)
{
int i;
for (i = 0; i < BUFS; i++) {
- pbufs[i].virt = pci_alloc_consistent(d->pdev, BUF_SIZE, &pbufs[i].phys);
+ pbufs[i].virt = pci_alloc_consistent(d->pdev, buf_sz, &pbufs[i].phys);
if (!pbufs[i].virt) {
printk(KERN_INFO PFX "Failed to allocate %d DMA buffer", i);
for (; i >= 0; --i) {
- pci_free_consistent(d->pdev, BUF_SIZE, pbufs[i].virt, pbufs[i].phys);
+ pci_free_consistent(d->pdev, buf_sz, pbufs[i].virt, pbufs[i].phys);
}
return -1;
}
}
- xtrx_update_rxdma_len(d, pbufs, config_off, BUF_SIZE / 16);
+ xtrx_update_rxdma_len(d, pbufs, config_off, buf_sz / 16);
return 0;
}
static int xtrx_allocdma_tx(struct xtrx_dev *d)
{
- return xtrx_allocdma(d, d->buf_tx, 0xC00);
+ return xtrx_allocdma(d, d->buf_tx, TX_BUF_SIZE, 0xC00);
}
static int xtrx_allocdma_rx(struct xtrx_dev *d)
{
- return xtrx_allocdma(d, d->buf_rx, 0x800);
+ return xtrx_allocdma(d, d->buf_rx, RX_BUF_SIZE, 0x800);
}
@@ -541,7 +545,7 @@ static void xtrx_freedma_rx(struct xtrx_dev *d)
{
int i;
for (i = 0; i < BUFS; i++) {
- pci_free_consistent(d->pdev, BUF_SIZE, d->buf_rx[i].virt, d->buf_rx[i].phys);
+ pci_free_consistent(d->pdev, RX_BUF_SIZE, d->buf_rx[i].virt, d->buf_rx[i].phys);
}
}
@@ -549,7 +553,7 @@ static void xtrx_freedma_tx(struct xtrx_dev *d)
{
int i;
for (i = 0; i < BUFS; i++) {
- pci_free_consistent(d->pdev, BUF_SIZE, d->buf_tx[i].virt, d->buf_tx[i].phys);
+ pci_free_consistent(d->pdev, TX_BUF_SIZE, d->buf_tx[i].virt, d->buf_tx[i].phys);
}
}
@@ -814,12 +818,12 @@ static long xtrxfd_ioctl(struct file *filp,
case 0x123458: {
int i;
for ( i = 0; (i < BUFS); ++i) {
- memset( xtrxdev->buf_rx[i].virt, i + 1, BUF_SIZE);
+ memset( xtrxdev->buf_rx[i].virt, i + 1, RX_BUF_SIZE);
}
return 0;
}
case 0x123459: {
- if (ioctl_param > BUF_SIZE)
+ if (ioctl_param > RX_BUF_SIZE)
return -E2BIG;
xtrx_update_rxdma_len(xtrxdev, xtrxdev->buf_rx, 0x800, ioctl_param);
@@ -892,23 +896,24 @@ static int xtrxfd_mmap(struct file *filp, struct vm_area_struct *vma)
unsigned long pfn, off;
unsigned i;
int ret;
+ size_t buf_sz = (vma->vm_pgoff == (XTRX_MMAP_RX_OFF >> PAGE_SHIFT)) ? RX_BUF_SIZE : TX_BUF_SIZE;
struct xtrx_dmabuf_nfo *pbufs = (vma->vm_pgoff == (XTRX_MMAP_RX_OFF >> PAGE_SHIFT)) ? xtrxdev->buf_rx : xtrxdev->buf_tx;
printk(KERN_NOTICE PFX "mmap() call: VMA=%p vma->vm_pgoff=%lu\n", vma, vma->vm_pgoff);
//DO this unportable first!
- if ((vma->vm_end - vma->vm_start) != BUF_SIZE * BUFS) {
+ if ((vma->vm_end - vma->vm_start) != buf_sz * BUFS) {
return -EINVAL;
}
//vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= VM_LOCKED;
- for (i = 0, off = 0; i < BUFS; ++i, off += BUF_SIZE) {
+ for (i = 0, off = 0; i < BUFS; ++i, off += buf_sz) {
pfn = page_to_pfn(virt_to_page(pbufs[i].virt));
ret = remap_pfn_range(vma, vma->vm_start + off,
pfn,
- BUF_SIZE,
+ buf_sz,
vma->vm_page_prot);
printk(KERN_NOTICE PFX "mmap() : remap() VMA=%p addr=%lx pfn=%lx ret=%d\n",
That appears to reduce the frequency of the issue, but does not eliminate it:
09:50:27.463538 INFO: [BPCI] PCI:/dev/xtrx0: RX DMA 16 bit MIMO (BLK:8192 TS:4096); TX DMA SKIP MIMO
09:50:27.463549 INFO: [CTRL] PCI:/dev/xtrx0: Placing TC @4096 on 0 data: 0 stat:20
[INFO] SoapyXTRX::activateStream(RX) 0 Samples per packet; res = 0
Using avx for xtrxdsp_iq16_ic16i
09:55:16.425536 ERROR: [BPCI] PCI:/dev/xtrx0: RX DMA STAT O- 00000000 Bytes -R03 00/32 I:33
09:55:16.428029 INFO: [BPCI] PCI:/dev/xtrx0: BUF_OVF TS:1155796992 WTS:1155856962 WTS_NXT:1155866624 TS_NXT:1157963776 SKIP 264 buffers INT_S:0
09:55:16.428059 INFO: [CTRL] PCI:/dev/xtrx0: Placing TC @1157963776 on 0 data: 0 stat:21
09:55:16.956832 WARN: [XTRX] PCI:/dev/xtrx0: Total=65536 Processed=0 UserTotal=2097152 UserProcessed=0 BUFTS=1157963776+0 OURTS=1155796992
09:55:32.255169 ERROR: [BPCI] PCI:/dev/xtrx0: RX DMA STAT O- 00000000 Bytes -R03 32/00 I:33
09:55:32.257531 INFO: [BPCI] PCI:/dev/xtrx0: BUF_OVF TS:1219043328 WTS:1219174995 WTS_NXT:1219182592 TS_NXT:1221279744 SKIP 273 buffers INT_S:0
09:55:32.257548 INFO: [CTRL] PCI:/dev/xtrx0: Placing TC @1221279744 on 0 data: 0 stat:22
09:55:32.785818 WARN: [XTRX] PCI:/dev/xtrx0: Total=65536 Processed=0 UserTotal=2097152 UserProcessed=0 BUFTS=1221279744+0 OURTS=1219043328
10:01:12.136399 ERROR: [BPCI] PCI:/dev/xtrx0: RX DMA STAT O- 00000000 Bytes -R03 32/00 I:33
10:01:12.138964 INFO: [BPCI] PCI:/dev/xtrx0: BUF_OVF TS:2578661376 WTS:-1716268035 WTS_NXT:-1716256768 TS_NXT:2580807680 SKIP 262 buffers INT_S:0
10:01:12.138984 INFO: [CTRL] PCI:/dev/xtrx0: Placing TC @-1714159616 on 0 data: 0 stat:23
10:01:12.668194 WARN: [XTRX] PCI:/dev/xtrx0: Total=65536 Processed=0 UserTotal=2097152 UserProcessed=0 BUFTS=2580807680+0 OURTS=2578661376
10:01:37.422995 ERROR: [BPCI] PCI:/dev/xtrx0: RX DMA STAT O- 00000000 Bytes -R03 32/00 I:33
10:01:37.425173 INFO: [BPCI] PCI:/dev/xtrx0: BUF_OVF TS:2679373824 WTS:-1615123405 WTS_NXT:-1615118336 TS_NXT:2681946112 SKIP 314 buffers INT_S:0
10:01:37.425189 INFO: [CTRL] PCI:/dev/xtrx0: Placing TC @-1613021184 on 0 data: 0 stat:24
10:01:37.952855 WARN: [XTRX] PCI:/dev/xtrx0: Total=65536 Processed=0 UserTotal=2097152 UserProcessed=0 BUFTS=2681946112+0 OURTS=2679373824
10:01:42.584492 ERROR: [BPCI] PCI:/dev/xtrx0: RX DMA STAT O- 00000000 Bytes -R03 00/32 I:33
10:01:42.586462 INFO: [BPCI] PCI:/dev/xtrx0: BUF_OVF TS:2700034048 WTS:-1594478291 WTS_NXT:-1594466304 TS_NXT:2702598144 SKIP 313 buffers INT_S:0
10:01:42.586482 INFO: [CTRL] PCI:/dev/xtrx0: Placing TC @-1592369152 on 0 data: 0 stat:25
10:01:43.115869 WARN: [XTRX] PCI:/dev/xtrx0: Total=65536 Processed=0 UserTotal=2097152 UserProcessed=0 BUFTS=2702598144+0 OURTS=2700034048
Short write, samples lost, exiting!
Short write, samples lost, exiting!
User cancel, exiting...
10:04:51.532631 INFO: [BPCI] PCI:/dev/xtrx0: RX DMA STOP MIMO (BLK:0 TS:0); TX DMA SKIP MIMO
10:04:51.533275 INFO: [LSM7] PCI:/dev/xtrx0: 0x0124[1c, 08]
10:04:51.533546 INFO: [BPCI] PCI:/dev/xtrx0: RX DMA STOP MIMO (BLK:0 TS:0); TX DMA SKIP MIMO
10:04:51.535437 INFO: [PCIE] PCI:/dev/xtrx0: Device closing
(It exited as it filled my disk up).
Thanks,
Matt
Has any progress been made on this?
This issue is preventing me from using the XTRX.
Is the XTRX truly dead?