niivue / niivue

a WebGL2 based medical image viewer. Supports over 30 formats of volumes and meshes.
https://niivue.github.io/niivue/
BSD 2-Clause "Simplified" License

voxel coordinate discrepancy #475

Closed: haehn closed this issue 1 year ago

haehn commented 1 year ago

Hi all, this is a follow-up to a problem reported in the last meeting and also to issue #471 reported by @fib which then turned out to be another problem.

If we load our data in 3D Slicer, XTK, and NiiVue, the nose of the patient is at voxel coordinates 274, 33, 117 in 3D Slicer and XTK, but at 268, 473, 120 in NiiVue. So somehow the origin of the voxel space is different than in 3D Slicer or XTK.

Here are screenshots demonstrating this https://docs.google.com/presentation/d/1QgnulfyY2arT8_YBXiLUlGcn7rsKkYx6XihydoZQcPI/edit?usp=sharing

We use the following code to set the labelmap pixel in NiiVue programmatically:

H.Drawer.prototype.setLabelmapPixel = function (x, y, z, label) {

  let dx = H.D.nv.volumes[0].dims[1];
  let dy = H.D.nv.volumes[0].dims[2];
  let dz = H.D.nv.volumes[0].dims[3];

  // clamp to the volume bounds
  x = Math.min(Math.max(x, 0), dx - 1);
  y = Math.min(Math.max(y, 0), dy - 1);
  z = Math.min(Math.max(z, 0), dz - 1);

  // write the label into the drawing bitmap (x fastest, then y, then z)
  H.D.nv.drawBitmap[x + y * dx + z * dx * dy] = label;

};

and we grab the x, y, z coordinates directly from the onLocationChange event like this:

nv.onLocationChange = function (e) {
  let i = e['vox'][0];
  let j = e['vox'][1];
  let k = e['vox'][2];

  //...

}

Now if we invert i and j in the onLocationChange callback, things look a little better, but the drawing is mirrored:


  // ... 
  i = 512 - i; // based on w,h = 512
  j = 512 - j;
  //...

[screenshot: the drawing appears mirrored]

Since we are using e['vox'], can we somehow set nv.drawBitmap in a different way so that we don't need to convert anything and it just works?

Or could we please get a convenience function nv.setDrawingPixel(i, j, k, label) that matches the e['vox'] callback values, so we don't need to convert anything?

If we use just the internal NiiVue drawingMode, everything works properly, so it must be the combination of e['vox'] and our setLabelmapPixel method from above.

Thank you so much!

neurolabusc commented 1 year ago

This is a feature, not a bug. There are 48 possible lossless orders for 3D voxel data. NiiVue always shuffles the voxel order so that images are stored in R,A,S. Therefore, regardless of whether the data was acquired as sagittal, coronal or axial images, the column (i), row (j) and slice (k) dimensions correspond to left->right, posterior->anterior and inferior->superior respectively. This provides spatial consistency regardless of acquisition settings.

To get the number of columns, rows and slices of the background image for the NiiVue object nv, you can call let dims = [nv.back.dims[1], nv.back.dims[2], nv.back.dims[3]]. The affine transform nv.back.matRAS provides the residual sform to convert voxels to world space (millimeters from the origin). This is better than hard-coding dimensions, and it lets NiiVue read many voxel-based formats for you (MGH, NIfTI, NRRD, BRIK, etc.). It even reformats 4D formats where the three spatial dimensions do not correspond to the first three dimensions on disk. Another nice feature is that NiiVue will write drawings back to the original voxel order (RAS, LAS, SPL, etc.), so the disk order matches the original scalar dataset.

In brief, nv.volumes[0].dims[1] provides the number of columns saved to disk with no promises about spatial orientation, while nv.back.dims[1] provides the number of columns for the image stored in memory as well as reliably encoding the left-right spatial dimension.
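
A minimal sketch of that recipe (assuming nv.back.matRAS is exposed as a gl-matrix mat4, i.e. a column-major array of 16 numbers; verify the layout against your NiiVue build):

  // Sketch: index the RAS-ordered buffers and map a voxel to world space.
  let dims = [nv.back.dims[1], nv.back.dims[2], nv.back.dims[3]]; // columns (R), rows (A), slices (S)

  function vox2index(i, j, k) {
    // 1D offset into RAS-ordered arrays such as nv.drawBitmap
    return i + j * dims[0] + k * dims[0] * dims[1];
  }

  function vox2mm(i, j, k) {
    // assumes nv.back.matRAS is a column-major 4x4 affine (gl-matrix mat4)
    let m = nv.back.matRAS;
    return [
      m[0] * i + m[4] * j + m[8] * k + m[12],
      m[1] * i + m[5] * j + m[9] * k + m[13],
      m[2] * i + m[6] * j + m[10] * k + m[14]
    ];
  }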

neurolabusc commented 1 year ago

@haehn and @fib, below is a minimal demo for drawing. It is insanely inefficient (it redraws on each event), it does not use Bresenham's line method, so it will generate dots rather than lines, and it reloads the dimensions for each and every voxel. However, it demonstrates how to properly draw on any voxel-based image.

git clone git@github.com:niivue/niivue.git
cd niivue
npm install
<replace /src/index.html with text below>
npm run dev

index.html:

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <link rel="stylesheet" href="niivue.css">
</head>
<body>
  <main id="container">
    <canvas id="gl1"></canvas>
  </main>
  <footer id="location">
    &nbsp;
  </footer>
</body>
</html>
<script type="module" async>
  import { Niivue } from './niivue.js'
  import {NVImage} from './nvimage.js'
  import {NVMesh} from './nvmesh.js'
  function handleLocationChange(data){
    if (!nv1.drawBitmap) {
      nv1.createEmptyDrawing();
    }
    document.getElementById('location').innerHTML = data.vox[0]+'×'+data.vox[1]+'×'+data.vox[2]
    let x = data.vox[0];
    let y = data.vox[1];
    let z = data.vox[2];
    let dx = nv1.back.dims[1];
    let dy = nv1.back.dims[2];
    let dz = nv1.back.dims[3];
    //next lines not needed: data clamped
    // x = Math.min(Math.max(x, 0), dx - 1);
    // y = Math.min(Math.max(y, 0), dy - 1);
    // z = Math.min(Math.max(z, 0), dz - 1);
    nv1.drawBitmap[x + y * dx + z * dx * dy] = 1;
    nv1.refreshDrawing(true);
  }
  var nv1 = new Niivue({
      onLocationChange:handleLocationChange
  })
  nv1.attachTo('gl1')
  var volumeList1 = [{url: "../demos/images/FLAIR.nii.gz"}]
  await nv1.loadVolumes(volumeList1)
  await nv1.loadDrawingFromUrl("../demos/images/lesion.nii.gz")
</script>

Writing this, I do think that we need to make createEmptyDrawing() an async function so you can await its completion: both it and refreshDrawing() block while transferring an entire volume's worth of data to the GPU. However, the demo shows how to write to any voxel-based image regardless of the voxel order on disk.

haehn commented 1 year ago

Thank you - for our data, both dims give the same values:

[screenshot: both dims report the same values]

Is it possible to have a method like nv.setDrawingPixel(i, j, k, label) that does all the transforms for us if we want to work in voxel space? Or how can we work in voxel space and then update to world space - do we need to multiply every voxel by matRAS before accessing the array? Is there a convenience method that does the multiplication in NiiVue?

neurolabusc commented 1 year ago

See the formula in my previous comment. It is a live demo that allows you to visually confirm that cursor maps to voxel.

haehn commented 1 year ago

Yes - the demo works for the cursor, since data['vox'] is transformed using matRAS before the callback. If we now want to take that position and implement region growing or any other voxel-based algorithm, it adds complexity, since we can't just "grow" in voxel space like in 3D Slicer or XTK.

haehn commented 1 year ago

@neurolabusc Or is there an example where we programmatically segment a whole region of a volume outside of NiiVue? That would be enough for us - even an example that labels a rectangle or cube in voxel space.

neurolabusc commented 1 year ago

@haehn I have created a new helper function img2RAS. This is showcased in the demo page indexIntensityDraw.html so you can evaluate it here.

git clone --branch thresholding git@github.com:niivue/niivue.git
cd niivue
npm install
mv ./src/indexIntensityDraw.html ./src/index.html
npm run dev
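
For orientation, here is a hedged sketch of how img2RAS() can be combined with nv.back.dims (based on how it is used in the demo below; exact behavior may differ on the thresholding branch):

  // Sketch: read a voxel intensity from the RAS-reordered copy of the background image.
  // img2RAS() is assumed to return the raw voxel data reshuffled into R,A,S order,
  // so it can be indexed with the same i,j,k reported by onLocationChange.
  function getRASIntensity(i, j, k) {
    let ras = nv1.volumes[0].img2RAS();   // reorder once, cache this in real code
    let dx = nv1.back.dims[1];
    let dy = nv1.back.dims[2];
    return ras[i + j * dx + k * dx * dy]; // same indexing as nv1.drawBitmap
  }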

JavaScript numerical code is about an order of magnitude slower than natively compiled C code, but using WebGL shaders provides the native performance of the massively parallel GPU. The one consideration is that it takes time to transfer data back and forth between the CPU and GPU. NiiVue's solution is to compute only the spatial, color and intensity transforms on the GPU, where they are used for the visualization. However, this means that the raw image data remains in its native space.

We can see the issue with your CACTUS data which is LPS:

fslhd plaque.nii.gz
...
sform_xorient   Right-to-Left
sform_yorient   Anterior-to-Posterior
sform_zorient   Inferior-to-Superior

The other thing to bear in mind is that, by design, JavaScript wants to store numerical data as 64-bit floats, which is problematic for mobile devices displaying large medical images. Therefore, NiiVue retains the native datatype of the imaging data (often 16-bit integers for CT and MRI) and uses the NIfTI/DICOM rescale slope and intercept. The demo showcases how the helper function intensityScaled2Raw() can convert from calibrated units (e.g. Hounsfield units for CT data) to the raw voxel data.
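
For reference, the usual NIfTI convention is scaled = raw * scl_slope + scl_inter, so the reverse conversion looks roughly like the sketch below (the header field names here are an assumption for illustration; in NiiVue, use the intensityScaled2Raw() helper itself):

  // Sketch of calibrated-to-raw conversion under the NIfTI convention
  //   scaled = raw * scl_slope + scl_inter   (a slope of 0 is treated as 1)
  function scaled2raw(hdr, scaled) {
    let slope = hdr.scl_slope || 1;
    let inter = hdr.scl_inter || 0;
    return (scaled - inter) / slope;
  }
  // e.g. convert a CT threshold of 300 Hounsfield units back into the raw
  // 16-bit integer range before comparing against the untransformed data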

NiiVue was designed from the ground up to leverage WebGL2's best features and deliver a fluid user experience even on mobile devices. The API shields most developers from the internals. However, the learning curve for the internals is steep, as we are pretty close to the metal.

Another discovery that @cdrake and I made in the development of NiiVue is that JavaScript just-in-time interpreters do not apply the clever but time-consuming tricks of modern native compilers. You can dramatically improve JavaScript performance by using the (now obsolete) C programming tricks from the 1980s, including loop hoisting, unrolling, lookup tables and recognizing that functions do not get inlined. In my sample project I provide two functions that produce identical numerical results: the 3D function uses the setLabelmapPixel(x,y,z,label) function, while the 1D routine unrolls and inlines it. Since the results are identical, you can choose whether you prefer the clarity and maintainability of the 3D function or the speed of the inlined function. For the internals of NiiVue we have gravitated toward the latter approach in performance-critical sections of the code.
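
To illustrate the trade-off (this is not the actual NiiVue code, just a sketch of the two styles), compare a readable triple loop that calls a per-voxel helper with a version that hoists the index arithmetic and drops the call:

  // Readable 3D version: one helper call per voxel (clear, but the JIT
  // typically does not inline the call).
  function setPixel(bitmap, dims, x, y, z, label) {
    bitmap[x + y * dims[0] + z * dims[0] * dims[1]] = label;
  }
  function fill3D(bitmap, dims, label) {
    for (let z = 0; z < dims[2]; z++)
      for (let y = 0; y < dims[1]; y++)
        for (let x = 0; x < dims[0]; x++)
          setPixel(bitmap, dims, x, y, z, label);
  }

  // Inlined 1D version: identical result, but the index arithmetic is hoisted
  // out of the inner loops and the per-voxel function call is gone.
  function fill1D(bitmap, dims, label) {
    const dxy = dims[0] * dims[1];
    for (let z = 0; z < dims[2]; z++) {
      const zOff = z * dxy;
      for (let y = 0; y < dims[1]; y++) {
        const yOff = zOff + y * dims[0];
        for (let x = 0; x < dims[0]; x++)
          bitmap[yOff + x] = label;
      }
    }
  }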

haehn commented 1 year ago

Thanks - is it not possible to access individual voxels in IJK space?

neurolabusc commented 1 year ago

@haehn I have fixed getValue(x,y,z) to work appropriately. It's not fast, but it does work, and it will also return scalar luminance for RGB and RGBA volumes (at the moment, img2RAS() assumes scalar images).
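
If that fits your workflow, usage should reduce to something like this sketch (assuming getValue() lives on the volume object and takes the same voxel indices reported by onLocationChange; verify the exact signature in the source):

  // Sketch: query a scalar intensity directly at the cursor voxel (i, j, k)
  nv1.onLocationChange = (data) => {
    let value = nv1.volumes[0].getValue(data.vox[0], data.vox[1], data.vox[2]);
    console.log('intensity at cursor:', value);
  };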

haehn commented 1 year ago

Thank you so much! I finally got it to work, even without the latest changes:

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <link rel="stylesheet" href="niivue.css">
</head>
<body>
  <main id="container">
    <canvas id="gl1"></canvas>
  </main>
  <footer>
    <input type="range" min="0" max="100" value="50" class="slider" id="threshold">
    <button id="array1D">1D</button>&nbsp;<button id="array3D">3D</button>
    <x id="location">&nbsp;</x>
  </footer>
</body>
</html>
<script type="module" async>
  import { Niivue } from './niivue.js'
  import {NVImage} from './nvimage.js'
  import {NVMesh} from './nvmesh.js'

  var lastPos = null;

  function setLabelmapPixel(x, y, z, label) {

    let dx = nv1.back.dims[1];
    let dy = nv1.back.dims[2];
    let dz = nv1.back.dims[3];
    nv1.drawBitmap[x + y * dx + z * dx * dy] = label;

  };

  function getIntensity(backImg, x,y,z) {

    let dx = nv1.back.dims[1];
    let dy = nv1.back.dims[2];
    let dz = nv1.back.dims[3];
    return backImg[x + y * dx + z * dx * dy];

  };

  function grow(x,y,z){

    var backImg = nv1.volumes[0].img2RAS();

    var threshold_tolerance = 50;

    var threshold = getIntensity(backImg, x,y,z);

    console.log(threshold);

    var threshold_scaled = nv1.volumes[0].intensityScaled2Raw(threshold);

    console.log(threshold_scaled);

    for (var i=0; i<10; i++) {
      for (var j=0; j<10; j++) {
        for (var k=0; k<10; k++) {

          var newPos = [x+i, y+j, z+k];

          var newIntensity = getIntensity(backImg, newPos[0], newPos[1], newPos[2]);

          var newIntensity_scaled = nv1.volumes[0].intensityScaled2Raw(newIntensity);

          if ((newIntensity >= threshold_scaled) || (Math.abs(newIntensity-threshold_scaled) <= (threshold_tolerance / 100.0 * threshold_scaled))) {

            setLabelmapPixel(newPos[0], newPos[1], newPos[2], 1);

          }
        }
      }
    }

    nv1.refreshDrawing(true);

  }

  function handleLocationChange(data){

    if (!nv1.drawBitmap) {
      nv1.createEmptyDrawing();
    }

    lastPos = data.vox;

  }

  document.getElementById('gl1').onmouseup = function(e) {

    if (!e.ctrlKey) {
      return;
    }

    grow(lastPos[0], lastPos[1], lastPos[2]);

  };

  var nv1 = new Niivue({onLocationChange:handleLocationChange})
  nv1.attachTo('gl1')
  nv1.setInterpolation(true);

  var volumeList1 = [{url: "../demos/images/esus12.nrrd"}]

  nv1.setSliceMM(true);
  nv1.opts.dragMode = nv1.dragModes.slicer3D;

  await nv1.loadVolumes(volumeList1)

</script>

neurolabusc commented 1 year ago

@haehn this is great. Beyond solving your problem, you also discovered an error in the existing getValue() function, so this issue helped both your work and the core project. Given your success, I am closing this issue.