scalableminds / webknossos

Visualize, share and annotate your large 3D images online
https://webknossos.org
GNU Affero General Public License v3.0
124 stars 23 forks source link

Shader broken, shows only RGB #4295

Closed leowe closed 4 years ago

leowe commented 4 years ago
image

As you can see in the picture, when trying to display a wkw dataset with 6 layers, only an RGB pattern was shown. The corresponding error message is:

client.js:1803 THREE.WebGLProgram: shader error:  0 35715 false gl.getProgramInfoLog invalid shaders  THREE.WebGLShader: gl.getShaderInfoLog() fragment
ERROR: 0:110: '.' : syntax error
1: precision highp float;
2: precision highp int;
3: #define SHADER_NAME ShaderMaterial
4: #define GAMMA_FACTOR 2
5: #define DOUBLE_SIDED
6: uniform mat4 viewMatrix;
7: uniform vec3 cameraPosition;
8: #define TONE_MAPPING
9: #ifndef saturate
10:     #define saturate(a) clamp( a, 0.0, 1.0 )
11: #endif
12: uniform float toneMappingExposure;
13: uniform float toneMappingWhitePoint;
14: vec3 LinearToneMapping( vec3 color ) {
15:     return toneMappingExposure * color;
16: }
17: vec3 ReinhardToneMapping( vec3 color ) {
18:     color *= toneMappingExposure;
19:     return saturate( color / ( vec3( 1.0 ) + color ) );
20: }
21: #define Uncharted2Helper( x ) max( ( ( x * ( 0.15 * x + 0.10 * 0.50 ) + 0.20 * 0.02 ) / ( x * ( 0.15 * x + 0.50 ) + 0.20 * 0.30 ) ) - 0.02 / 0.30, vec3( 0.0 ) )
22: vec3 Uncharted2ToneMapping( vec3 color ) {
23:     color *= toneMappingExposure;
24:     return saturate( Uncharted2Helper( color ) / Uncharted2Helper( vec3( toneMappingWhitePoint ) ) );
25: }
26: vec3 OptimizedCineonToneMapping( vec3 color ) {
27:     color *= toneMappingExposure;
28:     color = max( vec3( 0.0 ), color - 0.004 );
29:     return pow( ( color * ( 6.2 * color + 0.5 ) ) / ( color * ( 6.2 * color + 1.7 ) + 0.06 ), vec3( 2.2 ) );
30: }
31: vec3 ACESFilmicToneMapping( vec3 color ) {
32:     color *= toneMappingExposure;
33:     return saturate( ( color * ( 2.51 * color + 0.03 ) ) / ( color * ( 2.43 * color + 0.59 ) + 0.14 ) );
34: }
35: vec3 toneMapping( vec3 color ) { return LinearToneMapping( color ); }
36: 
37: vec4 LinearToLinear( in vec4 value ) {
38:     return value;
39: }
40: vec4 GammaToLinear( in vec4 value, in float gammaFactor ) {
41:     return vec4( pow( value.rgb, vec3( gammaFactor ) ), value.a );
42: }
43: vec4 LinearToGamma( in vec4 value, in float gammaFactor ) {
44:     return vec4( pow( value.rgb, vec3( 1.0 / gammaFactor ) ), value.a );
45: }
46: vec4 sRGBToLinear( in vec4 value ) {
47:     return vec4( mix( pow( value.rgb * 0.9478672986 + vec3( 0.0521327014 ), vec3( 2.4 ) ), value.rgb * 0.0773993808, vec3( lessThanEqual( value.rgb, vec3( 0.04045 ) ) ) ), value.a );
48: }
49: vec4 LinearTosRGB( in vec4 value ) {
50:     return vec4( mix( pow( value.rgb, vec3( 0.41666 ) ) * 1.055 - vec3( 0.055 ), value.rgb * 12.92, vec3( lessThanEqual( value.rgb, vec3( 0.0031308 ) ) ) ), value.a );
51: }
52: vec4 RGBEToLinear( in vec4 value ) {
53:     return vec4( value.rgb * exp2( value.a * 255.0 - 128.0 ), 1.0 );
54: }
55: vec4 LinearToRGBE( in vec4 value ) {
56:     float maxComponent = max( max( value.r, value.g ), value.b );
57:     float fExp = clamp( ceil( log2( maxComponent ) ), -128.0, 127.0 );
58:     return vec4( value.rgb / exp2( fExp ), ( fExp + 128.0 ) / 255.0 );
59: }
60: vec4 RGBMToLinear( in vec4 value, in float maxRange ) {
61:     return vec4( value.rgb * value.a * maxRange, 1.0 );
62: }
63: vec4 LinearToRGBM( in vec4 value, in float maxRange ) {
64:     float maxRGB = max( value.r, max( value.g, value.b ) );
65:     float M = clamp( maxRGB / maxRange, 0.0, 1.0 );
66:     M = ceil( M * 255.0 ) / 255.0;
67:     return vec4( value.rgb / ( M * maxRange ), M );
68: }
69: vec4 RGBDToLinear( in vec4 value, in float maxRange ) {
70:     return vec4( value.rgb * ( ( maxRange / 255.0 ) / value.a ), 1.0 );
71: }
72: vec4 LinearToRGBD( in vec4 value, in float maxRange ) {
73:     float maxRGB = max( value.r, max( value.g, value.b ) );
74:     float D = max( maxRange / maxRGB, 1.0 );
75:     D = min( floor( D ) / 255.0, 1.0 );
76:     return vec4( value.rgb * ( D * ( 255.0 / maxRange ) ), D );
77: }
78: const mat3 cLogLuvM = mat3( 0.2209, 0.3390, 0.4184, 0.1138, 0.6780, 0.7319, 0.0102, 0.1130, 0.2969 );
79: vec4 LinearToLogLuv( in vec4 value )  {
80:     vec3 Xp_Y_XYZp = cLogLuvM * value.rgb;
81:     Xp_Y_XYZp = max( Xp_Y_XYZp, vec3( 1e-6, 1e-6, 1e-6 ) );
82:     vec4 vResult;
83:     vResult.xy = Xp_Y_XYZp.xy / Xp_Y_XYZp.z;
84:     float Le = 2.0 * log2(Xp_Y_XYZp.y) + 127.0;
85:     vResult.w = fract( Le );
86:     vResult.z = ( Le - ( floor( vResult.w * 255.0 ) ) / 255.0 ) / 255.0;
87:     return vResult;
88: }
89: const mat3 cLogLuvInverseM = mat3( 6.0014, -2.7008, -1.7996, -1.3320, 3.1029, -5.7721, 0.3008, -1.0882, 5.6268 );
90: vec4 LogLuvToLinear( in vec4 value ) {
91:     float Le = value.z * 255.0 + value.w;
92:     vec3 Xp_Y_XYZp;
93:     Xp_Y_XYZp.y = exp2( ( Le - 127.0 ) / 2.0 );
94:     Xp_Y_XYZp.z = Xp_Y_XYZp.y / value.y;
95:     Xp_Y_XYZp.x = value.x * Xp_Y_XYZp.z;
96:     vec3 vRGB = cLogLuvInverseM * Xp_Y_XYZp.rgb;
97:     return vec4( max( vRGB, 0.0 ), 1.0 );
98: }
99: vec4 mapTexelToLinear( vec4 value ) { return LinearToLinear( value ); }
100: vec4 matcapTexelToLinear( vec4 value ) { return LinearToLinear( value ); }
101: vec4 envMapTexelToLinear( vec4 value ) { return LinearToLinear( value ); }
102: vec4 emissiveMapTexelToLinear( vec4 value ) { return LinearToLinear( value ); }
103: vec4 linearToOutputTexel( vec4 value ) { return LinearToLinear( value ); }
104: 
105: 
106: precision highp float;
107: const int dataTextureCountPerLayer = 1;
108: 
109: 
110:   uniform sampler2D layer_c1t2.nii_textures[dataTextureCountPerLayer];
111:   uniform float layer_c1t2.nii_data_texture_width;
112:   uniform sampler2D layer_c1t2.nii_lookup_texture;
113:   uniform float layer_c1t2.nii_maxZoomStep;
114:   uniform vec3 layer_c1t2.nii_color;
115:   uniform float layer_c1t2.nii_alpha;
116:   uniform float layer_c1t2.nii_min;
117:   uniform float layer_c1t2.nii_max;
118: 
119:   uniform sampler2D layer_c2t2.nii_textures[dataTextureCountPerLayer];
120:   uniform float layer_c2t2.nii_data_texture_width;
121:   uniform sampler2D layer_c2t2.nii_lookup_texture;
122:   uniform float layer_c2t2.nii_maxZoomStep;
123:   uniform vec3 layer_c2t2.nii_color;
124:   uniform float layer_c2t2.nii_alpha;
125:   uniform float layer_c2t2.nii_min;
126:   uniform float layer_c2t2.nii_max;
127: 
128:   uniform sampler2D layer_c3t2.nii_textures[dataTextureCountPerLayer];
129:   uniform float layer_c3t2.nii_data_texture_width;
130:   uniform sampler2D layer_c3t2.nii_lookup_texture;
131:   uniform float layer_c3t2.nii_maxZoomStep;
132:   uniform vec3 layer_c3t2.nii_color;
133:   uniform float layer_c3t2.nii_alpha;
134:   uniform float layer_c3t2.nii_min;
135:   uniform float layer_c3t2.nii_max;
136: 
137:   uniform sampler2D layer_color_textures[dataTextureCountPerLayer];
138:   uniform float layer_color_data_texture_width;
139:   uniform sampler2D layer_color_lookup_texture;
140:   uniform float layer_color_maxZoomStep;
141:   uniform vec3 layer_color_color;
142:   uniform float layer_color_alpha;
143:   uniform float layer_color_min;
144:   uniform float layer_color_max;
145: 
146:   uniform sampler2D layer_ix_AVGTmask.nii_textures[dataTextureCountPerLayer];
147:   uniform float layer_ix_AVGTmask.nii_data_texture_width;
148:   uniform sampler2D layer_ix_AVGTmask.nii_lookup_texture;
149:   uniform float layer_ix_AVGTmask.nii_maxZoomStep;
150:   uniform vec3 layer_ix_AVGTmask.nii_color;
151:   uniform float layer_ix_AVGTmask.nii_alpha;
152:   uniform float layer_ix_AVGTmask.nii_min;
153:   uniform float layer_ix_AVGTmask.nii_max;
154: 
155: 
156: 
157:   uniform vec4 activeCellId;
158:   uniform bool isMouseInActiveViewport;
159:   uniform float activeVolumeToolIndex;
160:   uniform sampler2D layer_segmentation_lookup_texture;
161:   uniform sampler2D layer_segmentation_textures[dataTextureCountPerLayer];
162:   uniform float layer_segmentation_data_texture_width;
163:   uniform float layer_segmentation_maxZoomStep;
164: 
165:   
166:     uniform bool isMappingEnabled;
167:     uniform float mappingSize;
168:     uniform bool hideUnmappedIds;
169:     uniform sampler2D layer_segmentation_mapping_texture;
170:     uniform sampler2D layer_segmentation_mapping_lookup_texture;
171:     uniform sampler2D layer_segmentation_mapping_color_texture;
172:   
173: 
174: 
175: uniform float sphericalCapRadius;
176: uniform float viewMode;
177: uniform float alpha;
178: uniform bool renderBucketIndices;
179: uniform bool highlightHoveredCellId;
180: uniform vec3 bboxMin;
181: uniform vec3 bboxMax;
182: uniform vec3 globalPosition;
183: uniform vec3 anchorPoint;
184: uniform float zoomStep;
185: uniform float zoomValue;
186: uniform vec3 uvw;
187: uniform bool useBilinearFiltering;
188: uniform vec3 globalMousePosition;
189: uniform bool isMouseInCanvas;
190: uniform float brushSizeInPixel;
191: uniform float planeID;
192: uniform vec3 addressSpaceDimensions;
193: uniform vec4 hoveredIsosurfaceId;
194: 
195: varying vec4 worldCoord;
196: varying vec4 modelCoord;
197: varying mat4 savedModelMatrix;
198: 
199: const float bucketWidth = 32.0;
200: const float bucketSize = 32768.0;
201: const float l_texture_width = 512.0;
202: 
203: // For some reason, taking the dataset scale from the uniform results is imprecise
204: // rendering of the brush circle (and issues in the arbitrary modes). That's why it
205: // is directly inserted into the source via templating.
206: const vec3 datasetScale = vec3(10.0, 10.0, 20.0);
207: 
208: const vec4 fallbackGray = vec4(0.5, 0.5, 0.5, 1.0);
209: 
210: 
211:     // https://github.com/glslify/glsl-inverse/blob/master/index.glsl
212:     mat4 inverse(mat4 m) {
213:       float
214:         a00 = m[0][0], a01 = m[0][1], a02 = m[0][2], a03 = m[0][3],
215:         a10 = m[1][0], a11 = m[1][1], a12 = m[1][2], a13 = m[1][3],
216:         a20 = m[2][0], a21 = m[2][1], a22 = m[2][2], a23 = m[2][3],
217:         a30 = m[3][0], a31 = m[3][1], a32 = m[3][2], a33 = m[3][3],
218: 
219:         b00 = a00 * a11 - a01 * a10,
220:         b01 = a00 * a12 - a02 * a10,
221:         b02 = a00 * a13 - a03 * a10,
222:         b03 = a01 * a12 - a02 * a11,
223:         b04 = a01 * a13 - a03 * a11,
224:         b05 = a02 * a13 - a03 * a12,
225:         b06 = a20 * a31 - a21 * a30,
226:         b07 = a20 * a32 - a22 * a30,
227:         b08 = a20 * a33 - a23 * a30,
228:         b09 = a21 * a32 - a22 * a31,
229:         b10 = a21 * a33 - a23 * a31,
230:         b11 = a22 * a33 - a23 * a32,
231: 
232:         det = b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06;
233: 
234:       return mat4(
235:         a11 * b11 - a12 * b10 + a13 * b09,
236:         a02 * b10 - a01 * b11 - a03 * b09,
237:         a31 * b05 - a32 * b04 + a33 * b03,
238:         a22 * b04 - a21 * b05 - a23 * b03,
239:         a12 * b08 - a10 * b11 - a13 * b07,
240:         a00 * b11 - a02 * b08 + a03 * b07,
241:         a32 * b02 - a30 * b05 - a33 * b01,
242:         a20 * b05 - a22 * b02 + a23 * b01,
243:         a10 * b10 - a11 * b08 + a13 * b06,
244:         a01 * b08 - a00 * b10 - a03 * b06,
245:         a30 * b04 - a31 * b02 + a33 * b00,
246:         a21 * b02 - a20 * b04 - a23 * b00,
247:         a11 * b07 - a10 * b09 - a12 * b06,
248:         a00 * b09 - a01 * b07 + a02 * b06,
249:         a31 * b01 - a30 * b03 - a32 * b00,
250:         a20 * b03 - a21 * b01 + a22 * b00) / det;
251:     }
252:   
253: 
254: 
255:     float div(float a, float b) {
256:       return floor(a / b);
257:     }
258: 
259:     vec3 div(vec3 a, float b) {
260:       return floor(a / b);
261:     }
262:   
263: 
264: 
265:     float round(float a) {
266:       return floor(a + 0.5);
267:     }
268: 
269:     vec3 round(vec3 a) {
270:       return floor(a + 0.5);
271:     }
272: 
273:     vec4 round(vec4 a) {
274:       return floor(a + 0.5);
275:     }
276: 
277: 
278: 
279:     bool isNan(float val) {
280:       // https://stackoverflow.com/questions/9446888/best-way-to-detect-nans-in-opengl-shaders
281:       return !(val < 0.0 || 0.0 < val || val == 0.0);
282:       // important: some nVidias failed to cope with version below.
283:       // Probably wrong optimization.
284:       /*return ( val <= 0.0 || 0.0 <= val ) ? false : true;*/
285:     }
286:   
287: 
288: 
289:     bool isFlightMode() {
290:       return viewMode == 1.0;
291:     }
292:   
293: 
294: 
295:     // Similar to the transDim function in dimensions.js, this function transposes dimensions for the current plane.
296:     vec3 transDim(vec3 array) {
297:       if (planeID == 0.0) {
298:         return array;
299:       } else if (planeID == 1.0) {
300:         return vec3(array.z, array.y, array.x); // [2, 1, 0]
301:       } else if (planeID == 2.0) {
302:         return vec3(array.x, array.z, array.y); // [0, 2, 1]
303:       } else {
304:         return vec3(0.0, 0.0, 0.0);
305:       }
306:     }
307:   
308: 
309: 
310:     vec3 getResolution(float zoomStep) {
311:       if (zoomStep == 0.0) {
312:         return vec3(1.0, 1.0, 1.0);
313:       } 
314:       else {
315:         return vec3(0.0, 0.0, 0.0);
316:       }
317:     }
318:   
319: 
320: 
321:     vec3 getRelativeCoords(vec3 worldCoordUVW, float usedZoomStep) {
322:       vec3 resolution = getResolution(usedZoomStep);
323:       vec3 resolutionUVW = transDim(resolution);
324: 
325:       vec3 anchorPointUVW = transDim(anchorPoint);
326:       vec3 anchorPointAsGlobalPositionUVW =
327:         anchorPointUVW * resolutionUVW * bucketWidth;
328:       vec3 relativeCoords = (worldCoordUVW - anchorPointAsGlobalPositionUVW) / resolutionUVW;
329: 
330:       vec3 coords = transDim(relativeCoords);
331:       return coords;
332:     }
333:   
334: 
335: 
336:     float getW(vec3 vector) {
337:       if (planeID == 0.0) {
338:         return vector[2];
339:       } else if (planeID == 1.0) {
340:         return vector[0];
341:       } else if (planeID == 2.0) {
342:         return vector[1];
343:       }
344:       return 0.0;
345:     }
346:   
347: 
348: 
349:     vec3 getWorldCoordUVW() {
350:       vec3 worldCoordUVW = transDim(worldCoord.xyz);
351: 
352:       if (isFlightMode()) {
353:         vec4 modelCoords = inverse(savedModelMatrix) * worldCoord;
354:         float sphericalRadius = sphericalCapRadius;
355: 
356:         vec4 centerVertex = vec4(0.0, 0.0, -sphericalRadius, 0.0);
357:         modelCoords.z = 0.0;
358:         modelCoords += centerVertex;
359:         modelCoords.xyz = modelCoords.xyz * (sphericalRadius / length(modelCoords.xyz));
360:         modelCoords -= centerVertex;
361: 
362:         worldCoordUVW = (savedModelMatrix * modelCoords).xyz;
363:       }
364: 
365:       vec3 datasetScaleUVW = transDim(datasetScale);
366: 
367:       worldCoordUVW = vec3(
368:         // For u and w we need to divide by datasetScale because the threejs scene is scaled
369:         worldCoordUVW.x / datasetScaleUVW.x,
370:         worldCoordUVW.y / datasetScaleUVW.y,
371: 
372:         // In orthogonal mode, the planes are offset in 3D space to allow skeletons to be rendered before
373:         // each plane. Since w (e.g., z for xy plane) is
374:         // the same for all texels computed in this shader, we simply use globalPosition[w] instead
375:         
376:           getW(globalPosition)
377:         
378:       );
379: 
380:       return worldCoordUVW;
381:     }
382:   
383: 
384: 
385:     bool isOutsideOfBoundingBox(vec3 worldCoordUVW) {
386:       vec3 worldCoord = transDim(worldCoordUVW);
387:       return (
388:         worldCoord.x < bboxMin.x || worldCoord.y < bboxMin.y || worldCoord.z < bboxMin.z ||
389:         worldCoord.x > bboxMax.x || worldCoord.y > bboxMax.y || worldCoord.z > bboxMax.z
390:       );
391:     }
392:   
393: 
394: 
395:     // E.g., the vector [9, 5, 2]  will be linearized to the scalar index 900 + 50 + 2, when base == 10
396:     float linearizeVec3ToIndex(vec3 position, float base) {
397:       return position.z * base * base + position.y * base + position.x;
398:     }
399: 
400:     float linearizeVec3ToIndex(vec3 position, vec3 base) {
401:       return position.z * base.x * base.y + position.y * base.x + position.x;
402:     }
403: 
404: 
405: 
406:     // Same as linearizeVec3ToIndex. However, a mod parameter m can be passed when the final index
407:     // is going to be modded, anyway. This circumvents floating overflows by modding the intermediary results.
408:     float linearizeVec3ToIndexWithMod(vec3 position, float base, float m) {
409:       return mod(mod(position.z * base * base, m) + mod(position.y * base, m) + position.x, m);
410:     }
411: 
412: 
413: 
414:     vec4 getRgbaAtIndex(sampler2D texture, float textureWidth, float idx) {
415:       float finalPosX = mod(idx, textureWidth);
416:       float finalPosY = div(idx, textureWidth);
417: 
418:       return texture2D(
419:           texture,
420:           vec2(
421:             (floor(finalPosX) + 0.5) / textureWidth,
422:             (floor(finalPosY) + 0.5) / textureWidth
423:           )
424:         ).rgba;
425:     }
426:   
427: 
428: 
429:     vec4 getRgbaAtXYIndex(sampler2D texture, float textureWidth, float x, float y) {
430:       return texture2D(
431:           texture,
432:           vec2(
433:             (floor(x) + 0.5) / textureWidth,
434:             (floor(y) + 0.5) / textureWidth
435:           )
436:         ).rgba;
437:     }
438: 
439:     // Define this function for each segmentation and color layer, since iOS cannot handle
440:     // sampler2D textures[dataTextureCountPerLayer]
441:     // as a function parameter properly
442: 
443:     
444:       vec4 getRgbaAtXYIndex_layer_c1t2.nii(float textureIdx, float textureWidth, float x, float y) {
445:         vec2 accessPoint = (floor(vec2(x, y)) + 0.5) / textureWidth;
446: 
447:         // Since WebGL 1 doesnt allow dynamic texture indexing, we use an exhaustive if-else-construct
448:         // here which checks for each case individually. The else-if-branches are constructed via
449:         // lodash templates.
450: 
451:         
452:             // Don't use if-else when there is only one data texture anyway
453:             return texture2D(layer_c1t2.nii_textures[0], accessPoint).rgba;
454:         
455:       }
456:     
457:       vec4 getRgbaAtXYIndex_layer_c2t2.nii(float textureIdx, float textureWidth, float x, float y) {
458:         vec2 accessPoint = (floor(vec2(x, y)) + 0.5) / textureWidth;
459: 
460:         // Since WebGL 1 doesnt allow dynamic texture indexing, we use an exhaustive if-else-construct
461:         // here which checks for each case individually. The else-if-branches are constructed via
462:         // lodash templates.
463: 
464:         
465:             // Don't use if-else when there is only one data texture anyway
466:             return texture2D(layer_c2t2.nii_textures[0], accessPoint).rgba;
467:         
468:       }
469:
470:       vec4 getRgbaAtXYIndex_layer_c3t2.nii(float textureIdx, float textureWidth, float x, float y) {
471:         vec2 accessPoint = (floor(vec2(x, y)) + 0.5) / textureWidth;
472: 
473:         // Since WebGL 1 doesnt allow dynamic texture indexing, we use an exhaustive if-else-construct
474:         // here which checks for each case individually. The else-if-branches are constructed via
475:         // lodash templates.
476: 
477:         
478:             // Don't use if-else when there is only one data texture anyway
479:             return texture2D(layer_c3t2.nii_textures[0], accessPoint).rgba;
480:         
481:       }
482:     
483:       vec4 getRgbaAtXYIndex_layer_color(float textureIdx, float textureWidth, float x, float y) {
484:         vec2 accessPoint = (floor(vec2(x, y)) + 0.5) / textureWidth;
485: 
486:         // Since WebGL 1 doesnt allow dynamic texture indexing, we use an exhaustive if-else-construct
487:         // here which checks for each case individually. The else-if-branches are constructed via
488:         // lodash templates.
489: 
490:         
491:             // Don't use if-else when there is only one data texture anyway
492:             return texture2D(layer_color_textures[0], accessPoint).rgba;
493:         
494:       }
495:     
496:       vec4 getRgbaAtXYIndex_layer_ix_AVGTmask.nii(float textureIdx, float textureWidth, float x, float y) {
497:         vec2 accessPoint = (floor(vec2(x, y)) + 0.5) / textureWidth;
498: 
499:         // Since WebGL 1 doesnt allow dynamic texture indexing, we use an exhaustive if-else-construct
500:         // here which checks for each case individually. The else-if-branches are constructed via
501:         // lodash templates.
502: 
503:         
504:             // Don't use if-else when there is only one data texture anyway
505:             return texture2D(layer_ix_AVGTmask.nii_textures[0], accessPoint).rgba;
506:         
507:       }
508:     
509:       vec4 getRgbaAtXYIndex_layer_segmentation(float textureIdx, float textureWidth, float x, float y) {
510:         vec2 accessPoint = (floor(vec2(x, y)) + 0.5) / textureWidth;
511: 
512:         // Since WebGL 1 doesnt allow dynamic texture indexing, we use an exhaustive if-else-construct
513:         // here which checks for each case individually. The else-if-branches are constructed via
514:         // lodash templates.
515: 
516:         
517:             // Don't use if-else when there is only one data texture anyway
518:             return texture2D(layer_segmentation_textures[0], accessPoint).rgba;
519:         
520:       }
521:     
522: 
523:     vec4 getRgbaAtXYIndex(float layerIndex, float textureIdx, float textureWidth, float x, float y) {
524:       if (layerIndex == 0.0) {
525:         return getRgbaAtXYIndex_layer_c1t2.nii(textureIdx, textureWidth, x, y);
526:       } 
527:         else if (layerIndex == 1.0) {
528:           return getRgbaAtXYIndex_layer_c2t2.nii(textureIdx, textureWidth, x, y);
529:         }
530:       
531:         else if (layerIndex == 2.0) {
532:           return getRgbaAtXYIndex_layer_c3t2.nii(textureIdx, textureWidth, x, y);
533:         }
534:       
535:         else if (layerIndex == 3.0) {
536:           return getRgbaAtXYIndex_layer_color(textureIdx, textureWidth, x, y);
537:         }
538:       
539:         else if (layerIndex == 4.0) {
540:           return getRgbaAtXYIndex_layer_ix_AVGTmask.nii(textureIdx, textureWidth, x, y);
541:         }
542:       
543:         else if (layerIndex == 5.0) {
544:           return getRgbaAtXYIndex_layer_segmentation(textureIdx, textureWidth, x, y);
545:         }
546:       
547:       return vec4(0.0);
548:     }
549:   
550: 
551: 
552:     vec3 getResolutionFactors(float zoomStepA, float zoomStepB) {
553:       return getResolution(zoomStepA) / getResolution(zoomStepB);
554:     }
555:   
556: 
557: 
558:     vec4 getColorForCoords(
559:       sampler2D lookUpTexture,
560:       float layerIndex,
561:       float d_texture_width,
562:       float packingDegree,
563:       vec3 worldPositionUVW
564:     ) {
565:       vec3 coords = floor(getRelativeCoords(worldPositionUVW, zoomStep));
566:       vec3 relativeBucketPosition = div(coords, bucketWidth);
567:       vec3 offsetInBucket = mod(coords, bucketWidth);
568: 
569:       if (relativeBucketPosition.x > addressSpaceDimensions.x ||
570:           relativeBucketPosition.y > addressSpaceDimensions.y ||
571:           relativeBucketPosition.z > addressSpaceDimensions.z ||
572:           relativeBucketPosition.x < 0.0 ||
573:           relativeBucketPosition.y < 0.0 ||
574:           relativeBucketPosition.z < 0.0) {
575:         // In theory, the current magnification should always be selected
576:         // so that we won't have to address data outside of the addresSpaceDimensions.
577:         // Nevertheless, we explicitly guard against this situation here to avoid
578:         // rendering wrong data.
579:         return vec4(1.0, 1.0, 0.0, 1.0);
580:       }
581: 
582:       float bucketIdx = linearizeVec3ToIndex(relativeBucketPosition, addressSpaceDimensions);
583: 
584:       vec2 bucketAddressWithZoomStep = getRgbaAtIndex(
585:         lookUpTexture,
586:         l_texture_width,
587:         bucketIdx
588:       ).ra;
589: 
590:       float bucketAddress = bucketAddressWithZoomStep.x;
591:       float renderedZoomStep = bucketAddressWithZoomStep.y;
592: 
593:       if (bucketAddress == -2.0) {
594:         // The bucket is out of bounds. Render black
595:         // In flight mode, it can happen that buckets were not passed to the GPU
596:         // since the approximate implementation of the bucket picker missed the bucket.
597:         // We simply handle this case as if the bucket was not yet loaded which means
598:         // that fallback data is loaded.
599:         // The downside is that data which does not exist, will be rendered gray instead of black.
600:         // Issue to track progress: #3446
601:         float alpha = isFlightMode() ? -1.0 : 0.0;
602:         return vec4(0.0, 0.0, 0.0, alpha);
603:       }
604: 
605:       if (bucketAddress < 0. ||
606:           isNan(bucketAddress)) {
607:         // Not-yet-existing data is encoded with a = -1.0
608:         return vec4(0.0, 0.0, 0.0, -1.0);
609:       }
610: 
611:       if (renderedZoomStep != zoomStep) {
612:         /* We already know which fallback bucket we have to look into. However,
613:          * for 8 mag-1 buckets, there is usually one fallback bucket in mag-2.
614:          * Therefore, depending on the actual mag-1 bucket, we have to look into
615:          * different sub-volumes of the one fallback bucket. This is calculated as
616:          * the subVolumeIndex.
617:          * Then, we adapt the look up position *within* the bucket.
618:          *
619:          * Example Scenario (let's consider only the x axis):
620:          * If we are in the [4, _, _, 0]-bucket, we have to look into the **first** half
621:          * of the [2, _, _, 1]-bucket.
622:          * If we are in the [5, _, _, 0]-bucket, we have to look into the **second** half
623:          * of the [2, _, _, 1]-bucket.
624:          * We can determine which "half" (subVolumeIndex) is relevant by doing a modulo operation
625:          * with the resolution factor. A typical resolution factor is 2.
626:          */
627: 
628:         vec3 magnificationFactors = getResolutionFactors(renderedZoomStep, zoomStep);
629:         vec3 worldBucketPosition = relativeBucketPosition + anchorPoint;
630:         vec3 subVolumeIndex = mod(worldBucketPosition, magnificationFactors);
631:         offsetInBucket = floor(
632:           (offsetInBucket + vec3(bucketWidth) * subVolumeIndex)
633:           / magnificationFactors
634:         );
635:       }
636: 
637:       // bucketAddress can span multiple data textures. If the address is higher
638:       // than the capacity of one texture, we mod the value and use the div (floored division) as the
639:       // texture index
640:       float packedBucketSize = bucketSize / packingDegree;
641:       float bucketCapacityPerTexture = d_texture_width * d_texture_width / packedBucketSize;
642:       float textureIndex = floor(bucketAddress / bucketCapacityPerTexture);
643:       bucketAddress = mod(bucketAddress, bucketCapacityPerTexture);
644: 
645:       float x =
646:         // Mod while linearizing to avoid imprecisions for large numbers
647:         linearizeVec3ToIndexWithMod(offsetInBucket / packingDegree, bucketWidth, d_texture_width);
648: 
649:       float pixelIdxInBucket =
650:         // Don't mod since we have to calculate pixelIdxInBucket / d_texture_width
651:         linearizeVec3ToIndex(offsetInBucket / packingDegree, bucketWidth);
652:       float y =
653:         div(pixelIdxInBucket, d_texture_width) +
654:         div(packedBucketSize * bucketAddress, d_texture_width);
655: 
656:       vec4 bucketColor = getRgbaAtXYIndex(
657:         layerIndex,
658:         textureIndex,
659:         d_texture_width,
660:         x,
661:         y
662:       );
663: 
664:       if (packingDegree == 1.0) {
665:         return bucketColor;
666:       }
667: 
668:       float rgbaIndex = linearizeVec3ToIndexWithMod(offsetInBucket, bucketWidth, packingDegree);
669: 
670:       if (packingDegree == 2.0) {
671:         // It's essentially irrelevant what we return as the 3rd and 4th value here as we only have 2 byte of information.
672:         // The caller needs to unpack this vec4 according to the packingDegree, see getSegmentationId for an example.
673:         // The same goes for the following code where the packingDegree is 4 and we only have 1 byte of information.
674:         if (rgbaIndex == 0.0) {
675:           return vec4(bucketColor.r, bucketColor.g, bucketColor.r, bucketColor.g);
676:         } else if (rgbaIndex == 1.0) {
677:           return vec4(bucketColor.b, bucketColor.a, bucketColor.b, bucketColor.a);
678:         }
679:       }
680: 
681:       // The following code deals with packingDegree == 4.0
682:       if (rgbaIndex == 0.0) {
683:         return vec4(bucketColor.r);
684:       } else if (rgbaIndex == 1.0) {
685:         return vec4(bucketColor.g);
686:       } else if (rgbaIndex == 2.0) {
687:         return vec4(bucketColor.b);
688:       } else if (rgbaIndex == 3.0) {
689:         return vec4(bucketColor.a);
690:       }
691: 
692:       return vec4(0.0);
693:     }
694:   
695: 
696: 
697:     vec4 getBilinearColorFor(
698:       sampler2D lookUpTexture,
699:       float layerIndex,
700:       float d_texture_width,
701:       float packingDegree,
702:       vec3 coordsUVW
703:     ) {
704:       coordsUVW = coordsUVW + vec3(-0.5, -0.5, 0.0);
705:       vec2 bifilteringParams = (coordsUVW - floor(coordsUVW)).xy;
706:       coordsUVW = floor(coordsUVW);
707: 
708:       vec4 a = getColorForCoords(lookUpTexture, layerIndex, d_texture_width, packingDegree, coordsUVW);
709:       vec4 b = getColorForCoords(lookUpTexture, layerIndex, d_texture_width, packingDegree, coordsUVW + vec3(1, 0, 0));
710:       vec4 c = getColorForCoords(lookUpTexture, layerIndex, d_texture_width, packingDegree, coordsUVW + vec3(0, 1, 0));
711:       vec4 d = getColorForCoords(lookUpTexture, layerIndex, d_texture_width, packingDegree, coordsUVW + vec3(1, 1, 0));
712:       if (a.a < 0.0 || b.a < 0.0 || c.a < 0.0 || d.a < 0.0) {
713:         // We need to check all four colors for a negative parts, because there will be black
714:         // lines at the borders otherwise (black gets mixed with data)
715:         return vec4(0.0, 0.0, 0.0, -1.0);
716:       }
717: 
718:       vec4 ab = mix(a, b, bifilteringParams.x);
719:       vec4 cd = mix(c, d, bifilteringParams.x);
720: 
721:       return mix(ab, cd, bifilteringParams.y);
722:     }
723:   
724: 
725: 
726:     vec4 getTrilinearColorFor(
727:       sampler2D lookUpTexture,
728:       float layerIndex,
729:       float d_texture_width,
730:       float packingDegree,
731:       vec3 coordsUVW
732:     ) {
733:       coordsUVW = coordsUVW + vec3(-0.5, -0.5, 0.0);
734:       vec3 bifilteringParams = (coordsUVW - floor(coordsUVW)).xyz;
735:       coordsUVW = floor(coordsUVW);
736: 
737:       vec4 a = getColorForCoords(lookUpTexture, layerIndex, d_texture_width, packingDegree, coordsUVW);
738:       vec4 b = getColorForCoords(lookUpTexture, layerIndex, d_texture_width, packingDegree, coordsUVW + vec3(1, 0, 0));
739:       vec4 c = getColorForCoords(lookUpTexture, layerIndex, d_texture_width, packingDegree, coordsUVW + vec3(0, 1, 0));
740:       vec4 d = getColorForCoords(lookUpTexture, layerIndex, d_texture_width, packingDegree, coordsUVW + vec3(1, 1, 0));
741: 
742:       vec4 a2 = getColorForCoords(lookUpTexture, layerIndex, d_texture_width, packingDegree, coordsUVW + vec3(0, 0, 1));
743:       vec4 b2 = getColorForCoords(lookUpTexture, layerIndex, d_texture_width, packingDegree, coordsUVW + vec3(1, 0, 1));
744:       vec4 c2 = getColorForCoords(lookUpTexture, layerIndex, d_texture_width, packingDegree, coordsUVW + vec3(0, 1, 1));
745:       vec4 d2 = getColorForCoords(lookUpTexture, layerIndex, d_texture_width, packingDegree, coordsUVW + vec3(1, 1, 1));
746: 
747:       if (a.a < 0.0 || b.a < 0.0 || c.a < 0.0 || d.a < 0.0 ||
748:         a2.a < 0.0 || b2.a < 0.0 || c2.a < 0.0 || d2.a < 0.0) {
749:         // We need to check all four colors for a negative parts, because there will be black
750:         // lines at the borders otherwise (black gets mixed with data)
751:         return vec4(0.0, 0.0, 0.0, -1.0);
752:       }
753: 
754:       vec4 ab = mix(a, b, bifilteringParams.x);
755:       vec4 cd = mix(c, d, bifilteringParams.x);
756:       vec4 abcd = mix(ab, cd, bifilteringParams.y);
757: 
758:       vec4 ab2 = mix(a2, b2, bifilteringParams.x);
759:       vec4 cd2 = mix(c2, d2, bifilteringParams.x);
760: 
761:       vec4 abcd2 = mix(ab2, cd2, bifilteringParams.y);
762: 
763:       return mix(abcd, abcd2, bifilteringParams.z);
764:     }
765:   
766: 
767:
768:     vec4 getMaybeFilteredColor(
769:       sampler2D lookUpTexture,
770:       float layerIndex,
771:       float d_texture_width,
772:       float packingDegree,
773:       vec3 worldPositionUVW,
774:       bool suppressBilinearFiltering
775:     ) {
776:       vec4 color;
777:       if (!suppressBilinearFiltering && useBilinearFiltering) {
778:         
779:           color = getBilinearColorFor(lookUpTexture, layerIndex, d_texture_width, packingDegree, worldPositionUVW);
780:         
781:       } else {
782:         color = getColorForCoords(lookUpTexture, layerIndex, d_texture_width, packingDegree, worldPositionUVW);
783:       }
784:       return color;
785:     }
786:   
787: 
788: 
789:     vec4 getMaybeFilteredColorOrFallback(
790:       sampler2D lookUpTexture,
791:       float layerIndex,
792:       float d_texture_width,
793:       float packingDegree,
794:       vec3 worldPositionUVW,
795:       bool suppressBilinearFiltering,
796:       vec4 fallbackColor
797:     ) {
798:       vec4 color = getMaybeFilteredColor(lookUpTexture, layerIndex, d_texture_width, packingDegree, worldPositionUVW, suppressBilinearFiltering);
799: 
800:       if (color.a < 0.0) {
801:         // Render gray for not-yet-existing data
802:         color = fallbackColor;
803:       }
804: 
805:       return color;
806:     }
807:   
808: 
809: 
810:     /* Inspired from: http://lolengine.net/blog/2013/07/27/rgb-to-hsv-in-glsl */
811:     vec3 hsvToRgb(vec4 HSV)
812:     {
813:       vec4 K;
814:       vec3 p;
815:       K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
816:       p = abs(fract(HSV.xxx + K.xyz) * 6.0 - K.www);
817:       return HSV.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), HSV.y);
818:     }
819:   
820: 
821: 
822:     vec3 convertCellIdToRGB(vec4 id) {
823:       float golden_ratio = 0.618033988749895;
824:       float lastEightBits = id.r;
825:       float value = mod( lastEightBits * golden_ratio, 1.0);
826: 
827:       
828:         // If the first element of the mapping colors texture is still the initialized
829:         // value of -1, no mapping colors have been specified
830:         bool hasCustomMappingColors = getRgbaAtIndex(
831:           layer_segmentation_mapping_color_texture,
832:           16.0,
833:           0.0
834:         ).r != -1.0;
835:         if (isMappingEnabled && hasCustomMappingColors) {
836:           value = getRgbaAtIndex(
837:             layer_segmentation_mapping_color_texture,
838:             16.0,
839:             lastEightBits
840:           ).r;
841:         }
842:       
843: 
844:       vec4 HSV = vec4( value, 1.0, 1.0, 1.0 );
845:       return hsvToRgb(HSV);
846:     }
847:   
848: 
849: 
850:     vec4 getBrushOverlay(vec3 worldCoordUVW) {
851:       vec4 brushOverlayColor = vec4(0.0);
852:       bool isBrushModeActive = activeVolumeToolIndex == 2.0;
853: 
854:       if (!isMouseInCanvas || !isMouseInActiveViewport || !isBrushModeActive) {
855:         return brushOverlayColor;
856:       }
857:       vec3 flooredMousePos = floor(globalMousePosition);
858:       float baseVoxelSize = min(min(datasetScale.x, datasetScale.y), datasetScale.z);
859:       vec3 datasetScaleUVW = transDim(datasetScale) / baseVoxelSize;
860: 
861:       float dist = length((floor(worldCoordUVW.xy) - transDim(flooredMousePos).xy) * datasetScaleUVW.xy);
862: 
863:       float radius = round(brushSizeInPixel / 2.0);
864:       if (radius > dist) {
865:         brushOverlayColor = vec4(vec3(1.0), 0.5);
866:       }
867: 
868:       return brushOverlayColor;
869:     }
870:   
871: 
872: 
873:     bool greaterThanVec4(vec4 x, vec4 y) {
874:       if (x.a > y.a) return true;
875:       if (x.a < y.a) return false;
876:       if (x.b > y.b) return true;
877:       if (x.b < y.b) return false;
878:       if (x.g > y.g) return true;
879:       if (x.g < y.g) return false;
880:       if (x.r > y.r) return true;
881:       else return false;
882:     }
883:   
884: 
885: 
886:     float binarySearchIndex(sampler2D texture, float maxIndex, vec4 value) {
887:       float low = 0.0;
888:       float high = maxIndex - 1.0;
889:       // maxIndex is at most MAPPING_TEXTURE_WIDTH**2, requiring a maximum of log2(MAPPING_TEXTURE_WIDTH**2)+1 loop passes
890:       for (float i = 0.0; i < 25.0; i++) {
891:         float mid = floor((low + high) / 2.0);
892:         vec4 cur = getRgbaAtIndex(texture, 4096.0, mid);
893:         if (cur == value) {
894:           return mid;
895:         } else if (greaterThanVec4(cur, value)) {
896:           high = mid - 1.0;
897:         } else {
898:           low = mid + 1.0;
899:         }
900:       }
901:       return -1.0;
902:     }
903:   
904: 
905: 
906:     vec4 getSegmentationId(vec3 worldPositionUVW) {
907:       vec4 volume_color =
908:         getMaybeFilteredColorOrFallback(
909:           layer_segmentation_lookup_texture,
910:           5.0,
911:           layer_segmentation_data_texture_width,
912:           1.0,
913:           worldPositionUVW,
914:           true, // Don't use bilinear filtering for volume data
915:           vec4(0.0, 0.0, 0.0, 0.0)
916:         );
917: 
918: 
919:       
920:         if (isMappingEnabled) {
921:           // Depending on the packing degree, the returned volume color contains extra values
922:           // which would make the binary search fail
923: 
924:           
925: 
926:           float index = binarySearchIndex(
927:             layer_segmentation_mapping_lookup_texture,
928:             mappingSize,
929:             volume_color
930:           );
931:           if (index != -1.0) {
932:             volume_color = getRgbaAtIndex(
933:               layer_segmentation_mapping_texture,
934:               4096.0,
935:               index
936:             );
937:           } else if (hideUnmappedIds) {
938:             volume_color = vec4(0.0);
939:           }
940:         }
941:       
942: 
943:       return volume_color * 255.0;
944:     }
945:   
946: 
void main() {
  vec3 worldCoordUVW = getWorldCoordUVW();
  if (isOutsideOfBoundingBox(worldCoordUVW)) {
    gl_FragColor = vec4(0.0);
    return;
  }
  vec3 relativeCoords = getRelativeCoords(worldCoordUVW, zoomStep);

  vec3 bucketPosition = div(floor(relativeCoords), bucketWidth);
  if (renderBucketIndices) {
    // Debug mode: encode the bucket address into the output color.
    gl_FragColor = vec4(bucketPosition, zoomStep) / 255.0;
    return;
  }

  vec4 id = getSegmentationId(worldCoordUVW);

  vec3 flooredMousePosUVW = transDim(floor(globalMousePosition));

  // When hovering an isosurface in the 3D viewport, the hoveredIsosurfaceId contains
  // the hovered cell id. Otherwise, we use the mouse position to look up the active cell id.
  // Passing the mouse position from the 3D viewport is not an option here, since that position
  // isn't necessarily on the orthogonal planes.
  vec4 cellIdUnderMouse = length(hoveredIsosurfaceId) > 0.1 ? hoveredIsosurfaceId : getSegmentationId(flooredMousePosUVW);

  // Accumulate the weighted color contributions of all color layers.
  vec3 data_color = vec3(0.0);
  vec3 color_value = vec3(0.0);

  // NOTE(fix): the originally generated shader interpolated raw layer names
  // (e.g. "c1t2.nii") into identifiers. "." is not a valid character in a
  // GLSL identifier and produced the reported "'.' : syntax error". All dots
  // are sanitized to "_" below; the shader generator must declare the
  // corresponding uniforms with the same sanitized names.

  // Get grayscale value for layer "c1t2.nii"
  color_value =
    getMaybeFilteredColorOrFallback(
      layer_c1t2_nii_lookup_texture,
      0.0,
      layer_c1t2_nii_data_texture_width,
      4.0,
      worldCoordUVW,
      false,
      fallbackGray
    ).xyz;

  // Keep the color within the histogram's min/max bounds and rescale to [0, 1].
  color_value = clamp(color_value, layer_c1t2_nii_min, layer_c1t2_nii_max);
  color_value = (color_value - layer_c1t2_nii_min) / (layer_c1t2_nii_max - layer_c1t2_nii_min);

  // Weight by the layer's alpha and tint color.
  data_color += color_value * layer_c1t2_nii_alpha * layer_c1t2_nii_color;

  // Get grayscale value for layer "c2t2.nii"
  color_value =
    getMaybeFilteredColorOrFallback(
      layer_c2t2_nii_lookup_texture,
      1.0,
      layer_c2t2_nii_data_texture_width,
      4.0,
      worldCoordUVW,
      false,
      fallbackGray
    ).xyz;

  // Keep the color within the histogram's min/max bounds and rescale to [0, 1].
  color_value = clamp(color_value, layer_c2t2_nii_min, layer_c2t2_nii_max);
  color_value = (color_value - layer_c2t2_nii_min) / (layer_c2t2_nii_max - layer_c2t2_nii_min);

  // Weight by the layer's alpha and tint color.
  data_color += color_value * layer_c2t2_nii_alpha * layer_c2t2_nii_color;

  // Get grayscale value for layer "c3t2.nii"
  color_value =
    getMaybeFilteredColorOrFallback(
      layer_c3t2_nii_lookup_texture,
      2.0,
      layer_c3t2_nii_data_texture_width,
      4.0,
      worldCoordUVW,
      false,
      fallbackGray
    ).xyz;

  // Keep the color within the histogram's min/max bounds and rescale to [0, 1].
  color_value = clamp(color_value, layer_c3t2_nii_min, layer_c3t2_nii_max);
  color_value = (color_value - layer_c3t2_nii_min) / (layer_c3t2_nii_max - layer_c3t2_nii_min);

  // Weight by the layer's alpha and tint color.
  data_color += color_value * layer_c3t2_nii_alpha * layer_c3t2_nii_color;

  // Get grayscale value for layer "color"
  color_value =
    getMaybeFilteredColorOrFallback(
      layer_color_lookup_texture,
      3.0,
      layer_color_data_texture_width,
      4.0,
      worldCoordUVW,
      false,
      fallbackGray
    ).xyz;

  // Keep the color within the histogram's min/max bounds and rescale to [0, 1].
  color_value = clamp(color_value, layer_color_min, layer_color_max);
  color_value = (color_value - layer_color_min) / (layer_color_max - layer_color_min);

  // Weight by the layer's alpha and tint color.
  data_color += color_value * layer_color_alpha * layer_color_color;

  // Get grayscale value for layer "ix_AVGTmask.nii"
  color_value =
    getMaybeFilteredColorOrFallback(
      layer_ix_AVGTmask_nii_lookup_texture,
      4.0,
      layer_ix_AVGTmask_nii_data_texture_width,
      4.0,
      worldCoordUVW,
      false,
      fallbackGray
    ).xyz;

  // Keep the color within the histogram's min/max bounds and rescale to [0, 1].
  color_value = clamp(color_value, layer_ix_AVGTmask_nii_min, layer_ix_AVGTmask_nii_max);
  color_value = (color_value - layer_ix_AVGTmask_nii_min) / (layer_ix_AVGTmask_nii_max - layer_ix_AVGTmask_nii_min);

  // Weight by the layer's alpha and tint color.
  data_color += color_value * layer_ix_AVGTmask_nii_alpha * layer_ix_AVGTmask_nii_color;

  data_color = clamp(data_color, 0.0, 1.0);

  gl_FragColor = vec4(data_color, 1.0);

  // Blend the segmentation color map on top (> 0.1 to fight rounding mistakes).
  if (length(id) > 0.1) {
    // Increase the cell's opacity when it is hovered, but only if the
    // highlight feature is enabled and segmentation opacity is not zero.
    float hoverAlphaIncrement =
      cellIdUnderMouse == id && highlightHoveredCellId && alpha > 0.0
        ? 0.2 : 0.0;
    gl_FragColor = vec4(mix(data_color, convertCellIdToRGB(id), alpha + hoverAlphaIncrement), 1.0);
  }

  // Finally blend the brush circle, tinted with the active cell's color.
  vec4 brushOverlayColor = getBrushOverlay(worldCoordUVW);
  brushOverlayColor.xyz = convertCellIdToRGB(activeCellId);
  gl_FragColor = mix(gl_FragColor, brushOverlayColor, brushOverlayColor.a);
}
leowe commented 4 years ago

When I edit the datasource_properties to show only two of the layers, the error goes away.

daniel-wer commented 4 years ago

Could be another occurrence of https://github.com/scalableminds/webknossos/issues/4099 — it looks like the `.` in your layer names is the problem, since it ends up inside generated GLSL identifiers.

leowe commented 4 years ago

Yes, this was the problem.