Closed: SiddhantBahuguna closed this issue 2 years ago.
Alright, I was able to reproduce the error as well with this image, which produces the following error:
```
Unhandled Rejection (Error): Failed to compile fragment shader.

    Module.createFragmentShader (src/webgl_util.ts:103)
    GPGPUContext.createProgram (src/gpgpu_context.ts:281)
    Module.compileProgram (src/gpgpu_math.ts:100)
    (anonymous function) (src/backend_webgl.ts:860)
    MathBackendWebGL.getAndSaveBinary (src/backend_webgl.ts:913)
    MathBackendWebGL.runWebGLProgram (src/backend_webgl.ts:859)
    conv2dWithIm2Row (src/kernels/Conv2D_impl.ts:237)
    fusedConv2d [as kernelFunc] (src/kernels/FusedConv2D.ts:67)
    kernelFunc (src/engine.ts:644)
    (anonymous function) (src/engine.ts:711)
    Engine.scopedRun (src/engine.ts:478)
    Engine.runKernelFunc (src/engine.ts:707)
    Engine.runKernel (src/engine.ts:551)
    (anonymous function) (src/ops/fused/conv2d.ts:256)
    forwardFunc (src/engine.ts:1163)
    (anonymous function) (src/engine.ts:688)
    (anonymous function) (src/engine.ts:467)
    Engine.scopedRun (src/engine.ts:478)
    Engine.tidy (src/engine.ts:465)
    kernelFunc (src/engine.ts:688)
    (anonymous function) (src/engine.ts:711)
    Engine.scopedRun (src/engine.ts:478)
    Engine.runKernelFunc (src/engine.ts:707)
    (anonymous function) (src/engine.ts:1195)
    fusedConv2d_ (src/ops/fused/conv2d.ts:271)
    Module.fusedConv2d__op (src/ops/operation.ts:51)
    Module.executeOp (src/operations/executors/convolution_executor.ts:131)
    (anonymous function) (src/operations/operation_executor.ts:69)
    (anonymous function) (src/engine.ts:467)
    Engine.scopedRun (src/engine.ts:478)
    Engine.tidy (src/engine.ts:465)
    Module.tidy (src/globals.ts:192)
    (anonymous function) (src/operations/operation_executor.ts:69)
    executeOp (src/operations/operation_executor.ts:121)
    (anonymous function) (src/executor/graph_executor.ts:237)
    (anonymous function) (src/engine.ts:467)
    Engine.scopedRun (src/engine.ts:478)
    Engine.tidy (src/engine.ts:465)
    tidy (src/globals.ts:192)
    GraphExecutor.execute (src/executor/graph_executor.ts:219)
    GraphModel.execute (src/executor/graph_model.ts:338)
    (anonymous function) (src/utils/index.ts:204)
    getHeatMapFromImage (src/utils/index.ts:184)
    Image.imageObject.current.onload (src/components/VisionWrapper.tsx:113)
```
It looks like the fragment shaders/binaries compiled for db_resnet50 exceed the GPU memory limits of WebGL :thinking: @AbdelfattahSekak can you confirm whether this is indeed the cause, and whether we can do something about it?
@charlesmindee I think I remember you managed to get db_resnet50 to run in the interface; do you not encounter this error at all?
If we can't get it to work, I suggest we remove the resnet backbone for now, since the bundle is much bigger with it :upside_down_face:
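If it is the im2col convolution path that produces the oversized program, one quick experiment on our side (a minimal sketch using the standard tfjs flag API, not something I have verified on the demo) would be to disable that path, since the failing stack goes through `conv2dWithIm2Row`, and to log the WebGL limits the backend detected:

```ts
import * as tf from '@tensorflow/tfjs';

// Experiment, not a verified fix: the failing stack goes through
// conv2dWithIm2Row, which tfjs only uses when the WEBGL_CONV_IM2COL flag is
// enabled. Forcing the plain conv2d shader may avoid the offending program,
// usually at some cost in speed. Set the flag before running the model.
tf.env().set('WEBGL_CONV_IM2COL', false);

// Also log the limits the WebGL backend detected on this machine, to see how
// much headroom db_resnet50 actually has.
console.log('WEBGL_VERSION:', tf.env().getNumber('WEBGL_VERSION'));
console.log('WEBGL_MAX_TEXTURE_SIZE:', tf.env().getNumber('WEBGL_MAX_TEXTURE_SIZE'));
```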
They are both working with the latest version.
For inference with ResNet, I am getting the following error:

```
Unhandled Rejection (Error): Requested texture size [8689x8689] greater than WebGL maximum on this browser / GPU [8192x8192]
```

Please find the input image attached.
It is, however, working perfectly fine with MobileNet. Could you please have a look and check whether you are experiencing the same problem? Thanks a lot!
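Since the requested texture (8689x8689) only slightly exceeds the 8192 limit, one workaround worth experimenting with (a rough, untested sketch; the helper name and target sizes below are illustrative, not taken from the demo code) is to query the limit and feed the detection model a smaller input, so that intermediate textures such as the im2col buffer stay under it:

```ts
import * as tf from '@tensorflow/tfjs';

// Illustrative helper (not part of the demo code): query the WebGL limit that
// tfjs detected and build a smaller detection input so intermediate textures
// stay below it.
const maxTexSize = tf.env().getNumber('WEBGL_MAX_TEXTURE_SIZE'); // 8192 on this GPU
console.log('WEBGL_MAX_TEXTURE_SIZE:', maxTexSize);

function imageToDetectionTensor(img: HTMLImageElement, targetSize: number): tf.Tensor4D {
  return tf.tidy(() => {
    // Read pixels, scale to [0, 1], resize, and add the batch dimension.
    const pixels = tf.browser.fromPixels(img).toFloat().div(255) as tf.Tensor3D;
    const resized = tf.image.resizeBilinear(pixels, [targetSize, targetSize]);
    return resized.expandDims(0) as tf.Tensor4D;
  });
}

// Usage sketch: try e.g. 512 instead of 1024; every intermediate texture
// shrinks roughly quadratically with the input side length.
// const tensor = imageToDetectionTensor(imageElement, 512);
```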