Closed — ibelem closed this issue 4 years ago
assets/js/webnn/util/BaseRunner.js
// NOTE(review): excerpt only — the enclosing method around this snippet was
// elided when pasting, so this class body is not valid JavaScript as shown.
class BaseRunner {
// The AUDIO branch below is deliberately disabled in this excerpt: inputs are
// always decoded via getTensorArray (image/video path).
// if (src.tagName === 'AUDIO') {
// tensorArray = await getTensorArrayByAudio(src, options);
// } else {
tensorArray = getTensorArray(src, options);
// }
}
/**
 * Runner for the semantic-segmentation model: loads the class-label list and
 * exposes it on every inference output alongside the Int32 class-index map.
 */
class SemanticSegmentationRunner extends BaseRunner {
  constructor() {
    super();
    this._labels = null; // Array<string>, one label per output class index
  }

  _setLabels = (labels) => {
    this._labels = labels;
  };

  /**
   * Fetch the labels file at `url` and store one label per line.
   * FIX: split on /\r?\n/ and strip trailing whitespace first — the old
   * split('\n') kept '\r' on CRLF files (labels like 'person\r') and produced
   * a spurious '' label when the file ended with a newline. Interior blank
   * lines are preserved so class indices are not shifted.
   */
  _getLabels = async (url) => {
    const result = await this._loadURL(url);
    this._setLabels(result.replace(/\s+$/, '').split(/\r?\n/));
    console.log(`labels: ${this._labels}`);
  };

  // Model-specific extra resources: only the labels file.
  _getOtherResources = async () => {
    await this._getLabels(this._currentModelInfo.labelsFile);
  };

  // Output tensor is a per-pixel class-index map, hence Int32.
  _getOutputTensorTypedArray = () => {
    return Int32Array;
  };

  // Attach the label list to each inference output for the renderer.
  _updateOutput = (output) => {
    output.labels = this._labels;
  };
}
export { BaseRunner, SemanticSegmentationRunner }
config.js
semanticsegmentation: {
modelName: 'Deeplab 257 Atrous (TFLite)',
format: 'TFLite',
modelId: 'deeplab_mobilenet_v2_257_atrous_tflite',
modelSize: '8.4MB',
modelFile: '../../js/webnn/ss/model/deeplab_mobilenetv2_257_dilated.tflite',
labelsFile: '../../js/webnn/ss/model/labels.txt',
isQuantized: false,
inputSize: [257, 257, 3],
outputSize: [257, 257, 1],
preOptions: {
mean: [127.5, 127.5, 127.5],
std: [127.5, 127.5, 127.5]
}
isQuantized: false,
id.js
getClippedSize(source) {
const width = config.semanticsegmentation.inputSize[0]
const imWidth = source.naturalWidth || source.videoWidth
const imHeight = source.naturalHeight || source.videoHeight
const resizeRatio = Math.max(Math.max(imWidth, imHeight) / width, 1)
const scaledWidth = Math.floor(imWidth / resizeRatio)
const scaledHeight = Math.floor(imHeight / resizeRatio)
return [scaledWidth, scaledHeight]
},
getSegMap() {
const output = this.runner.getOutput()
const segMap = {
data: output.outputTensor,
outputShape: config.semanticsegmentation.outputSize,
labels: output.labels
}
return segMap
},
customOutput(source) {
this.renderer.uploadNewTexture(source, this.getClippedSize(source))
this.renderer.drawOutputs(this.getSegMap())
},
output(source) {
const output = this.runner.getOutput()
this.inferencetime = output.inferenceTime.toFixed(2)
this.showfps = fps
console.log(
`Inference time: ${this.inferencetime} ms / FPS: ${this.showfps}`
)
},
async predict(source) {
// const inputSize = config.semanticsegmentation.inputSize
const drawOptions = {
inputSize: config.semanticsegmentation.inputSize,
preOptions: config.semanticsegmentation.preOptions || {},
imageChannels: 4,
scaledFlag: true
}
// const ret = await this.runner.run(source, drawOptions)
// return ret
await this.runner.run(source, drawOptions)
},
// Grab the current frame from the local <video>, run inference on it, log
// timing, then render the segmentation overlay — and reschedule itself.
async predictFrame() {
  const source = this.$refs.localvideo
  await this.predict(source)
  // NOTE(review): output() declares a `source` parameter but is called
  // without one here — it is unused there today; confirm before relying on it.
  this.output()
  this.renderer.uploadNewTexture(source, this.getClippedSize(source))
  this.stats.begin()
  // NOTE(review): stats.begin()/end() bracket only drawOutputs, so the stats
  // panel measures render time, not inference time — confirm that is intended.
  this.renderer.drawOutputs(this.getSegMap())
  this.stats.end()
  // Loop via setTimeout(…, 0) so the event loop keeps breathing between
  // frames; the timer id is stored, presumably so the loop can be cancelled
  // elsewhere via clearTimeout(this.sstimer) — verify against callers.
  this.sstimer = setTimeout(this.predictFrame, 0)
},
// Start the segmentation effect: initialize the renderer for `effect`, kick
// off the prediction loop, then remove the raw published stream.
async ss(effect) {
  this.initRenderer(effect)
  // NOTE(review): predictFrame() is async but not awaited — fire-and-forget;
  // confirm rejections are handled inside the loop so they are not swallowed.
  this.predictFrame()
  console.log('delete stream')
  // NOTE(review): this runs immediately after the loop is started, before the
  // first frame has necessarily been processed — confirm ordering is intended.
  deleteStream(this.roomId, this.localPublication.id)
}
.gitignore
*.tflite
Update static/js/webnn/util/
to latest
├── base.js
├── decoders
│ ├── SsdDecoder.js
│ └── Yolo2Decoder.js
├── mfcc
│ ├── mfcc.js
│ └── mfcc.wasm
├── onnx
│ ├── onnx.js
│ ├── OnnxModelImporter.js
│ └── OnnxModelUtils.js
├── openvino
│ ├── openvino.js
│ ├── OpenVINOModelImporter.js
│ └── OpenVINOModelUtils.js
└── tflite
├── flatbuffers.js
├── schema
│ ├── README.md
│ └── schema_generated.js
├── TFliteModelImporter.js
└── TfLiteModelUtils.js
Also reproduced with WebML SS Examples.