The main code is as follows:
const tileOptions = {
  contentLoader: async (tile) => {
    let tileContent = null;
    switch (tile.type) {
      case TILE_TYPE.POINTCLOUD: {
        tileContent = createPointNodes(tile, pointcloudMaterial, options, rootTransformInverse);
        break;
      }
      case TILE_TYPE.SCENEGRAPH:
      case TILE_TYPE.MESH: {
        tileContent = await createGLTFNodes(gltfLoader, tile, unlitMaterial, options, rootTransformInverse);
        break;
      }
      default:
        break;
    }
    if (tileContent) {
      tileContent.visible = false;
      renderMap[tile.id] = tileContent;
      root.add(renderMap[tile.id]);
      if (options.debug) {
        const box = Util.loadersBoundingBoxToMesh(tile);
        tileBoxes.add(box);
        boxMap[tile.id] = box;
      }
    }
  },
  onTileLoad: async (tile) => {
    if (tileset) {
      if (!orientationDetected && tile?.depth <= MAX_DEPTH_FOR_ORIENTATION) {
        detectOrientation(tile);
      }
      tileset._frameNumber++;
      tilesetUpdate(tileset, renderMap, rendererReference, cameraReference);
    }
  },
  onTileUnload: (tile) => {
    unloadQueue.push(tile);
  },
  onTileError: (tile, message) => {
    console.error('Tile error', tile.id, message);
  },
};

const tileset = new Tileset3D(tilesetJson, {
  ...tileOptions,
});
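Note that onTileUnload above only queues tiles; the queued tiles still have to be removed from the scene and disposed of somewhere, for example once per frame. Below is a minimal sketch of such a drain step, assuming the renderMap, boxMap, root, tileBoxes and unloadQueue variables from the snippet above; the function name and the call site are hypothetical, only the Three.js dispose calls are standard API:

function drainUnloadQueue() {
  // Remove and dispose every tile that loaders.gl has asked us to unload
  while (unloadQueue.length > 0) {
    const tile = unloadQueue.shift();
    const node = renderMap[tile.id];
    if (node) {
      root.remove(node);
      node.traverse((object) => {
        if (object.type == "Mesh") {
          const mesh = object as Mesh;
          mesh.geometry.dispose();
          (mesh.material as Material).dispose();
        }
      });
      delete renderMap[tile.id];
    }
    if (boxMap[tile.id]) {
      tileBoxes.remove(boxMap[tile.id]);
      delete boxMap[tile.id];
    }
  }
}

Calling something like this from the render loop before drawing each frame keeps GPU memory bounded as the camera moves through the tileset. The createGLTFNodes helper referenced in contentLoader is below.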
async function createGLTFNodes(gltfLoader, tile, unlitMaterial, options, rootTransformInverse): Promise<Group> {
  return new Promise(async (resolve, reject) => {
    // The computed transform already contains the root's transform, so we have to invert it
    const contentTransform = new Matrix4().fromArray(tile.computedTransform).premultiply(rootTransformInverse);
    if (shouldRotate) {
      contentTransform.multiply(rotateX); // convert from glTF Y-up to Z-up
    }
    console.log("gltf", tile);
    // I don't know how to feed loaders.gl's parsed glTF buffers to three.js,
    // so I load the .glb file a second time with GLBLoader to get binChunks.
    const content = await load(tile.contentUrl, GLBLoader, {});
    console.log("glbcontent", content);
    gltfLoader.parse(
      content.binChunks[0].arrayBuffer,
      tile.contentUrl ? tile.contentUrl.substr(0, tile.contentUrl.lastIndexOf('/') + 1) : '',
      (gltf) => {
        const tileContent = gltf.scenes[0] as Group;
        tileContent.applyMatrix4(contentTransform);
        tileContent.traverse((object) => {
          if (object.type == "Mesh") {
            const mesh = object as Mesh;
            const originalMaterial = mesh.material as MeshStandardMaterial;
            const originalMap = originalMaterial.map;
            if (options.material) {
              mesh.material = options.material.clone();
              originalMaterial.dispose();
            } else if (options.shading == Shading.FlatTexture) {
              mesh.material = unlitMaterial.clone();
              originalMaterial.dispose();
            }
            if (options.shading != Shading.ShadedNoTexture) {
              if ((mesh.material as Material).type == "ShaderMaterial") {
                (mesh.material as ShaderMaterial).uniforms.map = { value: originalMap };
              } else {
                (mesh.material as MeshStandardMaterial).map = originalMap;
              }
            } else {
              if (originalMap) {
                originalMap.dispose();
              }
              (mesh.material as MeshStandardMaterial).map = null;
            }
            if (options.shaderCallback) {
              mesh.onBeforeRender = options.shaderCallback;
            }
            (mesh.material as MeshStandardMaterial | MeshBasicMaterial).wireframe = options.wireframe;
            if (options.computeNormals) {
              mesh.geometry.computeVertexNormals();
            }
          }
        });
        resolve(tileContent);
      },
      (e) => {
        reject(new Error(`error parsing gltf in tile ${tile.id}: ${e}`));
      },
    );
  });
}
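As an aside on the double fetch above: if I remember the loaders.gl options correctly, the Tiles3DLoader can be told to skip its own glTF parsing and keep the raw glTF bytes on the tile content, which three.js's GLTFLoader.parse can consume directly. The option name (loadGLTF) and the content field (gltfArrayBuffer) in the sketch below are assumptions on my part and should be checked against the loaders.gl docs:

// Assumption: '3d-tiles'.loadGLTF = false keeps the unparsed glTF bytes on tile.content
const tileset = new Tileset3D(tilesetJson, {
  ...tileOptions,
  loadOptions: {
    '3d-tiles': { loadGLTF: false },
  },
});

// Then, inside createGLTFNodes, there would be no need to re-download the .glb:
gltfLoader.parse(
  tile.content.gltfArrayBuffer,   // assumed field holding the raw GLB/glTF bytes
  tile.contentUrl ? tile.contentUrl.substr(0, tile.contentUrl.lastIndexOf('/') + 1) : '',
  (gltf) => { /* same handling as above */ },
  (e) => { /* same error handling as above */ },
);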
If you are integrating loaders.gl with THREE.js, you may want to look at the following example, which was contributed to loaders.gl by someone who did just that: https://github.com/visgl/loaders.gl/tree/master/examples/experimental/3d-tiles-with-three.js.
The example is not maintained and may no longer work on the latest loaders.gl without small modifications, but I expect it shows how another developer solved the integration issues you are dealing with.
I need to pass .binChunks[0].arrayBuffer from the GLB data to three.js's GLTFLoader for WebGL display.
But the glTF data produced by loaders.gl does not seem to have binChunks.
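For what it's worth, binChunks may not be needed at all: three.js's GLTFLoader.parse accepts the complete binary GLB ArrayBuffer (it detects the glTF magic header itself), so handing it the raw file content is enough. A minimal sketch, inside an async context; the fetch here is only for illustration, since in the tileset code above the bytes would ideally come from the already-loaded tile content:

const response = await fetch(tile.contentUrl);
const glbArrayBuffer = await response.arrayBuffer();

gltfLoader.parse(
  glbArrayBuffer, // the whole GLB file, not just the BIN chunk
  tile.contentUrl.substr(0, tile.contentUrl.lastIndexOf('/') + 1),
  (gltf) => {
    // gltf.scenes[0] is a THREE.Group that can be transformed and added to the scene
  },
  (e) => console.error('error parsing glb', e),
);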