Issue opened by iwoplaza 3 months ago.
// Proposed API: a compute step whose WGSL body bumps the counter buffer.
// `asMutable` presumably marks the buffer as shader-writable — TODO confirm semantics.
const incrementStep = wgsl.computeStep({
code: wgsl`${asMutable(counterBuffer)} += 1;`,
});
/**
 * Dispatches the increment compute step, then reads back the counter buffer.
 *
 * @returns {Promise<unknown>} the counter buffer contents after the dispatch.
 *   The original example computed the read-back value but silently discarded
 *   it in an unused local; returning it makes the example actually useful and
 *   stays backward-compatible (callers that ignored the promise value still work).
 */
async function increment() {
  runtime.compute(incrementStep);
  // Return the read-back value instead of dropping it into an unused local.
  return runtime.readBuffer(counterBuffer);
}
// Proposed vertex step: emits a full-screen quad as two triangles (6 vertices),
// selected by the builtin vertex index. The `output` map binds the WGSL
// variable `outPos` to the @builtin(position) slot.
const vertexStep = wgsl.vertexStep({
code: wgsl`
var pos = array<vec2f, 6>(
vec2<f32>( 1, 1),
vec2<f32>( 1, -1),
vec2<f32>(-1, -1),
vec2<f32>( 1, 1),
vec2<f32>(-1, -1),
vec2<f32>(-1, 1)
);
let outPos = vec4f(pos[${builtin.vertexIndex}], 0, 1);
`,
output: {
[builtin.position]: 'outPos',
},
});
// Proposed fragment step: per-fragment ray test against a grid of axis-aligned
// boxes. It builds a camera ray from the builtin position and the camera axes,
// early-outs via a bounding-box test, then brute-forces every X*Y*Z cell,
// keeping the albedo of the nearest intersected active cell.
// NOTE(review): `code` is a thunk here (unlike the other steps) — presumably so
// interpolated resources are resolved lazily; confirm against the API design.
const fragmentStep = wgsl.fragmentStep({
code: () => wgsl`
let minDim = f32(min(${canvasDimsData}.width, ${canvasDimsData}.height));
var ray: ${rayStruct};
ray.origin = ${cameraPositionData};
ray.direction += ${cameraAxesData}.right * (${builtin.position}.x - f32(${canvasDimsData}.width)/2)/minDim;
ray.direction += ${cameraAxesData}.up * (${builtin.position}.y - f32(${canvasDimsData}.height)/2)/minDim;
ray.direction += ${cameraAxesData}.forward;
ray.direction = normalize(ray.direction);
let bigBoxIntersection = ${getBoxIntersectionFn}(
-vec3f(f32(${boxSizeData}))/2,
vec3f(
${cubeSize[0]},
${cubeSize[1]},
${cubeSize[2]},
) + vec3f(f32(${boxSizeData}))/2,
ray,
);
var color = vec4f(0);
if bigBoxIntersection.intersects {
var tMin: f32;
var intersectionFound = false;
for (var i = 0; i < ${X}; i = i+1) {
for (var j = 0; j < ${Y}; j = j+1) {
for (var k = 0; k < ${Z}; k = k+1) {
if ${boxMatrixData}[i][j][k].isActive == 0 {
continue;
}
let intersection = ${getBoxIntersectionFn}(
vec3f(f32(i), f32(j), f32(k)) * ${MAX_BOX_SIZE} - vec3f(f32(${boxSizeData}))/2,
vec3f(f32(i), f32(j), f32(k)) * ${MAX_BOX_SIZE} + vec3f(f32(${boxSizeData}))/2,
ray,
);
if intersection.intersects && (!intersectionFound || intersection.tMin < tMin) {
color = ${boxMatrixData}[i][j][k].albedo;
tMin = intersection.tMin;
intersectionFound = true;
}
}
}
}
}
return color;
`,
// Render-target formats this fragment stage writes to.
targets: [
{
format: presentationFormat,
},
],
});
// NOTE(review): this function is named `increment` like the compute example
// above, but it performs a render pass — presumably a copy-paste name from
// that example; consider renaming (e.g. `render`) before this lands anywhere.
async function increment() {
runtime
.vertex(vertexStep)
.fragment(fragmentStep)
.render({
colorAttachments: [
{
view: textureView,
clearValue: [0, 0, 0, 0],
loadOp: 'clear',
storeOp: 'store',
},
],
vertexCount: 6,
});
// NOTE(review): reading `counterBuffer` after a render pass also looks carried
// over from the compute snippet, and the value is never used — confirm intent.
const result = await runtime.readBuffer(counterBuffer);
}
@mhawryluk @reczkok Thoughts? 👀
Currently I don't think separating vertex and fragment shaders is a very good idea. They always go together and imo separating them just creates room for errors and confusion. I like the idea of dropping the 'pipeline' name tho.
I like this new API. I don't think it's a massive improvement to readability, but it's a nice touch.
Currently I don't think separating vertex and fragment shaders is a very good idea. They always go together and imo separating them just creates room for errors and confusion. I like the idea of dropping the 'pipeline' name tho.
That is true; however, if we want to go in the direction of `.tgpu`, then we would need to think of a different syntax than creating two separate `@vertex` and `@fragment` functions. With them being separated, we can still enforce types statically:
// Inferred as WgslVertexStep<{ uv: Vec2f }>
const vertexStep = wgsl.vertexStep({
code: wgsl``,
// Declares the vertex outputs: the builtin position plus a user-defined
// `uv` varying — this is what lets the step's type be inferred statically.
output: {
[builtin.position]: 'out_pos',
'uv': vec2f,
},
});
// The fragment step declares via the generic which varyings it consumes, so a
// vertex step typed as producing `{ uv: Vec2f }` type-checks against it, and
// the `code` thunk receives those varyings destructured as named bindings.
const fragmentStep = wgsl.fragmentStep<{ uv: Vec2f }>({
code: ({ uv }) => wgsl`
let val = ${uv};
`,
// ...
});
If we have it typed it would also create a nice way to create multiple fragment shaders for one vertex shader and easily swap between them without risking incompatibility issues between vertex output and fragment input. I like it 👍
Before:
Iteration 1:
Pipelines are created on demand and cached based on the passed-in code's object identity and parameters (optional).