Closed lwansbrough closed 8 years ago
This addresses the same issue outlined in #31. Here's example usage:
/**
 * THREE.VREffect
 *
 * Renders a scene side-by-side for a WebVR head-mounted display: one
 * half-width scissored viewport per eye, each with its own projection
 * matrix and inter-pupillary translation. Falls back to a plain render
 * when no HMD is detected.
 *
 * @param {Function} [onError] - Invoked with a message string if no
 *   HMDVRDevice is found among `navigator.getVRDevices()` results.
 */
THREE.VREffect = function (onError) {

  var vrHMD;
  var eyeTranslationL, eyeFOVL;
  var eyeTranslationR, eyeFOVR;

  // Capture the effect instance. `render` is commonly handed around as a
  // bare callback (e.g. customRender={effect.render}); in that case `this`
  // inside the function is not the effect and `this.scale` would be
  // undefined, producing NaN eye offsets. Read `scope.scale` instead.
  var scope = this;

  // Picks the first HMD device and caches its per-eye translation and FOV.
  function gotVRDevices(devices) {
    for (var i = 0; i < devices.length; i++) {
      if (devices[i] instanceof HMDVRDevice) {
        vrHMD = devices[i];
        if (vrHMD.getEyeParameters !== undefined) {
          var eyeParamsL = vrHMD.getEyeParameters('left');
          var eyeParamsR = vrHMD.getEyeParameters('right');
          eyeTranslationL = eyeParamsL.eyeTranslation;
          eyeTranslationR = eyeParamsR.eyeTranslation;
          eyeFOVL = eyeParamsL.recommendedFieldOfView;
          eyeFOVR = eyeParamsR.recommendedFieldOfView;
        } else {
          // TODO: This is an older code path and not spec compliant.
          // It should be removed at some point in the near future.
          eyeTranslationL = vrHMD.getEyeTranslation('left');
          eyeTranslationR = vrHMD.getEyeTranslation('right');
          eyeFOVL = vrHMD.getRecommendedEyeFieldOfView('left');
          eyeFOVR = vrHMD.getRecommendedEyeFieldOfView('right');
        }
        break; // We keep the first we encounter
      }
    }

    if (vrHMD === undefined) {
      if (onError) onError('HMD not available');
    }
  }

  if (navigator.getVRDevices) {
    navigator.getVRDevices().then(gotVRDevices);
  }

  // World-units multiplier applied to the per-eye translation offsets.
  this.scale = 1;

  var cameraL = new THREE.PerspectiveCamera();
  var cameraR = new THREE.PerspectiveCamera();

  /**
   * Render `scene` (or a [leftScene, rightScene] pair) with `camera`.
   * With an HMD present, each eye gets its own half-width viewport;
   * otherwise a single regular render is performed.
   *
   * Safe to call unbound — it does not rely on `this`.
   *
   * @param {THREE.WebGLRenderer} renderer
   * @param {THREE.Scene|THREE.Scene[]} scene - one scene, or [left, right]
   * @param {THREE.PerspectiveCamera} camera
   */
  this.render = function (renderer, scene, camera) {
    if (vrHMD) {
      var sceneL, sceneR;
      if (Array.isArray(scene)) {
        sceneL = scene[0];
        sceneR = scene[1];
      } else {
        sceneL = scene;
        sceneR = scene;
      }

      var size = renderer.getSize();
      size.width /= 2; // each eye renders into half the canvas

      renderer.enableScissorTest(true);
      renderer.clear();

      if (camera.parent === null) camera.updateMatrixWorld();

      cameraL.projectionMatrix = fovToProjection(eyeFOVL, true, camera.near, camera.far);
      cameraR.projectionMatrix = fovToProjection(eyeFOVR, true, camera.near, camera.far);

      camera.matrixWorld.decompose(cameraL.position, cameraL.quaternion, cameraL.scale);
      camera.matrixWorld.decompose(cameraR.position, cameraR.quaternion, cameraR.scale);

      // Use the captured `scope`, not `this` — see note above.
      cameraL.translateX(eyeTranslationL.x * scope.scale);
      cameraR.translateX(eyeTranslationR.x * scope.scale);

      // render left eye
      renderer.setViewport(0, 0, size.width, size.height);
      renderer.setScissor(0, 0, size.width, size.height);
      renderer.render(sceneL, cameraL);

      // render right eye
      renderer.setViewport(size.width, 0, size.width, size.height);
      renderer.setScissor(size.width, 0, size.width, size.height);
      renderer.render(sceneR, cameraR);

      renderer.enableScissorTest(false);

      return;
    }

    // Regular render mode if not HMD
    if (Array.isArray(scene)) scene = scene[0];
    renderer.render(scene, camera);
  };

  // Converts an eye FOV (given as half-angle tangents) into the scale and
  // offset that map the asymmetric frustum onto normalized device coords.
  function fovToNDCScaleOffset(fov) {
    var pxscale = 2.0 / (fov.leftTan + fov.rightTan);
    var pxoffset = (fov.leftTan - fov.rightTan) * pxscale * 0.5;
    var pyscale = 2.0 / (fov.upTan + fov.downTan);
    var pyoffset = (fov.upTan - fov.downTan) * pyscale * 0.5;
    return { scale: [pxscale, pyscale], offset: [pxoffset, pyoffset] };
  }

  // Builds an off-axis projection matrix from a tangent-form FOV port.
  function fovPortToProjection(fov, rightHanded, zNear, zFar) {
    rightHanded = rightHanded === undefined ? true : rightHanded;
    zNear = zNear === undefined ? 0.01 : zNear;
    zFar = zFar === undefined ? 10000.0 : zFar;

    var handednessScale = rightHanded ? -1.0 : 1.0;

    // start with an identity matrix
    var mobj = new THREE.Matrix4();
    var m = mobj.elements;

    // and with scale/offset info for normalized device coords
    var scaleAndOffset = fovToNDCScaleOffset(fov);

    // X result, map clip edges to [-w,+w]
    m[0 * 4 + 0] = scaleAndOffset.scale[0];
    m[0 * 4 + 1] = 0.0;
    m[0 * 4 + 2] = scaleAndOffset.offset[0] * handednessScale;
    m[0 * 4 + 3] = 0.0;

    // Y result, map clip edges to [-w,+w]
    // Y offset is negated because this proj matrix transforms from world coords with Y=up,
    // but the NDC scaling has Y=down (thanks D3D?)
    m[1 * 4 + 0] = 0.0;
    m[1 * 4 + 1] = scaleAndOffset.scale[1];
    m[1 * 4 + 2] = -scaleAndOffset.offset[1] * handednessScale;
    m[1 * 4 + 3] = 0.0;

    // Z result (up to the app)
    m[2 * 4 + 0] = 0.0;
    m[2 * 4 + 1] = 0.0;
    m[2 * 4 + 2] = zFar / (zNear - zFar) * -handednessScale;
    m[2 * 4 + 3] = (zFar * zNear) / (zNear - zFar);

    // W result (= Z in)
    m[3 * 4 + 0] = 0.0;
    m[3 * 4 + 1] = 0.0;
    m[3 * 4 + 2] = handednessScale;
    m[3 * 4 + 3] = 0.0;

    mobj.transpose();
    return mobj;
  }

  // Converts a degrees-form FOV (per the WebVR VRFieldOfView interface)
  // into tangents, then delegates to fovPortToProjection.
  function fovToProjection(fov, rightHanded, zNear, zFar) {
    var DEG2RAD = Math.PI / 180.0;
    var fovPort = {
      upTan: Math.tan(fov.upDegrees * DEG2RAD),
      downTan: Math.tan(fov.downDegrees * DEG2RAD),
      leftTan: Math.tan(fov.leftDegrees * DEG2RAD),
      rightTan: Math.tan(fov.rightDegrees * DEG2RAD)
    };
    return fovPortToProjection(fovPort, rightHanded, zNear, zFar);
  }

};
then later...
componentWillMount() {
// Create the VR effect once, before first render; its `render` method is
// handed to the scene below as the custom render callback.
// NOTE(review): componentWillMount is a legacy React lifecycle method —
// confirm the target React version still supports it.
this.effect = new THREE.VREffect();
}
...
render() {
<Scene customRender={this.effect.render} ... />
}
I should be able to dump the above example code into the cupcake example and have it work, correct? Would make for a good VR example/test.
Presumably yeah!
For WebVR you'd need to map HMD inputs to the camera's quaternion.
This is particularly useful for plugging in custom render passes — such as the stereo (per-eye) rendering that VR scenarios require.