Diffstat (limited to 'LibOVR/Src/Util')
-rw-r--r--  LibOVR/Src/Util/Util_ImageWindow.cpp   |  511
-rw-r--r--  LibOVR/Src/Util/Util_ImageWindow.h     |  200
-rw-r--r--  LibOVR/Src/Util/Util_Interface.cpp     |   34
-rw-r--r--  LibOVR/Src/Util/Util_Interface.h       |   37
-rw-r--r--  LibOVR/Src/Util/Util_LatencyTest.cpp   |  570
-rw-r--r--  LibOVR/Src/Util/Util_LatencyTest.h     |  173
-rw-r--r--  LibOVR/Src/Util/Util_LatencyTest2.cpp  |  191
-rw-r--r--  LibOVR/Src/Util/Util_LatencyTest2.h    |  238
-rw-r--r--  LibOVR/Src/Util/Util_Render_Stereo.cpp | 1472
-rw-r--r--  LibOVR/Src/Util/Util_Render_Stereo.h   |  498
10 files changed, 3924 insertions(+), 0 deletions(-)
diff --git a/LibOVR/Src/Util/Util_ImageWindow.cpp b/LibOVR/Src/Util/Util_ImageWindow.cpp
new file mode 100644
index 0000000..cb091c7
--- /dev/null
+++ b/LibOVR/Src/Util/Util_ImageWindow.cpp
@@ -0,0 +1,511 @@
+/************************************************************************************
+
+Filename : Util_ImageWindow.cpp
+Content : An output object for windows that can display raw images for testing
+Created : March 13, 2014
+Authors : Dean Beeler
+
+Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
+
+Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
+you may not use the Oculus VR Rift SDK except in compliance with the License,
+which is provided at the time of installation or download, or which
+otherwise accompanies this software in either electronic or hard copy form.
+
+You may obtain a copy of the License at
+
+http://www.oculusvr.com/licenses/LICENSE-3.1
+
+Unless required by applicable law or agreed to in writing, the Oculus VR SDK
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*************************************************************************************/
+#include "../../Include/OVR.h"
+
+#include "Util_ImageWindow.h"
+
+#if defined(OVR_OS_WIN32)
+
+#include <Windows.h>
+
+#include "DWrite.h"
+
+typedef HRESULT (WINAPI *D2D1CreateFactoryFn)(
+ _In_ D2D1_FACTORY_TYPE,
+ _In_ REFIID,
+ _In_opt_ const D2D1_FACTORY_OPTIONS*,
+ _Out_ ID2D1Factory **
+ );
+
+typedef HRESULT (WINAPI *DWriteCreateFactoryFn)(
+ _In_ DWRITE_FACTORY_TYPE factoryType,
+ _In_ REFIID iid,
+ _Out_ IUnknown **factory
+ );
+
+
+namespace OVR { namespace Util {
+
+ID2D1Factory* ImageWindow::pD2DFactory = NULL;
+IDWriteFactory* ImageWindow::pDWriteFactory = NULL;
+ImageWindow* ImageWindow::globalWindow[4];
+int ImageWindow::windowCount = 0;
+
+LRESULT CALLBACK MainWndProc(
+ HWND hwnd,
+ UINT uMsg,
+ WPARAM wParam,
+ LPARAM lParam)
+{
+ switch (uMsg)
+ {
+ case WM_CREATE:
+ return 0;
+
+ case WM_PAINT:
+ {
+ LONG_PTR ptr = GetWindowLongPtr( hwnd, GWLP_USERDATA );
+ if( ptr )
+ {
+ ImageWindow* iw = (ImageWindow*)ptr;
+ iw->OnPaint();
+ }
+ }
+
+ return 0;
+
+ case WM_SIZE:
+ // Set the size and position of the window.
+ return 0;
+
+ case WM_DESTROY:
+ // Clean up window-specific data objects.
+ return 0;
+
+ //
+ // Process other messages.
+ //
+
+ default:
+ return DefWindowProc(hwnd, uMsg, wParam, lParam);
+ }
+ //return 0;
+}
+
+ImageWindow::ImageWindow( uint32_t width, uint32_t height ) :
+ hWindow( NULL ),
+ frontBufferMutex( new Mutex() )
+{
+
+ HINSTANCE hInst = LoadLibrary( L"d2d1.dll" );
+ HINSTANCE hInstWrite = LoadLibrary( L"Dwrite.dll" );
+
+ D2D1CreateFactoryFn createFactory = NULL;
+ DWriteCreateFactoryFn writeFactory = NULL;
+
+ if( hInst )
+ {
+ createFactory = (D2D1CreateFactoryFn)GetProcAddress( hInst, "D2D1CreateFactory" );
+ }
+
+ if( hInstWrite )
+ {
+ writeFactory = (DWriteCreateFactoryFn)GetProcAddress( hInstWrite, "DWriteCreateFactory" );
+ }
+
+ // Guard the fixed-size global window table.
+ if( windowCount < MaxWindows )
+ {
+ globalWindow[windowCount] = this;
+ ++windowCount;
+ }
+
+ if( pD2DFactory == NULL && createFactory && writeFactory )
+ {
+ createFactory(
+ D2D1_FACTORY_TYPE_MULTI_THREADED,
+ __uuidof(ID2D1Factory),
+ NULL,
+ &pD2DFactory
+ );
+
+ // Create a DirectWrite factory.
+ writeFactory(
+ DWRITE_FACTORY_TYPE_SHARED,
+ __uuidof(pDWriteFactory),
+ reinterpret_cast<IUnknown **>(&pDWriteFactory)
+ );
+
+ }
+
+ resolution = D2D1::SizeU( width, height );
+
+ SetWindowLongPtr( hWindow, GWLP_USERDATA, (LONG_PTR)this );
+
+ pRT = NULL;
+ greyBitmap = NULL;
+ colorBitmap = NULL;
+}
+
+ImageWindow::~ImageWindow()
+{
+ for( int i = 0; i < MaxWindows; ++i )
+ {
+ if( globalWindow[i] == this )
+ {
+ globalWindow[i] = NULL;
+ break;
+ }
+ }
+
+ if( greyBitmap )
+ greyBitmap->Release();
+
+ if( colorBitmap )
+ colorBitmap->Release();
+
+ if( pRT )
+ pRT->Release();
+
+ {
+ Mutex::Locker locker( frontBufferMutex );
+
+ while( frames.GetSize() )
+ {
+ Ptr<Frame> aFrame = frames.PopBack();
+ }
+ }
+
+ delete frontBufferMutex;
+
+ ShowWindow( hWindow, SW_HIDE );
+ DestroyWindow( hWindow );
+}
+
+void ImageWindow::AssociateSurface( void* surface )
+{
+ // Assume an IUnknown
+ IUnknown* unknown = (IUnknown*)surface;
+
+ IDXGISurface *pDxgiSurface = NULL;
+ HRESULT hr = unknown->QueryInterface(&pDxgiSurface);
+ if( hr == S_OK )
+ {
+ D2D1_RENDER_TARGET_PROPERTIES props =
+ D2D1::RenderTargetProperties(
+ D2D1_RENDER_TARGET_TYPE_DEFAULT,
+ D2D1::PixelFormat(DXGI_FORMAT_UNKNOWN, D2D1_ALPHA_MODE_PREMULTIPLIED),
+ 96,
+ 96
+ );
+
+
+ pRT = NULL;
+ ID2D1RenderTarget* tmpTarget;
+
+ hr = pD2DFactory->CreateDxgiSurfaceRenderTarget( pDxgiSurface, &props, &tmpTarget );
+
+ if( hr == S_OK )
+ {
+ DXGI_SURFACE_DESC desc = {0};
+ pDxgiSurface->GetDesc( &desc );
+ int width = desc.Width;
+ int height = desc.Height;
+
+ D2D1_SIZE_U size = D2D1::SizeU( width, height );
+
+ D2D1_PIXEL_FORMAT pixelFormat = D2D1::PixelFormat(
+ DXGI_FORMAT_A8_UNORM,
+ D2D1_ALPHA_MODE_PREMULTIPLIED
+ );
+
+ D2D1_PIXEL_FORMAT colorPixelFormat = D2D1::PixelFormat(
+ DXGI_FORMAT_B8G8R8A8_UNORM,
+ D2D1_ALPHA_MODE_PREMULTIPLIED
+ );
+
+ D2D1_BITMAP_PROPERTIES bitmapProps;
+ bitmapProps.dpiX = 96;
+ bitmapProps.dpiY = 96;
+ bitmapProps.pixelFormat = pixelFormat;
+
+ D2D1_BITMAP_PROPERTIES colorBitmapProps;
+ colorBitmapProps.dpiX = 96;
+ colorBitmapProps.dpiY = 96;
+ colorBitmapProps.pixelFormat = colorPixelFormat;
+
+ HRESULT result = tmpTarget->CreateBitmap( size, bitmapProps, &greyBitmap );
+ if( result != S_OK )
+ {
+ tmpTarget->Release();
+ tmpTarget = NULL;
+ }
+
+ // Only attempt the color bitmap if the render target is still valid.
+ if( tmpTarget )
+ {
+ result = tmpTarget->CreateBitmap( size, colorBitmapProps, &colorBitmap );
+ if( result != S_OK )
+ {
+ greyBitmap->Release();
+ greyBitmap = NULL;
+
+ tmpTarget->Release();
+ tmpTarget = NULL;
+ }
+ }
+ pRT = tmpTarget;
+ }
+ }
+}
+
+void ImageWindow::Process()
+{
+ if( pRT && greyBitmap )
+ {
+ OnPaint();
+
+ pRT->Flush();
+ }
+}
+
+void ImageWindow::Complete()
+{
+ Mutex::Locker locker( frontBufferMutex );
+
+ if( frames.IsEmpty() )
+ return;
+
+ if( frames.PeekBack(0)->ready )
+ return;
+
+ Ptr<Frame> frame = frames.PeekBack(0);
+
+ frame->ready = true;
+}
+
+void ImageWindow::OnPaint()
+{
+ Mutex::Locker locker( frontBufferMutex );
+
+ // Nothing to do
+ if( frames.IsEmpty() )
+ return;
+
+ if( !frames.PeekFront(0)->ready )
+ return;
+
+ Ptr<Frame> currentFrame = frames.PopFront();
+
+ Ptr<Frame> nextFrame = NULL;
+
+ if( !frames.IsEmpty() )
+ nextFrame = frames.PeekFront(0);
+
+ while( nextFrame && nextFrame->ready )
+ {
+ // Free up the current frame since it's been removed from the deque
+ currentFrame = frames.PopFront();
+
+ if( frames.IsEmpty() )
+ break;
+
+ nextFrame = frames.PeekFront(0);
+ }
+
+ if( currentFrame->imageData )
+ greyBitmap->CopyFromMemory( NULL, currentFrame->imageData, currentFrame->width );
+
+ if( currentFrame->colorImageData )
+ colorBitmap->CopyFromMemory( NULL, currentFrame->colorImageData, currentFrame->colorPitch );
+
+ pRT->BeginDraw();
+
+ pRT->SetAntialiasMode(D2D1_ANTIALIAS_MODE_ALIASED);
+
+ pRT->Clear( D2D1::ColorF(D2D1::ColorF::Black) );
+
+ // This will mirror our image
+ D2D1_MATRIX_3X2_F m;
+ m._11 = -1; m._12 = 0;
+ m._21 = 0; m._22 = 1;
+ m._31 = 0; m._32 = 0;
+ pRT->SetTransform( m );
+
+ ID2D1SolidColorBrush* whiteBrush;
+
+ pRT->CreateSolidColorBrush( D2D1::ColorF(D2D1::ColorF::White, 1.0f), &whiteBrush );
+
+ if( currentFrame->imageData )
+ {
+ pRT->FillOpacityMask( greyBitmap, whiteBrush,
+ D2D1_OPACITY_MASK_CONTENT_TEXT_NATURAL,
+ D2D1::RectF( -(FLOAT)resolution.width, 0.0f, (FLOAT)0.0f, (FLOAT)resolution.height ),
+ //D2D1::RectF( 0.0f, 0.0f, (FLOAT)0.0f, (FLOAT)resolution.height ),
+ D2D1::RectF( 0.0f, 0.0f, (FLOAT)resolution.width, (FLOAT)resolution.height ) );
+ }
+ else if( currentFrame->colorImageData )
+ {
+ pRT->DrawBitmap( colorBitmap,
+ D2D1::RectF( -(FLOAT)resolution.width, 0.0f, (FLOAT)0.0f, (FLOAT)resolution.height ) );
+
+ }
+
+ pRT->SetTransform(D2D1::Matrix3x2F::Identity());
+
+ whiteBrush->Release();
+
+ Array<CirclePlot>::Iterator it;
+
+ for( it = currentFrame->plots.Begin(); it != currentFrame->plots.End(); ++it )
+ {
+ ID2D1SolidColorBrush* aBrush;
+
+ pRT->CreateSolidColorBrush( D2D1::ColorF( it->r, it->g, it->b), &aBrush );
+
+ D2D1_ELLIPSE ellipse;
+ ellipse.point.x = it->x;
+ ellipse.point.y = it->y;
+ ellipse.radiusX = it->radius;
+ ellipse.radiusY = it->radius;
+
+ if( it->fill )
+ pRT->FillEllipse( &ellipse, aBrush );
+ else
+ pRT->DrawEllipse( &ellipse, aBrush );
+
+ aBrush->Release();
+ }
+
+ static const WCHAR msc_fontName[] = L"Verdana";
+ static const FLOAT msc_fontSize = 20;
+
+ IDWriteTextFormat* textFormat = NULL;
+
+ // Create a DirectWrite text format object.
+ pDWriteFactory->CreateTextFormat(
+ msc_fontName,
+ NULL,
+ DWRITE_FONT_WEIGHT_NORMAL,
+ DWRITE_FONT_STYLE_NORMAL,
+ DWRITE_FONT_STRETCH_NORMAL,
+ msc_fontSize,
+ L"", //locale
+ &textFormat
+ );
+
+ D2D1_SIZE_F renderTargetSize = pRT->GetSize();
+
+ Array<TextPlot>::Iterator textIt;
+ for( textIt = currentFrame->textLines.Begin(); textIt != currentFrame->textLines.End(); ++textIt )
+ {
+ ID2D1SolidColorBrush* aBrush;
+
+ pRT->CreateSolidColorBrush( D2D1::ColorF( textIt->r, textIt->g, textIt->b), &aBrush );
+
+ WCHAR* tmpString = (WCHAR*)calloc( textIt->text.GetLength(), sizeof( WCHAR ) );
+ for( unsigned i = 0; i < textIt->text.GetLength(); ++i )
+ {
+ tmpString[i] = (WCHAR)textIt->text.GetCharAt( i );
+ }
+
+ pRT->DrawTextW( tmpString, (UINT32)textIt->text.GetLength(), textFormat,
+ D2D1::RectF(textIt->x, textIt->y, renderTargetSize.width, renderTargetSize.height), aBrush );
+
+ free( tmpString );
+
+ aBrush->Release();
+ }
+
+ if( textFormat )
+ textFormat->Release();
+
+ pRT->EndDraw();
+
+ pRT->Flush();
+}
+
+Ptr<Frame> ImageWindow::lastUnreadyFrame()
+{
+ static int framenumber = 0;
+
+ if( frames.GetSize() && !frames.PeekBack( 0 )->ready )
+ return frames.PeekBack( 0 );
+
+ // Create a new frame if an unready one doesn't already exist
+ Ptr<Frame> tmpFrame = *new Frame( framenumber );
+ frames.PushBack( tmpFrame );
+
+ ++framenumber;
+
+ return tmpFrame;
+}
+
+void ImageWindow::UpdateImageBW( const uint8_t* imageData, uint32_t width, uint32_t height )
+{
+ if( pRT && greyBitmap )
+ {
+ Mutex::Locker locker( frontBufferMutex );
+
+ Ptr<Frame> frame = lastUnreadyFrame();
+ if( frame->imageData )
+ free( frame->imageData ); // replace any image already queued on this frame
+ frame->imageData = malloc( width * height );
+ frame->width = width;
+ frame->height = height;
+ memcpy( frame->imageData, imageData, width * height );
+ }
+}
+
+void ImageWindow::UpdateImageRGBA( const uint8_t* imageData, uint32_t width, uint32_t height, uint32_t pitch )
+{
+ if( pRT && colorBitmap )
+ {
+ Mutex::Locker locker( frontBufferMutex );
+
+ Ptr<Frame> frame = lastUnreadyFrame();
+ if( frame->colorImageData )
+ free( frame->colorImageData ); // replace any image already queued on this frame
+ frame->colorImageData = malloc( pitch * height );
+ frame->width = width;
+ frame->height = height;
+ frame->colorPitch = pitch;
+ memcpy( frame->colorImageData, imageData, pitch * height );
+ }
+}
+
+void ImageWindow::addCircle( float x, float y, float radius, float r, float g, float b, bool fill )
+{
+ if( pRT )
+ {
+ CirclePlot cp;
+
+ cp.x = x;
+ cp.y = y;
+ cp.radius = radius;
+ cp.r = r;
+ cp.g = g;
+ cp.b = b;
+ cp.fill = fill;
+
+ Mutex::Locker locker( frontBufferMutex );
+
+ Ptr<Frame> frame = lastUnreadyFrame();
+ frame->plots.PushBack( cp );
+ }
+
+}
+
+void ImageWindow::addText( float x, float y, float r, float g, float b, OVR::String text )
+{
+ if( pRT )
+ {
+ TextPlot tp;
+
+ tp.x = x;
+ tp.y = y;
+ tp.r = r;
+ tp.g = g;
+ tp.b = b;
+ tp.text = text;
+
+ Mutex::Locker locker( frontBufferMutex );
+ Ptr<Frame> frame = lastUnreadyFrame();
+ frame->textLines.PushBack( tp );
+ }
+}
+
+}}
+
+#endif //defined(OVR_OS_WIN32)
\ No newline at end of file
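ImageWindow is driven from two threads: a producer fills the pending frame (image data, overlay circles, text) and marks it ready with Complete(), while the thread that owns the D3D surface calls Process() to paint the newest ready frame. A minimal sketch of that call pattern, assuming app-side names (pDxgiSurface, cameraPixels, blob coordinates) that are illustrative rather than LibOVR API:

    // Render thread, once (assuming an ImageWindow was constructed earlier):
    OVR::Util::ImageWindow* w = OVR::Util::ImageWindow::GlobalWindow( 0 );
    w->AssociateSurface( pDxgiSurface );   // any IUnknown that QIs to IDXGISurface

    // Tracker (producer) thread, per camera frame:
    w->UpdateImageBW( cameraPixels, camWidth, camHeight );
    w->addCircle( blobX, blobY, 4.0f, 1.0f, 0.0f, 0.0f, true );
    w->Complete();                         // mark the queued frame ready for display

    // Render thread, per frame:
    w->Process();                          // paint the newest ready frame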
diff --git a/LibOVR/Src/Util/Util_ImageWindow.h b/LibOVR/Src/Util/Util_ImageWindow.h
new file mode 100644
index 0000000..4b88959
--- /dev/null
+++ b/LibOVR/Src/Util/Util_ImageWindow.h
@@ -0,0 +1,200 @@
+/************************************************************************************
+
+Filename : Util_ImageWindow.h
+Content : An output object for windows that can display raw images for testing
+Created : March 13, 2014
+Authors : Dean Beeler
+
+Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
+
+Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
+you may not use the Oculus VR Rift SDK except in compliance with the License,
+which is provided at the time of installation or download, or which
+otherwise accompanies this software in either electronic or hard copy form.
+
+You may obtain a copy of the License at
+
+http://www.oculusvr.com/licenses/LICENSE-3.1
+
+Unless required by applicable law or agreed to in writing, the Oculus VR SDK
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*************************************************************************************/
+
+#ifndef UTIL_IMAGEWINDOW_H
+#define UTIL_IMAGEWINDOW_H
+
+#if defined(OVR_OS_WIN32)
+#define WIN32_LEAN_AND_MEAN 1
+#include <windows.h>
+#include <d2d1.h>
+#include <dwrite.h>
+#endif
+
+#include "../../Include/OVR.h"
+#include "../Kernel/OVR_Hash.h"
+#include "../Kernel/OVR_Array.h"
+#include "../Kernel/OVR_Threads.h"
+#include "../Kernel/OVR_Deque.h"
+
+#include <stdint.h>
+
+namespace OVR { namespace Util {
+
+ typedef struct
+ {
+ float x;
+ float y;
+ float radius;
+ float r;
+ float g;
+ float b;
+ bool fill;
+ } CirclePlot;
+
+ typedef struct
+ {
+ float x;
+ float y;
+ float r;
+ float g;
+ float b;
+ OVR::String text;
+ } TextPlot;
+
+class Frame : virtual public RefCountBaseV<Frame>
+{
+public:
+
+ Frame( int frame ) :
+ frameNumber( frame ),
+ imageData( NULL ),
+ colorImageData( NULL ),
+ plots(),
+ textLines(),
+ width( 0 ),
+ height( 0 ),
+ colorPitch( 0 ),
+ ready( false )
+ {
+
+ }
+
+ ~Frame()
+ {
+ if( imageData )
+ free( imageData );
+ if( colorImageData )
+ free( colorImageData );
+
+ plots.ClearAndRelease();
+ textLines.ClearAndRelease();
+ }
+
+ int frameNumber;
+
+ Array<CirclePlot> plots;
+ Array<TextPlot> textLines;
+ void* imageData;
+ void* colorImageData;
+ int width;
+ int height;
+ int colorPitch;
+ bool ready;
+};
+
+#if defined(OVR_OS_WIN32)
+class ImageWindow
+{
+ HWND hWindow;
+ ID2D1RenderTarget* pRT;
+ D2D1_SIZE_U resolution;
+
+ Mutex* frontBufferMutex;
+
+ InPlaceMutableDeque< Ptr<Frame> > frames;
+
+ ID2D1Bitmap* greyBitmap;
+ ID2D1Bitmap* colorBitmap;
+
+public:
+ // constructors
+ ImageWindow();
+ ImageWindow( uint32_t width, uint32_t height );
+ virtual ~ImageWindow();
+
+ void GetResolution( size_t& width, size_t& height ) { width = resolution.width; height = resolution.height; }
+
+ void OnPaint(); // Called by Windows when it receives a WM_PAINT message
+
+ void UpdateImage( const uint8_t* imageData, uint32_t width, uint32_t height ) { UpdateImageBW( imageData, width, height ); }
+ void UpdateImageBW( const uint8_t* imageData, uint32_t width, uint32_t height );
+ void UpdateImageRGBA( const uint8_t* imageData, uint32_t width, uint32_t height, uint32_t pitch );
+ void Complete(); // Called by drawing thread to submit a frame
+
+ void Process(); // Called by rendering thread to do window processing
+
+ void AssociateSurface( void* surface );
+
+ void addCircle( float x , float y, float radius, float r, float g, float b, bool fill );
+ void addText( float x, float y, float r, float g, float b, OVR::String text );
+
+ static ImageWindow* GlobalWindow( int window ) { return globalWindow[window]; }
+ static int WindowCount() { return windowCount; }
+
+private:
+
+ Ptr<Frame> lastUnreadyFrame();
+
+ static const int MaxWindows = 4;
+ static ImageWindow* globalWindow[MaxWindows];
+ static int windowCount;
+ static ID2D1Factory* pD2DFactory;
+ static IDWriteFactory* pDWriteFactory;
+};
+
+#else
+
+class ImageWindow
+{
+public:
+ // constructors
+ ImageWindow() {}
+ ImageWindow( uint32_t width, uint32_t height ) { OVR_UNUSED( width ); OVR_UNUSED( height ); }
+ virtual ~ImageWindow() { }
+
+ void GetResolution( size_t& width, size_t& height ) { width = 0; height = 0; }
+
+ void OnPaint() { }
+
+ void UpdateImage( const uint8_t* imageData, uint32_t width, uint32_t height ) { UpdateImageBW( imageData, width, height ); }
+ void UpdateImageBW( const uint8_t* imageData, uint32_t width, uint32_t height ) { }
+ void UpdateImageRGBA( const uint8_t* imageData, uint32_t width, uint32_t height, uint32_t pitch ) { }
+ void Complete() { }
+
+ void Process() { }
+
+ void AssociateSurface( void* surface ) { }
+
+ void addCircle( float x , float y, float radius, float r, float g, float b, bool fill ) { }
+ void addText( float x, float y, float r, float g, float b, OVR::String text ) { }
+
+ // The stub has no backing statics (they are only defined in the Win32 build),
+ // so report no windows rather than referencing undefined symbols.
+ static ImageWindow* GlobalWindow( int window ) { OVR_UNUSED( window ); return NULL; }
+ static int WindowCount() { return 0; }
+};
+
+#endif
+
+}} // namespace OVR::Util
+
+
+#endif
\ No newline at end of file
diff --git a/LibOVR/Src/Util/Util_Interface.cpp b/LibOVR/Src/Util/Util_Interface.cpp
new file mode 100644
index 0000000..d96423c
--- /dev/null
+++ b/LibOVR/Src/Util/Util_Interface.cpp
@@ -0,0 +1,34 @@
+/************************************************************************************
+
+Filename : Util_Interface.cpp
+Content : Simple interface, utilised by internal demos,
+ with access to the wider SDK as needed.
+ Located in the body of the SDK to ensure it is updated
+ when new SDK features are added.
+Created : February 20, 2014
+Authors : Tom Heath
+
+Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
+
+Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
+you may not use the Oculus VR Rift SDK except in compliance with the License,
+which is provided at the time of installation or download, or which
+otherwise accompanies this software in either electronic or hard copy form.
+
+You may obtain a copy of the License at
+
+http://www.oculusvr.com/licenses/LICENSE-3.1
+
+Unless required by applicable law or agreed to in writing, the Oculus VR SDK
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*************************************************************************************/
+
+#include "Util_Interface.h"
+
+
+
+//Files left in to ease its possible return......
\ No newline at end of file
diff --git a/LibOVR/Src/Util/Util_Interface.h b/LibOVR/Src/Util/Util_Interface.h
new file mode 100644
index 0000000..1bbf638
--- /dev/null
+++ b/LibOVR/Src/Util/Util_Interface.h
@@ -0,0 +1,37 @@
+/************************************************************************************
+
+PublicHeader: OVR.h
+Filename : Util_Interface.h
+Content : Simple interface, utilised by internal demos,
+ with access to the wider SDK as needed.
+ Located in the body of the SDK to ensure it is updated
+ when new SDK features are added.
+Created : February 20, 2014
+Authors : Tom Heath
+
+Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
+
+Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
+you may not use the Oculus VR Rift SDK except in compliance with the License,
+which is provided at the time of installation or download, or which
+otherwise accompanies this software in either electronic or hard copy form.
+
+You may obtain a copy of the License at
+
+http://www.oculusvr.com/licenses/LICENSE-3.1
+
+Unless required by applicable law or agreed to in writing, the Oculus VR SDK
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*************************************************************************************/
+
+#ifndef OVR_Util_Interface_h
+#define OVR_Util_Interface_h
+#include "../../Src/OVR_CAPI.h"
+
+//Files left in to ease its possible return......
+
+#endif
diff --git a/LibOVR/Src/Util/Util_LatencyTest.cpp b/LibOVR/Src/Util/Util_LatencyTest.cpp
new file mode 100644
index 0000000..3017c72
--- /dev/null
+++ b/LibOVR/Src/Util/Util_LatencyTest.cpp
@@ -0,0 +1,570 @@
+/************************************************************************************
+
+Filename : Util_LatencyTest.cpp
+Content : Wraps the lower level LatencyTester interface and adds functionality.
+Created : February 14, 2013
+Authors : Lee Cooper
+
+Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
+
+Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
+you may not use the Oculus VR Rift SDK except in compliance with the License,
+which is provided at the time of installation or download, or which
+otherwise accompanies this software in either electronic or hard copy form.
+
+You may obtain a copy of the License at
+
+http://www.oculusvr.com/licenses/LICENSE-3.1
+
+Unless required by applicable law or agreed to in writing, the Oculus VR SDK
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*************************************************************************************/
+
+#include "Util_LatencyTest.h"
+
+#include "../Kernel/OVR_Log.h"
+#include "../Kernel/OVR_Timer.h"
+
+namespace OVR { namespace Util {
+
+static const UInt32 TIME_TO_WAIT_FOR_SETTLE_PRE_CALIBRATION = 16*10;
+static const UInt32 TIME_TO_WAIT_FOR_SETTLE_POST_CALIBRATION = 16*10;
+static const UInt32 TIME_TO_WAIT_FOR_SETTLE_POST_MEASUREMENT = 16*5;
+static const UInt32 TIME_TO_WAIT_FOR_SETTLE_POST_MEASUREMENT_RANDOMNESS = 16*5;
+static const UInt32 DEFAULT_NUMBER_OF_SAMPLES = 10; // For both color 1->2 and color 2->1 transitions.
+static const UInt32 INITIAL_SAMPLES_TO_IGNORE = 4;
+static const UInt32 TIMEOUT_WAITING_FOR_TEST_STARTED = 1000;
+static const UInt32 TIMEOUT_WAITING_FOR_COLOR_DETECTED = 4000;
+static const Color CALIBRATE_BLACK(0, 0, 0);
+static const Color CALIBRATE_WHITE(255, 255, 255);
+static const Color COLOR1(0, 0, 0);
+static const Color COLOR2(255, 255, 255);
+static const Color SENSOR_DETECT_THRESHOLD(128, 255, 255);
+static const float BIG_FLOAT = 1000000.0f;
+static const float SMALL_FLOAT = -1000000.0f;
+
+//-------------------------------------------------------------------------------------
+// ***** LatencyTest
+
+LatencyTest::LatencyTest(LatencyTestDevice* device)
+ : Handler(getThis())
+{
+ if (device != NULL)
+ {
+ SetDevice(device);
+ }
+
+ reset();
+
+ srand(Timer::GetTicksMs());
+}
+
+LatencyTest::~LatencyTest()
+{
+ clearMeasurementResults();
+}
+
+bool LatencyTest::SetDevice(LatencyTestDevice* device)
+{
+
+ if (device != Device)
+ {
+ Handler.RemoveHandlerFromDevices();
+
+ Device = device;
+
+ if (Device != NULL)
+ {
+ Device->AddMessageHandler(&Handler);
+
+ // Set trigger threshold.
+ LatencyTestConfiguration configuration(SENSOR_DETECT_THRESHOLD, false); // No samples streaming.
+ Device->SetConfiguration(configuration, true);
+
+ // Set display to initial (3 dashes).
+ LatencyTestDisplay ltd(2, 0x40400040);
+ Device->SetDisplay(ltd);
+ }
+ }
+
+ return true;
+}
+
+UInt32 LatencyTest::getRandomComponent(UInt32 range)
+{
+ UInt32 val = rand() % range;
+ return val;
+}
+
+void LatencyTest::BeginTest()
+{
+ if (State == State_WaitingForButton)
+ {
+ // Set color to black and wait a while.
+ RenderColor = CALIBRATE_BLACK;
+
+ State = State_WaitingForSettlePreCalibrationColorBlack;
+ OVR_DEBUG_LOG(("State_WaitingForButton -> State_WaitingForSettlePreCalibrationColorBlack."));
+
+ setTimer(TIME_TO_WAIT_FOR_SETTLE_PRE_CALIBRATION);
+ }
+}
+
+void LatencyTest::handleMessage(const Message& msg, LatencyTestMessageType latencyTestMessage)
+{
+ // For debugging.
+/* if (msg.Type == Message_LatencyTestSamples)
+ {
+ MessageLatencyTestSamples* pSamples = (MessageLatencyTestSamples*) &msg;
+
+ if (pSamples->Samples.GetSize() > 0)
+ {
+ // Just show the first one for now.
+ Color c = pSamples->Samples[0];
+ OVR_DEBUG_LOG(("%d %d %d", c.R, c.G, c.B));
+ }
+ return;
+ }
+*/
+
+ if (latencyTestMessage == LatencyTest_Timer)
+ {
+ if (!Device)
+ {
+ reset();
+ return;
+ }
+
+ if (State == State_WaitingForSettlePreCalibrationColorBlack)
+ {
+ // Send calibrate message to device and wait a while.
+ Device->SetCalibrate(CALIBRATE_BLACK);
+
+ State = State_WaitingForSettlePostCalibrationColorBlack;
+ OVR_DEBUG_LOG(("State_WaitingForSettlePreCalibrationColorBlack -> State_WaitingForSettlePostCalibrationColorBlack."));
+
+ setTimer(TIME_TO_WAIT_FOR_SETTLE_POST_CALIBRATION);
+ }
+ else if (State == State_WaitingForSettlePostCalibrationColorBlack)
+ {
+ // Change color to white and wait a while.
+ RenderColor = CALIBRATE_WHITE;
+
+ State = State_WaitingForSettlePreCalibrationColorWhite;
+ OVR_DEBUG_LOG(("State_WaitingForSettlePostCalibrationColorBlack -> State_WaitingForSettlePreCalibrationColorWhite."));
+
+ setTimer(TIME_TO_WAIT_FOR_SETTLE_PRE_CALIBRATION);
+ }
+ else if (State == State_WaitingForSettlePreCalibrationColorWhite)
+ {
+ // Send calibrate message to device and wait a while.
+ Device->SetCalibrate(CALIBRATE_WHITE);
+
+ State = State_WaitingForSettlePostCalibrationColorWhite;
+ OVR_DEBUG_LOG(("State_WaitingForSettlePreCalibrationColorWhite -> State_WaitingForSettlePostCalibrationColorWhite."));
+
+ setTimer(TIME_TO_WAIT_FOR_SETTLE_POST_CALIBRATION);
+ }
+ else if (State == State_WaitingForSettlePostCalibrationColorWhite)
+ {
+ // Calibration is done. Switch to color 1 and wait for it to settle.
+ RenderColor = COLOR1;
+
+ State = State_WaitingForSettlePostMeasurement;
+ OVR_DEBUG_LOG(("State_WaitingForSettlePostCalibrationColorWhite -> State_WaitingForSettlePostMeasurement."));
+
+ UInt32 waitTime = TIME_TO_WAIT_FOR_SETTLE_POST_MEASUREMENT + getRandomComponent(TIME_TO_WAIT_FOR_SETTLE_POST_MEASUREMENT_RANDOMNESS);
+ setTimer(waitTime);
+ }
+ else if (State == State_WaitingForSettlePostMeasurement)
+ {
+ // Prepare for next measurement.
+
+ // Create a new result object.
+ MeasurementResult* pResult = new MeasurementResult();
+ Results.PushBack(pResult);
+
+ State = State_WaitingToTakeMeasurement;
+ OVR_DEBUG_LOG(("State_WaitingForSettlePostMeasurement -> State_WaitingToTakeMeasurement."));
+ }
+ else if (State == State_WaitingForTestStarted)
+ {
+ // We timed out waiting for 'TestStarted'. Abandon this measurement and setup for the next.
+ getActiveResult()->TimedOutWaitingForTestStarted = true;
+
+ State = State_WaitingForSettlePostMeasurement;
+ OVR_DEBUG_LOG(("** Timed out waiting for 'TestStarted'."));
+ OVR_DEBUG_LOG(("State_WaitingForTestStarted -> State_WaitingForSettlePostMeasurement."));
+
+ UInt32 waitTime = TIME_TO_WAIT_FOR_SETTLE_POST_MEASUREMENT + getRandomComponent(TIME_TO_WAIT_FOR_SETTLE_POST_MEASUREMENT_RANDOMNESS);
+ setTimer(waitTime);
+ }
+ else if (State == State_WaitingForColorDetected)
+ {
+ // We timed out waiting for 'ColorDetected'. Abandon this measurement and setup for the next.
+ getActiveResult()->TimedOutWaitingForColorDetected = true;
+
+ State = State_WaitingForSettlePostMeasurement;
+ OVR_DEBUG_LOG(("** Timed out waiting for 'ColorDetected'."));
+ OVR_DEBUG_LOG(("State_WaitingForColorDetected -> State_WaitingForSettlePostMeasurement."));
+
+ UInt32 waitTime = TIME_TO_WAIT_FOR_SETTLE_POST_MEASUREMENT + getRandomComponent(TIME_TO_WAIT_FOR_SETTLE_POST_MEASUREMENT_RANDOMNESS);
+ setTimer(waitTime);
+ }
+ }
+ else if (latencyTestMessage == LatencyTest_ProcessInputs)
+ {
+ if (State == State_WaitingToTakeMeasurement)
+ {
+ if (!Device)
+ {
+ reset();
+ return;
+ }
+
+ // Send 'StartTest' feature report with opposite target color.
+ if (RenderColor == COLOR1)
+ {
+ RenderColor = COLOR2;
+ }
+ else
+ {
+ RenderColor = COLOR1;
+ }
+
+ getActiveResult()->TargetColor = RenderColor;
+
+ // Record time so we can determine usb roundtrip time.
+ getActiveResult()->StartTestSeconds = Timer::GetSeconds();
+
+ Device->SetStartTest(RenderColor);
+
+ State = State_WaitingForTestStarted;
+ OVR_DEBUG_LOG(("State_WaitingToTakeMeasurement -> State_WaitingForTestStarted."));
+
+ setTimer(TIMEOUT_WAITING_FOR_TEST_STARTED);
+
+ LatencyTestDisplay ltd(2, 0x40090040);
+ Device->SetDisplay(ltd);
+ }
+ }
+ else if (msg.Type == Message_LatencyTestButton)
+ {
+ BeginTest();
+ }
+ else if (msg.Type == Message_LatencyTestStarted)
+ {
+ if (State == State_WaitingForTestStarted)
+ {
+ clearTimer();
+
+ // Record time so we can determine usb roundtrip time.
+ getActiveResult()->TestStartedSeconds = Timer::GetSeconds();
+
+ State = State_WaitingForColorDetected;
+ OVR_DEBUG_LOG(("State_WaitingForTestStarted -> State_WaitingForColorDetected."));
+
+ setTimer(TIMEOUT_WAITING_FOR_COLOR_DETECTED);
+ }
+ }
+ else if (msg.Type == Message_LatencyTestColorDetected)
+ {
+ if (State == State_WaitingForColorDetected)
+ {
+ // Record time to detect color.
+ MessageLatencyTestColorDetected* pDetected = (MessageLatencyTestColorDetected*) &msg;
+ UInt16 elapsedTime = pDetected->Elapsed;
+ OVR_DEBUG_LOG(("Time to 'ColorDetected' = %d", elapsedTime));
+
+ getActiveResult()->DeviceMeasuredElapsedMilliS = elapsedTime;
+
+ if (areResultsComplete())
+ {
+ // We're done.
+ processResults();
+ reset();
+ }
+ else
+ {
+ // Run another measurement.
+ State = State_WaitingForSettlePostMeasurement;
+ OVR_DEBUG_LOG(("State_WaitingForColorDetected -> State_WaitingForSettlePostMeasurement."));
+
+ UInt32 waitTime = TIME_TO_WAIT_FOR_SETTLE_POST_MEASUREMENT + getRandomComponent(TIME_TO_WAIT_FOR_SETTLE_POST_MEASUREMENT_RANDOMNESS);
+ setTimer(waitTime);
+
+ LatencyTestDisplay ltd(2, 0x40400040);
+ Device->SetDisplay(ltd);
+ }
+ }
+ }
+ else if (msg.Type == Message_DeviceRemoved)
+ {
+ reset();
+ }
+}
+
+LatencyTest::MeasurementResult* LatencyTest::getActiveResult()
+{
+ OVR_ASSERT(!Results.IsEmpty());
+ return Results.GetLast();
+}
+
+void LatencyTest::setTimer(UInt32 timeMilliS)
+{
+ ActiveTimerMilliS = timeMilliS;
+}
+
+void LatencyTest::clearTimer()
+{
+ ActiveTimerMilliS = 0;
+}
+
+void LatencyTest::reset()
+{
+ clearMeasurementResults();
+ State = State_WaitingForButton;
+
+ HaveOldTime = false;
+ ActiveTimerMilliS = 0;
+}
+
+void LatencyTest::clearMeasurementResults()
+{
+ while(!Results.IsEmpty())
+ {
+ MeasurementResult* pElem = Results.GetFirst();
+ pElem->RemoveNode();
+ delete pElem;
+ }
+}
+
+LatencyTest::LatencyTestHandler::~LatencyTestHandler()
+{
+ RemoveHandlerFromDevices();
+}
+
+void LatencyTest::LatencyTestHandler::OnMessage(const Message& msg)
+{
+ pLatencyTestUtil->handleMessage(msg);
+}
+
+void LatencyTest::ProcessInputs()
+{
+ updateForTimeouts();
+ handleMessage(Message(), LatencyTest_ProcessInputs);
+}
+
+bool LatencyTest::DisplayScreenColor(Color& colorToDisplay)
+{
+ updateForTimeouts();
+
+ if (State == State_WaitingForButton)
+ {
+ return false;
+ }
+
+ colorToDisplay = RenderColor;
+ return true;
+}
+
+const char* LatencyTest::GetResultsString()
+{
+ if (!ResultsString.IsEmpty() && ReturnedResultString != ResultsString.ToCStr())
+ {
+ ReturnedResultString = ResultsString;
+ return ReturnedResultString.ToCStr();
+ }
+
+ return NULL;
+}
+
+bool LatencyTest::areResultsComplete()
+{
+ UInt32 initialMeasurements = 0;
+
+ UInt32 measurements1to2 = 0;
+ UInt32 measurements2to1 = 0;
+
+ MeasurementResult* pCurr = Results.GetFirst();
+ while(true)
+ {
+ // Process.
+ if (!pCurr->TimedOutWaitingForTestStarted &&
+ !pCurr->TimedOutWaitingForColorDetected)
+ {
+ initialMeasurements++;
+
+ if (initialMeasurements > INITIAL_SAMPLES_TO_IGNORE)
+ {
+ if (pCurr->TargetColor == COLOR2)
+ {
+ measurements1to2++;
+ }
+ else
+ {
+ measurements2to1++;
+ }
+ }
+ }
+
+ if (Results.IsLast(pCurr))
+ {
+ break;
+ }
+ pCurr = Results.GetNext(pCurr);
+ }
+
+ if (measurements1to2 >= DEFAULT_NUMBER_OF_SAMPLES &&
+ measurements2to1 >= DEFAULT_NUMBER_OF_SAMPLES)
+ {
+ return true;
+ }
+
+ return false;
+}
+
+void LatencyTest::processResults()
+{
+
+ UInt32 minTime1To2 = UINT_MAX;
+ UInt32 maxTime1To2 = 0;
+ float averageTime1To2 = 0.0f;
+ UInt32 minTime2To1 = UINT_MAX;
+ UInt32 maxTime2To1 = 0;
+ float averageTime2To1 = 0.0f;
+
+ float minUSBTripMilliS = BIG_FLOAT;
+ float maxUSBTripMilliS = SMALL_FLOAT;
+ float averageUSBTripMilliS = 0.0f;
+ UInt32 countUSBTripTime = 0;
+
+ UInt32 measurementsCount = 0;
+ UInt32 measurements1to2 = 0;
+ UInt32 measurements2to1 = 0;
+
+ MeasurementResult* pCurr = Results.GetFirst();
+ UInt32 count = 0;
+ while(true)
+ {
+ count++;
+
+ if (!pCurr->TimedOutWaitingForTestStarted &&
+ !pCurr->TimedOutWaitingForColorDetected)
+ {
+ measurementsCount++;
+
+ if (measurementsCount > INITIAL_SAMPLES_TO_IGNORE)
+ {
+ if (pCurr->TargetColor == COLOR2)
+ {
+ measurements1to2++;
+
+ if (measurements1to2 <= DEFAULT_NUMBER_OF_SAMPLES)
+ {
+ UInt32 elapsed = pCurr->DeviceMeasuredElapsedMilliS;
+
+ minTime1To2 = Alg::Min(elapsed, minTime1To2);
+ maxTime1To2 = Alg::Max(elapsed, maxTime1To2);
+
+ averageTime1To2 += (float) elapsed;
+ }
+ }
+ else
+ {
+ measurements2to1++;
+
+ if (measurements2to1 <= DEFAULT_NUMBER_OF_SAMPLES)
+ {
+ UInt32 elapsed = pCurr->DeviceMeasuredElapsedMilliS;
+
+ minTime2To1 = Alg::Min(elapsed, minTime2To1);
+ maxTime2To1 = Alg::Max(elapsed, maxTime2To1);
+
+ averageTime2To1 += (float) elapsed;
+ }
+ }
+
+ float usbRoundtripElapsedMilliS = Timer::MsPerSecond * (float) (pCurr->TestStartedSeconds - pCurr->StartTestSeconds);
+ minUSBTripMilliS = Alg::Min(usbRoundtripElapsedMilliS, minUSBTripMilliS);
+ maxUSBTripMilliS = Alg::Max(usbRoundtripElapsedMilliS, maxUSBTripMilliS);
+ averageUSBTripMilliS += usbRoundtripElapsedMilliS;
+ countUSBTripTime++;
+ }
+ }
+
+ if (measurements1to2 >= DEFAULT_NUMBER_OF_SAMPLES &&
+ measurements2to1 >= DEFAULT_NUMBER_OF_SAMPLES)
+ {
+ break;
+ }
+
+ if (Results.IsLast(pCurr))
+ {
+ break;
+ }
+ pCurr = Results.GetNext(pCurr);
+ }
+
+ averageTime1To2 /= (float) DEFAULT_NUMBER_OF_SAMPLES;
+ averageTime2To1 /= (float) DEFAULT_NUMBER_OF_SAMPLES;
+
+ averageUSBTripMilliS /= countUSBTripTime;
+
+ float finalResult = 0.5f * (averageTime1To2 + averageTime2To1);
+ finalResult += averageUSBTripMilliS;
+
+ ResultsString.Clear();
+ ResultsString.AppendFormat("RESULT=%.1f (add half Tracker period) [b->w %d|%.1f|%d] [w->b %d|%.1f|%d] [usb rndtrp %.1f|%.1f|%.1f] [cnt %d] [tmouts %d]",
+ finalResult,
+ minTime1To2, averageTime1To2, maxTime1To2,
+ minTime2To1, averageTime2To1, maxTime2To1,
+ minUSBTripMilliS, averageUSBTripMilliS, maxUSBTripMilliS,
+ DEFAULT_NUMBER_OF_SAMPLES*2, count - measurementsCount);
+
+ // Display result on latency tester display.
+ LatencyTestDisplay ltd(1, (int)finalResult);
+ Device->SetDisplay(ltd);
+}
+
+void LatencyTest::updateForTimeouts()
+{
+ if (!HaveOldTime)
+ {
+ HaveOldTime = true;
+ OldTime = Timer::GetTicksMs();
+ return;
+ }
+
+ UInt32 newTime = Timer::GetTicksMs();
+ // Unsigned subtraction yields the correct delta even when the tick counter wraps.
+ UInt32 elapsedMilliS = newTime - OldTime;
+ OldTime = newTime;
+
+ elapsedMilliS = Alg::Min(elapsedMilliS, (UInt32) 100); // Clamp at 100mS in case we're not being called very often.
+
+
+ if (ActiveTimerMilliS == 0)
+ {
+ return;
+ }
+
+ if (elapsedMilliS >= ActiveTimerMilliS)
+ {
+ ActiveTimerMilliS = 0;
+ handleMessage(Message(), LatencyTest_Timer);
+ return;
+ }
+
+ ActiveTimerMilliS -= elapsedMilliS;
+}
+
+}} // namespace OVR::Util
diff --git a/LibOVR/Src/Util/Util_LatencyTest.h b/LibOVR/Src/Util/Util_LatencyTest.h
new file mode 100644
index 0000000..0844603
--- /dev/null
+++ b/LibOVR/Src/Util/Util_LatencyTest.h
@@ -0,0 +1,173 @@
+/************************************************************************************
+
+PublicHeader: OVR.h
+Filename : Util_LatencyTest.h
+Content : Wraps the lower level LatencyTestDevice and adds functionality.
+Created : February 14, 2013
+Authors : Lee Cooper
+
+Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
+
+Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
+you may not use the Oculus VR Rift SDK except in compliance with the License,
+which is provided at the time of installation or download, or which
+otherwise accompanies this software in either electronic or hard copy form.
+
+You may obtain a copy of the License at
+
+http://www.oculusvr.com/licenses/LICENSE-3.1
+
+Unless required by applicable law or agreed to in writing, the Oculus VR SDK
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*************************************************************************************/
+
+#ifndef OVR_Util_LatencyTest_h
+#define OVR_Util_LatencyTest_h
+
+#include "../OVR_Device.h"
+
+#include "../Kernel/OVR_String.h"
+#include "../Kernel/OVR_List.h"
+
+namespace OVR { namespace Util {
+
+
+//-------------------------------------------------------------------------------------
+// ***** LatencyTest
+//
+// LatencyTest utility class wraps the low level LatencyTestDevice and manages the scheduling
+// of a latency test. A single test is composed of a series of individual latency measurements
+// which are used to derive min, max, and an average latency value.
+//
+// Developers are required to call the following methods:
+// SetDevice - Sets the LatencyTestDevice to be used for the tests.
+// ProcessInputs - This should be called at the same place in the code where the game engine
+// reads the headset orientation from LibOVR (typically done by calling
+// 'GetOrientation' on the SensorFusion object). Calling this at the right time
+// enables us to measure the same latency that occurs for headset orientation
+// changes.
+// DisplayScreenColor - The latency tester works by sensing the color of the pixels directly
+// beneath it. The color of these pixels can be set by drawing a small
+// quad at the end of the rendering stage. The quad should be small
+// such that it doesn't significantly impact the rendering of the scene,
+// but large enough to be 'seen' by the sensor. See the SDK
+// documentation for more information.
+// GetResultsString - Call this to get a string containing the most recent results.
+// If the string has already been retrieved then NULL will be returned.
+// The string pointer will remain valid until the next time this
+// method is called.
+//
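+// A minimal usage sketch (illustrative; 'pLatencyTesterDevice' and
+// 'RenderColorQuad' are app-side names, not LibOVR API):
+//
+//     Util::LatencyTest latencyUtil(pLatencyTesterDevice);
+//     ...
+//     // Each frame, where the app reads the headset orientation:
+//     latencyUtil.ProcessInputs();
+//     ...
+//     // At the very end of rendering:
+//     Color colorToDisplay;
+//     if (latencyUtil.DisplayScreenColor(colorToDisplay))
+//         RenderColorQuad(colorToDisplay);
+//     ...
+//     // Poll once per frame; returns non-NULL when new results arrive:
+//     const char* results = latencyUtil.GetResultsString();
+//     if (results != NULL)
+//         OVR_DEBUG_LOG(("%s", results));
+//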
+
+class LatencyTest : public NewOverrideBase
+{
+public:
+ LatencyTest(LatencyTestDevice* device = NULL);
+ ~LatencyTest();
+
+ // Set the Latency Tester device that we'll use to send commands to and receive
+ // notification messages from.
+ bool SetDevice(LatencyTestDevice* device);
+
+ // Returns true if this LatencyTestUtil has a Latency Tester device.
+ bool HasDevice() const
+ { return Handler.IsHandlerInstalled(); }
+
+ void ProcessInputs();
+ bool DisplayScreenColor(Color& colorToDisplay);
+ const char* GetResultsString();
+
+ bool IsMeasuringNow() const { return (State != State_WaitingForButton); }
+
+ // Begin test. Equivalent to pressing the button on the latency tester.
+ void BeginTest();
+
+private:
+ LatencyTest* getThis() { return this; }
+
+ enum LatencyTestMessageType
+ {
+ LatencyTest_None,
+ LatencyTest_Timer,
+ LatencyTest_ProcessInputs,
+ };
+
+ UInt32 getRandomComponent(UInt32 range);
+ void handleMessage(const Message& msg, LatencyTestMessageType latencyTestMessage = LatencyTest_None);
+ void reset();
+ void setTimer(UInt32 timeMilliS);
+ void clearTimer();
+
+ class LatencyTestHandler : public MessageHandler
+ {
+ LatencyTest* pLatencyTestUtil;
+ public:
+ LatencyTestHandler(LatencyTest* latencyTester) : pLatencyTestUtil(latencyTester) { }
+ ~LatencyTestHandler();
+
+ virtual void OnMessage(const Message& msg);
+ };
+
+ bool areResultsComplete();
+ void processResults();
+ void updateForTimeouts();
+
+ Ptr<LatencyTestDevice> Device;
+ LatencyTestHandler Handler;
+
+ enum TesterState
+ {
+ State_WaitingForButton,
+ State_WaitingForSettlePreCalibrationColorBlack,
+ State_WaitingForSettlePostCalibrationColorBlack,
+ State_WaitingForSettlePreCalibrationColorWhite,
+ State_WaitingForSettlePostCalibrationColorWhite,
+ State_WaitingToTakeMeasurement,
+ State_WaitingForTestStarted,
+ State_WaitingForColorDetected,
+ State_WaitingForSettlePostMeasurement
+ };
+ TesterState State;
+
+ bool HaveOldTime;
+ UInt32 OldTime;
+ UInt32 ActiveTimerMilliS;
+
+ Color RenderColor;
+
+ struct MeasurementResult : public ListNode<MeasurementResult>, public NewOverrideBase
+ {
+ MeasurementResult()
+ : DeviceMeasuredElapsedMilliS(0),
+ TimedOutWaitingForTestStarted(false),
+ TimedOutWaitingForColorDetected(false),
+ StartTestSeconds(0.0),
+ TestStartedSeconds(0.0)
+ {}
+
+ Color TargetColor;
+
+ UInt32 DeviceMeasuredElapsedMilliS;
+
+ bool TimedOutWaitingForTestStarted;
+ bool TimedOutWaitingForColorDetected;
+
+ double StartTestSeconds;
+ double TestStartedSeconds;
+ };
+
+ List<MeasurementResult> Results;
+ void clearMeasurementResults();
+
+ MeasurementResult* getActiveResult();
+
+ StringBuffer ResultsString;
+ String ReturnedResultString;
+};
+
+}} // namespace OVR::Util
+
+#endif // OVR_Util_LatencyTest_h
diff --git a/LibOVR/Src/Util/Util_LatencyTest2.cpp b/LibOVR/Src/Util/Util_LatencyTest2.cpp
new file mode 100644
index 0000000..6fc8b1f
--- /dev/null
+++ b/LibOVR/Src/Util/Util_LatencyTest2.cpp
@@ -0,0 +1,191 @@
+/************************************************************************************
+
+Filename : Util_LatencyTest2.cpp
+Content : Wraps the lower level LatencyTester interface for DK2 and adds functionality.
+Created : March 10, 2014
+Authors : Volga Aksoy
+
+Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
+
+Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
+you may not use the Oculus VR Rift SDK except in compliance with the License,
+which is provided at the time of installation or download, or which
+otherwise accompanies this software in either electronic or hard copy form.
+
+You may obtain a copy of the License at
+
+http://www.oculusvr.com/licenses/LICENSE-3.1
+
+Unless required by applicable law or agreed to in writing, the Oculus VR SDK
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*************************************************************************************/
+
+#include "Util_LatencyTest2.h"
+
+#include "../OVR_CAPI.h"
+#include "../Kernel/OVR_Log.h"
+#include "../Kernel/OVR_Timer.h"
+
+
+namespace OVR { namespace Util {
+
+//-------------------------------------------------------------------------------------
+// ***** LatencyTest2
+
+LatencyTest2::LatencyTest2(SensorDevice* device)
+ : Handler(getThis())
+ , TestActive(false)
+ , StartTiming(-1)
+ , LatencyMeasuredInSeconds(-1)
+ , LastPixelReadMsg(NULL)
+ , RenderColorValue(0)
+ , NumMsgsBeforeSettle(0)
+ , NumTestsSuccessful(0)
+{
+ if (device != NULL)
+ {
+ SetSensorDevice(device);
+ }
+}
+
+LatencyTest2::~LatencyTest2()
+{
+ HmdDevice = NULL;
+ LatencyTesterDev = NULL;
+
+ Handler.RemoveHandlerFromDevices();
+}
+
+bool LatencyTest2::SetSensorDevice(SensorDevice* device)
+{
+ Lock::Locker devLocker(&TesterLock);
+
+ // Enable/Disable pixel read from HMD
+ if (device != HmdDevice)
+ {
+ Handler.RemoveHandlerFromDevices();
+
+ HmdDevice = device;
+
+ if (HmdDevice != NULL)
+ {
+ HmdDevice->AddMessageHandler(&Handler);
+ }
+ }
+
+ return true;
+}
+
+bool LatencyTest2::SetDisplayDevice(LatencyTestDevice* device)
+{
+ Lock::Locker devLocker(&TesterLock);
+
+ if (device != LatencyTesterDev)
+ {
+ LatencyTesterDev = device;
+ if (LatencyTesterDev != NULL)
+ {
+ // Set display to initial (3 dashes).
+ LatencyTestDisplay ltd(2, 0x40400040);
+ LatencyTesterDev->SetDisplay(ltd);
+ }
+ }
+
+ return true;
+}
+
+void LatencyTest2::BeginTest(double startTime)
+{
+ Lock::Locker devLocker(&TesterLock);
+
+ if (!TestActive)
+ {
+ TestActive = true;
+ NumMsgsBeforeSettle = 0;
+
+ // Go to next pixel value
+ //RenderColorValue = (RenderColorValue == 0) ? 255 : 0;
+ RenderColorValue = (RenderColorValue + LT2_ColorIncrement) % 256;
+ RawStartTiming = LastPixelReadMsg.RawSensorTime;
+
+ if (startTime > 0.0)
+ StartTiming = startTime;
+ else
+ StartTiming = ovr_GetTimeInSeconds();
+
+ }
+}
+
+void LatencyTest2::handleMessage(const MessagePixelRead& msg)
+{
+ Lock::Locker devLocker(&TesterLock);
+
+ // Hold onto the last message as we will use this when we start a new test
+ LastPixelReadMsg = msg;
+
+ // If color readback index is valid, store it in the lock-less queue.
+ int readbackIndex = 0;
+ if (FrameTimeRecord::ColorToReadbackIndex(&readbackIndex, msg.PixelReadValue))
+ {
+ RecentFrameSet.AddValue(readbackIndex, msg.FrameTimeSeconds);
+ LocklessRecords.SetState(RecentFrameSet);
+ }
+
+ NumMsgsBeforeSettle++;
+
+ if (TestActive)
+ {
+ int pixelValueDiff = RenderColorValue - LastPixelReadMsg.PixelReadValue;
+ int rawTimeDiff = LastPixelReadMsg.RawFrameTime - RawStartTiming;
+
+ if (pixelValueDiff < LT2_PixelTestThreshold && pixelValueDiff > -LT2_PixelTestThreshold)
+ {
+ TestActive = false;
+
+ LatencyMeasuredInSeconds = LastPixelReadMsg.FrameTimeSeconds - StartTiming;
+ RawLatencyMeasured = rawTimeDiff;
+ //LatencyMeasuredInSeconds = RawLatencyMeasured / 1000000.0;
+
+ if(LatencyTesterDev && (NumTestsSuccessful % 5) == 0)
+ {
+ int displayNum = (int)(RawLatencyMeasured / 100.0);
+ //int displayNum = NumMsgsBeforeSettle;
+ //int displayNum = (int)(LatencyMeasuredInSeconds * 1000.0);
+ LatencyTestDisplay ltd(1, displayNum);
+ LatencyTesterDev->SetDisplay(ltd);
+ }
+
+ NumTestsSuccessful++;
+ }
+ else if ((rawTimeDiff / 1000) > LT2_TimeoutWaitingForColorDetected)
+ {
+ TestActive = false;
+ LatencyMeasuredInSeconds = -1;
+ }
+ }
+}
+
+LatencyTest2::PixelReadHandler::~PixelReadHandler()
+{
+ RemoveHandlerFromDevices();
+}
+
+void LatencyTest2::PixelReadHandler::OnMessage(const Message& msg)
+{
+ if(msg.Type == Message_PixelRead)
+ pLatencyTestUtil->handleMessage(static_cast<const MessagePixelRead&>(msg));
+}
+
+bool LatencyTest2::DisplayScreenColor(Color& colorToDisplay)
+{
+ Lock::Locker devLocker(&TesterLock);
+ colorToDisplay = Color(RenderColorValue, RenderColorValue, RenderColorValue, 255);
+
+ return TestActive;
+}
+
+}} // namespace OVR::Util
diff --git a/LibOVR/Src/Util/Util_LatencyTest2.h b/LibOVR/Src/Util/Util_LatencyTest2.h
new file mode 100644
index 0000000..61e8477
--- /dev/null
+++ b/LibOVR/Src/Util/Util_LatencyTest2.h
@@ -0,0 +1,238 @@
+/************************************************************************************
+
+PublicHeader: OVR.h
+Filename : Util_LatencyTest2.h
+Content : Wraps the lower level LatencyTester interface for DK2 and adds functionality.
+Created : March 10, 2014
+Authors : Volga Aksoy
+
+Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
+
+Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
+you may not use the Oculus VR Rift SDK except in compliance with the License,
+which is provided at the time of installation or download, or which
+otherwise accompanies this software in either electronic or hard copy form.
+
+You may obtain a copy of the License at
+
+http://www.oculusvr.com/licenses/LICENSE-3.1
+
+Unless required by applicable law or agreed to in writing, the Oculus VR SDK
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*************************************************************************************/
+
+#ifndef OVR_Util_LatencyTest2_h
+#define OVR_Util_LatencyTest2_h
+
+#include "../OVR_Device.h"
+
+#include "../Kernel/OVR_String.h"
+#include "../Kernel/OVR_List.h"
+#include "../Kernel/OVR_Lockless.h"
+
+namespace OVR { namespace Util {
+
+
+enum {
+ LT2_ColorIncrement = 32,
+ LT2_PixelTestThreshold = LT2_ColorIncrement / 3,
+ LT2_IncrementCount = 256 / LT2_ColorIncrement,
+ LT2_TimeoutWaitingForColorDetected = 1000 // 1 second
+};
+
+//-------------------------------------------------------------------------------------
+
+// Describes frame scanout time used for latency testing.
+struct FrameTimeRecord
+{
+ int ReadbackIndex;
+ double TimeSeconds;
+
+ // Utility functions to convert color to readback indices and back.
+ // The purpose of ReadbackIndex is to allow direct comparison by value.
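+ // For example, with LT2_ColorIncrement = 32 the representable colors are
+ // 16, 48, 80, ... : ReadbackIndexToColor(4) = 4*32 + 16 = 144, and
+ // ColorToReadbackIndex maps any color within LT2_PixelTestThreshold
+ // (32/3 = 10) of 144 back to index 4.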
+
+ static bool ColorToReadbackIndex(int *readbackIndex, unsigned char color)
+ {
+ int compareColor = color - LT2_ColorIncrement/2;
+ int index = color / LT2_ColorIncrement; // Use color without subtraction due to rounding.
+ int delta = compareColor - index * LT2_ColorIncrement;
+
+ if ((delta < LT2_PixelTestThreshold) && (delta > -LT2_PixelTestThreshold))
+ {
+ *readbackIndex = index;
+ return true;
+ }
+ return false;
+ }
+
+ static unsigned char ReadbackIndexToColor(int readbackIndex)
+ {
+ OVR_ASSERT(readbackIndex < LT2_IncrementCount);
+ return (unsigned char)(readbackIndex * LT2_ColorIncrement + LT2_ColorIncrement/2);
+ }
+};
+
+// FrameTimeRecordSet is a container holding multiple consecutive frame timing records
+// returned from the lock-less state. Used by FrameTimeManager.
+
+struct FrameTimeRecordSet
+{
+ enum {
+ RecordCount = 4,
+ RecordMask = RecordCount - 1
+ };
+ FrameTimeRecord Records[RecordCount];
+ int NextWriteIndex;
+
+ FrameTimeRecordSet()
+ {
+ memset(Records, 0, sizeof(Records));
+ NextWriteIndex = 0;
+ }
+
+ void AddValue(int readValue, double timeSeconds)
+ {
+ Records[NextWriteIndex].ReadbackIndex = readValue;
+ Records[NextWriteIndex].TimeSeconds = timeSeconds;
+ NextWriteIndex ++;
+ if (NextWriteIndex == RecordCount)
+ NextWriteIndex = 0;
+ }
+ // Matching should be done starting from NextWrite index
+ // until wrap-around
+
+ const FrameTimeRecord& operator [] (int i) const
+ {
+ return Records[(NextWriteIndex + i) & RecordMask];
+ }
+
+ const FrameTimeRecord& GetMostRecentFrame()
+ {
+ return Records[(NextWriteIndex - 1) & RecordMask];
+ }
+
+ // Advances *i to the record holding the given readback index.
+ bool FindReadbackIndex(int* i, int readbackIndex) const
+ {
+ for (; *i < RecordCount; (*i)++)
+ {
+ if ((*this)[*i].ReadbackIndex == readbackIndex)
+ return true;
+ }
+ return false;
+ }
+
+ bool IsAllZeroes() const
+ {
+ for (int i = 0; i < RecordCount; i++)
+ if (Records[i].ReadbackIndex != 0)
+ return false;
+ return true;
+ }
+};
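+
+// For example, a consumer can take a snapshot of the lock-less state and scan
+// from the oldest record forward for a given readback index (illustrative
+// sketch; 'lt2' and 'expectedIndex' are app-side names):
+//
+//     FrameTimeRecordSet snapshot = lt2.GetLocklessState();
+//     int i = 0;
+//     if ( snapshot.FindReadbackIndex( &i, expectedIndex ) )
+//     {
+//         double scanoutSeconds = snapshot[i].TimeSeconds;
+//     }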
+
+
+//-------------------------------------------------------------------------------------
+// ***** LatencyTest2
+//
+// LatencyTest2 utility class wraps the low level SensorDevice and manages the scheduling
+// of a latency test. A single test measures the time from the moment a new color is
+// rendered to the moment the HMD's sensor reports that color under the display.
+//
+// Developers are required to call the following methods:
+// SetSensorDevice - Sets the SensorDevice whose pixel-read messages drive the tests.
+// SetDisplayDevice - Optionally sets a LatencyTestDevice so results can be shown
+// on its LED display.
+// DisplayScreenColor - The latency tester works by sensing the color of the pixels directly
+// beneath it. The color of these pixels can be set by drawing a small
+// quad at the end of the rendering stage. The quad should be small
+// such that it doesn't significantly impact the rendering of the scene,
+// but large enough to be 'seen' by the sensor. See the SDK
+// documentation for more information.
+// BeginTest - Starts a measurement; equivalent to pressing the button on
+// the latency tester.
+// GetMeasuredLatency - Returns the most recently measured latency in seconds, or a
+// negative value if no measurement has completed.
+//
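+// A minimal usage sketch (illustrative; 'pSensorDevice' and 'RenderColorQuad'
+// are app-side names, not LibOVR API):
+//
+//     Util::LatencyTest2 lt2(pSensorDevice);
+//     lt2.BeginTest(ovr_GetTimeInSeconds());
+//     ...
+//     // At the end of each rendered frame:
+//     Color colorToDisplay;
+//     if (lt2.DisplayScreenColor(colorToDisplay))
+//         RenderColorQuad(colorToDisplay);
+//     ...
+//     if (!lt2.IsMeasuringNow() && lt2.GetMeasuredLatency() > 0.0)
+//         OVR_DEBUG_LOG(("Latency: %.3f s", lt2.GetMeasuredLatency()));
+//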
+
+class LatencyTest2 : public NewOverrideBase
+{
+public:
+ LatencyTest2(SensorDevice* device = NULL);
+ ~LatencyTest2();
+
+ // Set the Latency Tester device that we'll use to send commands to and receive
+ // notification messages from.
+ bool SetSensorDevice(SensorDevice* device);
+ bool SetDisplayDevice(LatencyTestDevice* device);
+
+ // Returns true if this LatencyTestUtil has a Latency Tester device.
+ bool HasDisplayDevice() const { return LatencyTesterDev.GetPtr() != NULL; }
+ bool HasDevice() const { return Handler.IsHandlerInstalled(); }
+
+ bool DisplayScreenColor(Color& colorToDisplay);
+ //const char* GetResultsString();
+
+ // Begin test. Equivalent to pressing the button on the latency tester.
+ void BeginTest(double startTime = -1.0f);
+ bool IsMeasuringNow() const { return TestActive; }
+ double GetMeasuredLatency() const { return LatencyMeasuredInSeconds; }
+
+//
+ FrameTimeRecordSet GetLocklessState() { return LocklessRecords.GetState(); }
+
+private:
+ LatencyTest2* getThis() { return this; }
+
+ enum LatencyTestMessageType
+ {
+ LatencyTest_None,
+ LatencyTest_Timer,
+ LatencyTest_ProcessInputs,
+ };
+
+ void handleMessage(const MessagePixelRead& msg);
+
+ class PixelReadHandler : public MessageHandler
+ {
+ LatencyTest2* pLatencyTestUtil;
+ public:
+ PixelReadHandler(LatencyTest2* latencyTester) : pLatencyTestUtil(latencyTester) { }
+ ~PixelReadHandler();
+
+ virtual void OnMessage(const Message& msg);
+ };
+ PixelReadHandler Handler;
+
+ Ptr<SensorDevice> HmdDevice;
+ Ptr<LatencyTestDevice> LatencyTesterDev;
+
+ Lock TesterLock;
+ bool TestActive;
+ unsigned char RenderColorValue;
+ MessagePixelRead LastPixelReadMsg;
+ double StartTiming;
+ unsigned int RawStartTiming;
+ UInt32 RawLatencyMeasured;
+ double LatencyMeasuredInSeconds;
+ int NumMsgsBeforeSettle;
+ unsigned int NumTestsSuccessful;
+
+ // MA:
+ // Frames are added here, then copied into the lockless state.
+ FrameTimeRecordSet RecentFrameSet;
+ LocklessUpdater<FrameTimeRecordSet> LocklessRecords;
+};
+
+
+
+}} // namespace OVR::Util
+
+#endif // OVR_Util_LatencyTest2_h
diff --git a/LibOVR/Src/Util/Util_Render_Stereo.cpp b/LibOVR/Src/Util/Util_Render_Stereo.cpp
new file mode 100644
index 0000000..e84381e
--- /dev/null
+++ b/LibOVR/Src/Util/Util_Render_Stereo.cpp
@@ -0,0 +1,1472 @@
+/************************************************************************************
+
+Filename : Util_Render_Stereo.cpp
+Content : Stereo rendering configuration implementation
+Created : October 22, 2012
+Authors : Michael Antonov, Andrew Reisse, Tom Forsyth
+
+Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
+
+Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
+you may not use the Oculus VR Rift SDK except in compliance with the License,
+which is provided at the time of installation or download, or which
+otherwise accompanies this software in either electronic or hard copy form.
+
+You may obtain a copy of the License at
+
+http://www.oculusvr.com/licenses/LICENSE-3.1
+
+Unless required by applicable law or agreed to in writing, the Oculus VR SDK
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*************************************************************************************/
+
+#include "Util_Render_Stereo.h"
+#include "../OVR_SensorFusion.h"
+
+namespace OVR { namespace Util { namespace Render {
+
+
+//-----------------------------------------------------------------------------------
+// **** Useful debug functions.
+
+char const* GetDebugNameEyeCupType ( EyeCupType eyeCupType )
+{
+ switch ( eyeCupType )
+ {
+ case EyeCup_DK1A: return "DK1 A"; break;
+ case EyeCup_DK1B: return "DK1 B"; break;
+ case EyeCup_DK1C: return "DK1 C"; break;
+ case EyeCup_DKHD2A: return "DKHD2 A"; break;
+ case EyeCup_OrangeA: return "Orange A"; break;
+ case EyeCup_RedA: return "Red A"; break;
+ case EyeCup_PinkA: return "Pink A"; break;
+ case EyeCup_BlueA: return "Blue A"; break;
+ case EyeCup_Delilah1A: return "Delilah 1 A"; break;
+ case EyeCup_Delilah2A: return "Delilah 2 A"; break;
+ case EyeCup_JamesA: return "James A"; break;
+ case EyeCup_SunMandalaA: return "Sun Mandala A"; break;
+ case EyeCup_DK2A: return "DK2 A"; break;
+ case EyeCup_LAST: return "LAST"; break;
+ default: OVR_ASSERT ( false ); return "Error"; break;
+ }
+}
+
+char const* GetDebugNameHmdType ( HmdTypeEnum hmdType )
+{
+ switch ( hmdType )
+ {
+ case HmdType_None: return "None"; break;
+ case HmdType_DK1: return "DK1"; break;
+ case HmdType_DKProto: return "DK1 prototype"; break;
+ case HmdType_DKHDProto: return "DK HD prototype 1"; break;
+ case HmdType_DKHDProto566Mi: return "DK HD prototype 566 Mi"; break;
+ case HmdType_DKHD2Proto: return "DK HD prototype 585"; break;
+ case HmdType_CrystalCoveProto: return "Crystal Cove"; break;
+ case HmdType_DK2: return "DK2"; break;
+ case HmdType_Unknown: return "Unknown"; break;
+ case HmdType_LAST: return "LAST"; break;
+ default: OVR_ASSERT ( false ); return "Error"; break;
+ }
+}
+
+
+//-----------------------------------------------------------------------------------
+// **** Internal pipeline functions.
+
+struct DistortionAndFov
+{
+ DistortionRenderDesc Distortion;
+ FovPort Fov;
+};
+
+static DistortionAndFov CalculateDistortionAndFovInternal ( StereoEye eyeType, HmdRenderInfo const &hmd,
+ LensConfig const *pLensOverride = NULL,
+ FovPort const *pTanHalfFovOverride = NULL,
+ float extraEyeRotationInRadians = OVR_DEFAULT_EXTRA_EYE_ROTATION )
+{
+ // pLensOverride can be NULL, which means no override.
+
+ DistortionRenderDesc localDistortion = CalculateDistortionRenderDesc ( eyeType, hmd, pLensOverride );
+ FovPort fov = CalculateFovFromHmdInfo ( eyeType, localDistortion, hmd, extraEyeRotationInRadians );
+ // Here the app or the user would optionally clamp this visible fov to a smaller number if
+ // they want more perf or resolution and are willing to give up FOV.
+ // They may also choose to clamp UDLR differently e.g. to get cinemascope-style views.
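+ // For illustration only (not something this function does): a
+ // cinemascope-style clamp could cap the vertical tangents harder than the
+ // horizontal ones, e.g.
+ //     fov.UpTan   = Alg::Min ( fov.UpTan,   0.5f );
+ //     fov.DownTan = Alg::Min ( fov.DownTan, 0.5f );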
+ if ( pTanHalfFovOverride != NULL )
+ {
+ fov = *pTanHalfFovOverride;
+ }
+
+ // Here we could call ClampToPhysicalScreenFov(), but we do want people
+ // to be able to play with larger-than-screen views.
+ // The calling app can always do the clamping itself.
+ DistortionAndFov result;
+ result.Distortion = localDistortion;
+ result.Fov = fov;
+
+ return result;
+}
+
+
+static Recti CalculateViewportInternal ( StereoEye eyeType,
+ Sizei const actualRendertargetSurfaceSize,
+ Sizei const requestedRenderedPixelSize,
+ bool bRendertargetSharedByBothEyes,
+ bool bMonoRenderingMode = false )
+{
+ Recti renderedViewport;
+ if ( bMonoRenderingMode || !bRendertargetSharedByBothEyes || (eyeType == StereoEye_Center) )
+ {
+ // One eye per RT.
+ renderedViewport.x = 0;
+ renderedViewport.y = 0;
+ renderedViewport.w = Alg::Min ( actualRendertargetSurfaceSize.w, requestedRenderedPixelSize.w );
+ renderedViewport.h = Alg::Min ( actualRendertargetSurfaceSize.h, requestedRenderedPixelSize.h );
+ }
+ else
+ {
+ // Both eyes share the RT.
+ renderedViewport.x = 0;
+ renderedViewport.y = 0;
+ renderedViewport.w = Alg::Min ( actualRendertargetSurfaceSize.w/2, requestedRenderedPixelSize.w );
+ renderedViewport.h = Alg::Min ( actualRendertargetSurfaceSize.h, requestedRenderedPixelSize.h );
+ if ( eyeType == StereoEye_Right )
+ {
+ renderedViewport.x = (actualRendertargetSurfaceSize.w+1)/2; // Round up, not down.
+ }
+ }
+ return renderedViewport;
+}
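+// Worked example (illustrative): for a 1920x1080 rendertarget shared by both
+// eyes and a requested per-eye size of 1000x1100, each eye's viewport is
+// clamped to 960x1080 (half the width, full height), and the right eye's
+// viewport starts at x = (1920+1)/2 = 960.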
+
+static Recti CalculateViewportDensityInternal ( StereoEye eyeType,
+ DistortionRenderDesc const &distortion,
+ FovPort const &fov,
+ Sizei const &actualRendertargetSurfaceSize,
+ bool bRendertargetSharedByBothEyes,
+ float desiredPixelDensity = 1.0f,
+ bool bMonoRenderingMode = false )
+{
+ OVR_ASSERT ( actualRendertargetSurfaceSize.w > 0 );
+ OVR_ASSERT ( actualRendertargetSurfaceSize.h > 0 );
+
+ // What size RT do we need to get 1:1 mapping?
+ Sizei idealPixelSize = CalculateIdealPixelSize ( eyeType, distortion, fov, desiredPixelDensity );
+ // ...but we might not actually get that size.
+ return CalculateViewportInternal ( eyeType,
+ actualRendertargetSurfaceSize,
+ idealPixelSize,
+ bRendertargetSharedByBothEyes, bMonoRenderingMode );
+}
+
+static ViewportScaleAndOffset CalculateViewportScaleAndOffsetInternal (
+ ScaleAndOffset2D const &eyeToSourceNDC,
+ Recti const &renderedViewport,
+ Sizei const &actualRendertargetSurfaceSize )
+{
+ ViewportScaleAndOffset result;
+ result.RenderedViewport = renderedViewport;
+ result.EyeToSourceUV = CreateUVScaleAndOffsetfromNDCScaleandOffset(
+ eyeToSourceNDC, renderedViewport, actualRendertargetSurfaceSize );
+ return result;
+}
+
+
+static StereoEyeParams CalculateStereoEyeParamsInternal ( StereoEye eyeType, HmdRenderInfo const &hmd,
+ DistortionRenderDesc const &distortion,
+ FovPort const &fov,
+ Sizei const &actualRendertargetSurfaceSize,
+ Recti const &renderedViewport,
+ bool bRightHanded = true, float zNear = 0.01f, float zFar = 10000.0f,
+ bool bMonoRenderingMode = false,
+ float zoomFactor = 1.0f )
+{
+ // Generate the projection matrix for intermediate rendertarget.
+ // Z range can also be inserted later by the app (though not in this particular case)
+ float fovScale = 1.0f / zoomFactor;
+ FovPort zoomedFov = fov;
+ zoomedFov.LeftTan *= fovScale;
+ zoomedFov.RightTan *= fovScale;
+ zoomedFov.UpTan *= fovScale;
+ zoomedFov.DownTan *= fovScale;
+ Matrix4f projection = CreateProjection ( bRightHanded, zoomedFov, zNear, zFar );
+
+ // Find the mapping from TanAngle space to target NDC space.
+ // Note this does NOT take the zoom factor into account because
+ // this is the mapping of actual physical eye FOV (and our eyes do not zoom!)
+ // to screen space.
+ ScaleAndOffset2D eyeToSourceNDC = CreateNDCScaleAndOffsetFromFov ( fov );
+
+ // The size of the final FB, which is fixed and determined by the physical size of the device display.
+ Recti distortedViewport = GetFramebufferViewport ( eyeType, hmd );
+ Vector3f virtualCameraOffset = CalculateEyeVirtualCameraOffset(hmd, eyeType, bMonoRenderingMode);
+
+ StereoEyeParams result;
+ result.Eye = eyeType;
+ result.ViewAdjust = Matrix4f::Translation(virtualCameraOffset);
+ result.Distortion = distortion;
+ result.DistortionViewport = distortedViewport;
+ result.Fov = fov;
+ result.RenderedProjection = projection;
+ result.EyeToSourceNDC = eyeToSourceNDC;
+ ViewportScaleAndOffset vsao = CalculateViewportScaleAndOffsetInternal ( eyeToSourceNDC, renderedViewport, actualRendertargetSurfaceSize );
+ result.RenderedViewport = vsao.RenderedViewport;
+ result.EyeToSourceUV = vsao.EyeToSourceUV;
+
+ return result;
+}
+
+
+Vector3f CalculateEyeVirtualCameraOffset(HmdRenderInfo const &hmd,
+ StereoEye eyeType, bool bMonoRenderingMode)
+{
+ Vector3f virtualCameraOffset(0);
+
+ if (!bMonoRenderingMode)
+ {
+ float eyeCenterRelief = hmd.GetEyeCenter().ReliefInMeters;
+
+ if (eyeType == StereoEye_Left)
+ {
+ virtualCameraOffset.x = hmd.EyeLeft.NoseToPupilInMeters;
+ virtualCameraOffset.z = eyeCenterRelief - hmd.EyeLeft.ReliefInMeters;
+ }
+ else if (eyeType == StereoEye_Right)
+ {
+ virtualCameraOffset.x = -hmd.EyeRight.NoseToPupilInMeters;
+ virtualCameraOffset.z = eyeCenterRelief - hmd.EyeRight.ReliefInMeters;
+ }
+ }
+
+ return virtualCameraOffset;
+}
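+// Worked example (illustrative): with NoseToPupilInMeters = 0.032 on both
+// eyes and eye relief equal to the center relief, the left camera is offset
+// +0.032m along X and the right camera -0.032m, placing the two virtual
+// cameras one 64mm IPD apart with no fore/aft shift.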
+
+
+//-----------------------------------------------------------------------------------
+// **** Higher-level utility functions.
+
+Sizei CalculateRecommendedTextureSize ( HmdRenderInfo const &hmd,
+ bool bRendertargetSharedByBothEyes,
+ float pixelDensityInCenter /*= 1.0f*/ )
+{
+ Sizei idealPixelSize[2];
+ for ( int eyeNum = 0; eyeNum < 2; eyeNum++ )
+ {
+ StereoEye eyeType = ( eyeNum == 0 ) ? StereoEye_Left : StereoEye_Right;
+
+ DistortionAndFov distortionAndFov = CalculateDistortionAndFovInternal ( eyeType, hmd, NULL, NULL, OVR_DEFAULT_EXTRA_EYE_ROTATION );
+
+ idealPixelSize[eyeNum] = CalculateIdealPixelSize ( eyeType,
+ distortionAndFov.Distortion,
+ distortionAndFov.Fov,
+ pixelDensityInCenter );
+ }
+
+ Sizei result;
+ result.w = Alg::Max ( idealPixelSize[0].w, idealPixelSize[1].w );
+ result.h = Alg::Max ( idealPixelSize[0].h, idealPixelSize[1].h );
+ if ( bRendertargetSharedByBothEyes )
+ {
+ result.w *= 2;
+ }
+ return result;
+}
+
+StereoEyeParams CalculateStereoEyeParams ( HmdRenderInfo const &hmd,
+ StereoEye eyeType,
+ Sizei const &actualRendertargetSurfaceSize,
+ bool bRendertargetSharedByBothEyes,
+ bool bRightHanded /*= true*/,
+ float zNear /*= 0.01f*/, float zFar /*= 10000.0f*/,
+ Sizei const *pOverrideRenderedPixelSize /* = NULL*/,
+ FovPort const *pOverrideFovport /*= NULL*/,
+ float zoomFactor /*= 1.0f*/ )
+{
+ DistortionAndFov distortionAndFov = CalculateDistortionAndFovInternal ( eyeType, hmd, NULL, NULL, OVR_DEFAULT_EXTRA_EYE_ROTATION );
+ if ( pOverrideFovport != NULL )
+ {
+ distortionAndFov.Fov = *pOverrideFovport;
+ }
+
+ Recti viewport;
+ if ( pOverrideRenderedPixelSize != NULL )
+ {
+ viewport = CalculateViewportInternal ( eyeType, actualRendertargetSurfaceSize, *pOverrideRenderedPixelSize, bRendertargetSharedByBothEyes, false );
+ }
+ else
+ {
+ viewport = CalculateViewportDensityInternal ( eyeType,
+ distortionAndFov.Distortion,
+ distortionAndFov.Fov,
+ actualRendertargetSurfaceSize, bRendertargetSharedByBothEyes, 1.0f, false );
+ }
+
+ return CalculateStereoEyeParamsInternal (
+ eyeType, hmd,
+ distortionAndFov.Distortion,
+ distortionAndFov.Fov,
+ actualRendertargetSurfaceSize, viewport,
+ bRightHanded, zNear, zFar, false, zoomFactor );
+}
+
+
+FovPort CalculateRecommendedFov ( HmdRenderInfo const &hmd,
+ StereoEye eyeType,
+ bool bMakeFovSymmetrical /* = false */ )
+{
+ DistortionAndFov distortionAndFov = CalculateDistortionAndFovInternal ( eyeType, hmd, NULL, NULL, OVR_DEFAULT_EXTRA_EYE_ROTATION );
+ FovPort fov = distortionAndFov.Fov;
+ if ( bMakeFovSymmetrical )
+ {
+ // Deal with engines that cannot support an off-center projection.
+ // Unfortunately this means they will be rendering pixels that the user can't actually see.
+ float fovTanH = Alg::Max ( fov.LeftTan, fov.RightTan );
+ float fovTanV = Alg::Max ( fov.UpTan, fov.DownTan );
+ fov.LeftTan = fovTanH;
+ fov.RightTan = fovTanH;
+ fov.UpTan = fovTanV;
+ fov.DownTan = fovTanV;
+ }
+ return fov;
+}
+
+ViewportScaleAndOffset ModifyRenderViewport ( StereoEyeParams const &params,
+ Sizei const &actualRendertargetSurfaceSize,
+ Recti const &renderViewport )
+{
+ return CalculateViewportScaleAndOffsetInternal ( params.EyeToSourceNDC, renderViewport, actualRendertargetSurfaceSize );
+}
+
+ViewportScaleAndOffset ModifyRenderSize ( StereoEyeParams const &params,
+ Sizei const &actualRendertargetSurfaceSize,
+ Sizei const &requestedRenderSize,
+ bool bRendertargetSharedByBothEyes /*= false*/ )
+{
+ Recti renderViewport = CalculateViewportInternal ( params.Eye, actualRendertargetSurfaceSize, requestedRenderSize, bRendertargetSharedByBothEyes, false );
+ return CalculateViewportScaleAndOffsetInternal ( params.EyeToSourceNDC, renderViewport, actualRendertargetSurfaceSize );
+}
+
+ViewportScaleAndOffset ModifyRenderDensity ( StereoEyeParams const &params,
+ Sizei const &actualRendertargetSurfaceSize,
+ float pixelDensity /*= 1.0f*/,
+ bool bRendertargetSharedByBothEyes /*= false*/ )
+{
+ Recti renderViewport = CalculateViewportDensityInternal ( params.Eye, params.Distortion, params.Fov, actualRendertargetSurfaceSize, bRendertargetSharedByBothEyes, pixelDensity, false );
+ return CalculateViewportScaleAndOffsetInternal ( params.EyeToSourceNDC, renderViewport, actualRendertargetSurfaceSize );
+}
+
+
+//-----------------------------------------------------------------------------------
+// **** StereoConfig Implementation
+
+StereoConfig::StereoConfig(StereoMode mode)
+ : Mode(mode),
+ DirtyFlag(true)
+{
+ // Initialize "fake" default HMD values for testing without HMD plugged in.
+ // These default values match those returned by DK1
+ // (at least they did at time of writing - certainly good enough for debugging)
+ Hmd.HmdType = HmdType_None;
+ Hmd.ResolutionInPixels = Sizei(1280, 800);
+ Hmd.ScreenSizeInMeters = Sizef(0.1498f, 0.0936f);
+ Hmd.ScreenGapSizeInMeters = 0.0f;
+ Hmd.CenterFromTopInMeters = 0.0468f;
+ Hmd.LensSeparationInMeters = 0.0635f;
+ Hmd.LensDiameterInMeters = 0.035f;
+ Hmd.LensSurfaceToMidplateInMeters = 0.025f;
+ Hmd.EyeCups = EyeCup_DK1A;
+ Hmd.Shutter.Type = HmdShutter_RollingTopToBottom;
+ Hmd.Shutter.VsyncToNextVsync = ( 1.0f / 60.0f );
+ Hmd.Shutter.VsyncToFirstScanline = 0.000052f;
+ Hmd.Shutter.FirstScanlineToLastScanline = 0.016580f;
+ Hmd.Shutter.PixelSettleTime = 0.015f;
+ Hmd.Shutter.PixelPersistence = ( 1.0f / 60.0f );
+ Hmd.EyeLeft.Distortion.SetToIdentity();
+ Hmd.EyeLeft.Distortion.MetersPerTanAngleAtCenter = 0.043875f;
+ Hmd.EyeLeft.Distortion.Eqn = Distortion_RecipPoly4;
+ Hmd.EyeLeft.Distortion.K[0] = 1.0f;
+ Hmd.EyeLeft.Distortion.K[1] = -0.3999f;
+ Hmd.EyeLeft.Distortion.K[2] = 0.2408f;
+ Hmd.EyeLeft.Distortion.K[3] = -0.4589f;
+ Hmd.EyeLeft.Distortion.MaxR = 1.0f;
+ Hmd.EyeLeft.Distortion.ChromaticAberration[0] = 0.006f;
+ Hmd.EyeLeft.Distortion.ChromaticAberration[1] = 0.0f;
+ Hmd.EyeLeft.Distortion.ChromaticAberration[2] = -0.014f;
+ Hmd.EyeLeft.Distortion.ChromaticAberration[3] = 0.0f;
+ Hmd.EyeLeft.NoseToPupilInMeters = 0.032f; // Half of the 64mm default IPD.
+ Hmd.EyeLeft.ReliefInMeters = 0.013f;
+ Hmd.EyeRight = Hmd.EyeLeft;
+
+ SetViewportMode = SVPM_Density;
+ SetViewportPixelsPerDisplayPixel = 1.0f;
+ // Not used in this mode, but init them anyway.
+ SetViewportSize[0] = Sizei(0,0);
+ SetViewportSize[1] = Sizei(0,0);
+ SetViewport[0] = Recti(0,0,0,0);
+ SetViewport[1] = Recti(0,0,0,0);
+
+ OverrideLens = false;
+ OverrideTanHalfFov = false;
+ OverrideZeroIpd = false;
+ ExtraEyeRotationInRadians = OVR_DEFAULT_EXTRA_EYE_ROTATION;
+ IsRendertargetSharedByBothEyes = true;
+ RightHandedProjection = true;
+
+ // This should cause an assert if the app does not call SetRendertargetSize()
+ RendertargetSize = Sizei ( 0, 0 );
+
+ ZNear = 0.01f;
+ ZFar = 10000.0f;
+
+ Set2DAreaFov(DegreeToRad(85.0f));
+}
+
+void StereoConfig::SetHmdRenderInfo(const HmdRenderInfo& hmd)
+{
+ Hmd = hmd;
+ DirtyFlag = true;
+}
+
+void StereoConfig::Set2DAreaFov(float fovRadians)
+{
+ Area2DFov = fovRadians;
+ DirtyFlag = true;
+}
+
+const StereoEyeParamsWithOrtho& StereoConfig::GetEyeRenderParams(StereoEye eye)
+{
+ if ( DirtyFlag )
+ {
+ UpdateComputedState();
+ }
+
+ static const UByte eyeParamIndices[3] = { 0, 0, 1 };
+
+ OVR_ASSERT(eye < sizeof(eyeParamIndices));
+ return EyeRenderParams[eyeParamIndices[eye]];
+}
+
+void StereoConfig::SetLensOverride ( LensConfig const *pLensOverrideLeft /*= NULL*/,
+ LensConfig const *pLensOverrideRight /*= NULL*/ )
+{
+ if ( pLensOverrideLeft == NULL )
+ {
+ OverrideLens = false;
+ }
+ else
+ {
+ OverrideLens = true;
+ LensOverrideLeft = *pLensOverrideLeft;
+ LensOverrideRight = *pLensOverrideLeft;
+ if ( pLensOverrideRight != NULL )
+ {
+ LensOverrideRight = *pLensOverrideRight;
+ }
+ }
+ DirtyFlag = true;
+}
+
+void StereoConfig::SetRendertargetSize (Size<int> const rendertargetSize,
+ bool rendertargetIsSharedByBothEyes )
+{
+ RendertargetSize = rendertargetSize;
+ IsRendertargetSharedByBothEyes = rendertargetIsSharedByBothEyes;
+ DirtyFlag = true;
+}
+
+void StereoConfig::SetFov ( FovPort const *pfovLeft /*= NULL*/,
+ FovPort const *pfovRight /*= NULL*/ )
+{
+ DirtyFlag = true;
+ if ( pfovLeft == NULL )
+ {
+ OverrideTanHalfFov = false;
+ }
+ else
+ {
+ OverrideTanHalfFov = true;
+ FovOverrideLeft = *pfovLeft;
+ FovOverrideRight = *pfovLeft;
+ if ( pfovRight != NULL )
+ {
+ FovOverrideRight = *pfovRight;
+ }
+ }
+}
+
+
+void StereoConfig::SetZeroVirtualIpdOverride ( bool enableOverride )
+{
+ DirtyFlag = true;
+ OverrideZeroIpd = enableOverride;
+}
+
+
+void StereoConfig::SetZClipPlanesAndHandedness ( float zNear /*= 0.01f*/, float zFar /*= 10000.0f*/, bool rightHandedProjection /*= true*/ )
+{
+ DirtyFlag = true;
+ ZNear = zNear;
+ ZFar = zFar;
+ RightHandedProjection = rightHandedProjection;
+}
+
+void StereoConfig::SetExtraEyeRotation ( float extraEyeRotationInRadians )
+{
+ DirtyFlag = true;
+ ExtraEyeRotationInRadians = extraEyeRotationInRadians;
+}
+
+Sizei StereoConfig::CalculateRecommendedTextureSize ( bool rendertargetSharedByBothEyes,
+ float pixelDensityInCenter /*= 1.0f*/ )
+{
+ return Render::CalculateRecommendedTextureSize ( Hmd, rendertargetSharedByBothEyes, pixelDensityInCenter );
+}
+
+
+
+void StereoConfig::UpdateComputedState()
+{
+ int numEyes = 2;
+ StereoEye eyeTypes[2];
+
+ switch ( Mode )
+ {
+ case Stereo_None:
+ numEyes = 1;
+ eyeTypes[0] = StereoEye_Center;
+ break;
+
+ case Stereo_LeftRight_Multipass:
+ numEyes = 2;
+ eyeTypes[0] = StereoEye_Left;
+ eyeTypes[1] = StereoEye_Right;
+ break;
+
+ default:
+ OVR_ASSERT( false ); break;
+ }
+
+ // If either of these fire, you've probably forgotten to call SetRendertargetSize()
+ OVR_ASSERT ( RendertargetSize.w > 0 );
+ OVR_ASSERT ( RendertargetSize.h > 0 );
+
+ for ( int eyeNum = 0; eyeNum < numEyes; eyeNum++ )
+ {
+ StereoEye eyeType = eyeTypes[eyeNum];
+ LensConfig *pLensOverride = NULL;
+ if ( OverrideLens )
+ {
+ if ( eyeType == StereoEye_Right )
+ {
+ pLensOverride = &LensOverrideRight;
+ }
+ else
+ {
+ pLensOverride = &LensOverrideLeft;
+ }
+ }
+
+ FovPort *pTanHalfFovOverride = NULL;
+ if ( OverrideTanHalfFov )
+ {
+ if ( eyeType == StereoEye_Right )
+ {
+ pTanHalfFovOverride = &FovOverrideRight;
+ }
+ else
+ {
+ pTanHalfFovOverride = &FovOverrideLeft;
+ }
+ }
+
+ DistortionAndFov distortionAndFov =
+ CalculateDistortionAndFovInternal ( eyeType, Hmd,
+ pLensOverride, pTanHalfFovOverride,
+ ExtraEyeRotationInRadians );
+
+ EyeRenderParams[eyeNum].StereoEye.Distortion = distortionAndFov.Distortion;
+ EyeRenderParams[eyeNum].StereoEye.Fov = distortionAndFov.Fov;
+ }
+
+ if ( OverrideZeroIpd )
+ {
+ // Take the union of the calculated eye FOVs.
+ FovPort fov;
+ fov.UpTan = Alg::Max ( EyeRenderParams[0].StereoEye.Fov.UpTan , EyeRenderParams[1].StereoEye.Fov.UpTan );
+ fov.DownTan = Alg::Max ( EyeRenderParams[0].StereoEye.Fov.DownTan , EyeRenderParams[1].StereoEye.Fov.DownTan );
+ fov.LeftTan = Alg::Max ( EyeRenderParams[0].StereoEye.Fov.LeftTan , EyeRenderParams[1].StereoEye.Fov.LeftTan );
+ fov.RightTan = Alg::Max ( EyeRenderParams[0].StereoEye.Fov.RightTan, EyeRenderParams[1].StereoEye.Fov.RightTan );
+ EyeRenderParams[0].StereoEye.Fov = fov;
+ EyeRenderParams[1].StereoEye.Fov = fov;
+ }
+
+ for ( int eyeNum = 0; eyeNum < numEyes; eyeNum++ )
+ {
+ StereoEye eyeType = eyeTypes[eyeNum];
+
+ DistortionRenderDesc localDistortion = EyeRenderParams[eyeNum].StereoEye.Distortion;
+ FovPort fov = EyeRenderParams[eyeNum].StereoEye.Fov;
+
+ // Use a placeholder - will be overridden later.
+ Recti tempViewport = Recti ( 0, 0, 1, 1 );
+
+ EyeRenderParams[eyeNum].StereoEye = CalculateStereoEyeParamsInternal (
+ eyeType, Hmd, localDistortion, fov,
+ RendertargetSize, tempViewport,
+ RightHandedProjection, ZNear, ZFar,
+ OverrideZeroIpd );
+
+ // We want to create a virtual 2D surface we can draw debug text messages to.
+ // We'd like it to be a fixed distance (OrthoDistance) away,
+ // and to cover a specific FOV (Area2DFov). We need to find the projection matrix for this,
+ // and also to know how large it is in pixels to achieve a 1:1 mapping at the center of the screen.
+ float orthoDistance = 0.8f;
+ float orthoHalfFov = tanf ( Area2DFov * 0.5f );
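+ // (e.g. the default Set2DAreaFov of 85 degrees gives
+ //  orthoHalfFov = tan(42.5 degrees) ~= 0.916)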
+ Vector2f unityOrthoPixelSize = localDistortion.PixelsPerTanAngleAtCenter * ( orthoHalfFov * 2.0f );
+ float localInterpupillaryDistance = Hmd.EyeLeft.NoseToPupilInMeters + Hmd.EyeRight.NoseToPupilInMeters;
+ if ( OverrideZeroIpd )
+ {
+ localInterpupillaryDistance = 0.0f;
+ }
+ Matrix4f ortho = CreateOrthoSubProjection ( true, eyeType,
+ orthoHalfFov, orthoHalfFov,
+ unityOrthoPixelSize.x, unityOrthoPixelSize.y,
+ orthoDistance, localInterpupillaryDistance,
+ EyeRenderParams[eyeNum].StereoEye.RenderedProjection );
+ EyeRenderParams[eyeNum].OrthoProjection = ortho;
+ }
+
+ // ...and now set up the viewport, scale & offset the way the app wanted.
+ setupViewportScaleAndOffsets();
+
+ if ( OverrideZeroIpd )
+ {
+ // Monocular rendering has some fragile parts... don't break any by accident.
+ OVR_ASSERT ( EyeRenderParams[0].StereoEye.Fov.UpTan == EyeRenderParams[1].StereoEye.Fov.UpTan );
+ OVR_ASSERT ( EyeRenderParams[0].StereoEye.Fov.DownTan == EyeRenderParams[1].StereoEye.Fov.DownTan );
+ OVR_ASSERT ( EyeRenderParams[0].StereoEye.Fov.LeftTan == EyeRenderParams[1].StereoEye.Fov.LeftTan );
+ OVR_ASSERT ( EyeRenderParams[0].StereoEye.Fov.RightTan == EyeRenderParams[1].StereoEye.Fov.RightTan );
+ OVR_ASSERT ( EyeRenderParams[0].StereoEye.RenderedProjection.M[0][0] == EyeRenderParams[1].StereoEye.RenderedProjection.M[0][0] );
+ OVR_ASSERT ( EyeRenderParams[0].StereoEye.RenderedProjection.M[1][1] == EyeRenderParams[1].StereoEye.RenderedProjection.M[1][1] );
+ OVR_ASSERT ( EyeRenderParams[0].StereoEye.RenderedProjection.M[0][2] == EyeRenderParams[1].StereoEye.RenderedProjection.M[0][2] );
+ OVR_ASSERT ( EyeRenderParams[0].StereoEye.RenderedProjection.M[1][2] == EyeRenderParams[1].StereoEye.RenderedProjection.M[1][2] );
+ OVR_ASSERT ( EyeRenderParams[0].StereoEye.RenderedViewport == EyeRenderParams[1].StereoEye.RenderedViewport );
+ OVR_ASSERT ( EyeRenderParams[0].StereoEye.EyeToSourceUV.Offset == EyeRenderParams[1].StereoEye.EyeToSourceUV.Offset );
+ OVR_ASSERT ( EyeRenderParams[0].StereoEye.EyeToSourceUV.Scale == EyeRenderParams[1].StereoEye.EyeToSourceUV.Scale );
+ OVR_ASSERT ( EyeRenderParams[0].StereoEye.EyeToSourceNDC.Offset == EyeRenderParams[1].StereoEye.EyeToSourceNDC.Offset );
+ OVR_ASSERT ( EyeRenderParams[0].StereoEye.EyeToSourceNDC.Scale == EyeRenderParams[1].StereoEye.EyeToSourceNDC.Scale );
+ OVR_ASSERT ( EyeRenderParams[0].OrthoProjection.M[0][0] == EyeRenderParams[1].OrthoProjection.M[0][0] );
+ OVR_ASSERT ( EyeRenderParams[0].OrthoProjection.M[1][1] == EyeRenderParams[1].OrthoProjection.M[1][1] );
+ OVR_ASSERT ( EyeRenderParams[0].OrthoProjection.M[0][2] == EyeRenderParams[1].OrthoProjection.M[0][2] );
+ OVR_ASSERT ( EyeRenderParams[0].OrthoProjection.M[1][2] == EyeRenderParams[1].OrthoProjection.M[1][2] );
+ }
+
+ DirtyFlag = false;
+}
+
+
+
+ViewportScaleAndOffsetBothEyes StereoConfig::setupViewportScaleAndOffsets()
+{
+ for ( int eyeNum = 0; eyeNum < 2; eyeNum++ )
+ {
+ StereoEye eyeType = ( eyeNum == 0 ) ? StereoEye_Left : StereoEye_Right;
+
+ DistortionRenderDesc localDistortion = EyeRenderParams[eyeNum].StereoEye.Distortion;
+ FovPort fov = EyeRenderParams[eyeNum].StereoEye.Fov;
+
+ Recti renderedViewport;
+ switch ( SetViewportMode )
+ {
+ case SVPM_Density:
+ renderedViewport = CalculateViewportDensityInternal (
+ eyeType, localDistortion, fov,
+ RendertargetSize, IsRendertargetSharedByBothEyes,
+ SetViewportPixelsPerDisplayPixel, OverrideZeroIpd );
+ break;
+ case SVPM_Size:
+ if ( ( eyeType == StereoEye_Right ) && !OverrideZeroIpd )
+ {
+ renderedViewport = CalculateViewportInternal (
+ eyeType, RendertargetSize,
+ SetViewportSize[1],
+ IsRendertargetSharedByBothEyes, OverrideZeroIpd );
+ }
+ else
+ {
+ renderedViewport = CalculateViewportInternal (
+ eyeType, RendertargetSize,
+ SetViewportSize[0],
+ IsRendertargetSharedByBothEyes, OverrideZeroIpd );
+ }
+ break;
+ case SVPM_Viewport:
+ if ( ( eyeType == StereoEye_Right ) && !OverrideZeroIpd )
+ {
+ renderedViewport = SetViewport[1];
+ }
+ else
+ {
+ renderedViewport = SetViewport[0];
+ }
+ break;
+ default: OVR_ASSERT ( false ); break;
+ }
+
+ ViewportScaleAndOffset vpsao = CalculateViewportScaleAndOffsetInternal (
+ EyeRenderParams[eyeNum].StereoEye.EyeToSourceNDC,
+ renderedViewport,
+ RendertargetSize );
+ EyeRenderParams[eyeNum].StereoEye.RenderedViewport = vpsao.RenderedViewport;
+ EyeRenderParams[eyeNum].StereoEye.EyeToSourceUV = vpsao.EyeToSourceUV;
+ }
+
+ ViewportScaleAndOffsetBothEyes result;
+ result.Left.EyeToSourceUV = EyeRenderParams[0].StereoEye.EyeToSourceUV;
+ result.Left.RenderedViewport = EyeRenderParams[0].StereoEye.RenderedViewport;
+ result.Right.EyeToSourceUV = EyeRenderParams[1].StereoEye.EyeToSourceUV;
+ result.Right.RenderedViewport = EyeRenderParams[1].StereoEye.RenderedViewport;
+ return result;
+}
+
+// Specify a pixel density - how many rendered pixels per pixel in the physical display.
+ViewportScaleAndOffsetBothEyes StereoConfig::SetRenderDensity ( float pixelsPerDisplayPixel )
+{
+ SetViewportMode = SVPM_Density;
+ SetViewportPixelsPerDisplayPixel = pixelsPerDisplayPixel;
+ return setupViewportScaleAndOffsets();
+}
+
+// Supply the size directly. Will be clamped to the physical rendertarget size.
+ViewportScaleAndOffsetBothEyes StereoConfig::SetRenderSize ( Sizei const &renderSizeLeft, Sizei const &renderSizeRight )
+{
+ SetViewportMode = SVPM_Size;
+ SetViewportSize[0] = renderSizeLeft;
+ SetViewportSize[1] = renderSizeRight;
+ return setupViewportScaleAndOffsets();
+}
+
+// Supply the viewport directly. This is not clamped to the physical rendertarget - careful now!
+ViewportScaleAndOffsetBothEyes StereoConfig::SetRenderViewport ( Recti const &renderViewportLeft, Recti const &renderViewportRight )
+{
+ SetViewportMode = SVPM_Viewport;
+ SetViewport[0] = renderViewportLeft;
+ SetViewport[1] = renderViewportRight;
+ return setupViewportScaleAndOffsets();
+}
+
+Matrix4f StereoConfig::GetProjectionWithZoom ( StereoEye eye, float fovZoom ) const
+{
+ int eyeNum = ( eye == StereoEye_Right ) ? 1 : 0;
+ float fovScale = 1.0f / fovZoom;
+ FovPort fovPort = EyeRenderParams[eyeNum].StereoEye.Fov;
+ fovPort.LeftTan *= fovScale;
+ fovPort.RightTan *= fovScale;
+ fovPort.UpTan *= fovScale;
+ fovPort.DownTan *= fovScale;
+ return CreateProjection ( RightHandedProjection, fovPort, ZNear, ZFar );
+}
+
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** Distortion Mesh Rendering
+
+
+// Pow2 for the Morton order to work!
+// 4 is too low - it is easy to see the "wobbles" in the HMD.
+// 5 is realllly close but you can see pixel differences with even/odd frame checking.
+// 6 is indistinguishable on a monitor on even/odd frames.
+static const int DMA_GridSizeLog2 = 6;
+static const int DMA_GridSize = 1<<DMA_GridSizeLog2;
+static const int DMA_NumVertsPerEye = (DMA_GridSize+1)*(DMA_GridSize+1);
+static const int DMA_NumTrisPerEye = (DMA_GridSize)*(DMA_GridSize)*2;
+
+
+
+void DistortionMeshDestroy ( DistortionMeshVertexData *pVertices, UInt16 *pTriangleMeshIndices )
+{
+ OVR_FREE ( pVertices );
+ OVR_FREE ( pTriangleMeshIndices );
+}
+
+void DistortionMeshCreate ( DistortionMeshVertexData **ppVertices, UInt16 **ppTriangleListIndices,
+ int *pNumVertices, int *pNumTriangles,
+ const StereoEyeParams &stereoParams, const HmdRenderInfo &hmdRenderInfo )
+{
+ bool rightEye = ( stereoParams.Eye == StereoEye_Right );
+ int vertexCount = 0;
+ int triangleCount = 0;
+
+ // Generate mesh into allocated data and return result.
+ DistortionMeshCreate(ppVertices, ppTriangleListIndices, &vertexCount, &triangleCount,
+ rightEye, hmdRenderInfo, stereoParams.Distortion, stereoParams.EyeToSourceNDC);
+
+ *pNumVertices = vertexCount;
+ *pNumTriangles = triangleCount;
+}
+
+
+// Generate distortion mesh for one eye.
+void DistortionMeshCreate( DistortionMeshVertexData **ppVertices, UInt16 **ppTriangleListIndices,
+ int *pNumVertices, int *pNumTriangles,
+ bool rightEye,
+ const HmdRenderInfo &hmdRenderInfo,
+ const DistortionRenderDesc &distortion, const ScaleAndOffset2D &eyeToSourceNDC )
+{
+ *pNumVertices = DMA_NumVertsPerEye;
+ *pNumTriangles = DMA_NumTrisPerEye;
+
+ *ppVertices = (DistortionMeshVertexData*)
+ OVR_ALLOC( sizeof(DistortionMeshVertexData) * (*pNumVertices) );
+ *ppTriangleListIndices = (UInt16*) OVR_ALLOC( sizeof(UInt16) * (*pNumTriangles) * 3 );
+
+ if (!*ppVertices || !*ppTriangleListIndices)
+ {
+ if (*ppVertices)
+ {
+ OVR_FREE(*ppVertices);
+ }
+ if (*ppTriangleListIndices)
+ {
+ OVR_FREE(*ppTriangleListIndices);
+ }
+ *ppVertices = NULL;
+ *ppTriangleListIndices = NULL;
+ *pNumTriangles = 0;
+ *pNumVertices = 0;
+ return;
+ }
+
+ // When does the fade-to-black edge start? Chosen heuristically.
+ const float fadeOutBorderFraction = 0.075f;
+
+
+ // Populate vertex buffer info
+ float xOffset = 0.0f;
+ float uOffset = 0.0f;
+ OVR_UNUSED(uOffset);
+
+ if (rightEye)
+ {
+ xOffset = 1.0f;
+ uOffset = 0.5f;
+ }
+
+ // First pass - build up raw vertex data.
+ DistortionMeshVertexData* pcurVert = *ppVertices;
+
+ for ( int y = 0; y <= DMA_GridSize; y++ )
+ {
+ for ( int x = 0; x <= DMA_GridSize; x++ )
+ {
+
+ Vector2f sourceCoordNDC;
+ // NDC texture coords [-1,+1]
+ sourceCoordNDC.x = 2.0f * ( (float)x / (float)DMA_GridSize ) - 1.0f;
+ sourceCoordNDC.y = 2.0f * ( (float)y / (float)DMA_GridSize ) - 1.0f;
+ Vector2f tanEyeAngle = TransformRendertargetNDCToTanFovSpace ( eyeToSourceNDC, sourceCoordNDC );
+
+ // This is the function that does the really heavy lifting.
+ Vector2f screenNDC = TransformTanFovSpaceToScreenNDC ( distortion, tanEyeAngle, false );
+
+ // We then need RGB UVs. Since chromatic aberration is generated from screen coords, not
+ // directly from texture NDCs, we can't just use tanEyeAngle, we need to go the long way round.
+ Vector2f tanEyeAnglesR, tanEyeAnglesG, tanEyeAnglesB;
+ TransformScreenNDCToTanFovSpaceChroma ( &tanEyeAnglesR, &tanEyeAnglesG, &tanEyeAnglesB,
+ distortion, screenNDC );
+
+ pcurVert->TanEyeAnglesR = tanEyeAnglesR;
+ pcurVert->TanEyeAnglesG = tanEyeAnglesG;
+ pcurVert->TanEyeAnglesB = tanEyeAnglesB;
+
+
+ HmdShutterTypeEnum shutterType = hmdRenderInfo.Shutter.Type;
+ switch ( shutterType )
+ {
+ case HmdShutter_Global:
+ pcurVert->TimewarpLerp = 0.0f;
+ break;
+ case HmdShutter_RollingLeftToRight:
+ // Retrace is left to right - left eye goes 0.0 -> 0.5, then right goes 0.5 -> 1.0
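+ // (maps screenNDC.x in [-1,+1] to a lerp value in [0.0,0.5];
+ //  the right eye then shifts this to [0.5,1.0] below)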
+ pcurVert->TimewarpLerp = screenNDC.x * 0.25f + 0.25f;
+ if (rightEye)
+ {
+ pcurVert->TimewarpLerp += 0.5f;
+ }
+ break;
+ case HmdShutter_RollingRightToLeft:
+ // Retrace is right to left - right eye goes 0.0 -> 0.5, then left goes 0.5 -> 1.0
+ pcurVert->TimewarpLerp = 0.75f - screenNDC.x * 0.25f;
+ if (rightEye)
+ {
+ pcurVert->TimewarpLerp -= 0.5f;
+ }
+ break;
+ case HmdShutter_RollingTopToBottom:
+ // Retrace is top to bottom on both eyes at the same time.
+ pcurVert->TimewarpLerp = screenNDC.y * 0.5f + 0.5f;
+ break;
+ default: OVR_ASSERT ( false ); break;
+ }
+
+ // Fade out at texture edges.
+ float edgeFadeIn = ( 1.0f / fadeOutBorderFraction ) *
+ ( 1.0f - Alg::Max ( Alg::Abs ( sourceCoordNDC.x ), Alg::Abs ( sourceCoordNDC.y ) ) );
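+ // (e.g. with the 0.075 border fraction, a vertex at
+ //  |sourceCoordNDC| = 0.95 gets a texture-edge term of
+ //  (1 - 0.95) / 0.075 ~= 0.67 before clamping)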
+ // Also fade out at screen edges.
+ float edgeFadeInScreen = ( 2.0f / fadeOutBorderFraction ) *
+ ( 1.0f - Alg::Max ( Alg::Abs ( screenNDC.x ), Alg::Abs ( screenNDC.y ) ) );
+ edgeFadeIn = Alg::Min ( edgeFadeInScreen, edgeFadeIn );
+
+ // Don't let verts overlap to the other eye.
+ screenNDC.x = Alg::Max ( -1.0f, Alg::Min ( screenNDC.x, 1.0f ) );
+ screenNDC.y = Alg::Max ( -1.0f, Alg::Min ( screenNDC.y, 1.0f ) );
+
+ pcurVert->Shade = Alg::Max ( 0.0f, Alg::Min ( edgeFadeIn, 1.0f ) );
+ pcurVert->ScreenPosNDC.x = 0.5f * screenNDC.x - 0.5f + xOffset;
+ pcurVert->ScreenPosNDC.y = -screenNDC.y;
+
+ pcurVert++;
+ }
+ }
+
+
+ // Populate index buffer info
+ UInt16 *pcurIndex = *ppTriangleListIndices;
+
+ for ( int triNum = 0; triNum < DMA_GridSize * DMA_GridSize; triNum++ )
+ {
+ // Use a Morton order to help locality of FB, texture and vertex cache.
+ // (0.325ms raster order -> 0.257ms Morton order)
+ OVR_ASSERT ( DMA_GridSize < 256 ); // (GridSize+1)^2 vertices must fit in UInt16 indices.
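+ // De-interleave triNum's bits: x takes the even bits and y the odd bits,
+ // recovering the (x,y) cell along the Morton (Z-order) curve.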
+ int x = ( ( triNum & 0x0001 ) >> 0 ) |
+ ( ( triNum & 0x0004 ) >> 1 ) |
+ ( ( triNum & 0x0010 ) >> 2 ) |
+ ( ( triNum & 0x0040 ) >> 3 ) |
+ ( ( triNum & 0x0100 ) >> 4 ) |
+ ( ( triNum & 0x0400 ) >> 5 ) |
+ ( ( triNum & 0x1000 ) >> 6 ) |
+ ( ( triNum & 0x4000 ) >> 7 );
+ int y = ( ( triNum & 0x0002 ) >> 1 ) |
+ ( ( triNum & 0x0008 ) >> 2 ) |
+ ( ( triNum & 0x0020 ) >> 3 ) |
+ ( ( triNum & 0x0080 ) >> 4 ) |
+ ( ( triNum & 0x0200 ) >> 5 ) |
+ ( ( triNum & 0x0800 ) >> 6 ) |
+ ( ( triNum & 0x2000 ) >> 7 ) |
+ ( ( triNum & 0x8000 ) >> 8 );
+ int FirstVertex = x * (DMA_GridSize+1) + y;
+ // Another twist - we want the top-left and bottom-right quadrants to
+ // have the triangles split one way, the other two split the other.
+ // +---+---+---+---+
+ // | /| /|\ |\ |
+ // | / | / | \ | \ |
+ // |/ |/ | \| \|
+ // +---+---+---+---+
+ // | /| /|\ |\ |
+ // | / | / | \ | \ |
+ // |/ |/ | \| \|
+ // +---+---+---+---+
+ // |\ |\ | /| /|
+ // | \ | \ | / | / |
+ // | \| \|/ |/ |
+ // +---+---+---+---+
+ // |\ |\ | /| /|
+ // | \ | \ | / | / |
+ // | \| \|/ |/ |
+ // +---+---+---+---+
+ // This way triangle edges don't span long distances over the distortion function,
+ // so linear interpolation works better & we can use fewer tris.
+ if ( ( x < DMA_GridSize/2 ) != ( y < DMA_GridSize/2 ) ) // != is logical XOR
+ {
+ *pcurIndex++ = (UInt16)FirstVertex;
+ *pcurIndex++ = (UInt16)FirstVertex+1;
+ *pcurIndex++ = (UInt16)FirstVertex+(DMA_GridSize+1)+1;
+
+ *pcurIndex++ = (UInt16)FirstVertex+(DMA_GridSize+1)+1;
+ *pcurIndex++ = (UInt16)FirstVertex+(DMA_GridSize+1);
+ *pcurIndex++ = (UInt16)FirstVertex;
+ }
+ else
+ {
+ *pcurIndex++ = (UInt16)FirstVertex;
+ *pcurIndex++ = (UInt16)FirstVertex+1;
+ *pcurIndex++ = (UInt16)FirstVertex+(DMA_GridSize+1);
+
+ *pcurIndex++ = (UInt16)FirstVertex+1;
+ *pcurIndex++ = (UInt16)FirstVertex+(DMA_GridSize+1)+1;
+ *pcurIndex++ = (UInt16)FirstVertex+(DMA_GridSize+1);
+ }
+ }
+}
+
+//-----------------------------------------------------------------------------------
+// ***** Heightmap Mesh Rendering
+
+
+static const int HMA_GridSizeLog2 = 7;
+static const int HMA_GridSize = 1<<HMA_GridSizeLog2;
+static const int HMA_NumVertsPerEye = (HMA_GridSize+1)*(HMA_GridSize+1);
+static const int HMA_NumTrisPerEye = (HMA_GridSize)*(HMA_GridSize)*2;
+
+
+void HeightmapMeshDestroy ( HeightmapMeshVertexData *pVertices, UInt16 *pTriangleMeshIndices )
+{
+ OVR_FREE ( pVertices );
+ OVR_FREE ( pTriangleMeshIndices );
+}
+
+void HeightmapMeshCreate ( HeightmapMeshVertexData **ppVertices, UInt16 **ppTriangleListIndices,
+ int *pNumVertices, int *pNumTriangles,
+ const StereoEyeParams &stereoParams, const HmdRenderInfo &hmdRenderInfo )
+{
+ bool rightEye = ( stereoParams.Eye == StereoEye_Right );
+ int vertexCount = 0;
+ int triangleCount = 0;
+
+ // Generate mesh into allocated data and return result.
+ HeightmapMeshCreate(ppVertices, ppTriangleListIndices, &vertexCount, &triangleCount,
+ rightEye, hmdRenderInfo, stereoParams.EyeToSourceNDC);
+
+ *pNumVertices = vertexCount;
+ *pNumTriangles = triangleCount;
+}
+
+
+// Generate heightmap mesh for one eye.
+void HeightmapMeshCreate( HeightmapMeshVertexData **ppVertices, UInt16 **ppTriangleListIndices,
+ int *pNumVertices, int *pNumTriangles, bool rightEye,
+ const HmdRenderInfo &hmdRenderInfo,
+ const ScaleAndOffset2D &eyeToSourceNDC )
+{
+ *pNumVertices = HMA_NumVertsPerEye;
+ *pNumTriangles = HMA_NumTrisPerEye;
+
+ *ppVertices = (HeightmapMeshVertexData*) OVR_ALLOC( sizeof(HeightmapMeshVertexData) * (*pNumVertices) );
+ *ppTriangleListIndices = (UInt16*) OVR_ALLOC( sizeof(UInt16) * (*pNumTriangles) * 3 );
+
+ if (!*ppVertices || !*ppTriangleListIndices)
+ {
+ if (*ppVertices)
+ {
+ OVR_FREE(*ppVertices);
+ }
+ if (*ppTriangleListIndices)
+ {
+ OVR_FREE(*ppTriangleListIndices);
+ }
+ *ppVertices = NULL;
+ *ppTriangleListIndices = NULL;
+ *pNumTriangles = 0;
+ *pNumVertices = 0;
+ return;
+ }
+
+ // Populate vertex buffer info
+ float xOffset = 0.0f;
+ float uOffset = 0.0f;
+ OVR_UNUSED(xOffset);
+ OVR_UNUSED(uOffset);
+
+ if (rightEye)
+ {
+ xOffset = 1.0f;
+ uOffset = 0.5f;
+ }
+
+ // First pass - build up raw vertex data.
+ HeightmapMeshVertexData* pcurVert = *ppVertices;
+
+ for ( int y = 0; y <= HMA_GridSize; y++ )
+ {
+ for ( int x = 0; x <= HMA_GridSize; x++ )
+ {
+ Vector2f sourceCoordNDC;
+ // NDC texture coords [-1,+1]
+ sourceCoordNDC.x = 2.0f * ( (float)x / (float)HMA_GridSize ) - 1.0f;
+ sourceCoordNDC.y = 2.0f * ( (float)y / (float)HMA_GridSize ) - 1.0f;
+ Vector2f tanEyeAngle = TransformRendertargetNDCToTanFovSpace ( eyeToSourceNDC, sourceCoordNDC );
+
+ pcurVert->TanEyeAngles = tanEyeAngle;
+
+ HmdShutterTypeEnum shutterType = hmdRenderInfo.Shutter.Type;
+ switch ( shutterType )
+ {
+ case HmdShutter_Global:
+ pcurVert->TimewarpLerp = 0.0f;
+ break;
+ case HmdShutter_RollingLeftToRight:
+ // Retrace is left to right - left eye goes 0.0 -> 0.5, then right goes 0.5 -> 1.0
+ pcurVert->TimewarpLerp = sourceCoordNDC.x * 0.25f + 0.25f;
+ if (rightEye)
+ {
+ pcurVert->TimewarpLerp += 0.5f;
+ }
+ break;
+ case HmdShutter_RollingRightToLeft:
+ // Retrace is right to left - right eye goes 0.0 -> 0.5, then left goes 0.5 -> 1.0
+ pcurVert->TimewarpLerp = 0.75f - sourceCoordNDC.x * 0.25f;
+ if (rightEye)
+ {
+ pcurVert->TimewarpLerp -= 0.5f;
+ }
+ break;
+ case HmdShutter_RollingTopToBottom:
+ // Retrace is top to bottom on both eyes at the same time.
+ pcurVert->TimewarpLerp = sourceCoordNDC.y * 0.5f + 0.5f;
+ break;
+ default: OVR_ASSERT ( false ); break;
+ }
+
+ // Don't let verts overlap to the other eye.
+ //sourceCoordNDC.x = Alg::Max ( -1.0f, Alg::Min ( sourceCoordNDC.x, 1.0f ) );
+ //sourceCoordNDC.y = Alg::Max ( -1.0f, Alg::Min ( sourceCoordNDC.y, 1.0f ) );
+
+ //pcurVert->ScreenPosNDC.x = 0.5f * sourceCoordNDC.x - 0.5f + xOffset;
+ pcurVert->ScreenPosNDC.x = sourceCoordNDC.x;
+ pcurVert->ScreenPosNDC.y = -sourceCoordNDC.y;
+
+ pcurVert++;
+ }
+ }
+
+
+ // Populate index buffer info
+ UInt16 *pcurIndex = *ppTriangleListIndices;
+
+ for ( int triNum = 0; triNum < HMA_GridSize * HMA_GridSize; triNum++ )
+ {
+ // Use a Morton order to help locality of FB, texture and vertex cache.
+ // (0.325ms raster order -> 0.257ms Morton order)
+ OVR_ASSERT ( HMA_GridSize < 256 );
+ int x = ( ( triNum & 0x0001 ) >> 0 ) |
+ ( ( triNum & 0x0004 ) >> 1 ) |
+ ( ( triNum & 0x0010 ) >> 2 ) |
+ ( ( triNum & 0x0040 ) >> 3 ) |
+ ( ( triNum & 0x0100 ) >> 4 ) |
+ ( ( triNum & 0x0400 ) >> 5 ) |
+ ( ( triNum & 0x1000 ) >> 6 ) |
+ ( ( triNum & 0x4000 ) >> 7 );
+ int y = ( ( triNum & 0x0002 ) >> 1 ) |
+ ( ( triNum & 0x0008 ) >> 2 ) |
+ ( ( triNum & 0x0020 ) >> 3 ) |
+ ( ( triNum & 0x0080 ) >> 4 ) |
+ ( ( triNum & 0x0200 ) >> 5 ) |
+ ( ( triNum & 0x0800 ) >> 6 ) |
+ ( ( triNum & 0x2000 ) >> 7 ) |
+ ( ( triNum & 0x8000 ) >> 8 );
+ int FirstVertex = x * (HMA_GridSize+1) + y;
+ // Another twist - we want the top-left and bottom-right quadrants to
+ // have the triangles split one way, the other two split the other.
+ // +---+---+---+---+
+ // | /| /|\ |\ |
+ // | / | / | \ | \ |
+ // |/ |/ | \| \|
+ // +---+---+---+---+
+ // | /| /|\ |\ |
+ // | / | / | \ | \ |
+ // |/ |/ | \| \|
+ // +---+---+---+---+
+ // |\ |\ | /| /|
+ // | \ | \ | / | / |
+ // | \| \|/ |/ |
+ // +---+---+---+---+
+ // |\ |\ | /| /|
+ // | \ | \ | / | / |
+ // | \| \|/ |/ |
+ // +---+---+---+---+
+ // This way triangle edges don't span long distances over the distortion function,
+ // so linear interpolation works better & we can use fewer tris.
+ if ( ( x < HMA_GridSize/2 ) != ( y < HMA_GridSize/2 ) ) // != is logical XOR
+ {
+ *pcurIndex++ = (UInt16)FirstVertex;
+ *pcurIndex++ = (UInt16)FirstVertex+1;
+ *pcurIndex++ = (UInt16)FirstVertex+(HMA_GridSize+1)+1;
+
+ *pcurIndex++ = (UInt16)FirstVertex+(HMA_GridSize+1)+1;
+ *pcurIndex++ = (UInt16)FirstVertex+(HMA_GridSize+1);
+ *pcurIndex++ = (UInt16)FirstVertex;
+ }
+ else
+ {
+ *pcurIndex++ = (UInt16)FirstVertex;
+ *pcurIndex++ = (UInt16)FirstVertex+1;
+ *pcurIndex++ = (UInt16)FirstVertex+(HMA_GridSize+1);
+
+ *pcurIndex++ = (UInt16)FirstVertex+1;
+ *pcurIndex++ = (UInt16)FirstVertex+(HMA_GridSize+1)+1;
+ *pcurIndex++ = (UInt16)FirstVertex+(HMA_GridSize+1);
+ }
+ }
+}
+
+//-----------------------------------------------------------------------------------
+// ***** Prediction and timewarp.
+//
+
+// Calculates the values from the HMD info.
+PredictionValues PredictionGetDeviceValues ( const HmdRenderInfo &hmdRenderInfo,
+ bool withTimewarp /*= true*/,
+ bool withVsync /*= true*/ )
+{
+ PredictionValues result;
+
+ result.WithTimewarp = withTimewarp;
+ result.WithVsync = withVsync;
+
+ // For unclear reasons, most graphics systems add an extra frame of latency
+ // somewhere along the way. In time we'll debug this and figure it out, but
+ // for now this gets prediction a little bit better.
+ const float extraFramesOfBufferingKludge = 1.0f;
+
+ if ( withVsync )
+ {
+ // These are the times from the Present+Flush to when the middle of the scene is "averagely visible" (without timewarp)
+ // So if you had no timewarp, this, plus the time until the next vsync, is how much to predict by.
+ result.PresentFlushToRenderedScene = extraFramesOfBufferingKludge * hmdRenderInfo.Shutter.FirstScanlineToLastScanline;
+ // Predict to the middle of the screen being scanned out.
+ result.PresentFlushToRenderedScene += hmdRenderInfo.Shutter.VsyncToFirstScanline + 0.5f * hmdRenderInfo.Shutter.FirstScanlineToLastScanline;
+ // Time for pixels to get half-way to settling.
+ result.PresentFlushToRenderedScene += hmdRenderInfo.Shutter.PixelSettleTime * 0.5f;
+ // Predict to half-way through persistence
+ result.PresentFlushToRenderedScene += hmdRenderInfo.Shutter.PixelPersistence * 0.5f;
+
+ // The time from the Present+Flush to when the first scanline is "averagely visible".
+ result.PresentFlushToTimewarpStart = extraFramesOfBufferingKludge * hmdRenderInfo.Shutter.FirstScanlineToLastScanline;
+ // Predict to the first line being scanned out.
+ result.PresentFlushToTimewarpStart += hmdRenderInfo.Shutter.VsyncToFirstScanline;
+ // Time for pixels to get half-way to settling.
+ result.PresentFlushToTimewarpStart += hmdRenderInfo.Shutter.PixelSettleTime * 0.5f;
+ // Predict to half-way through persistence
+ result.PresentFlushToTimewarpStart += hmdRenderInfo.Shutter.PixelPersistence * 0.5f;
+
+ // Time to the last scanline.
+ result.PresentFlushToTimewarpEnd = result.PresentFlushToTimewarpStart + hmdRenderInfo.Shutter.FirstScanlineToLastScanline;
+
+ // Ideal framerate.
+ result.PresentFlushToPresentFlush = hmdRenderInfo.Shutter.VsyncToNextVsync;
+ }
+ else
+ {
+ // Timewarp without vsync is a little odd.
+ // Currently, we assume that without vsync, we have no idea which scanline
+ // is currently being sent to the display. So we can't do lerping timewarp,
+ // we can just do a full-screen late-stage fixup.
+
+ // "PresentFlushToRenderedScene" means the time from the Present+Flush to when the middle of the scene is "averagely visible" (without timewarp)
+ // So if you had no timewarp, this, plus the time until the next flush (which is usually the time to render the frame), is how much to predict by.
+ // Time for pixels to get half-way to settling.
+ result.PresentFlushToRenderedScene = hmdRenderInfo.Shutter.PixelSettleTime * 0.5f;
+ // Predict to half-way through persistence
+ result.PresentFlushToRenderedScene += hmdRenderInfo.Shutter.PixelPersistence * 0.5f;
+
+ // Without vsync, you don't know timings, and so can't do anything useful with lerped warping.
+ result.PresentFlushToTimewarpStart = result.PresentFlushToRenderedScene;
+ result.PresentFlushToTimewarpEnd = result.PresentFlushToRenderedScene;
+
+ // There's no concept of "ideal" when vsync is off.
+ result.PresentFlushToPresentFlush = 0.0f;
+ }
+
+ return result;
+}
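+// Worked example (illustrative, using the DK1-style defaults set up in
+// StereoConfig's constructor): with vsync on, PresentFlushToRenderedScene is
+// 16.580ms (one extra frame of scanout) + 0.052ms (vsync to first scanline)
+// + 8.290ms (half the scanout) + 7.500ms (half the pixel settle time)
+// + 8.333ms (half the persistence) ~= 40.8ms of prediction.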
+
+Matrix4f TimewarpComputePoseDelta ( Matrix4f const &renderedViewFromWorld, Matrix4f const &predictedViewFromWorld, Matrix4f const &eyeViewAdjust )
+{
+ Matrix4f worldFromPredictedView = (eyeViewAdjust * predictedViewFromWorld).InvertedHomogeneousTransform();
+ Matrix4f matRenderFromNowStart = (eyeViewAdjust * renderedViewFromWorld) * worldFromPredictedView;
+
+ // The sensor-predicted orientations have: X=right, Y=up, Z=backwards.
+ // The vectors inside the mesh are in NDC to keep the shader simple: X=right, Y=down, Z=forwards.
+ // So we need to perform a similarity transform on this delta matrix.
+ // The verbose code would look like this:
+ /*
+ Matrix4f matBasisChange;
+ matBasisChange.SetIdentity();
+ matBasisChange.M[0][0] = 1.0f;
+ matBasisChange.M[1][1] = -1.0f;
+ matBasisChange.M[2][2] = -1.0f;
+ Matrix4f matBasisChangeInv = matBasisChange.Inverted();
+ matRenderFromNow = matBasisChangeInv * matRenderFromNow * matBasisChange;
+ */
+ // ...but of course all the above is a constant transform and much more easily done.
+ // We flip the signs of the Y&Z row, then flip the signs of the Y&Z column,
+ // and of course most of the flips cancel:
+ // +++ +-- +--
+ // +++ -> flip Y&Z columns -> +-- -> flip Y&Z rows -> -++
+ // +++ +-- -++
+ matRenderFromNowStart.M[0][1] = -matRenderFromNowStart.M[0][1];
+ matRenderFromNowStart.M[0][2] = -matRenderFromNowStart.M[0][2];
+ matRenderFromNowStart.M[1][0] = -matRenderFromNowStart.M[1][0];
+ matRenderFromNowStart.M[2][0] = -matRenderFromNowStart.M[2][0];
+ matRenderFromNowStart.M[1][3] = -matRenderFromNowStart.M[1][3];
+ matRenderFromNowStart.M[2][3] = -matRenderFromNowStart.M[2][3];
+
+ return matRenderFromNowStart;
+}
+
+Matrix4f TimewarpComputePoseDeltaPosition ( Matrix4f const &renderedViewFromWorld, Matrix4f const &predictedViewFromWorld, Matrix4f const &eyeViewAdjust )
+{
+ Matrix4f worldFromPredictedView = (eyeViewAdjust * predictedViewFromWorld).InvertedHomogeneousTransform();
+ Matrix4f matRenderXform = (eyeViewAdjust * renderedViewFromWorld) * worldFromPredictedView;
+
+ return matRenderXform.Inverted();
+}
+
+TimewarpMachine::TimewarpMachine()
+{
+ for ( int i = 0; i < 2; i++ )
+ {
+ EyeRenderPoses[i] = Transformf();
+ }
+ DistortionTimeCount = 0;
+ VsyncEnabled = false;
+}
+
+void TimewarpMachine::Reset(HmdRenderInfo& renderInfo, bool vsyncEnabled, double timeNow)
+{
+ RenderInfo = renderInfo;
+ VsyncEnabled = vsyncEnabled;
+ CurrentPredictionValues = PredictionGetDeviceValues ( renderInfo, true, VsyncEnabled );
+ PresentFlushToPresentFlushSeconds = 0.0f;
+ DistortionTimeCount = 0;
+ DistortionTimeAverage = 0.0f;
+ LastFramePresentFlushTime = timeNow;
+ AfterPresentAndFlush(timeNow);
+}
+
+void TimewarpMachine::AfterPresentAndFlush(double timeNow)
+{
+ PresentFlushToPresentFlushSeconds = (float)(timeNow - LastFramePresentFlushTime);
+ LastFramePresentFlushTime = timeNow;
+ NextFramePresentFlushTime = timeNow + (double)PresentFlushToPresentFlushSeconds;
+}
+
+double TimewarpMachine::GetViewRenderPredictionTime()
+{
+ // Note that PredictionGetDeviceValues() did all the vsync-dependent thinking for us.
+ return NextFramePresentFlushTime + CurrentPredictionValues.PresentFlushToRenderedScene;
+}
+
+Transformf TimewarpMachine::GetViewRenderPredictionPose(SensorFusion &sfusion)
+{
+ double predictionTime = GetViewRenderPredictionTime();
+ return sfusion.GetPoseAtTime(predictionTime);
+}
+
+double TimewarpMachine::GetVisiblePixelTimeStart()
+{
+ // Note that PredictionGetDeviceValues() did all the vsync-dependent thinking for us.
+ return NextFramePresentFlushTime + CurrentPredictionValues.PresentFlushToTimewarpStart;
+}
+double TimewarpMachine::GetVisiblePixelTimeEnd()
+{
+ // Note that PredictionGetDeviceValues() did all the vsync-dependent thinking for us.
+ return NextFramePresentFlushTime + CurrentPredictionValues.PresentFlushToTimewarpEnd;
+}
+Transformf TimewarpMachine::GetPredictedVisiblePixelPoseStart(SensorFusion &sfusion)
+{
+ double predictionTime = GetVisiblePixelTimeStart();
+ return sfusion.GetPoseAtTime(predictionTime);
+}
+Transformf TimewarpMachine::GetPredictedVisiblePixelPoseEnd (SensorFusion &sfusion)
+{
+ double predictionTime = GetVisiblePixelTimeEnd();
+ return sfusion.GetPoseAtTime(predictionTime);
+}
+Matrix4f TimewarpMachine::GetTimewarpDeltaStart(SensorFusion &sfusion, Transformf const &renderedPose)
+{
+ Transformf visiblePose = GetPredictedVisiblePixelPoseStart ( sfusion );
+ Matrix4f visibleMatrix(visiblePose);
+ Matrix4f renderedMatrix(renderedPose);
+ Matrix4f identity; // doesn't matter for orientation-only timewarp
+ return TimewarpComputePoseDelta ( renderedMatrix, visibleMatrix, identity );
+}
+Matrix4f TimewarpMachine::GetTimewarpDeltaEnd (SensorFusion &sfusion, Transformf const &renderedPose)
+{
+ Transformf visiblePose = GetPredictedVisiblePixelPoseEnd ( sfusion );
+ Matrix4f visibleMatrix(visiblePose);
+ Matrix4f renderedMatrix(renderedPose);
+ Matrix4f identity; // doesn't matter for orientation-only timewarp
+ return TimewarpComputePoseDelta ( renderedMatrix, visibleMatrix, identity );
+}
+
+
+// What time should the app wait until before starting distortion?
+double TimewarpMachine::JustInTime_GetDistortionWaitUntilTime()
+{
+ if ( !VsyncEnabled || ( DistortionTimeCount < NumDistortionTimes ) )
+ {
+ // Don't wait.
+ return LastFramePresentFlushTime;
+ }
+
+ const float fudgeFactor = 0.002f; // Found heuristically - 1ms is too short because of timing granularity - may need further tweaking!
+ float howLongBeforePresent = DistortionTimeAverage + fudgeFactor;
+ // Subtlety here. Technically, the correct time is NextFramePresentFlushTime - howLongBeforePresent.
+ // However, if the app drops a frame, this then perpetuates it,
+ // i.e. if the display is running at 60fps, but the last frame was slow,
+ // (e.g. because of swapping or whatever), then NextFramePresentFlushTime is
+ // 33ms in the future, not 16ms. Since this function supplies the
+ // time to wait until, the app will indeed wait until 32ms, so the framerate
+ // drops to 30fps and never comes back up!
+ // So we return the *ideal* framerate, not the *actual* framerate.
+ return LastFramePresentFlushTime + (float)( CurrentPredictionValues.PresentFlushToPresentFlush - howLongBeforePresent );
+}
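+// Worked example (illustrative): at an ideal 60Hz (PresentFlushToPresentFlush
+// = 16.7ms) with a measured median distortion time of 3ms, the app is told to
+// wait until LastFramePresentFlushTime + 16.7ms - (3ms + 2ms fudge), i.e.
+// roughly 11.7ms after the last flush, before starting distortion.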
+
+
+bool TimewarpMachine::JustInTime_NeedDistortionTimeMeasurement() const
+{
+ if (!VsyncEnabled)
+ {
+ return false;
+ }
+ return ( DistortionTimeCount < NumDistortionTimes );
+}
+
+void TimewarpMachine::JustInTime_BeforeDistortionTimeMeasurement(double timeNow)
+{
+ DistortionTimeCurrentStart = timeNow;
+}
+
+void TimewarpMachine::JustInTime_AfterDistortionTimeMeasurement(double timeNow)
+{
+ float timeDelta = (float)( timeNow - DistortionTimeCurrentStart );
+ if ( DistortionTimeCount < NumDistortionTimes )
+ {
+ DistortionTimes[DistortionTimeCount] = timeDelta;
+ DistortionTimeCount++;
+ if ( DistortionTimeCount == NumDistortionTimes )
+ {
+ // Median: pull out the largest remaining sample NumDistortionTimes/2
+ // times; the last one pulled is the (upper) median of the set.
+ float distortionTimeMedian = 0.0f;
+ for ( int i = 0; i < NumDistortionTimes/2; i++ )
+ {
+ // Find the maximum time of those remaining.
+ float maxTime = DistortionTimes[0];
+ int maxIndex = 0;
+ for ( int j = 1; j < NumDistortionTimes; j++ )
+ {
+ if ( maxTime < DistortionTimes[j] )
+ {
+ maxTime = DistortionTimes[j];
+ maxIndex = j;
+ }
+ }
+ // Zero that max time, so we'll find the next-highest time.
+ DistortionTimes[maxIndex] = 0.0f;
+ distortionTimeMedian = maxTime;
+ }
+ DistortionTimeAverage = distortionTimeMedian;
+ }
+ }
+ else
+ {
+ OVR_ASSERT ( !"Really didn't need more measurements, thanks" );
+ }
+}
+
+
+}}} // OVR::Util::Render
+
diff --git a/LibOVR/Src/Util/Util_Render_Stereo.h b/LibOVR/Src/Util/Util_Render_Stereo.h
new file mode 100644
index 0000000..326059e
--- /dev/null
+++ b/LibOVR/Src/Util/Util_Render_Stereo.h
@@ -0,0 +1,498 @@
+/************************************************************************************
+
+PublicHeader: OVR.h
+Filename : Util_Render_Stereo.h
+Content : Sample stereo rendering configuration classes.
+Created : October 22, 2012
+Authors : Michael Antonov, Tom Forsyth
+
+Copyright : Copyright 2014 Oculus VR, Inc. All Rights reserved.
+
+Licensed under the Oculus VR Rift SDK License Version 3.1 (the "License");
+you may not use the Oculus VR Rift SDK except in compliance with the License,
+which is provided at the time of installation or download, or which
+otherwise accompanies this software in either electronic or hard copy form.
+
+You may obtain a copy of the License at
+
+http://www.oculusvr.com/licenses/LICENSE-3.1
+
+Unless required by applicable law or agreed to in writing, the Oculus VR SDK
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*************************************************************************************/
+
+#ifndef OVR_Util_Render_Stereo_h
+#define OVR_Util_Render_Stereo_h
+
+#include "../OVR_Stereo.h"
+
+
+namespace OVR {
+
+class SensorFusion;
+
+namespace Util { namespace Render {
+
+
+
+//-----------------------------------------------------------------------------------
+// **** Useful debug functions.
+//
+// Purely for debugging - the results are not very end-user-friendly.
+char const* GetDebugNameEyeCupType ( EyeCupType eyeCupType );
+char const* GetDebugNameHmdType ( HmdTypeEnum hmdType );
+
+
+
+//-----------------------------------------------------------------------------------
+// **** Higher-level utility functions.
+
+Sizei CalculateRecommendedTextureSize ( HmdRenderInfo const &hmd,
+ bool bRendertargetSharedByBothEyes,
+ float pixelDensityInCenter = 1.0f );
+
+FovPort CalculateRecommendedFov ( HmdRenderInfo const &hmd,
+ StereoEye eyeType,
+ bool bMakeFovSymmetrical = false);
+
+StereoEyeParams CalculateStereoEyeParams ( HmdRenderInfo const &hmd,
+ StereoEye eyeType,
+ Sizei const &actualRendertargetSurfaceSize,
+ bool bRendertargetSharedByBothEyes,
+ bool bRightHanded = true,
+ float zNear = 0.01f, float zFar = 10000.0f,
+ Sizei const *pOverrideRenderedPixelSize = NULL,
+ FovPort const *pOverrideFovport = NULL,
+ float zoomFactor = 1.0f );
+
+Vector3f CalculateEyeVirtualCameraOffset(HmdRenderInfo const &hmd,
+ StereoEye eyeType, bool bMonoRenderingMode );
+
+
+// These are two components from StereoEyeParams that can be changed
+// very easily without full recomputation of everything.
+struct ViewportScaleAndOffset
+{
+ Recti RenderedViewport;
+ ScaleAndOffset2D EyeToSourceUV;
+};
+
+// Three ways to override the size of the render view dynamically.
+// None of these require changing the distortion parameters or regenerating the distortion mesh,
+// and can be called every frame if desired.
+ViewportScaleAndOffset ModifyRenderViewport ( StereoEyeParams const &params,
+ Sizei const &actualRendertargetSurfaceSize,
+ Recti const &renderViewport );
+
+ViewportScaleAndOffset ModifyRenderSize ( StereoEyeParams const &params,
+ Sizei const &actualRendertargetSurfaceSize,
+ Sizei const &requestedRenderSize,
+ bool bRendertargetSharedByBothEyes = false );
+
+ViewportScaleAndOffset ModifyRenderDensity ( StereoEyeParams const &params,
+ Sizei const &actualRendertargetSurfaceSize,
+ float pixelDensity = 1.0f,
+ bool bRendertargetSharedByBothEyes = false );
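+// For example (illustrative sketch - eyeParams and rtSize stand for the app's
+// own StereoEyeParams and rendertarget size), an app lowering resolution under
+// GPU load might call this once per frame:
+//     ViewportScaleAndOffset vso = ModifyRenderDensity ( eyeParams, rtSize, 0.8f, true );
+//     // ...then render into vso.RenderedViewport and feed vso.EyeToSourceUV
+//     // to the distortion pass.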
+
+
+//-----------------------------------------------------------------------------------
+// ***** StereoConfig
+
+// StereoConfig maintains scene stereo state and allows switching between different
+// stereo rendering modes. To support rendering, StereoConfig keeps track of HMD
+// variables such as screen size, eye-to-screen distance and distortion, and computes
+// extra data such as FOV and distortion center offsets based on them. Rendering
+// parameters are returned through StereoEyeParams for each eye.
+//
+// Beyond regular 3D projection, this class supports rendering a 2D orthographic
+// surface for UI and text. The 2D surface will be defined by CreateOrthoSubProjection().
+// The (0,0) coordinate corresponds to eye center location.
+//
+// Applications are not required to use this class, but they should be doing very
+// similar sequences of operations, and it may be useful to start with this class
+// and modify it.
+
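+// A minimal intended call sequence (illustrative sketch; hmdRenderInfo comes
+// from the app's device enumeration, everything else is declared below):
+//     StereoConfig config;                          // Stereo_LeftRight_Multipass
+//     config.SetHmdRenderInfo ( hmdRenderInfo );
+//     Sizei rtSize = config.CalculateRecommendedTextureSize ( true );
+//     // ...app creates a shared rendertarget of exactly rtSize...
+//     config.SetRendertargetSize ( rtSize, true );
+//     const StereoEyeParamsWithOrtho& left  = config.GetEyeRenderParams ( StereoEye_Left );
+//     const StereoEyeParamsWithOrtho& right = config.GetEyeRenderParams ( StereoEye_Right );
+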
+struct StereoEyeParamsWithOrtho
+{
+ StereoEyeParams StereoEye;
+ Matrix4f OrthoProjection;
+};
+
+struct ViewportScaleAndOffsetBothEyes
+{
+ ViewportScaleAndOffset Left;
+ ViewportScaleAndOffset Right;
+};
+
+class StereoConfig
+{
+public:
+
+ // StereoMode describes rendering modes that can be used by StereoConfig.
+ // These modes control whether stereo rendering is used or not (Stereo_None),
+ // and how it is implemented.
+ enum StereoMode
+ {
+ Stereo_None = 0, // Single eye
+ Stereo_LeftRight_Multipass = 1, // One frustum per eye
+ };
+
+
+ StereoConfig(StereoMode mode = Stereo_LeftRight_Multipass);
+
+ //---------------------------------------------------------------------------------------------
+ // *** Core functions - every app MUST call these functions at least once.
+
+ // Sets HMD parameters; also initializes distortion coefficients.
+ void SetHmdRenderInfo(const HmdRenderInfo& hmd);
+
+ // Set the physical size of the rendertarget surface the app created,
+ // and whether one RT is shared by both eyes, or each eye has its own RT:
+ // true: both eyes are rendered to the same RT. Left eye starts at top-left, right eye starts at top-middle.
+ // false: each eye is rendered to its own RT. Some GPU architectures prefer this arrangement.
+ // Typically, the app would call CalculateRecommendedTextureSize() to suggest the choice of RT size.
+ // This setting must be exactly the size of the actual RT created, or the UVs produced will be incorrect.
+    // If the app wants to render to a subsection of the RT, it should use SetRenderSize().
+ void SetRendertargetSize (Size<int> const rendertargetSize,
+ bool rendertargetIsSharedByBothEyes );
+
+ // Returns full set of Stereo rendering parameters for the specified eye.
+ const StereoEyeParamsWithOrtho& GetEyeRenderParams(StereoEye eye);
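+
+    // Example of the minimal required sequence (sketch; "renderInfo" is an
+    // HmdRenderInfo obtained by the app, and StereoEye_Left is assumed here):
+    //
+    //     StereoConfig stereo;
+    //     stereo.SetHmdRenderInfo ( renderInfo );
+    //     Sizei size = stereo.CalculateRecommendedTextureSize ( true );
+    //     // ...create the actual RT (any size), then report what was created:
+    //     stereo.SetRendertargetSize ( size, true );
+    //     const StereoEyeParamsWithOrtho &left = stereo.GetEyeRenderParams ( StereoEye_Left );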
+
+
+
+ //---------------------------------------------------------------------------------------------
+ // *** Optional functions - an app may call these to override default behaviours.
+
+ const HmdRenderInfo& GetHmdRenderInfo() const { return Hmd; }
+
+ // Returns the recommended size of rendertargets.
+ // If rendertargetIsSharedByBothEyes is true, this is the size of the combined buffer.
+ // If rendertargetIsSharedByBothEyes is false, this is the size of each individual buffer.
+ // pixelDensityInCenter may be set to any number - by default it will match the HMD resolution in the center of the image.
+ // After creating the rendertargets, the application MUST call SetRendertargetSize() with the actual size created
+    // (which can be larger or smaller as the app wishes, but StereoConfig needs to know either way).
+ Sizei CalculateRecommendedTextureSize ( bool rendertargetSharedByBothEyes,
+ float pixelDensityInCenter = 1.0f );
+
+ // Sets a stereo rendering mode and updates internal cached
+ // state (matrices, per-eye view) based on it.
+ void SetStereoMode(StereoMode mode) { Mode = mode; DirtyFlag = true; }
+ StereoMode GetStereoMode() const { return Mode; }
+
+ // Sets the fieldOfView that the 2D coordinate area stretches to.
+ void Set2DAreaFov(float fovRadians);
+
+ // Really only for science experiments - no normal app should ever need to override
+ // the HMD's lens descriptors. Passing NULL removes the override.
+ // Supply both = set left and right.
+ // Supply just left = set both to the same.
+ // Supply neither = remove override.
+ void SetLensOverride ( LensConfig const *pLensOverrideLeft = NULL,
+ LensConfig const *pLensOverrideRight = NULL );
+
+ // Override the rendered FOV in various ways. All angles in tangent units.
+ // This is not clamped to the physical FOV of the display - you'll need to do that yourself!
+ // Supply both = set left and right.
+ // Supply just left = set both to the same.
+ // Supply neither = remove override.
+ void SetFov ( FovPort const *pfovLeft = NULL,
+ FovPort const *pfovRight = NULL );
+
+ void SetFovPortRadians ( float horizontal, float vertical )
+ {
+ FovPort fov = FovPort::CreateFromRadians(horizontal, vertical);
+ SetFov( &fov, &fov );
+ }
+
+
+ // This forces a "zero IPD" mode where there is just a single render with an FOV that
+ // is the union of the two calculated FOVs.
+ // The calculated render is for the left eye. Any size & FOV overrides for the right
+ // eye will be ignored.
+ // If you query the right eye's size, you will get the same render
+ // size & position as the left eye - you should not actually do the render of course!
+    // The distortion values will be different, because each eye's output goes to a different place on the framebuffer.
+ // Note that if you do this, the rendertarget does not need to be twice the width of
+ // the render size any more.
+ void SetZeroVirtualIpdOverride ( bool enableOverride );
+
+ // Allows the app to specify near and far clip planes and the right/left-handedness of the projection matrix.
+ void SetZClipPlanesAndHandedness ( float zNear = 0.01f, float zFar = 10000.0f,
+ bool rightHandedProjection = true );
+
+ // Allows the app to specify how much extra eye rotation to allow when determining the visible FOV.
+ void SetExtraEyeRotation ( float extraEyeRotationInRadians = 0.0f );
+
+ // The dirty flag is set by any of the above calls. Just handy for the app to know
+ // if e.g. the distortion mesh needs regeneration.
+ void SetDirty() { DirtyFlag = true; }
+ bool IsDirty() { return DirtyFlag; }
+
+ // An app never needs to call this - GetEyeRenderParams will call it internally if
+    // the state is dirty. However, apps can call this explicitly to control when and where
+ // computation is performed (e.g. not inside critical loops)
+ void UpdateComputedState();
+
+ // This returns the projection matrix with a "zoom". Does not modify any internal state.
+ Matrix4f GetProjectionWithZoom ( StereoEye eye, float fovZoom ) const;
+
+
+ //---------------------------------------------------------------------------------------------
+ // The SetRender* functions are special.
+ //
+    // They do not require a full recalculation of state, they change nothing but the
+    // ViewportScaleAndOffset data for the eyes (which they return), and they do not set the dirty flag!
+ // This means they can be called without regenerating the distortion mesh, and thus
+ // can happily be called every frame without causing performance problems. Dynamic rescaling
+ // of the rendertarget can help keep framerate up in demanding VR applications.
+ // See the documentation for more details on their use.
+
+ // Specify a pixel density - how many rendered pixels per pixel in the physical display.
+ ViewportScaleAndOffsetBothEyes SetRenderDensity ( float pixelsPerDisplayPixel );
+
+ // Supply the size directly. Will be clamped to the physical rendertarget size.
+ ViewportScaleAndOffsetBothEyes SetRenderSize ( Sizei const &renderSizeLeft, Sizei const &renderSizeRight );
+
+ // Supply the viewport directly. This is not clamped to the physical rendertarget - careful now!
+ ViewportScaleAndOffsetBothEyes SetRenderViewport ( Recti const &renderViewportLeft, Recti const &renderViewportRight );
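+
+    // Example per-frame dynamic scaling (sketch; "gpuFrameMs" and "budgetMs"
+    // are app-side timing values, not part of this SDK):
+    //
+    //     float density = ( gpuFrameMs > budgetMs ) ? 0.5f : 1.0f;
+    //     ViewportScaleAndOffsetBothEyes vp = stereo.SetRenderDensity ( density );
+    //     // Render using vp.Left.RenderedViewport / vp.Right.RenderedViewport and
+    //     // feed the matching EyeToSourceUV values to the distortion pass.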
+
+private:
+
+ // *** Modifiable State
+
+ StereoMode Mode;
+ HmdRenderInfo Hmd;
+
+ float Area2DFov; // FOV range mapping to the 2D area.
+
+ // Only one of these three overrides can be true!
+ enum SetViewportModeEnum
+ {
+ SVPM_Density,
+ SVPM_Size,
+ SVPM_Viewport,
+ } SetViewportMode;
+    // ...and depending on which it is, one of the following is used.
+ float SetViewportPixelsPerDisplayPixel;
+ Sizei SetViewportSize[2];
+ Recti SetViewport[2];
+
+ // Other overrides.
+ bool OverrideLens;
+ LensConfig LensOverrideLeft;
+ LensConfig LensOverrideRight;
+ Sizei RendertargetSize;
+ bool OverrideTanHalfFov;
+ FovPort FovOverrideLeft;
+ FovPort FovOverrideRight;
+ bool OverrideZeroIpd;
+ float ZNear;
+ float ZFar;
+ float ExtraEyeRotationInRadians;
+ bool IsRendertargetSharedByBothEyes;
+ bool RightHandedProjection;
+
+    bool            DirtyFlag;   // Set when any of the modifiable state changes. Does NOT get set by SetRender*()
+
+ // Utility function.
+ ViewportScaleAndOffsetBothEyes setupViewportScaleAndOffsets();
+
+ // *** Computed State
+
+public: // Small hack for the config tool. Normal code should never read EyeRenderParams directly - use GetEyeRenderParams() instead.
+ // 0/1 = left/right main views.
+ StereoEyeParamsWithOrtho EyeRenderParams[2];
+};
+
+
+//-----------------------------------------------------------------------------------
+// ***** Distortion Mesh Rendering
+//
+
+// Stores either texture UV coords or tan(angle) values.
+// Use whichever set of data the specific distortion algorithm requires.
+// This struct *must* be binary compatible with CAPI ovrDistortionVertex.
+struct DistortionMeshVertexData
+{
+ // [-1,+1],[-1,+1] over the entire framebuffer.
+ Vector2f ScreenPosNDC;
+ // [0.0-1.0] interpolation value for timewarping - see documentation for details.
+ float TimewarpLerp;
+ // [0.0-1.0] fade-to-black at the edges to reduce peripheral vision noise.
+ float Shade;
+ // The red, green, and blue vectors in tan(angle) space.
+ // Scale and offset by the values in StereoEyeParams.EyeToSourceUV.Scale
+ // and StereoParams.EyeToSourceUV.Offset to get to real texture UV coords.
+ Vector2f TanEyeAnglesR;
+ Vector2f TanEyeAnglesG;
+ Vector2f TanEyeAnglesB;
+};
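+
+// Example of the scale-and-offset mapping described above (sketch; assumes
+// ScaleAndOffset2D exposes Vector2f Scale and Offset members):
+//
+//     ScaleAndOffset2D const &uv = stereoParams.EyeToSourceUV;
+//     Vector2f greenUV ( vertex.TanEyeAnglesG.x * uv.Scale.x + uv.Offset.x,
+//                        vertex.TanEyeAnglesG.y * uv.Scale.y + uv.Offset.y );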
+
+
+void DistortionMeshCreate ( DistortionMeshVertexData **ppVertices, UInt16 **ppTriangleListIndices,
+ int *pNumVertices, int *pNumTriangles,
+ const StereoEyeParams &stereoParams, const HmdRenderInfo &hmdRenderInfo );
+
+// Generate a distortion mesh for one eye. This version requires less data than the
+// stereoParams-based version above, supporting dynamic changes to the render target viewport.
+void DistortionMeshCreate( DistortionMeshVertexData **ppVertices, UInt16 **ppTriangleListIndices,
+ int *pNumVertices, int *pNumTriangles,
+ bool rightEye,
+ const HmdRenderInfo &hmdRenderInfo,
+ const DistortionRenderDesc &distortion, const ScaleAndOffset2D &eyeToSourceNDC );
+
+void DistortionMeshDestroy ( DistortionMeshVertexData *pVertices, UInt16 *pTriangleMeshIndices );
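+
+// Example create/upload/destroy cycle (sketch; the GPU upload step is app-specific):
+//
+//     DistortionMeshVertexData *pVerts = NULL;
+//     UInt16 *pIndices = NULL;
+//     int numVerts = 0, numTris = 0;
+//     DistortionMeshCreate ( &pVerts, &pIndices, &numVerts, &numTris,
+//                            stereoParams, renderInfo );
+//     // Upload numVerts vertices and numTris*3 indices to GPU buffers here,
+//     // then free the CPU-side copies:
+//     DistortionMeshDestroy ( pVerts, pIndices );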
+
+
+//-----------------------------------------------------------------------------------
+// ***** Heightmap Mesh Rendering
+//
+
+// Stores either texture UV coords or tan(angle) values.
+// This struct *must* be binary compatible with CAPI ovrHeightmapVertex.
+struct HeightmapMeshVertexData
+{
+ // [-1,+1],[-1,+1] over the entire framebuffer.
+ Vector2f ScreenPosNDC;
+ // [0.0-1.0] interpolation value for timewarping - see documentation for details.
+ float TimewarpLerp;
+ // The vectors in tan(angle) space.
+ // Scale and offset by the values in StereoEyeParams.EyeToSourceUV.Scale
+ // and StereoParams.EyeToSourceUV.Offset to get to real texture UV coords.
+ Vector2f TanEyeAngles;
+};
+
+
+void HeightmapMeshCreate ( HeightmapMeshVertexData **ppVertices, UInt16 **ppTriangleListIndices,
+ int *pNumVertices, int *pNumTriangles,
+ const StereoEyeParams &stereoParams, const HmdRenderInfo &hmdRenderInfo );
+
+// Generate a heightmap mesh for one eye. This version requires less data than the
+// stereoParams-based version above, supporting dynamic changes to the render target viewport.
+void HeightmapMeshCreate( HeightmapMeshVertexData **ppVertices, UInt16 **ppTriangleListIndices,
+ int *pNumVertices, int *pNumTriangles, bool rightEye,
+ const HmdRenderInfo &hmdRenderInfo, const ScaleAndOffset2D &eyeToSourceNDC );
+
+void HeightmapMeshDestroy ( HeightmapMeshVertexData *pVertices, UInt16 *pTriangleMeshIndices );
+
+
+
+//-----------------------------------------------------------------------------------
+// ***** Prediction and timewarp.
+//
+
+struct PredictionValues
+{
+ // All values in seconds.
+ // These are the times in seconds from a present+flush to the relevant display element.
+ // The time is measured to the middle of that element's visibility window,
+ // e.g. if the device is a full-persistence display, the element will be visible for
+ // an entire frame, so the time measures to the middle of that period, i.e. half the frame time.
+ float PresentFlushToRenderedScene; // To the overall rendered 3D scene being visible.
+ float PresentFlushToTimewarpStart; // To when the first timewarped scanline will be visible.
+ float PresentFlushToTimewarpEnd; // To when the last timewarped scanline will be visible.
+ float PresentFlushToPresentFlush; // To the next present+flush, i.e. the ideal framerate.
+
+ bool WithTimewarp;
+ bool WithVsync;
+};
+
+// Calculates the values from the HMD info.
+PredictionValues PredictionGetDeviceValues ( const HmdRenderInfo &hmdRenderInfo,
+ bool withTimewarp = true,
+ bool withVsync = true );
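+
+// Worked example (sketch): on a hypothetical 75Hz full-persistence display each
+// frame is visible for ~13.3ms, so the "middle of visibility" convention above
+// places that element's time roughly 6.7ms into its visibility window.
+//
+//     PredictionValues pv = PredictionGetDeviceValues ( renderInfo );
+//     double sceneMidVisible = lastPresentFlushTime + pv.PresentFlushToRenderedScene;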
+
+// Pass in an orientation used to render the scene, and then the predicted orientation
+// (which may have been computed later on, and thus is more accurate), and this
+// will return the matrix to pass to the timewarp distortion shader.
+// TODO: deal with different handedness?
+Matrix4f TimewarpComputePoseDelta ( Matrix4f const &renderedViewFromWorld, Matrix4f const &predictedViewFromWorld, Matrix4f const&eyeViewAdjust );
+Matrix4f TimewarpComputePoseDeltaPosition ( Matrix4f const &renderedViewFromWorld, Matrix4f const &predictedViewFromWorld, Matrix4f const&eyeViewAdjust );
+
+
+
+// TimewarpMachine helps keep track of rendered frame timing and
+// handles predictions for time-warp rendering.
+class TimewarpMachine
+{
+public:
+ TimewarpMachine();
+
+    // Call this once at startup, and again every time something about the setup changes.
+ void Reset ( HmdRenderInfo& renderInfo, bool vsyncEnabled, double timeNow );
+
+ // The only reliable time in most engines is directly after the frame-present and GPU flush-and-wait.
+ // This call should be done right after that to give this system the timing info it needs.
+ void AfterPresentAndFlush(double timeNow);
+
+ // The "average" time the rendered frame will show up,
+ // and the predicted pose of the HMD at that time.
+ // You usually only need to call one of these functions.
+ double GetViewRenderPredictionTime();
+ Transformf GetViewRenderPredictionPose(SensorFusion &sfusion);
+
+
+ // Timewarp prediction functions. You usually only need to call one of these three sets of functions.
+
+ // The predicted times that the first and last pixel will be visible on-screen.
+ double GetVisiblePixelTimeStart();
+ double GetVisiblePixelTimeEnd();
+ // Predicted poses of the HMD at those first and last pixels.
+ Transformf GetPredictedVisiblePixelPoseStart(SensorFusion &sfusion);
+ Transformf GetPredictedVisiblePixelPoseEnd (SensorFusion &sfusion);
+ // The delta matrices to feed to the timewarp distortion code,
+ // given the pose that was used for rendering.
+ // (usually the one returned by GetViewRenderPredictionPose() earlier)
+ Matrix4f GetTimewarpDeltaStart(SensorFusion &sfusion, Transformf const &renderedPose);
+ Matrix4f GetTimewarpDeltaEnd (SensorFusion &sfusion, Transformf const &renderedPose);
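+
+    // Typical frame flow (sketch; "sfusion" is the app's SensorFusion and
+    // "timeNow" comes from the app's clock):
+    //
+    //     twm.AfterPresentAndFlush ( timeNow );  // right after present + GPU flush
+    //     Transformf renderPose = twm.GetViewRenderPredictionPose ( sfusion );
+    //     // ...render the scene using renderPose...
+    //     Matrix4f deltaStart = twm.GetTimewarpDeltaStart ( sfusion, renderPose );
+    //     Matrix4f deltaEnd   = twm.GetTimewarpDeltaEnd   ( sfusion, renderPose );
+    //     // Interpolate between the two in the distortion shader via TimewarpLerp.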
+
+
+ // Just-In-Time distortion aims to delay the second sensor reading & distortion
+ // until the very last moment to improve prediction. However, it is a little scary,
+ // since the delay might wait too long and miss the vsync completely!
+ // Use of the JustInTime_* functions is entirely optional, and we advise allowing
+ // users to turn it off in their video options to cope with odd machine configurations.
+
+ // What time should the app wait until before starting distortion?
+ double JustInTime_GetDistortionWaitUntilTime();
+
+ // Used to time the distortion rendering
+ bool JustInTime_NeedDistortionTimeMeasurement() const;
+ void JustInTime_BeforeDistortionTimeMeasurement(double timeNow);
+ void JustInTime_AfterDistortionTimeMeasurement(double timeNow);
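+
+    // Example just-in-time loop (sketch; WaitUntil/Now and the distortion draw
+    // are app-provided, not part of this SDK):
+    //
+    //     WaitUntil ( twm.JustInTime_GetDistortionWaitUntilTime() );
+    //     bool measure = twm.JustInTime_NeedDistortionTimeMeasurement();
+    //     if ( measure ) { twm.JustInTime_BeforeDistortionTimeMeasurement ( Now() ); }
+    //     DrawDistortionMesh();
+    //     if ( measure ) { twm.JustInTime_AfterDistortionTimeMeasurement ( Now() ); }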
+
+
+private:
+
+ bool VsyncEnabled;
+ HmdRenderInfo RenderInfo;
+ PredictionValues CurrentPredictionValues;
+
+ enum { NumDistortionTimes = 10 };
+ int DistortionTimeCount;
+ double DistortionTimeCurrentStart;
+ float DistortionTimes[NumDistortionTimes];
+ float DistortionTimeAverage;
+
+    // Pose at which each eye was last rendered.
+ Transformf EyeRenderPoses[2];
+
+ // Absolute time of the last present+flush
+ double LastFramePresentFlushTime;
+    // Seconds between present+flushes
+ float PresentFlushToPresentFlushSeconds;
+ // Predicted absolute time of the next present+flush
+ double NextFramePresentFlushTime;
+
+};
+
+
+
+}}} // OVR::Util::Render
+
+#endif