Diffstat (limited to 'src/gallium/auxiliary')
26 files changed, 3706 insertions, 445 deletions
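Among the changes below is a new CPU feature detection helper, util/u_cpu_detect.{c,h}: util_cpu_detect() fills in the global struct util_cpu_caps exactly once (later calls return early), and other code then tests the cached capability bits instead of re-running cpuid or signal-handler probes. A minimal, hypothetical usage sketch follows — the function name choose_render_path() and the include path are illustrative assumptions, not part of this patch:

/* Hypothetical caller, for illustration only (not in this patch). */
#include "util/u_cpu_detect.h"  /* assumed include path from another auxiliary module */

static void
choose_render_path(void)
{
   /* Idempotent: the first call runs the probes, later calls return early. */
   util_cpu_detect();

   if (util_cpu_caps.arch == UTIL_CPU_ARCH_X86 && util_cpu_caps.has_sse2) {
      /* install an SSE2-optimized code path here */
   } else {
      /* fall back to the generic C path */
   }
}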
diff --git a/src/gallium/auxiliary/draw/draw_pt_post_vs.c b/src/gallium/auxiliary/draw/draw_pt_post_vs.c index 00d7197b132..e25f16c354a 100644 --- a/src/gallium/auxiliary/draw/draw_pt_post_vs.c +++ b/src/gallium/auxiliary/draw/draw_pt_post_vs.c @@ -104,7 +104,7 @@ static boolean post_vs_cliptest_viewport_gl( struct pt_post_vs *pvs, unsigned clipped = 0; unsigned j; - if (0) debug_printf("%s\n"); + if (0) debug_printf("%s\n", __FUNCTION__); for (j = 0; j < count; j++) { float *position = out->data[pos]; diff --git a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c index 109ac7c9d63..d01f8666222 100644 --- a/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c +++ b/src/gallium/auxiliary/pipebuffer/pb_buffer_fenced.c @@ -542,7 +542,7 @@ fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list) debug_printf("%10p %7u %7u\n", fenced_buf, fenced_buf->base.base.size, - fenced_buf->base.base.reference.count); + p_atomic_read(&fenced_buf->base.base.reference.count)); curr = next; next = curr->next; } @@ -556,7 +556,7 @@ fenced_buffer_list_dump(struct fenced_buffer_list *fenced_list) debug_printf("%10p %7u %7u %10p %s\n", fenced_buf, fenced_buf->base.base.size, - fenced_buf->base.base.reference.count, + p_atomic_read(&fenced_buf->base.base.reference.count), fenced_buf->fence, signaled == 0 ? "y" : "n"); curr = next; diff --git a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c index 1b4df28c707..6e3214ca9c9 100644 --- a/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c +++ b/src/gallium/auxiliary/pipebuffer/pb_bufmgr_debug.c @@ -350,7 +350,7 @@ pb_debug_manager_dump(struct pb_debug_manager *mgr) buf = LIST_ENTRY(struct pb_debug_buffer, curr, head); debug_printf("buffer = %p\n", buf); - debug_printf(" .size = %p\n", buf->base.base.size); + debug_printf(" .size = 0x%x\n", buf->base.base.size); debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE); curr = next; diff --git a/src/gallium/auxiliary/tgsi/tgsi_sse2.c b/src/gallium/auxiliary/tgsi/tgsi_sse2.c index 501fc05e729..5f6b83b2362 100644 --- a/src/gallium/auxiliary/tgsi/tgsi_sse2.c +++ b/src/gallium/auxiliary/tgsi/tgsi_sse2.c @@ -1445,11 +1445,11 @@ fetch_texel( struct tgsi_sampler **sampler, { float rgba[NUM_CHANNELS][QUAD_SIZE]; (*sampler)->get_samples(*sampler, - &store[0], - &store[4], - &store[8], - 0.0f, /*store[12], lodbias */ - rgba); + &store[0], /* s */ + &store[4], /* t */ + &store[8], /* r */ + store[12], /* lodbias */ + rgba); /* results */ memcpy( store, rgba, 16 * sizeof(float)); } @@ -1855,7 +1855,6 @@ emit_instruction( break; case TGSI_OPCODE_RCP: - /* TGSI_OPCODE_RECIP */ FETCH( func, *inst, 0, 0, CHAN_X ); emit_rcp( func, 0, 0 ); FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) { @@ -1864,7 +1863,6 @@ emit_instruction( break; case TGSI_OPCODE_RSQ: - /* TGSI_OPCODE_RECIPSQRT */ FETCH( func, *inst, 0, 0, CHAN_X ); emit_abs( func, 0 ); emit_rsqrt( func, 1, 0 ); @@ -1962,7 +1960,6 @@ emit_instruction( break; case TGSI_OPCODE_DP3: - /* TGSI_OPCODE_DOT3 */ FETCH( func, *inst, 0, 0, CHAN_X ); FETCH( func, *inst, 1, 1, CHAN_X ); emit_mul( func, 0, 1 ); @@ -1980,7 +1977,6 @@ emit_instruction( break; case TGSI_OPCODE_DP4: - /* TGSI_OPCODE_DOT4 */ FETCH( func, *inst, 0, 0, CHAN_X ); FETCH( func, *inst, 1, 1, CHAN_X ); emit_mul( func, 0, 1 ); @@ -2051,17 +2047,14 @@ emit_instruction( break; case TGSI_OPCODE_SLT: - /* TGSI_OPCODE_SETLT */ emit_setcc( func, inst, cc_LessThan ); break; case TGSI_OPCODE_SGE: - 
/* TGSI_OPCODE_SETGE */ emit_setcc( func, inst, cc_NotLessThan ); break; case TGSI_OPCODE_MAD: - /* TGSI_OPCODE_MADD */ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) { FETCH( func, *inst, 0, 0, chan_index ); FETCH( func, *inst, 1, 1, chan_index ); @@ -2291,7 +2284,7 @@ emit_instruction( break; case TGSI_OPCODE_SEQ: - return 0; + emit_setcc( func, inst, cc_Equal ); break; case TGSI_OPCODE_SFL: @@ -2299,7 +2292,7 @@ emit_instruction( break; case TGSI_OPCODE_SGT: - return 0; + emit_setcc( func, inst, cc_NotLessThanEqual ); break; case TGSI_OPCODE_SIN: @@ -2311,11 +2304,11 @@ emit_instruction( break; case TGSI_OPCODE_SLE: - return 0; + emit_setcc( func, inst, cc_LessThanEqual ); break; case TGSI_OPCODE_SNE: - return 0; + emit_setcc( func, inst, cc_NotEqual ); break; case TGSI_OPCODE_STR: @@ -2379,7 +2372,6 @@ emit_instruction( break; case TGSI_OPCODE_SSG: - /* TGSI_OPCODE_SGN */ FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) { FETCH( func, *inst, 0, 0, chan_index ); emit_sgn( func, 0, 0 ); diff --git a/src/gallium/auxiliary/util/Makefile b/src/gallium/auxiliary/util/Makefile index ae8d330a787..1d8bb55bbd6 100644 --- a/src/gallium/auxiliary/util/Makefile +++ b/src/gallium/auxiliary/util/Makefile @@ -10,6 +10,7 @@ C_SOURCES = \ u_debug_stack.c \ u_blit.c \ u_cache.c \ + u_cpu_detect.c \ u_draw_quad.c \ u_format.c \ u_format_access.c \ diff --git a/src/gallium/auxiliary/util/SConscript b/src/gallium/auxiliary/util/SConscript index 28a5ab42569..2187935fa40 100644 --- a/src/gallium/auxiliary/util/SConscript +++ b/src/gallium/auxiliary/util/SConscript @@ -24,6 +24,7 @@ util = env.ConvenienceLibrary( 'u_bitmask.c', 'u_blit.c', 'u_cache.c', + 'u_cpu_detect.c', 'u_debug.c', 'u_debug_dump.c', 'u_debug_memory.c', diff --git a/src/gallium/auxiliary/util/u_cpu_detect.c b/src/gallium/auxiliary/util/u_cpu_detect.c index d9f2f8fc288..ecfb96138d6 100644 --- a/src/gallium/auxiliary/util/u_cpu_detect.c +++ b/src/gallium/auxiliary/util/u_cpu_detect.c @@ -24,23 +24,21 @@ * **************************************************************************/ -/* - * Based on the work of Eric Anholt <[email protected]> +/** + * @file + * CPU feature detection. 
+ * + * @author Dennis Smit + * @author Based on the work of Eric Anholt <[email protected]> */ -/* FIXME: clean this entire file up */ +#include "pipe/p_config.h" +#include "u_debug.h" #include "u_cpu_detect.h" -#ifdef __linux__ -#define OS_LINUX -#endif -#ifdef WIN32 -#define OS_WIN32 -#endif - -#if defined(ARCH_POWERPC) -#if defined(OS_DARWIN) +#if defined(PIPE_ARCH_PPC) +#if defined(PIPE_OS_DARWIN) #include <sys/sysctl.h> #else #include <signal.h> @@ -48,137 +46,140 @@ #endif #endif -#if defined(OS_NETBSD) || defined(OS_OPENBSD) +#if defined(PIPE_OS_NETBSD) || defined(PIPE_OS_OPENBSD) #include <sys/param.h> #include <sys/sysctl.h> #include <machine/cpu.h> #endif -#if defined(OS_FREEBSD) +#if defined(PIPE_OS_FREEBSD) #include <sys/types.h> #include <sys/sysctl.h> #endif -#if defined(OS_LINUX) +#if defined(PIPE_OS_LINUX) #include <signal.h> #endif -#if defined(OS_WIN32) -#include <windows.h> +#ifdef PIPE_OS_UNIX +#include <unistd.h> #endif -#include <stdio.h> -#include <stdlib.h> -#include <unistd.h> -#include <string.h> +#if defined(PIPE_OS_WINDOWS) +#include <windows.h> +#endif -static struct cpu_detect_caps __cpu_detect_caps; -static int __cpu_detect_initialized = 0; +struct util_cpu_caps util_cpu_caps; static int has_cpuid(void); static int cpuid(unsigned int ax, unsigned int *p); +#if defined(PIPE_ARCH_X86) + /* The sigill handlers */ -#if defined(ARCH_X86) /* x86 (linux katmai handler check thing) */ -#if defined(OS_LINUX) && defined(_POSIX_SOURCE) && defined(X86_FXSR_MAGIC) -static void sigill_handler_sse(int signal, struct sigcontext sc) +#if defined(PIPE_OS_LINUX) //&& defined(_POSIX_SOURCE) && defined(X86_FXSR_MAGIC) +static void +sigill_handler_sse(int signal, struct sigcontext sc) { - /* Both the "xorps %%xmm0,%%xmm0" and "divps %xmm0,%%xmm1" - * instructions are 3 bytes long. We must increment the instruction - * pointer manually to avoid repeated execution of the offending - * instruction. - * - * If the SIGILL is caused by a divide-by-zero when unmasked - * exceptions aren't supported, the SIMD FPU status and control - * word will be restored at the end of the test, so we don't need - * to worry about doing it here. Besides, we may not be able to... - */ - sc.eip += 3; - - __cpu_detect_caps.hasSSE=0; + /* Both the "xorps %%xmm0,%%xmm0" and "divps %xmm0,%%xmm1" + * instructions are 3 bytes long. We must increment the instruction + * pointer manually to avoid repeated execution of the offending + * instruction. + * + * If the SIGILL is caused by a divide-by-zero when unmasked + * exceptions aren't supported, the SIMD FPU status and control + * word will be restored at the end of the test, so we don't need + * to worry about doing it here. Besides, we may not be able to... + */ + sc.eip += 3; + + util_cpu_caps.has_sse=0; } -static void sigfpe_handler_sse(int signal, struct sigcontext sc) +static void +sigfpe_handler_sse(int signal, struct sigcontext sc) { - if (sc.fpstate->magic != 0xffff) { - /* Our signal context has the extended FPU state, so reset the - * divide-by-zero exception mask and clear the divide-by-zero - * exception bit. - */ - sc.fpstate->mxcsr |= 0x00000200; - sc.fpstate->mxcsr &= 0xfffffffb; - } else { - /* If we ever get here, we're completely hosed. - */ - } + if (sc.fpstate->magic != 0xffff) { + /* Our signal context has the extended FPU state, so reset the + * divide-by-zero exception mask and clear the divide-by-zero + * exception bit. 
+ */ + sc.fpstate->mxcsr |= 0x00000200; + sc.fpstate->mxcsr &= 0xfffffffb; + } else { + /* If we ever get here, we're completely hosed. + */ + } } -#endif -#endif /* OS_LINUX && _POSIX_SOURCE && X86_FXSR_MAGIC */ +#endif /* PIPE_OS_LINUX && _POSIX_SOURCE && X86_FXSR_MAGIC */ -#if defined(OS_WIN32) -LONG CALLBACK win32_sig_handler_sse(EXCEPTION_POINTERS* ep) +#if defined(PIPE_OS_WINDOWS) +static LONG CALLBACK +win32_sig_handler_sse(EXCEPTION_POINTERS* ep) { - if(ep->ExceptionRecord->ExceptionCode==EXCEPTION_ILLEGAL_INSTRUCTION){ - ep->ContextRecord->Eip +=3; - __cpu_detect_caps.hasSSE=0; - return EXCEPTION_CONTINUE_EXECUTION; - } - return EXCEPTION_CONTINUE_SEARCH; + if(ep->ExceptionRecord->ExceptionCode==EXCEPTION_ILLEGAL_INSTRUCTION){ + ep->ContextRecord->Eip +=3; + util_cpu_caps.has_sse=0; + return EXCEPTION_CONTINUE_EXECUTION; + } + return EXCEPTION_CONTINUE_SEARCH; } -#endif /* OS_WIN32 */ +#endif /* PIPE_OS_WINDOWS */ + +#endif /* PIPE_ARCH_X86 */ -#if defined(ARCH_POWERPC) && !defined(OS_DARWIN) +#if defined(PIPE_ARCH_PPC) && !defined(PIPE_OS_DARWIN) static sigjmp_buf __lv_powerpc_jmpbuf; static volatile sig_atomic_t __lv_powerpc_canjump = 0; -static void sigill_handler (int sig); - -static void sigill_handler (int sig) +static void +sigill_handler(int sig) { - if (!__lv_powerpc_canjump) { - signal (sig, SIG_DFL); - raise (sig); - } + if (!__lv_powerpc_canjump) { + signal (sig, SIG_DFL); + raise (sig); + } - __lv_powerpc_canjump = 0; - siglongjmp(__lv_powerpc_jmpbuf, 1); + __lv_powerpc_canjump = 0; + siglongjmp(__lv_powerpc_jmpbuf, 1); } -static void check_os_altivec_support(void) +static void +check_os_altivec_support(void) { -#if defined(OS_DARWIN) - int sels[2] = {CTL_HW, HW_VECTORUNIT}; - int has_vu = 0; - int len = sizeof (has_vu); - int err; - - err = sysctl(sels, 2, &has_vu, &len, NULL, 0); - - if (err == 0) { - if (has_vu != 0) { - __cpu_detect_caps.hasAltiVec = 1; - } - } -#else /* !OS_DARWIN */ - /* no Darwin, do it the brute-force way */ - /* this is borrowed from the libmpeg2 library */ - signal(SIGILL, sigill_handler); - if (sigsetjmp(__lv_powerpc_jmpbuf, 1)) { - signal(SIGILL, SIG_DFL); - } else { - __lv_powerpc_canjump = 1; - - __asm __volatile - ("mtspr 256, %0\n\t" - "vand %%v0, %%v0, %%v0" - : - : "r" (-1)); - - signal(SIGILL, SIG_DFL); - __cpu_detect_caps.hasAltiVec = 1; - } +#if defined(PIPE_OS_DARWIN) + int sels[2] = {CTL_HW, HW_VECTORUNIT}; + int has_vu = 0; + int len = sizeof (has_vu); + int err; + + err = sysctl(sels, 2, &has_vu, &len, NULL, 0); + + if (err == 0) { + if (has_vu != 0) { + util_cpu_caps.has_altivec = 1; + } + } +#else /* !PIPE_OS_DARWIN */ + /* no Darwin, do it the brute-force way */ + /* this is borrowed from the libmpeg2 library */ + signal(SIGILL, sigill_handler); + if (sigsetjmp(__lv_powerpc_jmpbuf, 1)) { + signal(SIGILL, SIG_DFL); + } else { + __lv_powerpc_canjump = 1; + + __asm __volatile + ("mtspr 256, %0\n\t" + "vand %%v0, %%v0, %%v0" + : + : "r" (-1)); + + signal(SIGILL, SIG_DFL); + util_cpu_caps.has_altivec = 1; + } #endif } #endif @@ -189,318 +190,312 @@ static void check_os_altivec_support(void) * and RedHat patched 2.2 kernels that have broken exception handling * support for user space apps that do SSE. 
*/ -static void check_os_katmai_support(void) +static void +check_os_katmai_support(void) { -#if defined(ARCH_X86) -#if defined(OS_FREEBSD) - int has_sse=0, ret; - int len = sizeof (has_sse); - - ret = sysctlbyname("hw.instruction_sse", &has_sse, &len, NULL, 0); - if (ret || !has_sse) - __cpu_detect_caps.hasSSE=0; - -#elif defined(OS_NETBSD) || defined(OS_OPENBSD) - int has_sse, has_sse2, ret, mib[2]; - int varlen; - - mib[0] = CTL_MACHDEP; - mib[1] = CPU_SSE; - varlen = sizeof (has_sse); - - ret = sysctl(mib, 2, &has_sse, &varlen, NULL, 0); - if (ret < 0 || !has_sse) { - __cpu_detect_caps.hasSSE = 0; - } else { - __cpu_detect_caps.hasSSE = 1; - } - - mib[1] = CPU_SSE2; - varlen = sizeof (has_sse2); - ret = sysctl(mib, 2, &has_sse2, &varlen, NULL, 0); - if (ret < 0 || !has_sse2) { - __cpu_detect_caps.hasSSE2 = 0; - } else { - __cpu_detect_caps.hasSSE2 = 1; - } - __cpu_detect_caps.hasSSE = 0; /* FIXME ?!?!? */ - -#elif defined(OS_WIN32) - LPTOP_LEVEL_EXCEPTION_FILTER exc_fil; - if (__cpu_detect_caps.hasSSE) { - exc_fil = SetUnhandledExceptionFilter(win32_sig_handler_sse); - __asm __volatile ("xorps %xmm0, %xmm0"); - SetUnhandledExceptionFilter(exc_fil); - } -#elif defined(OS_LINUX) - struct sigaction saved_sigill; - struct sigaction saved_sigfpe; - - /* Save the original signal handlers. - */ - sigaction(SIGILL, NULL, &saved_sigill); - sigaction(SIGFPE, NULL, &saved_sigfpe); - - signal(SIGILL, (void (*)(int))sigill_handler_sse); - signal(SIGFPE, (void (*)(int))sigfpe_handler_sse); - - /* Emulate test for OSFXSR in CR4. The OS will set this bit if it - * supports the extended FPU save and restore required for SSE. If - * we execute an SSE instruction on a PIII and get a SIGILL, the OS - * doesn't support Streaming SIMD Exceptions, even if the processor - * does. - */ - if (__cpu_detect_caps.hasSSE) { - __asm __volatile ("xorps %xmm1, %xmm0"); - } - - /* Emulate test for OSXMMEXCPT in CR4. The OS will set this bit if - * it supports unmasked SIMD FPU exceptions. If we unmask the - * exceptions, do a SIMD divide-by-zero and get a SIGILL, the OS - * doesn't support unmasked SIMD FPU exceptions. If we get a SIGFPE - * as expected, we're okay but we need to clean up after it. - * - * Are we being too stringent in our requirement that the OS support - * unmasked exceptions? Certain RedHat 2.2 kernels enable SSE by - * setting CR4.OSFXSR but don't support unmasked exceptions. Win98 - * doesn't even support them. We at least know the user-space SSE - * support is good in kernels that do support unmasked exceptions, - * and therefore to be safe I'm going to leave this test in here. - */ - if (__cpu_detect_caps.hasSSE) { - // test_os_katmai_exception_support(); - } - - /* Restore the original signal handlers. 
- */ - sigaction(SIGILL, &saved_sigill, NULL); - sigaction(SIGFPE, &saved_sigfpe, NULL); +#if defined(PIPE_ARCH_X86) +#if defined(PIPE_OS_FREEBSD) + int has_sse=0, ret; + int len = sizeof (has_sse); + + ret = sysctlbyname("hw.instruction_sse", &has_sse, &len, NULL, 0); + if (ret || !has_sse) + util_cpu_caps.has_sse=0; + +#elif defined(PIPE_OS_NETBSD) || defined(PIPE_OS_OPENBSD) + int has_sse, has_sse2, ret, mib[2]; + int varlen; + + mib[0] = CTL_MACHDEP; + mib[1] = CPU_SSE; + varlen = sizeof (has_sse); + + ret = sysctl(mib, 2, &has_sse, &varlen, NULL, 0); + if (ret < 0 || !has_sse) { + util_cpu_caps.has_sse = 0; + } else { + util_cpu_caps.has_sse = 1; + } + + mib[1] = CPU_SSE2; + varlen = sizeof (has_sse2); + ret = sysctl(mib, 2, &has_sse2, &varlen, NULL, 0); + if (ret < 0 || !has_sse2) { + util_cpu_caps.has_sse2 = 0; + } else { + util_cpu_caps.has_sse2 = 1; + } + util_cpu_caps.has_sse = 0; /* FIXME ?!?!? */ + +#elif defined(PIPE_OS_WINDOWS) + LPTOP_LEVEL_EXCEPTION_FILTER exc_fil; + if (util_cpu_caps.has_sse) { + exc_fil = SetUnhandledExceptionFilter(win32_sig_handler_sse); +#if defined(PIPE_CC_GCC) + __asm __volatile ("xorps %xmm0, %xmm0"); +#elif defined(PIPE_CC_MSVC) + __asm { + xorps xmm0, xmm0 // executing SSE instruction + } +#else +#error Unsupported compiler +#endif + SetUnhandledExceptionFilter(exc_fil); + } +#elif defined(PIPE_OS_LINUX) + struct sigaction saved_sigill; + struct sigaction saved_sigfpe; + + /* Save the original signal handlers. + */ + sigaction(SIGILL, NULL, &saved_sigill); + sigaction(SIGFPE, NULL, &saved_sigfpe); + + signal(SIGILL, (void (*)(int))sigill_handler_sse); + signal(SIGFPE, (void (*)(int))sigfpe_handler_sse); + + /* Emulate test for OSFXSR in CR4. The OS will set this bit if it + * supports the extended FPU save and restore required for SSE. If + * we execute an SSE instruction on a PIII and get a SIGILL, the OS + * doesn't support Streaming SIMD Exceptions, even if the processor + * does. + */ + if (util_cpu_caps.has_sse) { + __asm __volatile ("xorps %xmm1, %xmm0"); + } + + /* Emulate test for OSXMMEXCPT in CR4. The OS will set this bit if + * it supports unmasked SIMD FPU exceptions. If we unmask the + * exceptions, do a SIMD divide-by-zero and get a SIGILL, the OS + * doesn't support unmasked SIMD FPU exceptions. If we get a SIGFPE + * as expected, we're okay but we need to clean up after it. + * + * Are we being too stringent in our requirement that the OS support + * unmasked exceptions? Certain RedHat 2.2 kernels enable SSE by + * setting CR4.OSFXSR but don't support unmasked exceptions. Win98 + * doesn't even support them. We at least know the user-space SSE + * support is good in kernels that do support unmasked exceptions, + * and therefore to be safe I'm going to leave this test in here. + */ + if (util_cpu_caps.has_sse) { + // test_os_katmai_exception_support(); + } + + /* Restore the original signal handlers. + */ + sigaction(SIGILL, &saved_sigill, NULL); + sigaction(SIGFPE, &saved_sigfpe, NULL); #else - /* We can't use POSIX signal handling to test the availability of - * SSE, so we disable it by default. - */ - __cpu_detect_caps.hasSSE = 0; + /* We can't use POSIX signal handling to test the availability of + * SSE, so we disable it by default. 
+ */ + util_cpu_caps.has_sse = 0; #endif /* __linux__ */ #endif + +#if defined(PIPE_ARCH_X86_64) + util_cpu_caps.has_sse = 1; +#endif } static int has_cpuid(void) { -#if defined(ARCH_X86) - int a, c; - - __asm __volatile - ("pushf\n" - "popl %0\n" - "movl %0, %1\n" - "xorl $0x200000, %0\n" - "push %0\n" - "popf\n" - "pushf\n" - "popl %0\n" - : "=a" (a), "=c" (c) - : - : "cc"); - - return a != c; +#if defined(PIPE_ARCH_X86) +#if defined(PIPE_OS_GCC) + int a, c; + + __asm __volatile + ("pushf\n" + "popl %0\n" + "movl %0, %1\n" + "xorl $0x200000, %0\n" + "push %0\n" + "popf\n" + "pushf\n" + "popl %0\n" + : "=a" (a), "=c" (c) + : + : "cc"); + + return a != c; +#else + /* FIXME */ + return 1; +#endif +#elif defined(PIPE_ARCH_X86_64) + return 1; #else - return 0; + return 0; #endif } -static int cpuid(unsigned int ax, unsigned int *p) +static INLINE int +cpuid(unsigned int ax, unsigned int *p) { -#if defined(ARCH_X86) - unsigned int flags; - - __asm __volatile - ("movl %%ebx, %%esi\n\t" - "cpuid\n\t" - "xchgl %%ebx, %%esi" - : "=a" (p[0]), "=S" (p[1]), - "=c" (p[2]), "=d" (p[3]) - : "0" (ax)); - - return 0; -#else - return -1; + int ret = -1; + +#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64) +#if defined(PIPE_CC_GCC) + __asm __volatile + ("movl %%ebx, %%esi\n\t" + "cpuid\n\t" + "xchgl %%ebx, %%esi" + : "=a" (p[0]), "=S" (p[1]), + "=c" (p[2]), "=d" (p[3]) + : "0" (ax)); + + ret = 0; +#elif defined(PIPE_CC_MSVC) + __cpuid(ax, p); + + ret = 0; +#endif #endif + + return ret; } -void cpu_detect_initialize() +void +util_cpu_detect(void) { - unsigned int regs[4]; - unsigned int regs2[4]; - - int mib[2], ncpu; - int len; - - memset(&__cpu_detect_caps, 0, sizeof (struct cpu_detect_caps)); - - /* Check for arch type */ -#if defined(ARCH_MIPS) - __cpu_detect_caps.type = CPU_DETECT_TYPE_MIPS; -#elif defined(ARCH_ALPHA) - __cpu_detect_caps.type = CPU_DETECT_TYPE_ALPHA; -#elif defined(ARCH_SPARC) - __cpu_detect_caps.type = CPU_DETECT_TYPE_SPARC; -#elif defined(ARCH_X86) - __cpu_detect_caps.type = CPU_DETECT_TYPE_X86; -#elif defined(ARCH_POWERPC) - __cpu_detect_caps.type = CPU_DETECT_TYPE_POWERPC; + static boolean util_cpu_detect_initialized = FALSE; + + if(util_cpu_detect_initialized) + return; + + memset(&util_cpu_caps, 0, sizeof util_cpu_caps); + + /* Check for arch type */ +#if defined(PIPE_ARCH_MIPS) + util_cpu_caps.arch = UTIL_CPU_ARCH_MIPS; +#elif defined(PIPE_ARCH_ALPHA) + util_cpu_caps.arch = UTIL_CPU_ARCH_ALPHA; +#elif defined(PIPE_ARCH_SPARC) + util_cpu_caps.arch = UTIL_CPU_ARCH_SPARC; +#elif defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64) + util_cpu_caps.arch = UTIL_CPU_ARCH_X86; +#elif defined(PIPE_ARCH_PPC) + util_cpu_caps.arch = UTIL_CPU_ARCH_POWERPC; #else - __cpu_detect_caps.type = CPU_DETECT_TYPE_OTHER; + util_cpu_caps.arch = UTIL_CPU_ARCH_UNKNOWN; #endif - /* Count the number of CPUs in system */ -#if !defined(OS_WIN32) && !defined(OS_UNKNOWN) && defined(_SC_NPROCESSORS_ONLN) - __cpu_detect_caps.nrcpu = sysconf(_SC_NPROCESSORS_ONLN); - if (__cpu_detect_caps.nrcpu == -1) - __cpu_detect_caps.nrcpu = 1; - -#elif defined(OS_NETBSD) || defined(OS_FREEBSD) || defined(OS_OPENBSD) + /* Count the number of CPUs in system */ +#if !defined(PIPE_OS_WINDOWS) && !defined(PIPE_OS_UNKNOWN) && defined(_SC_NPROCESSORS_ONLN) + util_cpu_caps.nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); + if (util_cpu_caps.nr_cpus == -1) + util_cpu_caps.nr_cpus = 1; - mib[0] = CTL_HW; - mib[1] = HW_NCPU; +#elif defined(PIPE_OS_NETBSD) || defined(PIPE_OS_FREEBSD) || defined(PIPE_OS_OPENBSD) + { + int mib[2], ncpu; + int 
len; - len = sizeof (ncpu); - sysctl(mib, 2, &ncpu, &len, NULL, 0); - __cpu_detect_caps.nrcpu = ncpu; + mib[0] = CTL_HW; + mib[1] = HW_NCPU; + len = sizeof (ncpu); + sysctl(mib, 2, &ncpu, &len, NULL, 0); + util_cpu_caps.nr_cpus = ncpu; + } #else - __cpu_detect_caps.nrcpu = 1; + util_cpu_caps.nr_cpus = 1; #endif -#if defined(ARCH_X86) - /* No cpuid, old 486 or lower */ - if (has_cpuid() == 0) - return; +#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64) + if (has_cpuid()) { + unsigned int regs[4]; + unsigned int regs2[4]; - __cpu_detect_caps.cacheline = 32; + util_cpu_caps.cacheline = 32; - /* Get max cpuid level */ - cpuid(0x00000000, regs); + /* Get max cpuid level */ + cpuid(0x00000000, regs); - if (regs[0] >= 0x00000001) { - unsigned int cacheline; + if (regs[0] >= 0x00000001) { + unsigned int cacheline; - cpuid (0x00000001, regs2); + cpuid (0x00000001, regs2); - __cpu_detect_caps.x86cpuType = (regs2[0] >> 8) & 0xf; - if (__cpu_detect_caps.x86cpuType == 0xf) - __cpu_detect_caps.x86cpuType = 8 + ((regs2[0] >> 20) & 255); /* use extended family (P4, IA64) */ + util_cpu_caps.x86_cpu_type = (regs2[0] >> 8) & 0xf; + if (util_cpu_caps.x86_cpu_type == 0xf) + util_cpu_caps.x86_cpu_type = 8 + ((regs2[0] >> 20) & 255); /* use extended family (P4, IA64) */ - /* general feature flags */ - __cpu_detect_caps.hasTSC = (regs2[3] & (1 << 8 )) >> 8; /* 0x0000010 */ - __cpu_detect_caps.hasMMX = (regs2[3] & (1 << 23 )) >> 23; /* 0x0800000 */ - __cpu_detect_caps.hasSSE = (regs2[3] & (1 << 25 )) >> 25; /* 0x2000000 */ - __cpu_detect_caps.hasSSE2 = (regs2[3] & (1 << 26 )) >> 26; /* 0x4000000 */ - __cpu_detect_caps.hasSSE3 = (regs2[2] & (1)); /* 0x0000001 */ - __cpu_detect_caps.hasSSSE3 = (regs2[2] & (1 << 9 )) >> 9; /* 0x0000020 */ - __cpu_detect_caps.hasMMX2 = __cpu_detect_caps.hasSSE; /* SSE cpus supports mmxext too */ + /* general feature flags */ + util_cpu_caps.has_tsc = (regs2[3] & (1 << 8 )) >> 8; /* 0x0000010 */ + util_cpu_caps.has_mmx = (regs2[3] & (1 << 23 )) >> 23; /* 0x0800000 */ + util_cpu_caps.has_sse = (regs2[3] & (1 << 25 )) >> 25; /* 0x2000000 */ + util_cpu_caps.has_sse2 = (regs2[3] & (1 << 26 )) >> 26; /* 0x4000000 */ + util_cpu_caps.has_sse3 = (regs2[2] & (1)); /* 0x0000001 */ + util_cpu_caps.has_ssse3 = (regs2[2] & (1 << 9 )) >> 9; /* 0x0000020 */ + util_cpu_caps.has_sse4_1 = (regs2[2] & (1 << 19)) >> 19; + util_cpu_caps.has_mmx2 = util_cpu_caps.has_sse; /* SSE cpus supports mmxext too */ - cacheline = ((regs2[1] >> 8) & 0xFF) * 8; - if (cacheline > 0) - __cpu_detect_caps.cacheline = cacheline; - } + cacheline = ((regs2[1] >> 8) & 0xFF) * 8; + if (cacheline > 0) + util_cpu_caps.cacheline = cacheline; + } - cpuid(0x80000000, regs); + cpuid(0x80000000, regs); - if (regs[0] >= 0x80000001) { + if (regs[0] >= 0x80000001) { - cpuid(0x80000001, regs2); + cpuid(0x80000001, regs2); - __cpu_detect_caps.hasMMX |= (regs2[3] & (1 << 23 )) >> 23; /* 0x0800000 */ - __cpu_detect_caps.hasMMX2 |= (regs2[3] & (1 << 22 )) >> 22; /* 0x400000 */ - __cpu_detect_caps.has3DNow = (regs2[3] & (1 << 31 )) >> 31; /* 0x80000000 */ - __cpu_detect_caps.has3DNowExt = (regs2[3] & (1 << 30 )) >> 30; - } + util_cpu_caps.has_mmx |= (regs2[3] & (1 << 23 )) >> 23; /* 0x0800000 */ + util_cpu_caps.has_mmx2 |= (regs2[3] & (1 << 22 )) >> 22; /* 0x400000 */ + util_cpu_caps.has_3dnow = (regs2[3] & (1 << 31 )) >> 31; /* 0x80000000 */ + util_cpu_caps.has_3dnow_ext = (regs2[3] & (1 << 30 )) >> 30; + } - if (regs[0] >= 0x80000006) { - cpuid(0x80000006, regs2); - __cpu_detect_caps.cacheline = regs2[2] & 0xFF; - } + if (regs[0] >= 
0x80000006) { + cpuid(0x80000006, regs2); + util_cpu_caps.cacheline = regs2[2] & 0xFF; + } +#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_FREEBSD) || defined(PIPE_OS_NETBSD) || defined(PIPE_OS_CYGWIN) || defined(PIPE_OS_OPENBSD) + if (util_cpu_caps.has_sse) + check_os_katmai_support(); -#if defined(OS_LINUX) || defined(OS_FREEBSD) || defined(OS_NETBSD) || defined(OS_CYGWIN) || defined(OS_OPENBSD) - if (__cpu_detect_caps.hasSSE) - check_os_katmai_support(); - - if (!__cpu_detect_caps.hasSSE) { - __cpu_detect_caps.hasSSE2 = 0; - __cpu_detect_caps.hasSSE3 = 0; - __cpu_detect_caps.hasSSSE3 = 0; - } + if (!util_cpu_caps.has_sse) { + util_cpu_caps.has_sse2 = 0; + util_cpu_caps.has_sse3 = 0; + util_cpu_caps.has_ssse3 = 0; + } #else - __cpu_detect_caps.hasSSE = 0; - __cpu_detect_caps.hasSSE2 = 0; - __cpu_detect_caps.hasSSE3 = 0; - __cpu_detect_caps.hasSSSE3 = 0; + util_cpu_caps.has_sse = 0; + util_cpu_caps.has_sse2 = 0; + util_cpu_caps.has_sse3 = 0; + util_cpu_caps.has_ssse3 = 0; +#endif + } +#endif /* PIPE_ARCH_X86 || PIPE_ARCH_X86_64 */ + +#if defined(PIPE_ARCH_PPC) + check_os_altivec_support(); +#endif /* PIPE_ARCH_PPC */ + +#ifdef DEBUG + debug_printf("util_cpu_caps.arch = %i\n", util_cpu_caps.arch); + debug_printf("util_cpu_caps.nr_cpus = %u\n", util_cpu_caps.nr_cpus); + + debug_printf("util_cpu_caps.x86_cpu_type = %u\n", util_cpu_caps.x86_cpu_type); + debug_printf("util_cpu_caps.cacheline = %u\n", util_cpu_caps.cacheline); + + debug_printf("util_cpu_caps.has_tsc = %u\n", util_cpu_caps.has_tsc); + debug_printf("util_cpu_caps.has_mmx = %u\n", util_cpu_caps.has_mmx); + debug_printf("util_cpu_caps.has_mmx2 = %u\n", util_cpu_caps.has_mmx2); + debug_printf("util_cpu_caps.has_sse = %u\n", util_cpu_caps.has_sse); + debug_printf("util_cpu_caps.has_sse2 = %u\n", util_cpu_caps.has_sse2); + debug_printf("util_cpu_caps.has_sse3 = %u\n", util_cpu_caps.has_sse3); + debug_printf("util_cpu_caps.has_ssse3 = %u\n", util_cpu_caps.has_ssse3); + debug_printf("util_cpu_caps.has_sse4_1 = %u\n", util_cpu_caps.has_sse4_1); + debug_printf("util_cpu_caps.has_3dnow = %u\n", util_cpu_caps.has_3dnow); + debug_printf("util_cpu_caps.has_3dnow_ext = %u\n", util_cpu_caps.has_3dnow_ext); + debug_printf("util_cpu_caps.has_altivec = %u\n", util_cpu_caps.has_altivec); #endif -#endif /* ARCH_X86 */ - -#if defined(ARCH_POWERPC) - check_os_altivec_support(); -#endif /* ARCH_POWERPC */ - - __cpu_detect_initialized = 1; -} - -struct cpu_detect_caps *cpu_detect_get_caps() -{ - return &__cpu_detect_caps; -} - -/* The getters and setters for feature flags */ -int cpu_detect_get_tsc() -{ - return __cpu_detect_caps.hasTSC; -} - -int cpu_detect_get_mmx() -{ - return __cpu_detect_caps.hasMMX; -} - -int cpu_detect_get_mmx2() -{ - return __cpu_detect_caps.hasMMX2; -} - -int cpu_detect_get_sse() -{ - return __cpu_detect_caps.hasSSE; -} - -int cpu_detect_get_sse2() -{ - return __cpu_detect_caps.hasSSE2; -} - -int cpu_detect_get_sse3() -{ - return __cpu_detect_caps.hasSSE3; -} - -int cpu_detect_get_ssse3() -{ - return __cpu_detect_caps.hasSSSE3; -} - -int cpu_detect_get_3dnow() -{ - return __cpu_detect_caps.has3DNow; -} - -int cpu_detect_get_3dnow2() -{ - return __cpu_detect_caps.has3DNowExt; -} -int cpu_detect_get_altivec() -{ - return __cpu_detect_caps.hasAltiVec; + util_cpu_detect_initialized = TRUE; } - diff --git a/src/gallium/auxiliary/util/u_cpu_detect.h b/src/gallium/auxiliary/util/u_cpu_detect.h index 1612d49286a..7ea0121c07f 100644 --- a/src/gallium/auxiliary/util/u_cpu_detect.h +++ b/src/gallium/auxiliary/util/u_cpu_detect.h @@ -24,55 
+24,53 @@ * ***************************************************************************/ -/* - * Based on the work of Eric Anholt <[email protected]> +/** + * @file + * CPU feature detection. + * + * @author Dennis Smit + * @author Based on the work of Eric Anholt <[email protected]> */ -#ifndef _CPU_DETECT_H -#define _CPU_DETECT_H +#ifndef _UTIL_CPU_DETECT_H +#define _UTIL_CPU_DETECT_H + +#include "pipe/p_compiler.h" -typedef enum { - CPU_DETECT_TYPE_MIPS, - CPU_DETECT_TYPE_ALPHA, - CPU_DETECT_TYPE_SPARC, - CPU_DETECT_TYPE_X86, - CPU_DETECT_TYPE_POWERPC, - CPU_DETECT_TYPE_OTHER -} cpu_detect_type; +enum util_cpu_arch { + UTIL_CPU_ARCH_UNKNOWN = 0, + UTIL_CPU_ARCH_MIPS, + UTIL_CPU_ARCH_ALPHA, + UTIL_CPU_ARCH_SPARC, + UTIL_CPU_ARCH_X86, + UTIL_CPU_ARCH_POWERPC +}; -struct cpu_detect_caps { - cpu_detect_type type; - int nrcpu; +struct util_cpu_caps { + enum util_cpu_arch arch; + unsigned nr_cpus; - /* Feature flags */ - int x86cpuType; - int cacheline; + /* Feature flags */ + int x86_cpu_type; + unsigned cacheline; - int hasTSC; - int hasMMX; - int hasMMX2; - int hasSSE; - int hasSSE2; - int hasSSE3; - int hasSSSE3; - int has3DNow; - int has3DNowExt; - int hasAltiVec; + unsigned has_tsc:1; + unsigned has_mmx:1; + unsigned has_mmx2:1; + unsigned has_sse:1; + unsigned has_sse2:1; + unsigned has_sse3:1; + unsigned has_ssse3:1; + unsigned has_sse4_1:1; + unsigned has_3dnow:1; + unsigned has_3dnow_ext:1; + unsigned has_altivec:1; }; -/* prototypes */ -void cpu_detect_initialize(void); -struct cpu_detect_caps *cpu_detect_get_caps(void); +extern struct util_cpu_caps +util_cpu_caps; + +void util_cpu_detect(void); -int cpu_detect_get_tsc(void); -int cpu_detect_get_mmx(void); -int cpu_detect_get_mmx2(void); -int cpu_detect_get_sse(void); -int cpu_detect_get_sse2(void); -int cpu_detect_get_sse3(void); -int cpu_detect_get_ssse3(void); -int cpu_detect_get_3dnow(void); -int cpu_detect_get_3dnow2(void); -int cpu_detect_get_altivec(void); -#endif /* _CPU_DETECT_H */ +#endif /* _UTIL_CPU_DETECT_H */ diff --git a/src/gallium/auxiliary/util/u_debug.h b/src/gallium/auxiliary/util/u_debug.h index 1380d98d7ee..b82e7cb4d40 100644 --- a/src/gallium/auxiliary/util/u_debug.h +++ b/src/gallium/auxiliary/util/u_debug.h @@ -65,6 +65,11 @@ extern "C" { #define __FUNCTION__ "???" #endif +#if defined(__GNUC__) +#define _util_printf_format(fmt, list) __attribute__ ((format (printf, fmt, list))) +#else +#define _util_printf_format(fmt, list) +#endif void _debug_vprintf(const char *format, va_list ap); @@ -82,14 +87,17 @@ _debug_printf(const char *format, ...) /** * Print debug messages. * - * The actual channel used to output debug message is platform specific. To - * avoid misformating or truncation, follow these rules of thumb: + * The actual channel used to output debug message is platform specific. To + * avoid misformating or truncation, follow these rules of thumb: * - output whole lines - * - avoid outputing large strings (512 bytes is the current maximum length + * - avoid outputing large strings (512 bytes is the current maximum length * that is guaranteed to be printed in all platforms) */ #if !defined(PIPE_OS_HAIKU) static INLINE void +debug_printf(const char *format, ...) _util_printf_format(1,2); + +static INLINE void debug_printf(const char *format, ...) 
{ #ifdef DEBUG diff --git a/src/gallium/auxiliary/util/u_gen_mipmap.c b/src/gallium/auxiliary/util/u_gen_mipmap.c index f06c0e463d0..4e3d35f40e7 100644 --- a/src/gallium/auxiliary/util/u_gen_mipmap.c +++ b/src/gallium/auxiliary/util/u_gen_mipmap.c @@ -1515,6 +1515,17 @@ util_gen_mipmap(struct gen_mipmap_state *ctx, uint zslice = 0; uint offset; + /* The texture object should have room for the levels which we're + * about to generate. + */ + assert(lastLevel <= pt->last_level); + + /* If this fails, why are we here? */ + assert(lastLevel > baseLevel); + + assert(filter == PIPE_TEX_FILTER_LINEAR || + filter == PIPE_TEX_FILTER_NEAREST); + /* check if we can render in the texture's format */ if (!screen->is_format_supported(screen, pt->format, PIPE_TEXTURE_2D, PIPE_TEXTURE_USAGE_RENDER_TARGET, 0)) { diff --git a/src/gallium/auxiliary/util/u_math.h b/src/gallium/auxiliary/util/u_math.h index b428dc544c3..75b075f160d 100644 --- a/src/gallium/auxiliary/util/u_math.h +++ b/src/gallium/auxiliary/util/u_math.h @@ -283,6 +283,14 @@ util_fast_pow(float x, float y) return util_fast_exp2(util_fast_log2(x) * y); } +/* Note that this counts zero as a power of two. + */ +static INLINE boolean +util_is_power_of_two( unsigned v ) +{ + return (v & (v-1)) == 0; +} + /** * Floor(x), returned as int. @@ -463,6 +471,26 @@ util_logbase2(unsigned n) /** + * Returns the smallest power of two >= x + */ +static INLINE unsigned +util_next_power_of_two(unsigned x) +{ + unsigned i; + + if (x == 0) + return 1; + + --x; + + for (i = 1; i < sizeof(unsigned) * 8; i <<= 1) + x |= x >> i; + + return x + 1; +} + + +/** * Clamp X to [MIN, MAX]. * This is a macro to allow float, int, uint, etc. types. */ diff --git a/src/gallium/auxiliary/util/u_network.c b/src/gallium/auxiliary/util/u_network.c index bc4b7584067..6269c72e121 100644 --- a/src/gallium/auxiliary/util/u_network.c +++ b/src/gallium/auxiliary/util/u_network.c @@ -6,7 +6,7 @@ #if defined(PIPE_SUBSYSTEM_WINDOWS_USER) # include <winsock2.h> # include <windows.h> -#elif defined(PIPE_OS_LINUX) || defined(PIPE_OS_HAIKU) +#elif defined(PIPE_OS_LINUX) || defined(PIPE_OS_HAIKU) || defined(PIPE_OS_BSD) # include <sys/socket.h> # include <netinet/in.h> # include <unistd.h> @@ -54,7 +54,7 @@ u_socket_close(int s) if (s < 0) return; -#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_HAIKU) +#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_HAIKU) || defined(PIPE_OS_BSD) shutdown(s, SHUT_RDWR); close(s); #elif defined(PIPE_SUBSYSTEM_WINDOWS_USER) @@ -169,7 +169,7 @@ u_socket_listen_on_port(uint16_t portnum) void u_socket_block(int s, boolean block) { -#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_HAIKU) +#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_HAIKU) || defined(PIPE_OS_BSD) int old = fcntl(s, F_GETFL, 0); if (old == -1) return; diff --git a/src/gallium/auxiliary/util/u_network.h b/src/gallium/auxiliary/util/u_network.h index 8c778f492ca..0aa898b9676 100644 --- a/src/gallium/auxiliary/util/u_network.h +++ b/src/gallium/auxiliary/util/u_network.h @@ -6,7 +6,7 @@ #if defined(PIPE_SUBSYSTEM_WINDOWS_USER) # define PIPE_HAVE_SOCKETS -#elif defined(PIPE_OS_LINUX) || defined(PIPE_OS_HAIKU) +#elif defined(PIPE_OS_LINUX) || defined(PIPE_OS_HAIKU) || defined(PIPE_OS_BSD) # define PIPE_HAVE_SOCKETS #endif diff --git a/src/gallium/auxiliary/util/u_tile.c b/src/gallium/auxiliary/util/u_tile.c index 0d6489c26e4..8a22f584bee 100644 --- a/src/gallium/auxiliary/util/u_tile.c +++ b/src/gallium/auxiliary/util/u_tile.c @@ -1452,7 +1452,7 @@ pipe_put_tile_z(struct pipe_transfer *pt, case 
PIPE_FORMAT_S8Z24_UNORM: { uint *pDest = (uint *) (map + y * pt->stride + x*4); - assert(pt->usage == PIPE_TRANSFER_READ_WRITE); + assert((pt->usage & PIPE_TRANSFER_READ_WRITE) == PIPE_TRANSFER_READ_WRITE); for (i = 0; i < h; i++) { for (j = 0; j < w; j++) { /* convert 32-bit Z to 24-bit Z, preserve stencil */ @@ -1479,7 +1479,7 @@ pipe_put_tile_z(struct pipe_transfer *pt, case PIPE_FORMAT_Z24S8_UNORM: { uint *pDest = (uint *) (map + y * pt->stride + x*4); - assert(pt->usage == PIPE_TRANSFER_READ_WRITE); + assert((pt->usage & PIPE_TRANSFER_READ_WRITE) == PIPE_TRANSFER_READ_WRITE); for (i = 0; i < h; i++) { for (j = 0; j < w; j++) { /* convert 32-bit Z to 24-bit Z, preserve stencil */ diff --git a/src/gallium/auxiliary/vl/Makefile b/src/gallium/auxiliary/vl/Makefile new file mode 100644 index 00000000000..4314c1e8d69 --- /dev/null +++ b/src/gallium/auxiliary/vl/Makefile @@ -0,0 +1,13 @@ +TOP = ../../../.. +include $(TOP)/configs/current + +LIBNAME = vl + +C_SOURCES = \ + vl_bitstream_parser.c \ + vl_mpeg12_mc_renderer.c \ + vl_compositor.c \ + vl_csc.c \ + vl_shader_build.c + +include ../../Makefile.template diff --git a/src/gallium/auxiliary/vl/SConscript b/src/gallium/auxiliary/vl/SConscript new file mode 100644 index 00000000000..aed69f5efed --- /dev/null +++ b/src/gallium/auxiliary/vl/SConscript @@ -0,0 +1,13 @@ +Import('*') + +vl = env.ConvenienceLibrary( + target = 'vl', + source = [ + 'vl_bitstream_parser.c', + 'vl_mpeg12_mc_renderer.c', + 'vl_compositor.c', + 'vl_csc.c', + 'vl_shader_build.c', + ]) + +auxiliaries.insert(0, vl) diff --git a/src/gallium/auxiliary/vl/vl_bitstream_parser.c b/src/gallium/auxiliary/vl/vl_bitstream_parser.c new file mode 100644 index 00000000000..3193ea5f41c --- /dev/null +++ b/src/gallium/auxiliary/vl/vl_bitstream_parser.c @@ -0,0 +1,167 @@ +/************************************************************************** + * + * Copyright 2009 Younes Manton. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ + +#include "vl_bitstream_parser.h" +#include <assert.h> +#include <limits.h> +#include <util/u_memory.h> + +static unsigned +grab_bits(unsigned cursor, unsigned how_many_bits, unsigned bitstream_elt) +{ + unsigned excess_bits = sizeof(unsigned) * CHAR_BIT - how_many_bits - cursor; + + assert(cursor < sizeof(unsigned) * CHAR_BIT); + assert(how_many_bits > 0 && how_many_bits <= sizeof(unsigned) * CHAR_BIT); + assert(cursor + how_many_bits <= sizeof(unsigned) * CHAR_BIT); + + return (bitstream_elt << excess_bits) >> (excess_bits + cursor); +} + +static unsigned +show_bits(unsigned cursor, unsigned how_many_bits, const unsigned *bitstream) +{ + unsigned cur_int = cursor / (sizeof(unsigned) * CHAR_BIT); + unsigned cur_bit = cursor % (sizeof(unsigned) * CHAR_BIT); + + assert(bitstream); + + if (cur_bit + how_many_bits > sizeof(unsigned) * CHAR_BIT) { + unsigned lower = grab_bits(cur_bit, sizeof(unsigned) * CHAR_BIT - cur_bit, + bitstream[cur_int]); + unsigned upper = grab_bits(0, cur_bit + how_many_bits - sizeof(unsigned) * CHAR_BIT, + bitstream[cur_int + 1]); + return lower | upper << (sizeof(unsigned) * CHAR_BIT - cur_bit); + } + else + return grab_bits(cur_bit, how_many_bits, bitstream[cur_int]); +} + +bool vl_bitstream_parser_init(struct vl_bitstream_parser *parser, + unsigned num_bitstreams, + const void **bitstreams, + const unsigned *sizes) +{ + assert(parser); + assert(num_bitstreams); + assert(bitstreams); + assert(sizes); + + parser->num_bitstreams = num_bitstreams; + parser->bitstreams = (const unsigned**)bitstreams; + parser->sizes = sizes; + parser->cur_bitstream = 0; + parser->cursor = 0; + + return true; +} + +void vl_bitstream_parser_cleanup(struct vl_bitstream_parser *parser) +{ + assert(parser); +} + +unsigned +vl_bitstream_parser_get_bits(struct vl_bitstream_parser *parser, + unsigned how_many_bits) +{ + unsigned bits; + + assert(parser); + + bits = vl_bitstream_parser_show_bits(parser, how_many_bits); + + vl_bitstream_parser_forward(parser, how_many_bits); + + return bits; +} + +unsigned +vl_bitstream_parser_show_bits(struct vl_bitstream_parser *parser, + unsigned how_many_bits) +{ + unsigned bits = 0; + unsigned shift = 0; + unsigned cursor; + unsigned cur_bitstream; + + assert(parser); + + cursor = parser->cursor; + cur_bitstream = parser->cur_bitstream; + + while (1) { + unsigned bits_left = parser->sizes[cur_bitstream] * CHAR_BIT - cursor; + unsigned bits_to_show = how_many_bits > bits_left ? 
bits_left : how_many_bits; + + bits |= show_bits(cursor, bits_to_show, + parser->bitstreams[cur_bitstream]) << shift; + + if (how_many_bits > bits_to_show) { + how_many_bits -= bits_to_show; + cursor = 0; + ++cur_bitstream; + shift += bits_to_show; + } + else + break; + } + + return bits; +} + +void vl_bitstream_parser_forward(struct vl_bitstream_parser *parser, + unsigned how_many_bits) +{ + assert(parser); + assert(how_many_bits); + + parser->cursor += how_many_bits; + + while (parser->cursor > parser->sizes[parser->cur_bitstream] * CHAR_BIT) { + parser->cursor -= parser->sizes[parser->cur_bitstream++] * CHAR_BIT; + assert(parser->cur_bitstream < parser->num_bitstreams); + } +} + +void vl_bitstream_parser_rewind(struct vl_bitstream_parser *parser, + unsigned how_many_bits) +{ + signed c; + + assert(parser); + assert(how_many_bits); + + c = parser->cursor - how_many_bits; + + while (c < 0) { + c += parser->sizes[parser->cur_bitstream--] * CHAR_BIT; + assert(parser->cur_bitstream < parser->num_bitstreams); + } + + parser->cursor = (unsigned)c; +} diff --git a/src/gallium/auxiliary/vl/vl_bitstream_parser.h b/src/gallium/auxiliary/vl/vl_bitstream_parser.h new file mode 100644 index 00000000000..30ec743fa75 --- /dev/null +++ b/src/gallium/auxiliary/vl/vl_bitstream_parser.h @@ -0,0 +1,63 @@ +/************************************************************************** + * + * Copyright 2009 Younes Manton. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ + +#ifndef vl_bitstream_parser_h +#define vl_bitstream_parser_h + +#include "pipe/p_compiler.h" + +struct vl_bitstream_parser +{ + unsigned num_bitstreams; + const unsigned **bitstreams; + const unsigned *sizes; + unsigned cur_bitstream; + unsigned cursor; +}; + +bool vl_bitstream_parser_init(struct vl_bitstream_parser *parser, + unsigned num_bitstreams, + const void **bitstreams, + const unsigned *sizes); + +void vl_bitstream_parser_cleanup(struct vl_bitstream_parser *parser); + +unsigned +vl_bitstream_parser_get_bits(struct vl_bitstream_parser *parser, + unsigned how_many_bits); + +unsigned +vl_bitstream_parser_show_bits(struct vl_bitstream_parser *parser, + unsigned how_many_bits); + +void vl_bitstream_parser_forward(struct vl_bitstream_parser *parser, + unsigned how_many_bits); + +void vl_bitstream_parser_rewind(struct vl_bitstream_parser *parser, + unsigned how_many_bits); + +#endif /* vl_bitstream_parser_h */ diff --git a/src/gallium/auxiliary/vl/vl_compositor.c b/src/gallium/auxiliary/vl/vl_compositor.c new file mode 100644 index 00000000000..b36dbeb2088 --- /dev/null +++ b/src/gallium/auxiliary/vl/vl_compositor.c @@ -0,0 +1,532 @@ +/************************************************************************** + * + * Copyright 2009 Younes Manton. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include "vl_compositor.h" +#include <assert.h> +#include <pipe/p_context.h> +#include <pipe/p_inlines.h> +#include <tgsi/tgsi_parse.h> +#include <tgsi/tgsi_build.h> +#include <util/u_memory.h> +#include "vl_csc.h" +#include "vl_shader_build.h" + +struct vertex2f +{ + float x, y; +}; + +struct vertex4f +{ + float x, y, z, w; +}; + +struct vertex_shader_consts +{ + struct vertex4f dst_scale; + struct vertex4f dst_trans; + struct vertex4f src_scale; + struct vertex4f src_trans; +}; + +struct fragment_shader_consts +{ + float matrix[16]; +}; + +/* + * Represents 2 triangles in a strip in normalized coords. + * Used to render the surface onto the frame buffer. + */ +static const struct vertex2f surface_verts[4] = +{ + {0.0f, 0.0f}, + {0.0f, 1.0f}, + {1.0f, 0.0f}, + {1.0f, 1.0f} +}; + +/* + * Represents texcoords for the above. We can use the position values directly. + * TODO: Duplicate these in the shader, no need to create a buffer. 
+ */ +static const struct vertex2f *surface_texcoords = surface_verts; + +static void +create_vert_shader(struct vl_compositor *c) +{ + const unsigned max_tokens = 50; + + struct pipe_shader_state vs; + struct tgsi_token *tokens; + struct tgsi_header *header; + + struct tgsi_full_declaration decl; + struct tgsi_full_instruction inst; + + unsigned ti; + + unsigned i; + + assert(c); + + tokens = (struct tgsi_token*)MALLOC(max_tokens * sizeof(struct tgsi_token)); + *(struct tgsi_version*)&tokens[0] = tgsi_build_version(); + header = (struct tgsi_header*)&tokens[1]; + *header = tgsi_build_header(); + *(struct tgsi_processor*)&tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_VERTEX, header); + + ti = 3; + + /* + * decl i0 ; Vertex pos + * decl i1 ; Vertex texcoords + */ + for (i = 0; i < 2; i++) { + decl = vl_decl_input(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + } + + /* + * decl c0 ; Scaling vector to scale vertex pos rect to destination size + * decl c1 ; Translation vector to move vertex pos rect into position + * decl c2 ; Scaling vector to scale texcoord rect to source size + * decl c3 ; Translation vector to move texcoord rect into position + */ + decl = vl_decl_constants(TGSI_SEMANTIC_GENERIC, 0, 0, 3); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + + /* + * decl o0 ; Vertex pos + * decl o1 ; Vertex texcoords + */ + for (i = 0; i < 2; i++) { + decl = vl_decl_output(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + } + + /* decl t0, t1 */ + decl = vl_decl_temps(0, 1); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + + /* + * mad o0, i0, c0, c1 ; Scale and translate unit output rect to destination size and pos + * mad o1, i1, c2, c3 ; Scale and translate unit texcoord rect to source size and pos + */ + for (i = 0; i < 2; ++i) { + inst = vl_inst4(TGSI_OPCODE_MAD, TGSI_FILE_OUTPUT, i, TGSI_FILE_INPUT, i, TGSI_FILE_CONSTANT, i * 2, TGSI_FILE_CONSTANT, i * 2 + 1); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + } + + /* end */ + inst = vl_end(); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + assert(ti <= max_tokens); + + vs.tokens = tokens; + c->vertex_shader = c->pipe->create_vs_state(c->pipe, &vs); + FREE(tokens); +} + +static void +create_frag_shader(struct vl_compositor *c) +{ + const unsigned max_tokens = 50; + + struct pipe_shader_state fs; + struct tgsi_token *tokens; + struct tgsi_header *header; + + struct tgsi_full_declaration decl; + struct tgsi_full_instruction inst; + + unsigned ti; + + unsigned i; + + assert(c); + + tokens = (struct tgsi_token*)MALLOC(max_tokens * sizeof(struct tgsi_token)); + *(struct tgsi_version*)&tokens[0] = tgsi_build_version(); + header = (struct tgsi_header*)&tokens[1]; + *header = tgsi_build_header(); + *(struct tgsi_processor*)&tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_FRAGMENT, header); + + ti = 3; + + /* decl i0 ; Texcoords for s0 */ + decl = vl_decl_interpolated_input(TGSI_SEMANTIC_GENERIC, 1, 0, 0, TGSI_INTERPOLATE_LINEAR); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + + /* + * decl c0-c3 ; CSC matrix c0-c3 + */ + decl = vl_decl_constants(TGSI_SEMANTIC_GENERIC, 0, 0, 3); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + + /* 
decl o0 ; Fragment color */ + decl = vl_decl_output(TGSI_SEMANTIC_COLOR, 0, 0, 0); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + + /* decl t0 */ + decl = vl_decl_temps(0, 0); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + + /* decl s0 ; Sampler for tex containing picture to display */ + decl = vl_decl_samplers(0, 0); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + + /* tex2d t0, i0, s0 ; Read src pixel */ + inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_INPUT, 0, TGSI_FILE_SAMPLER, 0); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + /* + * dp4 o0.x, t0, c0 ; Multiply pixel by the color conversion matrix + * dp4 o0.y, t0, c1 + * dp4 o0.z, t0, c2 + * dp4 o0.w, t0, c3 + */ + for (i = 0; i < 4; ++i) { + inst = vl_inst3(TGSI_OPCODE_DP4, TGSI_FILE_OUTPUT, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_CONSTANT, i); + inst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_X << i; + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + } + + /* end */ + inst = vl_end(); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + assert(ti <= max_tokens); + + fs.tokens = tokens; + c->fragment_shader = c->pipe->create_fs_state(c->pipe, &fs); + FREE(tokens); +} + +static bool +init_pipe_state(struct vl_compositor *c) +{ + struct pipe_sampler_state sampler; + + assert(c); + + c->fb_state.nr_cbufs = 1; + c->fb_state.zsbuf = NULL; + + sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE; + sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE; + sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE; + sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR; + sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE; + sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR; + sampler.compare_mode = PIPE_TEX_COMPARE_NONE; + sampler.compare_func = PIPE_FUNC_ALWAYS; + sampler.normalized_coords = 1; + /*sampler.prefilter = ;*/ + /*sampler.lod_bias = ;*/ + /*sampler.min_lod = ;*/ + /*sampler.max_lod = ;*/ + /*sampler.border_color[i] = ;*/ + /*sampler.max_anisotropy = ;*/ + c->sampler = c->pipe->create_sampler_state(c->pipe, &sampler); + + return true; +} + +static void cleanup_pipe_state(struct vl_compositor *c) +{ + assert(c); + + c->pipe->delete_sampler_state(c->pipe, c->sampler); +} + +static bool +init_shaders(struct vl_compositor *c) +{ + assert(c); + + create_vert_shader(c); + create_frag_shader(c); + + return true; +} + +static void cleanup_shaders(struct vl_compositor *c) +{ + assert(c); + + c->pipe->delete_vs_state(c->pipe, c->vertex_shader); + c->pipe->delete_fs_state(c->pipe, c->fragment_shader); +} + +static bool +init_buffers(struct vl_compositor *c) +{ + struct fragment_shader_consts fsc; + + assert(c); + + /* + * Create our vertex buffer and vertex buffer element + * VB contains 4 vertices that render a quad covering the entire window + * to display a rendered surface + * Quad is rendered as a tri strip + */ + c->vertex_bufs[0].stride = sizeof(struct vertex2f); + c->vertex_bufs[0].max_index = 3; + c->vertex_bufs[0].buffer_offset = 0; + c->vertex_bufs[0].buffer = pipe_buffer_create + ( + c->pipe->screen, + 1, + PIPE_BUFFER_USAGE_VERTEX, + sizeof(struct vertex2f) * 4 + ); + + memcpy + ( + pipe_buffer_map(c->pipe->screen, c->vertex_bufs[0].buffer, PIPE_BUFFER_USAGE_CPU_WRITE), + surface_verts, + sizeof(struct vertex2f) * 4 + ); + + pipe_buffer_unmap(c->pipe->screen, c->vertex_bufs[0].buffer); + + c->vertex_elems[0].src_offset = 
0; + c->vertex_elems[0].vertex_buffer_index = 0; + c->vertex_elems[0].nr_components = 2; + c->vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT; + + /* + * Create our texcoord buffer and texcoord buffer element + * Texcoord buffer contains the TCs for mapping the rendered surface to the 4 vertices + */ + c->vertex_bufs[1].stride = sizeof(struct vertex2f); + c->vertex_bufs[1].max_index = 3; + c->vertex_bufs[1].buffer_offset = 0; + c->vertex_bufs[1].buffer = pipe_buffer_create + ( + c->pipe->screen, + 1, + PIPE_BUFFER_USAGE_VERTEX, + sizeof(struct vertex2f) * 4 + ); + + memcpy + ( + pipe_buffer_map(c->pipe->screen, c->vertex_bufs[1].buffer, PIPE_BUFFER_USAGE_CPU_WRITE), + surface_texcoords, + sizeof(struct vertex2f) * 4 + ); + + pipe_buffer_unmap(c->pipe->screen, c->vertex_bufs[1].buffer); + + c->vertex_elems[1].src_offset = 0; + c->vertex_elems[1].vertex_buffer_index = 1; + c->vertex_elems[1].nr_components = 2; + c->vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT; + + /* + * Create our vertex shader's constant buffer + * Const buffer contains scaling and translation vectors + */ + c->vs_const_buf.buffer = pipe_buffer_create + ( + c->pipe->screen, + 1, + PIPE_BUFFER_USAGE_CONSTANT | PIPE_BUFFER_USAGE_DISCARD, + sizeof(struct vertex_shader_consts) + ); + + /* + * Create our fragment shader's constant buffer + * Const buffer contains the color conversion matrix and bias vectors + */ + c->fs_const_buf.buffer = pipe_buffer_create + ( + c->pipe->screen, + 1, + PIPE_BUFFER_USAGE_CONSTANT, + sizeof(struct fragment_shader_consts) + ); + + vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY, NULL, true, fsc.matrix); + + vl_compositor_set_csc_matrix(c, fsc.matrix); + + return true; +} + +static void +cleanup_buffers(struct vl_compositor *c) +{ + unsigned i; + + assert(c); + + for (i = 0; i < 2; ++i) + pipe_buffer_reference(&c->vertex_bufs[i].buffer, NULL); + + pipe_buffer_reference(&c->vs_const_buf.buffer, NULL); + pipe_buffer_reference(&c->fs_const_buf.buffer, NULL); +} + +bool vl_compositor_init(struct vl_compositor *compositor, struct pipe_context *pipe) +{ + assert(compositor); + + memset(compositor, 0, sizeof(struct vl_compositor)); + + compositor->pipe = pipe; + + if (!init_pipe_state(compositor)) + return false; + if (!init_shaders(compositor)) { + cleanup_pipe_state(compositor); + return false; + } + if (!init_buffers(compositor)) { + cleanup_shaders(compositor); + cleanup_pipe_state(compositor); + return false; + } + + return true; +} + +void vl_compositor_cleanup(struct vl_compositor *compositor) +{ + assert(compositor); + + cleanup_buffers(compositor); + cleanup_shaders(compositor); + cleanup_pipe_state(compositor); +} + +void vl_compositor_render(struct vl_compositor *compositor, + /*struct pipe_texture *backround, + struct pipe_video_rect *backround_area,*/ + struct pipe_texture *src_surface, + enum pipe_mpeg12_picture_type picture_type, + /*unsigned num_past_surfaces, + struct pipe_texture *past_surfaces, + unsigned num_future_surfaces, + struct pipe_texture *future_surfaces,*/ + struct pipe_video_rect *src_area, + struct pipe_texture *dst_surface, + struct pipe_video_rect *dst_area, + /*unsigned num_layers, + struct pipe_texture *layers, + struct pipe_video_rect *layer_src_areas, + struct pipe_video_rect *layer_dst_areas*/ + struct pipe_fence_handle **fence) +{ + struct vertex_shader_consts *vs_consts; + + assert(compositor); + assert(src_surface); + assert(src_area); + assert(dst_surface); + assert(dst_area); + assert(picture_type == PIPE_MPEG12_PICTURE_TYPE_FRAME); + + 
compositor->fb_state.width = dst_surface->width[0]; + compositor->fb_state.height = dst_surface->height[0]; + compositor->fb_state.cbufs[0] = compositor->pipe->screen->get_tex_surface + ( + compositor->pipe->screen, + dst_surface, + 0, 0, 0, PIPE_BUFFER_USAGE_GPU_READ | PIPE_BUFFER_USAGE_GPU_WRITE + ); + + compositor->viewport.scale[0] = compositor->fb_state.width; + compositor->viewport.scale[1] = compositor->fb_state.height; + compositor->viewport.scale[2] = 1; + compositor->viewport.scale[3] = 1; + compositor->viewport.translate[0] = 0; + compositor->viewport.translate[1] = 0; + compositor->viewport.translate[2] = 0; + compositor->viewport.translate[3] = 0; + + compositor->pipe->set_framebuffer_state(compositor->pipe, &compositor->fb_state); + compositor->pipe->set_viewport_state(compositor->pipe, &compositor->viewport); + compositor->pipe->bind_sampler_states(compositor->pipe, 1, &compositor->sampler); + compositor->pipe->set_sampler_textures(compositor->pipe, 1, &src_surface); + compositor->pipe->bind_vs_state(compositor->pipe, compositor->vertex_shader); + compositor->pipe->bind_fs_state(compositor->pipe, compositor->fragment_shader); + compositor->pipe->set_vertex_buffers(compositor->pipe, 2, compositor->vertex_bufs); + compositor->pipe->set_vertex_elements(compositor->pipe, 2, compositor->vertex_elems); + compositor->pipe->set_constant_buffer(compositor->pipe, PIPE_SHADER_VERTEX, 0, &compositor->vs_const_buf); + compositor->pipe->set_constant_buffer(compositor->pipe, PIPE_SHADER_FRAGMENT, 0, &compositor->fs_const_buf); + + vs_consts = pipe_buffer_map + ( + compositor->pipe->screen, + compositor->vs_const_buf.buffer, + PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD + ); + + vs_consts->dst_scale.x = dst_area->w / (float)compositor->fb_state.cbufs[0]->width; + vs_consts->dst_scale.y = dst_area->h / (float)compositor->fb_state.cbufs[0]->height; + vs_consts->dst_scale.z = 1; + vs_consts->dst_scale.w = 1; + vs_consts->dst_trans.x = dst_area->x / (float)compositor->fb_state.cbufs[0]->width; + vs_consts->dst_trans.y = dst_area->y / (float)compositor->fb_state.cbufs[0]->height; + vs_consts->dst_trans.z = 0; + vs_consts->dst_trans.w = 0; + + vs_consts->src_scale.x = src_area->w / (float)src_surface->width[0]; + vs_consts->src_scale.y = src_area->h / (float)src_surface->height[0]; + vs_consts->src_scale.z = 1; + vs_consts->src_scale.w = 1; + vs_consts->src_trans.x = src_area->x / (float)src_surface->width[0]; + vs_consts->src_trans.y = src_area->y / (float)src_surface->height[0]; + vs_consts->src_trans.z = 0; + vs_consts->src_trans.w = 0; + + pipe_buffer_unmap(compositor->pipe->screen, compositor->vs_const_buf.buffer); + + compositor->pipe->draw_arrays(compositor->pipe, PIPE_PRIM_TRIANGLE_STRIP, 0, 4); + compositor->pipe->flush(compositor->pipe, PIPE_FLUSH_RENDER_CACHE, fence); + + pipe_surface_reference(&compositor->fb_state.cbufs[0], NULL); +} + +void vl_compositor_set_csc_matrix(struct vl_compositor *compositor, const float *mat) +{ + assert(compositor); + + memcpy + ( + pipe_buffer_map(compositor->pipe->screen, compositor->fs_const_buf.buffer, PIPE_BUFFER_USAGE_CPU_WRITE), + mat, + sizeof(struct fragment_shader_consts) + ); + + pipe_buffer_unmap(compositor->pipe->screen, compositor->fs_const_buf.buffer); +} diff --git a/src/gallium/auxiliary/vl/vl_compositor.h b/src/gallium/auxiliary/vl/vl_compositor.h new file mode 100644 index 00000000000..17e2afd3539 --- /dev/null +++ b/src/gallium/auxiliary/vl/vl_compositor.h @@ -0,0 +1,76 @@ 
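For orientation, the following is a minimal, untested sketch of how the compositor API declared below is intended to be driven from a state tracker: initialize once, optionally install a color conversion matrix, render each decoded frame into the destination surface, and clean up. Everything outside the vl_* calls (the pipe context, the source and destination textures, the sizes and the helper name) is assumed to be supplied by the caller and is purely illustrative.

#include "vl_compositor.h"
#include "vl_csc.h"

/* Hypothetical helper: pipe, src_tex (decoded YCbCr frame) and dst_tex
 * (displayable RGB surface) are assumed to already exist. */
static bool
present_frame(struct pipe_context *pipe,
              struct pipe_texture *src_tex, unsigned src_w, unsigned src_h,
              struct pipe_texture *dst_tex, unsigned dst_w, unsigned dst_h)
{
   struct vl_compositor compositor;
   struct pipe_video_rect src_area, dst_area;
   struct pipe_fence_handle *fence = NULL;
   float csc[16];

   if (!vl_compositor_init(&compositor, pipe))
      return false;

   /* Replace the identity matrix installed by vl_compositor_init()
    * with a BT.601 YCbCr -> full-range RGB conversion. */
   vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_BT_601, NULL, true, csc);
   vl_compositor_set_csc_matrix(&compositor, csc);

   src_area.x = 0; src_area.y = 0; src_area.w = src_w; src_area.h = src_h;
   dst_area.x = 0; dst_area.y = 0; dst_area.w = dst_w; dst_area.h = dst_h;

   /* Only frame pictures are accepted at this point (see the assert in
    * vl_compositor_render()). */
   vl_compositor_render(&compositor, src_tex, PIPE_MPEG12_PICTURE_TYPE_FRAME,
                        &src_area, dst_tex, &dst_area, &fence);

   vl_compositor_cleanup(&compositor);
   return true;
}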
+/************************************************************************** + * + * Copyright 2009 Younes Manton. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#ifndef vl_compositor_h +#define vl_compositor_h + +#include <pipe/p_compiler.h> +#include <pipe/p_state.h> +#include <pipe/p_video_state.h> + +struct pipe_context; +struct pipe_texture; + +struct vl_compositor +{ + struct pipe_context *pipe; + + struct pipe_framebuffer_state fb_state; + void *sampler; + void *vertex_shader; + void *fragment_shader; + struct pipe_viewport_state viewport; + struct pipe_vertex_buffer vertex_bufs[2]; + struct pipe_vertex_element vertex_elems[2]; + struct pipe_constant_buffer vs_const_buf, fs_const_buf; +}; + +bool vl_compositor_init(struct vl_compositor *compositor, struct pipe_context *pipe); + +void vl_compositor_cleanup(struct vl_compositor *compositor); + +void vl_compositor_render(struct vl_compositor *compositor, + /*struct pipe_texture *backround, + struct pipe_video_rect *backround_area,*/ + struct pipe_texture *src_surface, + enum pipe_mpeg12_picture_type picture_type, + /*unsigned num_past_surfaces, + struct pipe_texture *past_surfaces, + unsigned num_future_surfaces, + struct pipe_texture *future_surfaces,*/ + struct pipe_video_rect *src_area, + struct pipe_texture *dst_surface, + struct pipe_video_rect *dst_area, + /*unsigned num_layers, + struct pipe_texture *layers, + struct pipe_video_rect *layer_src_areas, + struct pipe_video_rect *layer_dst_areas,*/ + struct pipe_fence_handle **fence); + +void vl_compositor_set_csc_matrix(struct vl_compositor *compositor, const float *mat); + +#endif /* vl_compositor_h */ diff --git a/src/gallium/auxiliary/vl/vl_csc.c b/src/gallium/auxiliary/vl/vl_csc.c new file mode 100644 index 00000000000..5ecc43a5fa3 --- /dev/null +++ b/src/gallium/auxiliary/vl/vl_csc.c @@ -0,0 +1,206 @@ +/************************************************************************** + * + * Copyright 2009 Younes Manton. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include "vl_csc.h" +#include <util/u_math.h> +#include <util/u_debug.h> + +/* + * Color space conversion formulas + * + * To convert YCbCr to RGB, + * vec4 ycbcr, rgb + * mat44 csc + * rgb = csc * ycbcr + * + * To calculate the color space conversion matrix csc with ProcAmp adjustments, + * mat44 csc, cstd, procamp, bias + * csc = cstd * (procamp * bias) + * + * Where cstd is a matrix corresponding to one of the color standards (BT.601, BT.709, etc) + * adjusted for the kind of YCbCr -> RGB mapping wanted (1:1, full), + * bias is a matrix corresponding to the kind of YCbCr -> RGB mapping wanted (1:1, full) + * + * To calculate procamp, + * mat44 procamp, hue, saturation, brightness, contrast + * procamp = brightness * (saturation * (contrast * hue)) + * Alternatively, + * procamp = saturation * (brightness * (contrast * hue)) + * + * contrast + * [ c, 0, 0, 0] + * [ 0, c, 0, 0] + * [ 0, 0, c, 0] + * [ 0, 0, 0, 1] + * + * brightness + * [ 1, 0, 0, b] + * [ 0, 1, 0, 0] + * [ 0, 0, 1, 0] + * [ 0, 0, 0, 1] + * + * saturation + * [ 1, 0, 0, 0] + * [ 0, s, 0, 0] + * [ 0, 0, s, 0] + * [ 0, 0, 0, 1] + * + * hue + * [ 1, 0, 0, 0] + * [ 0, cos(h), sin(h), 0] + * [ 0, -sin(h), cos(h), 0] + * [ 0, 0, 0, 1] + * + * procamp + * [ c, 0, 0, b] + * [ 0, c*s*cos(h), c*s*sin(h), 0] + * [ 0, -c*s*sin(h), c*s*cos(h), 0] + * [ 0, 0, 0, 1] + * + * bias + * [ 1, 0, 0, ybias] + * [ 0, 1, 0, cbbias] + * [ 0, 0, 1, crbias] + * [ 0, 0, 0, 1] + * + * csc + * [ c*cstd[ 0], c*cstd[ 1]*s*cos(h) - c*cstd[ 2]*s*sin(h), c*cstd[ 2]*s*cos(h) + c*cstd[ 1]*s*sin(h), cstd[ 3] + cstd[ 0]*(b + c*ybias) + cstd[ 1]*(c*cbbias*s*cos(h) + c*crbias*s*sin(h)) + cstd[ 2]*(c*crbias*s*cos(h) - c*cbbias*s*sin(h))] + * [ c*cstd[ 4], c*cstd[ 5]*s*cos(h) - c*cstd[ 6]*s*sin(h), c*cstd[ 6]*s*cos(h) + c*cstd[ 5]*s*sin(h), cstd[ 7] + cstd[ 4]*(b + c*ybias) + cstd[ 5]*(c*cbbias*s*cos(h) + c*crbias*s*sin(h)) + cstd[ 6]*(c*crbias*s*cos(h) - c*cbbias*s*sin(h))] + * [ c*cstd[ 8], c*cstd[ 9]*s*cos(h) - c*cstd[10]*s*sin(h), c*cstd[10]*s*cos(h) + c*cstd[ 9]*s*sin(h), cstd[11] + cstd[ 8]*(b + c*ybias) + cstd[ 9]*(c*cbbias*s*cos(h) + c*crbias*s*sin(h)) + cstd[10]*(c*crbias*s*cos(h) - c*cbbias*s*sin(h))] + * [ c*cstd[12], c*cstd[13]*s*cos(h) - c*cstd[14]*s*sin(h), c*cstd[14]*s*cos(h) + c*cstd[13]*s*sin(h), cstd[15] + cstd[12]*(b + c*ybias) + 
cstd[13]*(c*cbbias*s*cos(h) + c*crbias*s*sin(h)) + cstd[14]*(c*crbias*s*cos(h) - c*cbbias*s*sin(h))] + */ + +/* + * Converts ITU-R BT.601 YCbCr pixels to RGB pixels where: + * Y is in [16,235], Cb and Cr are in [16,240] + * R, G, and B are in [16,235] + */ +static const float bt_601[16] = +{ + 1.0f, 0.0f, 1.371f, 0.0f, + 1.0f, -0.336f, -0.698f, 0.0f, + 1.0f, 1.732f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 1.0f +}; + +/* + * Converts ITU-R BT.601 YCbCr pixels to RGB pixels where: + * Y is in [16,235], Cb and Cr are in [16,240] + * R, G, and B are in [0,255] + */ +static const float bt_601_full[16] = +{ + 1.164f, 0.0f, 1.596f, 0.0f, + 1.164f, -0.391f, -0.813f, 0.0f, + 1.164f, 2.018f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 1.0f +}; + +/* + * Converts ITU-R BT.709 YCbCr pixels to RGB pixels where: + * Y is in [16,235], Cb and Cr are in [16,240] + * R, G, and B are in [16,235] + */ +static const float bt_709[16] = +{ + 1.0f, 0.0f, 1.540f, 0.0f, + 1.0f, -0.183f, -0.459f, 0.0f, + 1.0f, 1.816f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 1.0f +}; + +/* + * Converts ITU-R BT.709 YCbCr pixels to RGB pixels where: + * Y is in [16,235], Cb and Cr are in [16,240] + * R, G, and B are in [0,255] + */ +static const float bt_709_full[16] = +{ + 1.164f, 0.0f, 1.793f, 0.0f, + 1.164f, -0.213f, -0.534f, 0.0f, + 1.164f, 2.115f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 1.0f +}; + +static const float identity[16] = +{ + 1.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 1.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 1.0f +}; + +void vl_csc_get_matrix(enum VL_CSC_COLOR_STANDARD cs, + struct vl_procamp *procamp, + bool full_range, + float *matrix) +{ + float ybias = full_range ? -16.0f/255.0f : 0.0f; + float cbbias = -128.0f/255.0f; + float crbias = -128.0f/255.0f; + float c = procamp ? procamp->contrast : 1.0f; + float s = procamp ? procamp->saturation : 1.0f; + float b = procamp ? procamp->brightness : 0.0f; + float h = procamp ? procamp->hue : 0.0f; + const float *cstd; + + assert(matrix); + + switch (cs) { + case VL_CSC_COLOR_STANDARD_BT_601: + cstd = full_range ? &bt_601_full[0] : &bt_601[0]; + break; + case VL_CSC_COLOR_STANDARD_BT_709: + cstd = full_range ? 
&bt_709_full[0] : &bt_709[0]; + break; + case VL_CSC_COLOR_STANDARD_IDENTITY: + default: + assert(cs == VL_CSC_COLOR_STANDARD_IDENTITY); + memcpy(matrix, &identity[0], sizeof(float) * 16); + return; + } + + matrix[ 0] = c*cstd[ 0]; + matrix[ 1] = c*cstd[ 1]*s*cosf(h) - c*cstd[ 2]*s*sinf(h); + matrix[ 2] = c*cstd[ 2]*s*cosf(h) + c*cstd[ 1]*s*sinf(h); + matrix[ 3] = cstd[ 3] + cstd[ 0]*(b + c*ybias) + cstd[ 1]*(c*cbbias*s*cosf(h) + c*crbias*s*sinf(h)) + cstd[ 2]*(c*crbias*s*cosf(h) - c*cbbias*s*sinf(h)); + + matrix[ 4] = c*cstd[ 4]; + matrix[ 5] = c*cstd[ 5]*s*cosf(h) - c*cstd[ 6]*s*sinf(h); + matrix[ 6] = c*cstd[ 6]*s*cosf(h) + c*cstd[ 5]*s*sinf(h); + matrix[ 7] = cstd[ 7] + cstd[ 4]*(b + c*ybias) + cstd[ 5]*(c*cbbias*s*cosf(h) + c*crbias*s*sinf(h)) + cstd[ 6]*(c*crbias*s*cosf(h) - c*cbbias*s*sinf(h)); + + matrix[ 8] = c*cstd[ 8]; + matrix[ 9] = c*cstd[ 9]*s*cosf(h) - c*cstd[10]*s*sinf(h); + matrix[10] = c*cstd[10]*s*cosf(h) + c*cstd[ 9]*s*sinf(h); + matrix[11] = cstd[11] + cstd[ 8]*(b + c*ybias) + cstd[ 9]*(c*cbbias*s*cosf(h) + c*crbias*s*sinf(h)) + cstd[10]*(c*crbias*s*cosf(h) - c*cbbias*s*sinf(h)); + + matrix[12] = c*cstd[12]; + matrix[13] = c*cstd[13]*s*cosf(h) - c*cstd[14]*s*sinf(h); + matrix[14] = c*cstd[14]*s*cosf(h) + c*cstd[13]*s*sinf(h); + matrix[15] = cstd[15] + cstd[12]*(b + c*ybias) + cstd[13]*(c*cbbias*s*cosf(h) + c*crbias*s*sinf(h)) + cstd[14]*(c*crbias*s*cosf(h) - c*cbbias*s*sinf(h)); +} diff --git a/src/gallium/auxiliary/vl/vl_csc.h b/src/gallium/auxiliary/vl/vl_csc.h new file mode 100644 index 00000000000..722ca35f339 --- /dev/null +++ b/src/gallium/auxiliary/vl/vl_csc.h @@ -0,0 +1,53 @@ +/************************************************************************** + * + * Copyright 2009 Younes Manton. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
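To make the ProcAmp composition described in the vl_csc.c comment concrete, here is a small, untested sketch of how a caller might rebuild the conversion matrix with non-neutral adjustments and hand it to the compositor. The compositor pointer and the particular values are illustrative; neutral settings are contrast = 1, saturation = 1, brightness = 0 and hue = 0 (radians), which is what vl_csc_get_matrix() falls back to when no vl_procamp is given.

#include "vl_csc.h"
#include "vl_compositor.h"

/* Hypothetical helper: 'compositor' is assumed to have been set up with
 * vl_compositor_init() already. */
static void
apply_procamp(struct vl_compositor *compositor)
{
   struct vl_procamp procamp;
   float csc[16];

   procamp.contrast   = 1.1f;  /* c: scales Y, Cb and Cr            */
   procamp.saturation = 0.8f;  /* s: scales Cb and Cr toward grey   */
   procamp.brightness = 0.1f;  /* b: offset added to Y (0..1 scale) */
   procamp.hue        = 0.0f;  /* h: rotates Cb/Cr, in radians      */

   /* csc = cstd * (procamp * bias), as derived in the comment above */
   vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_BT_601, &procamp, true, csc);
   vl_compositor_set_csc_matrix(compositor, csc);
}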
+ * + **************************************************************************/ + +#ifndef vl_csc_h +#define vl_csc_h + +#include <pipe/p_compiler.h> + +struct vl_procamp +{ + float brightness; + float contrast; + float saturation; + float hue; +}; + +enum VL_CSC_COLOR_STANDARD +{ + VL_CSC_COLOR_STANDARD_IDENTITY, + VL_CSC_COLOR_STANDARD_BT_601, + VL_CSC_COLOR_STANDARD_BT_709 +}; + +void vl_csc_get_matrix(enum VL_CSC_COLOR_STANDARD cs, + struct vl_procamp *procamp, + bool full_range, + float *matrix); + +#endif /* vl_csc_h */ diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.c b/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.c new file mode 100644 index 00000000000..6b3614821cc --- /dev/null +++ b/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.c @@ -0,0 +1,1654 @@ +/************************************************************************** + * + * Copyright 2009 Younes Manton. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include "vl_mpeg12_mc_renderer.h" +#include <assert.h> +#include <pipe/p_context.h> +#include <pipe/p_inlines.h> +#include <util/u_math.h> +#include <util/u_memory.h> +#include <tgsi/tgsi_parse.h> +#include <tgsi/tgsi_build.h> +#include "vl_shader_build.h" + +#define DEFAULT_BUF_ALIGNMENT 1 +#define MACROBLOCK_WIDTH 16 +#define MACROBLOCK_HEIGHT 16 +#define BLOCK_WIDTH 8 +#define BLOCK_HEIGHT 8 +#define ZERO_BLOCK_NIL -1.0f +#define ZERO_BLOCK_IS_NIL(zb) ((zb).x < 0.0f) + +struct vertex2f +{ + float x, y; +}; + +struct vertex4f +{ + float x, y, z, w; +}; + +struct vertex_shader_consts +{ + struct vertex4f denorm; +}; + +struct fragment_shader_consts +{ + struct vertex4f multiplier; + struct vertex4f div; +}; + +/* + * Muliplier renormalizes block samples from 16 bits to 12 bits. + * Divider is used when calculating Y % 2 for choosing top or bottom + * field for P or B macroblocks. + * TODO: Use immediates. 
+ */ +static const struct fragment_shader_consts fs_consts = { + {32767.0f / 255.0f, 32767.0f / 255.0f, 32767.0f / 255.0f, 0.0f}, + {0.5f, 2.0f, 0.0f, 0.0f} +}; + +struct vert_stream_0 +{ + struct vertex2f pos; + struct vertex2f luma_tc; + struct vertex2f cb_tc; + struct vertex2f cr_tc; +}; + +enum MACROBLOCK_TYPE +{ + MACROBLOCK_TYPE_INTRA, + MACROBLOCK_TYPE_FWD_FRAME_PRED, + MACROBLOCK_TYPE_FWD_FIELD_PRED, + MACROBLOCK_TYPE_BKWD_FRAME_PRED, + MACROBLOCK_TYPE_BKWD_FIELD_PRED, + MACROBLOCK_TYPE_BI_FRAME_PRED, + MACROBLOCK_TYPE_BI_FIELD_PRED, + + NUM_MACROBLOCK_TYPES +}; + +static void +create_intra_vert_shader(struct vl_mpeg12_mc_renderer *r) +{ + const unsigned max_tokens = 50; + + struct pipe_shader_state vs; + struct tgsi_token *tokens; + struct tgsi_header *header; + + struct tgsi_full_declaration decl; + struct tgsi_full_instruction inst; + + unsigned ti; + + unsigned i; + + assert(r); + + tokens = (struct tgsi_token *) malloc(max_tokens * sizeof(struct tgsi_token)); + *(struct tgsi_version *) &tokens[0] = tgsi_build_version(); + header = (struct tgsi_header *) &tokens[1]; + *header = tgsi_build_header(); + *(struct tgsi_processor *) &tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_VERTEX, header); + + ti = 3; + + /* + * decl i0 ; Vertex pos + * decl i1 ; Luma texcoords + * decl i2 ; Chroma Cb texcoords + * decl i3 ; Chroma Cr texcoords + */ + for (i = 0; i < 4; i++) { + decl = vl_decl_input(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + } + + /* + * decl o0 ; Vertex pos + * decl o1 ; Luma texcoords + * decl o2 ; Chroma Cb texcoords + * decl o3 ; Chroma Cr texcoords + */ + for (i = 0; i < 4; i++) { + decl = vl_decl_output(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + } + + /* + * mov o0, i0 ; Move input vertex pos to output + * mov o1, i1 ; Move input luma texcoords to output + * mov o2, i2 ; Move input chroma Cb texcoords to output + * mov o3, i3 ; Move input chroma Cr texcoords to output + */ + for (i = 0; i < 4; ++i) { + inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_OUTPUT, i, TGSI_FILE_INPUT, i); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + } + + /* end */ + inst = vl_end(); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + assert(ti <= max_tokens); + + vs.tokens = tokens; + r->i_vs = r->pipe->create_vs_state(r->pipe, &vs); + free(tokens); +} + +static void +create_intra_frag_shader(struct vl_mpeg12_mc_renderer *r) +{ + const unsigned max_tokens = 100; + + struct pipe_shader_state fs; + struct tgsi_token *tokens; + struct tgsi_header *header; + + struct tgsi_full_declaration decl; + struct tgsi_full_instruction inst; + + unsigned ti; + + unsigned i; + + assert(r); + + tokens = (struct tgsi_token *) malloc(max_tokens * sizeof(struct tgsi_token)); + *(struct tgsi_version *) &tokens[0] = tgsi_build_version(); + header = (struct tgsi_header *) &tokens[1]; + *header = tgsi_build_header(); + *(struct tgsi_processor *) &tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_FRAGMENT, header); + + ti = 3; + + /* + * decl i0 ; Luma texcoords + * decl i1 ; Chroma Cb texcoords + * decl i2 ; Chroma Cr texcoords + */ + for (i = 0; i < 3; ++i) { + decl = vl_decl_interpolated_input(TGSI_SEMANTIC_GENERIC, i + 1, i, i, TGSI_INTERPOLATE_LINEAR); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + } 
+ + /* decl c0 ; Scaling factor, rescales 16-bit snorm to 9-bit snorm */ + decl = vl_decl_constants(TGSI_SEMANTIC_GENERIC, 0, 0, 0); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + + /* decl o0 ; Fragment color */ + decl = vl_decl_output(TGSI_SEMANTIC_COLOR, 0, 0, 0); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + + /* decl t0, t1 */ + decl = vl_decl_temps(0, 1); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + + /* + * decl s0 ; Sampler for luma texture + * decl s1 ; Sampler for chroma Cb texture + * decl s2 ; Sampler for chroma Cr texture + */ + for (i = 0; i < 3; ++i) { + decl = vl_decl_samplers(i, i); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + } + + /* + * tex2d t1, i0, s0 ; Read texel from luma texture + * mov t0.x, t1.x ; Move luma sample into .x component + * tex2d t1, i1, s1 ; Read texel from chroma Cb texture + * mov t0.y, t1.x ; Move Cb sample into .y component + * tex2d t1, i2, s2 ; Read texel from chroma Cr texture + * mov t0.z, t1.x ; Move Cr sample into .z component + */ + for (i = 0; i < 3; ++i) { + inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_INPUT, i, TGSI_FILE_SAMPLER, i); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 1); + inst.FullSrcRegisters[0].SrcRegister.SwizzleX = TGSI_SWIZZLE_X; + inst.FullSrcRegisters[0].SrcRegister.SwizzleY = TGSI_SWIZZLE_X; + inst.FullSrcRegisters[0].SrcRegister.SwizzleZ = TGSI_SWIZZLE_X; + inst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_X << i; + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + } + + /* mul o0, t0, c0 ; Rescale texel to correct range */ + inst = vl_inst3(TGSI_OPCODE_MUL, TGSI_FILE_OUTPUT, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_CONSTANT, 0); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + /* end */ + inst = vl_end(); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + assert(ti <= max_tokens); + + fs.tokens = tokens; + r->i_fs = r->pipe->create_fs_state(r->pipe, &fs); + free(tokens); +} + +static void +create_frame_pred_vert_shader(struct vl_mpeg12_mc_renderer *r) +{ + const unsigned max_tokens = 100; + + struct pipe_shader_state vs; + struct tgsi_token *tokens; + struct tgsi_header *header; + + struct tgsi_full_declaration decl; + struct tgsi_full_instruction inst; + + unsigned ti; + + unsigned i; + + assert(r); + + tokens = (struct tgsi_token *) malloc(max_tokens * sizeof(struct tgsi_token)); + *(struct tgsi_version *) &tokens[0] = tgsi_build_version(); + header = (struct tgsi_header *) &tokens[1]; + *header = tgsi_build_header(); + *(struct tgsi_processor *) &tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_VERTEX, header); + + ti = 3; + + /* + * decl i0 ; Vertex pos + * decl i1 ; Luma texcoords + * decl i2 ; Chroma Cb texcoords + * decl i3 ; Chroma Cr texcoords + * decl i4 ; Ref surface top field texcoords + * decl i5 ; Ref surface bottom field texcoords (unused, packed in the same stream) + */ + for (i = 0; i < 6; i++) { + decl = vl_decl_input(i == 0 ? 
TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + } + + /* + * decl o0 ; Vertex pos + * decl o1 ; Luma texcoords + * decl o2 ; Chroma Cb texcoords + * decl o3 ; Chroma Cr texcoords + * decl o4 ; Ref macroblock texcoords + */ + for (i = 0; i < 5; i++) { + decl = vl_decl_output(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + } + + /* + * mov o0, i0 ; Move input vertex pos to output + * mov o1, i1 ; Move input luma texcoords to output + * mov o2, i2 ; Move input chroma Cb texcoords to output + * mov o3, i3 ; Move input chroma Cr texcoords to output + */ + for (i = 0; i < 4; ++i) { + inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_OUTPUT, i, TGSI_FILE_INPUT, i); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + } + + /* add o4, i0, i4 ; Translate vertex pos by motion vec to form ref macroblock texcoords */ + inst = vl_inst3(TGSI_OPCODE_ADD, TGSI_FILE_OUTPUT, 4, TGSI_FILE_INPUT, 0, TGSI_FILE_INPUT, 4); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + /* end */ + inst = vl_end(); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + assert(ti <= max_tokens); + + vs.tokens = tokens; + r->p_vs[0] = r->pipe->create_vs_state(r->pipe, &vs); + free(tokens); +} + +static void +create_field_pred_vert_shader(struct vl_mpeg12_mc_renderer *r) +{ + assert(false); +} + +static void +create_frame_pred_frag_shader(struct vl_mpeg12_mc_renderer *r) +{ + const unsigned max_tokens = 100; + + struct pipe_shader_state fs; + struct tgsi_token *tokens; + struct tgsi_header *header; + + struct tgsi_full_declaration decl; + struct tgsi_full_instruction inst; + + unsigned ti; + + unsigned i; + + assert(r); + + tokens = (struct tgsi_token *) malloc(max_tokens * sizeof(struct tgsi_token)); + *(struct tgsi_version *) &tokens[0] = tgsi_build_version(); + header = (struct tgsi_header *) &tokens[1]; + *header = tgsi_build_header(); + *(struct tgsi_processor *) &tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_FRAGMENT, header); + + ti = 3; + + /* + * decl i0 ; Luma texcoords + * decl i1 ; Chroma Cb texcoords + * decl i2 ; Chroma Cr texcoords + * decl i3 ; Ref macroblock texcoords + */ + for (i = 0; i < 4; ++i) { + decl = vl_decl_interpolated_input(TGSI_SEMANTIC_GENERIC, i + 1, i, i, TGSI_INTERPOLATE_LINEAR); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + } + + /* decl c0 ; Scaling factor, rescales 16-bit snorm to 9-bit snorm */ + decl = vl_decl_constants(TGSI_SEMANTIC_GENERIC, 0, 0, 0); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + + /* decl o0 ; Fragment color */ + decl = vl_decl_output(TGSI_SEMANTIC_COLOR, 0, 0, 0); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + + /* decl t0, t1 */ + decl = vl_decl_temps(0, 1); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + + /* + * decl s0 ; Sampler for luma texture + * decl s1 ; Sampler for chroma Cb texture + * decl s2 ; Sampler for chroma Cr texture + * decl s3 ; Sampler for ref surface texture + */ + for (i = 0; i < 4; ++i) { + decl = vl_decl_samplers(i, i); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + } + + /* + * tex2d t1, i0, s0 ; Read texel from luma texture + * mov t0.x, t1.x ; Move luma sample into .x 
component + * tex2d t1, i1, s1 ; Read texel from chroma Cb texture + * mov t0.y, t1.x ; Move Cb sample into .y component + * tex2d t1, i2, s2 ; Read texel from chroma Cr texture + * mov t0.z, t1.x ; Move Cr sample into .z component + */ + for (i = 0; i < 3; ++i) { + inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_INPUT, i, TGSI_FILE_SAMPLER, i); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 1); + inst.FullSrcRegisters[0].SrcRegister.SwizzleX = TGSI_SWIZZLE_X; + inst.FullSrcRegisters[0].SrcRegister.SwizzleY = TGSI_SWIZZLE_X; + inst.FullSrcRegisters[0].SrcRegister.SwizzleZ = TGSI_SWIZZLE_X; + inst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_X << i; + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + } + + /* mul t0, t0, c0 ; Rescale texel to correct range */ + inst = vl_inst3(TGSI_OPCODE_MUL, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_CONSTANT, 0); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + /* tex2d t1, i3, s3 ; Read texel from ref macroblock */ + inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_INPUT, 3, TGSI_FILE_SAMPLER, 3); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + /* add o0, t0, t1 ; Add ref and differential to form final output */ + inst = vl_inst3(TGSI_OPCODE_ADD, TGSI_FILE_OUTPUT, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 1); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + /* end */ + inst = vl_end(); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + assert(ti <= max_tokens); + + fs.tokens = tokens; + r->p_fs[0] = r->pipe->create_fs_state(r->pipe, &fs); + free(tokens); +} + +static void +create_field_pred_frag_shader(struct vl_mpeg12_mc_renderer *r) +{ + assert(false); +} + +static void +create_frame_bi_pred_vert_shader(struct vl_mpeg12_mc_renderer *r) +{ + const unsigned max_tokens = 100; + + struct pipe_shader_state vs; + struct tgsi_token *tokens; + struct tgsi_header *header; + + struct tgsi_full_declaration decl; + struct tgsi_full_instruction inst; + + unsigned ti; + + unsigned i; + + assert(r); + + tokens = (struct tgsi_token *) malloc(max_tokens * sizeof(struct tgsi_token)); + *(struct tgsi_version *) &tokens[0] = tgsi_build_version(); + header = (struct tgsi_header *) &tokens[1]; + *header = tgsi_build_header(); + *(struct tgsi_processor *) &tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_VERTEX, header); + + ti = 3; + + /* + * decl i0 ; Vertex pos + * decl i1 ; Luma texcoords + * decl i2 ; Chroma Cb texcoords + * decl i3 ; Chroma Cr texcoords + * decl i4 ; First ref macroblock top field texcoords + * decl i5 ; First ref macroblock bottom field texcoords (unused, packed in the same stream) + * decl i6 ; Second ref macroblock top field texcoords + * decl i7 ; Second ref macroblock bottom field texcoords (unused, packed in the same stream) + */ + for (i = 0; i < 8; i++) { + decl = vl_decl_input(i == 0 ? 
TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + } + + /* + * decl o0 ; Vertex pos + * decl o1 ; Luma texcoords + * decl o2 ; Chroma Cb texcoords + * decl o3 ; Chroma Cr texcoords + * decl o4 ; First ref macroblock texcoords + * decl o5 ; Second ref macroblock texcoords + */ + for (i = 0; i < 6; i++) { + decl = vl_decl_output(i == 0 ? TGSI_SEMANTIC_POSITION : TGSI_SEMANTIC_GENERIC, i, i, i); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + } + + /* + * mov o0, i0 ; Move input vertex pos to output + * mov o1, i1 ; Move input luma texcoords to output + * mov o2, i2 ; Move input chroma Cb texcoords to output + * mov o3, i3 ; Move input chroma Cr texcoords to output + */ + for (i = 0; i < 4; ++i) { + inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_OUTPUT, i, TGSI_FILE_INPUT, i); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + } + + /* + * add o4, i0, i4 ; Translate vertex pos by motion vec to form first ref macroblock texcoords + * add o5, i0, i6 ; Translate vertex pos by motion vec to form second ref macroblock texcoords + */ + for (i = 0; i < 2; ++i) { + inst = vl_inst3(TGSI_OPCODE_ADD, TGSI_FILE_OUTPUT, i + 4, TGSI_FILE_INPUT, 0, TGSI_FILE_INPUT, (i + 2) * 2); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + } + + /* end */ + inst = vl_end(); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + assert(ti <= max_tokens); + + vs.tokens = tokens; + r->b_vs[0] = r->pipe->create_vs_state(r->pipe, &vs); + free(tokens); +} + +static void +create_field_bi_pred_vert_shader(struct vl_mpeg12_mc_renderer *r) +{ + assert(false); +} + +static void +create_frame_bi_pred_frag_shader(struct vl_mpeg12_mc_renderer *r) +{ + const unsigned max_tokens = 100; + + struct pipe_shader_state fs; + struct tgsi_token *tokens; + struct tgsi_header *header; + + struct tgsi_full_declaration decl; + struct tgsi_full_instruction inst; + + unsigned ti; + + unsigned i; + + assert(r); + + tokens = (struct tgsi_token *) malloc(max_tokens * sizeof(struct tgsi_token)); + *(struct tgsi_version *) &tokens[0] = tgsi_build_version(); + header = (struct tgsi_header *) &tokens[1]; + *header = tgsi_build_header(); + *(struct tgsi_processor *) &tokens[2] = tgsi_build_processor(TGSI_PROCESSOR_FRAGMENT, header); + + ti = 3; + + /* + * decl i0 ; Luma texcoords + * decl i1 ; Chroma Cb texcoords + * decl i2 ; Chroma Cr texcoords + * decl i3 ; First ref macroblock texcoords + * decl i4 ; Second ref macroblock texcoords + */ + for (i = 0; i < 5; ++i) { + decl = vl_decl_interpolated_input(TGSI_SEMANTIC_GENERIC, i + 1, i, i, TGSI_INTERPOLATE_LINEAR); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + } + + /* + * decl c0 ; Scaling factor, rescales 16-bit snorm to 9-bit snorm + * decl c1 ; Constant 1/2 in .x channel to use as weight to blend past and future texels + */ + decl = vl_decl_constants(TGSI_SEMANTIC_GENERIC, 0, 0, 1); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + + /* decl o0 ; Fragment color */ + decl = vl_decl_output(TGSI_SEMANTIC_COLOR, 0, 0, 0); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + + /* decl t0-t2 */ + decl = vl_decl_temps(0, 2); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + + /* + * decl s0 ; Sampler for luma texture + * decl s1 ; Sampler for chroma Cb 
texture + * decl s2 ; Sampler for chroma Cr texture + * decl s3 ; Sampler for first ref surface texture + * decl s4 ; Sampler for second ref surface texture + */ + for (i = 0; i < 5; ++i) { + decl = vl_decl_samplers(i, i); + ti += tgsi_build_full_declaration(&decl, &tokens[ti], header, max_tokens - ti); + } + + /* + * tex2d t1, i0, s0 ; Read texel from luma texture + * mov t0.x, t1.x ; Move luma sample into .x component + * tex2d t1, i1, s1 ; Read texel from chroma Cb texture + * mov t0.y, t1.x ; Move Cb sample into .y component + * tex2d t1, i2, s2 ; Read texel from chroma Cr texture + * mov t0.z, t1.x ; Move Cr sample into .z component + */ + for (i = 0; i < 3; ++i) { + inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_INPUT, i, TGSI_FILE_SAMPLER, i); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + inst = vl_inst2(TGSI_OPCODE_MOV, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 1); + inst.FullSrcRegisters[0].SrcRegister.SwizzleX = TGSI_SWIZZLE_X; + inst.FullSrcRegisters[0].SrcRegister.SwizzleY = TGSI_SWIZZLE_X; + inst.FullSrcRegisters[0].SrcRegister.SwizzleZ = TGSI_SWIZZLE_X; + inst.FullDstRegisters[0].DstRegister.WriteMask = TGSI_WRITEMASK_X << i; + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + } + + /* mul t0, t0, c0 ; Rescale texel to correct range */ + inst = vl_inst3(TGSI_OPCODE_MUL, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_CONSTANT, 0); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + /* + * tex2d t1, i3, s3 ; Read texel from first ref macroblock + * tex2d t2, i4, s4 ; Read texel from second ref macroblock + */ + for (i = 0; i < 2; ++i) { + inst = vl_tex(TGSI_TEXTURE_2D, TGSI_FILE_TEMPORARY, i + 1, TGSI_FILE_INPUT, i + 3, TGSI_FILE_SAMPLER, i + 3); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + } + + /* lerp t1, c1.x, t1, t2 ; Blend past and future texels */ + inst = vl_inst4(TGSI_OPCODE_LRP, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_CONSTANT, 1, TGSI_FILE_TEMPORARY, 1, TGSI_FILE_TEMPORARY, 2); + inst.FullSrcRegisters[0].SrcRegister.SwizzleX = TGSI_SWIZZLE_X; + inst.FullSrcRegisters[0].SrcRegister.SwizzleY = TGSI_SWIZZLE_X; + inst.FullSrcRegisters[0].SrcRegister.SwizzleZ = TGSI_SWIZZLE_X; + inst.FullSrcRegisters[0].SrcRegister.SwizzleW = TGSI_SWIZZLE_X; + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + /* add o0, t0, t1 ; Add past/future ref and differential to form final output */ + inst = vl_inst3(TGSI_OPCODE_ADD, TGSI_FILE_OUTPUT, 0, TGSI_FILE_TEMPORARY, 0, TGSI_FILE_TEMPORARY, 1); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + /* end */ + inst = vl_end(); + ti += tgsi_build_full_instruction(&inst, &tokens[ti], header, max_tokens - ti); + + assert(ti <= max_tokens); + + fs.tokens = tokens; + r->b_fs[0] = r->pipe->create_fs_state(r->pipe, &fs); + free(tokens); +} + +static void +create_field_bi_pred_frag_shader(struct vl_mpeg12_mc_renderer *r) +{ + assert(false); +} + +static void +xfer_buffers_map(struct vl_mpeg12_mc_renderer *r) +{ + unsigned i; + + assert(r); + + for (i = 0; i < 3; ++i) { + r->tex_transfer[i] = r->pipe->screen->get_tex_transfer + ( + r->pipe->screen, r->textures.all[i], + 0, 0, 0, PIPE_TRANSFER_WRITE, 0, 0, + r->textures.all[i]->width[0], r->textures.all[i]->height[0] + ); + + r->texels[i] = r->pipe->screen->transfer_map(r->pipe->screen, r->tex_transfer[i]); + } +} + +static void +xfer_buffers_unmap(struct 
vl_mpeg12_mc_renderer *r) +{ + unsigned i; + + assert(r); + + for (i = 0; i < 3; ++i) { + r->pipe->screen->transfer_unmap(r->pipe->screen, r->tex_transfer[i]); + r->pipe->screen->tex_transfer_destroy(r->tex_transfer[i]); + } +} + +static bool +init_pipe_state(struct vl_mpeg12_mc_renderer *r) +{ + struct pipe_sampler_state sampler; + unsigned filters[5]; + unsigned i; + + assert(r); + + r->viewport.scale[0] = r->pot_buffers ? + util_next_power_of_two(r->picture_width) : r->picture_width; + r->viewport.scale[1] = r->pot_buffers ? + util_next_power_of_two(r->picture_height) : r->picture_height; + r->viewport.scale[2] = 1; + r->viewport.scale[3] = 1; + r->viewport.translate[0] = 0; + r->viewport.translate[1] = 0; + r->viewport.translate[2] = 0; + r->viewport.translate[3] = 0; + + r->fb_state.width = r->pot_buffers ? + util_next_power_of_two(r->picture_width) : r->picture_width; + r->fb_state.height = r->pot_buffers ? + util_next_power_of_two(r->picture_height) : r->picture_height; + r->fb_state.nr_cbufs = 1; + r->fb_state.zsbuf = NULL; + + /* Luma filter */ + filters[0] = PIPE_TEX_FILTER_NEAREST; + /* Chroma filters */ + if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_444 || + r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE) { + filters[1] = PIPE_TEX_FILTER_NEAREST; + filters[2] = PIPE_TEX_FILTER_NEAREST; + } + else { + filters[1] = PIPE_TEX_FILTER_LINEAR; + filters[2] = PIPE_TEX_FILTER_LINEAR; + } + /* Fwd, bkwd ref filters */ + filters[3] = PIPE_TEX_FILTER_LINEAR; + filters[4] = PIPE_TEX_FILTER_LINEAR; + + for (i = 0; i < 5; ++i) { + sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE; + sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE; + sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE; + sampler.min_img_filter = filters[i]; + sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE; + sampler.mag_img_filter = filters[i]; + sampler.compare_mode = PIPE_TEX_COMPARE_NONE; + sampler.compare_func = PIPE_FUNC_ALWAYS; + sampler.normalized_coords = 1; + /*sampler.prefilter = ; */ + /*sampler.shadow_ambient = ; */ + /*sampler.lod_bias = ; */ + sampler.min_lod = 0; + /*sampler.max_lod = ; */ + /*sampler.border_color[i] = ; */ + /*sampler.max_anisotropy = ; */ + r->samplers.all[i] = r->pipe->create_sampler_state(r->pipe, &sampler); + } + + return true; +} + +static void +cleanup_pipe_state(struct vl_mpeg12_mc_renderer *r) +{ + unsigned i; + + assert(r); + + for (i = 0; i < 5; ++i) + r->pipe->delete_sampler_state(r->pipe, r->samplers.all[i]); +} + +static bool +init_shaders(struct vl_mpeg12_mc_renderer *r) +{ + assert(r); + + create_intra_vert_shader(r); + create_intra_frag_shader(r); + create_frame_pred_vert_shader(r); + create_frame_pred_frag_shader(r); + create_frame_bi_pred_vert_shader(r); + create_frame_bi_pred_frag_shader(r); + + return true; +} + +static void +cleanup_shaders(struct vl_mpeg12_mc_renderer *r) +{ + assert(r); + + r->pipe->delete_vs_state(r->pipe, r->i_vs); + r->pipe->delete_fs_state(r->pipe, r->i_fs); + r->pipe->delete_vs_state(r->pipe, r->p_vs[0]); + r->pipe->delete_fs_state(r->pipe, r->p_fs[0]); + r->pipe->delete_vs_state(r->pipe, r->b_vs[0]); + r->pipe->delete_fs_state(r->pipe, r->b_fs[0]); +} + +static bool +init_buffers(struct vl_mpeg12_mc_renderer *r) +{ + struct pipe_texture template; + + const unsigned mbw = + align(r->picture_width, MACROBLOCK_WIDTH) / MACROBLOCK_WIDTH; + const unsigned mbh = + align(r->picture_height, MACROBLOCK_HEIGHT) / MACROBLOCK_HEIGHT; + + unsigned i; + + assert(r); + + r->macroblocks_per_batch = + mbw * (r->bufmode == VL_MPEG12_MC_RENDERER_BUFFER_PICTURE ? 
mbh : 1); + r->num_macroblocks = 0; + r->macroblock_buf = MALLOC(r->macroblocks_per_batch * sizeof(struct pipe_mpeg12_macroblock)); + + memset(&template, 0, sizeof(struct pipe_texture)); + template.target = PIPE_TEXTURE_2D; + /* TODO: Accommodate HW that can't do this and also for cases when this isn't precise enough */ + template.format = PIPE_FORMAT_R16_SNORM; + template.last_level = 0; + template.width[0] = r->pot_buffers ? + util_next_power_of_two(r->picture_width) : r->picture_width; + template.height[0] = r->pot_buffers ? + util_next_power_of_two(r->picture_height) : r->picture_height; + template.depth[0] = 1; + pf_get_block(template.format, &template.block); + template.tex_usage = PIPE_TEXTURE_USAGE_SAMPLER | PIPE_TEXTURE_USAGE_DYNAMIC; + + r->textures.individual.y = r->pipe->screen->texture_create(r->pipe->screen, &template); + + if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) { + template.width[0] = r->pot_buffers ? + util_next_power_of_two(r->picture_width / 2) : + r->picture_width / 2; + template.height[0] = r->pot_buffers ? + util_next_power_of_two(r->picture_height / 2) : + r->picture_height / 2; + } + else if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422) + template.height[0] = r->pot_buffers ? + util_next_power_of_two(r->picture_height / 2) : + r->picture_height / 2; + + r->textures.individual.cb = + r->pipe->screen->texture_create(r->pipe->screen, &template); + r->textures.individual.cr = + r->pipe->screen->texture_create(r->pipe->screen, &template); + + r->vertex_bufs.individual.ycbcr.stride = sizeof(struct vertex2f) * 4; + r->vertex_bufs.individual.ycbcr.max_index = 24 * r->macroblocks_per_batch - 1; + r->vertex_bufs.individual.ycbcr.buffer_offset = 0; + r->vertex_bufs.individual.ycbcr.buffer = pipe_buffer_create + ( + r->pipe->screen, + DEFAULT_BUF_ALIGNMENT, + PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_DISCARD, + sizeof(struct vertex2f) * 4 * 24 * r->macroblocks_per_batch + ); + + for (i = 1; i < 3; ++i) { + r->vertex_bufs.all[i].stride = sizeof(struct vertex2f) * 2; + r->vertex_bufs.all[i].max_index = 24 * r->macroblocks_per_batch - 1; + r->vertex_bufs.all[i].buffer_offset = 0; + r->vertex_bufs.all[i].buffer = pipe_buffer_create + ( + r->pipe->screen, + DEFAULT_BUF_ALIGNMENT, + PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_DISCARD, + sizeof(struct vertex2f) * 2 * 24 * r->macroblocks_per_batch + ); + } + + /* Position element */ + r->vertex_elems[0].src_offset = 0; + r->vertex_elems[0].vertex_buffer_index = 0; + r->vertex_elems[0].nr_components = 2; + r->vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT; + + /* Luma texcoord element */ + r->vertex_elems[1].src_offset = sizeof(struct vertex2f); + r->vertex_elems[1].vertex_buffer_index = 0; + r->vertex_elems[1].nr_components = 2; + r->vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT; + + /* Chroma Cb texcoord element */ + r->vertex_elems[2].src_offset = sizeof(struct vertex2f) * 2; + r->vertex_elems[2].vertex_buffer_index = 0; + r->vertex_elems[2].nr_components = 2; + r->vertex_elems[2].src_format = PIPE_FORMAT_R32G32_FLOAT; + + /* Chroma Cr texcoord element */ + r->vertex_elems[3].src_offset = sizeof(struct vertex2f) * 3; + r->vertex_elems[3].vertex_buffer_index = 0; + r->vertex_elems[3].nr_components = 2; + r->vertex_elems[3].src_format = PIPE_FORMAT_R32G32_FLOAT; + + /* First ref surface top field texcoord element */ + r->vertex_elems[4].src_offset = 0; + r->vertex_elems[4].vertex_buffer_index = 1; + r->vertex_elems[4].nr_components = 2; + r->vertex_elems[4].src_format = PIPE_FORMAT_R32G32_FLOAT; +
+ /* First ref surface bottom field texcoord element */ + r->vertex_elems[5].src_offset = sizeof(struct vertex2f); + r->vertex_elems[5].vertex_buffer_index = 1; + r->vertex_elems[5].nr_components = 2; + r->vertex_elems[5].src_format = PIPE_FORMAT_R32G32_FLOAT; + + /* Second ref surface top field texcoord element */ + r->vertex_elems[6].src_offset = 0; + r->vertex_elems[6].vertex_buffer_index = 2; + r->vertex_elems[6].nr_components = 2; + r->vertex_elems[6].src_format = PIPE_FORMAT_R32G32_FLOAT; + + /* Second ref surface bottom field texcoord element */ + r->vertex_elems[7].src_offset = sizeof(struct vertex2f); + r->vertex_elems[7].vertex_buffer_index = 2; + r->vertex_elems[7].nr_components = 2; + r->vertex_elems[7].src_format = PIPE_FORMAT_R32G32_FLOAT; + + r->vs_const_buf.buffer = pipe_buffer_create + ( + r->pipe->screen, + DEFAULT_BUF_ALIGNMENT, + PIPE_BUFFER_USAGE_CONSTANT | PIPE_BUFFER_USAGE_DISCARD, + sizeof(struct vertex_shader_consts) + ); + + r->fs_const_buf.buffer = pipe_buffer_create + ( + r->pipe->screen, + DEFAULT_BUF_ALIGNMENT, + PIPE_BUFFER_USAGE_CONSTANT, sizeof(struct fragment_shader_consts) + ); + + memcpy + ( + pipe_buffer_map(r->pipe->screen, r->fs_const_buf.buffer, PIPE_BUFFER_USAGE_CPU_WRITE), + &fs_consts, sizeof(struct fragment_shader_consts) + ); + + pipe_buffer_unmap(r->pipe->screen, r->fs_const_buf.buffer); + + return true; +} + +static void +cleanup_buffers(struct vl_mpeg12_mc_renderer *r) +{ + unsigned i; + + assert(r); + + pipe_buffer_reference(&r->vs_const_buf.buffer, NULL); + pipe_buffer_reference(&r->fs_const_buf.buffer, NULL); + + for (i = 0; i < 3; ++i) + pipe_buffer_reference(&r->vertex_bufs.all[i].buffer, NULL); + + for (i = 0; i < 3; ++i) + pipe_texture_reference(&r->textures.all[i], NULL); + + FREE(r->macroblock_buf); +} + +static enum MACROBLOCK_TYPE +get_macroblock_type(struct pipe_mpeg12_macroblock *mb) +{ + assert(mb); + + switch (mb->mb_type) { + case PIPE_MPEG12_MACROBLOCK_TYPE_INTRA: + return MACROBLOCK_TYPE_INTRA; + case PIPE_MPEG12_MACROBLOCK_TYPE_FWD: + return mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME ? + MACROBLOCK_TYPE_FWD_FRAME_PRED : MACROBLOCK_TYPE_FWD_FIELD_PRED; + case PIPE_MPEG12_MACROBLOCK_TYPE_BKWD: + return mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME ? + MACROBLOCK_TYPE_BKWD_FRAME_PRED : MACROBLOCK_TYPE_BKWD_FIELD_PRED; + case PIPE_MPEG12_MACROBLOCK_TYPE_BI: + return mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME ? 
+ MACROBLOCK_TYPE_BI_FRAME_PRED : MACROBLOCK_TYPE_BI_FIELD_PRED; + default: + assert(0); + } + + /* Unreachable */ + return -1; +} + +/* XXX: One of these days this will have to be killed with fire */ +#define SET_BLOCK(vb, cbp, mbx, mby, unitx, unity, ofsx, ofsy, hx, hy, lm, cbm, crm, use_zb, zb) \ + do { \ + (vb)[0].pos.x = (mbx) * (unitx) + (ofsx); (vb)[0].pos.y = (mby) * (unity) + (ofsy); \ + (vb)[1].pos.x = (mbx) * (unitx) + (ofsx); (vb)[1].pos.y = (mby) * (unity) + (ofsy) + (hy); \ + (vb)[2].pos.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[2].pos.y = (mby) * (unity) + (ofsy); \ + (vb)[3].pos.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[3].pos.y = (mby) * (unity) + (ofsy); \ + (vb)[4].pos.x = (mbx) * (unitx) + (ofsx); (vb)[4].pos.y = (mby) * (unity) + (ofsy) + (hy); \ + (vb)[5].pos.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[5].pos.y = (mby) * (unity) + (ofsy) + (hy); \ + \ + if (!use_zb || (cbp) & (lm)) \ + { \ + (vb)[0].luma_tc.x = (mbx) * (unitx) + (ofsx); (vb)[0].luma_tc.y = (mby) * (unity) + (ofsy); \ + (vb)[1].luma_tc.x = (mbx) * (unitx) + (ofsx); (vb)[1].luma_tc.y = (mby) * (unity) + (ofsy) + (hy); \ + (vb)[2].luma_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[2].luma_tc.y = (mby) * (unity) + (ofsy); \ + (vb)[3].luma_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[3].luma_tc.y = (mby) * (unity) + (ofsy); \ + (vb)[4].luma_tc.x = (mbx) * (unitx) + (ofsx); (vb)[4].luma_tc.y = (mby) * (unity) + (ofsy) + (hy); \ + (vb)[5].luma_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[5].luma_tc.y = (mby) * (unity) + (ofsy) + (hy); \ + } \ + else \ + { \ + (vb)[0].luma_tc.x = (zb)[0].x; (vb)[0].luma_tc.y = (zb)[0].y; \ + (vb)[1].luma_tc.x = (zb)[0].x; (vb)[1].luma_tc.y = (zb)[0].y + (hy); \ + (vb)[2].luma_tc.x = (zb)[0].x + (hx); (vb)[2].luma_tc.y = (zb)[0].y; \ + (vb)[3].luma_tc.x = (zb)[0].x + (hx); (vb)[3].luma_tc.y = (zb)[0].y; \ + (vb)[4].luma_tc.x = (zb)[0].x; (vb)[4].luma_tc.y = (zb)[0].y + (hy); \ + (vb)[5].luma_tc.x = (zb)[0].x + (hx); (vb)[5].luma_tc.y = (zb)[0].y + (hy); \ + } \ + \ + if (!use_zb || (cbp) & (cbm)) \ + { \ + (vb)[0].cb_tc.x = (mbx) * (unitx) + (ofsx); (vb)[0].cb_tc.y = (mby) * (unity) + (ofsy); \ + (vb)[1].cb_tc.x = (mbx) * (unitx) + (ofsx); (vb)[1].cb_tc.y = (mby) * (unity) + (ofsy) + (hy); \ + (vb)[2].cb_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[2].cb_tc.y = (mby) * (unity) + (ofsy); \ + (vb)[3].cb_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[3].cb_tc.y = (mby) * (unity) + (ofsy); \ + (vb)[4].cb_tc.x = (mbx) * (unitx) + (ofsx); (vb)[4].cb_tc.y = (mby) * (unity) + (ofsy) + (hy); \ + (vb)[5].cb_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[5].cb_tc.y = (mby) * (unity) + (ofsy) + (hy); \ + } \ + else \ + { \ + (vb)[0].cb_tc.x = (zb)[1].x; (vb)[0].cb_tc.y = (zb)[1].y; \ + (vb)[1].cb_tc.x = (zb)[1].x; (vb)[1].cb_tc.y = (zb)[1].y + (hy); \ + (vb)[2].cb_tc.x = (zb)[1].x + (hx); (vb)[2].cb_tc.y = (zb)[1].y; \ + (vb)[3].cb_tc.x = (zb)[1].x + (hx); (vb)[3].cb_tc.y = (zb)[1].y; \ + (vb)[4].cb_tc.x = (zb)[1].x; (vb)[4].cb_tc.y = (zb)[1].y + (hy); \ + (vb)[5].cb_tc.x = (zb)[1].x + (hx); (vb)[5].cb_tc.y = (zb)[1].y + (hy); \ + } \ + \ + if (!use_zb || (cbp) & (crm)) \ + { \ + (vb)[0].cr_tc.x = (mbx) * (unitx) + (ofsx); (vb)[0].cr_tc.y = (mby) * (unity) + (ofsy); \ + (vb)[1].cr_tc.x = (mbx) * (unitx) + (ofsx); (vb)[1].cr_tc.y = (mby) * (unity) + (ofsy) + (hy); \ + (vb)[2].cr_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[2].cr_tc.y = (mby) * (unity) + (ofsy); \ + (vb)[3].cr_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[3].cr_tc.y = (mby) * (unity) + (ofsy); \ + (vb)[4].cr_tc.x = (mbx) * (unitx) 
+ (ofsx); (vb)[4].cr_tc.y = (mby) * (unity) + (ofsy) + (hy); \ + (vb)[5].cr_tc.x = (mbx) * (unitx) + (ofsx) + (hx); (vb)[5].cr_tc.y = (mby) * (unity) + (ofsy) + (hy); \ + } \ + else \ + { \ + (vb)[0].cr_tc.x = (zb)[2].x; (vb)[0].cr_tc.y = (zb)[2].y; \ + (vb)[1].cr_tc.x = (zb)[2].x; (vb)[1].cr_tc.y = (zb)[2].y + (hy); \ + (vb)[2].cr_tc.x = (zb)[2].x + (hx); (vb)[2].cr_tc.y = (zb)[2].y; \ + (vb)[3].cr_tc.x = (zb)[2].x + (hx); (vb)[3].cr_tc.y = (zb)[2].y; \ + (vb)[4].cr_tc.x = (zb)[2].x; (vb)[4].cr_tc.y = (zb)[2].y + (hy); \ + (vb)[5].cr_tc.x = (zb)[2].x + (hx); (vb)[5].cr_tc.y = (zb)[2].y + (hy); \ + } \ + } while (0) + +static void +gen_macroblock_verts(struct vl_mpeg12_mc_renderer *r, + struct pipe_mpeg12_macroblock *mb, unsigned pos, + struct vert_stream_0 *ycbcr_vb, struct vertex2f **ref_vb) +{ + struct vertex2f mo_vec[2]; + + unsigned i; + + assert(r); + assert(mb); + assert(ycbcr_vb); + assert(pos < r->macroblocks_per_batch); + + switch (mb->mb_type) { + case PIPE_MPEG12_MACROBLOCK_TYPE_BI: + { + struct vertex2f *vb; + + assert(ref_vb && ref_vb[1]); + + vb = ref_vb[1] + pos * 2 * 24; + + mo_vec[0].x = mb->pmv[0][1][0] * 0.5f * r->surface_tex_inv_size.x; + mo_vec[0].y = mb->pmv[0][1][1] * 0.5f * r->surface_tex_inv_size.y; + + if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) { + for (i = 0; i < 24 * 2; i += 2) { + vb[i].x = mo_vec[0].x; + vb[i].y = mo_vec[0].y; + } + } + else { + mo_vec[1].x = mb->pmv[1][1][0] * 0.5f * r->surface_tex_inv_size.x; + mo_vec[1].y = mb->pmv[1][1][1] * 0.5f * r->surface_tex_inv_size.y; + + for (i = 0; i < 24 * 2; i += 2) { + vb[i].x = mo_vec[0].x; + vb[i].y = mo_vec[0].y; + vb[i + 1].x = mo_vec[1].x; + vb[i + 1].y = mo_vec[1].y; + } + } + + /* fall-through */ + } + case PIPE_MPEG12_MACROBLOCK_TYPE_FWD: + case PIPE_MPEG12_MACROBLOCK_TYPE_BKWD: + { + struct vertex2f *vb; + + assert(ref_vb && ref_vb[0]); + + vb = ref_vb[0] + pos * 2 * 24; + + if (mb->mb_type == PIPE_MPEG12_MACROBLOCK_TYPE_BKWD) { + mo_vec[0].x = mb->pmv[0][1][0] * 0.5f * r->surface_tex_inv_size.x; + mo_vec[0].y = mb->pmv[0][1][1] * 0.5f * r->surface_tex_inv_size.y; + + if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FIELD) { + mo_vec[1].x = mb->pmv[1][1][0] * 0.5f * r->surface_tex_inv_size.x; + mo_vec[1].y = mb->pmv[1][1][1] * 0.5f * r->surface_tex_inv_size.y; + } + } + else { + mo_vec[0].x = mb->pmv[0][0][0] * 0.5f * r->surface_tex_inv_size.x; + mo_vec[0].y = mb->pmv[0][0][1] * 0.5f * r->surface_tex_inv_size.y; + + if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FIELD) { + mo_vec[1].x = mb->pmv[1][0][0] * 0.5f * r->surface_tex_inv_size.x; + mo_vec[1].y = mb->pmv[1][0][1] * 0.5f * r->surface_tex_inv_size.y; + } + } + + if (mb->mb_type == PIPE_MPEG12_MOTION_TYPE_FRAME) { + for (i = 0; i < 24 * 2; i += 2) { + vb[i].x = mo_vec[0].x; + vb[i].y = mo_vec[0].y; + } + } + else { + for (i = 0; i < 24 * 2; i += 2) { + vb[i].x = mo_vec[0].x; + vb[i].y = mo_vec[0].y; + vb[i + 1].x = mo_vec[1].x; + vb[i + 1].y = mo_vec[1].y; + } + } + + /* fall-through */ + } + case PIPE_MPEG12_MACROBLOCK_TYPE_INTRA: + { + const struct vertex2f unit = + { + r->surface_tex_inv_size.x * MACROBLOCK_WIDTH, + r->surface_tex_inv_size.y * MACROBLOCK_HEIGHT + }; + const struct vertex2f half = + { + r->surface_tex_inv_size.x * (MACROBLOCK_WIDTH / 2), + r->surface_tex_inv_size.y * (MACROBLOCK_HEIGHT / 2) + }; + const bool use_zb = r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE; + + struct vert_stream_0 *vb = ycbcr_vb + pos * 24; + + SET_BLOCK(vb, mb->cbp, mb->mbx, mb->mby, + unit.x, unit.y, 0, 0, half.x, half.y, + 32, 2, 1, 
use_zb, r->zero_block); + + SET_BLOCK(vb + 6, mb->cbp, mb->mbx, mb->mby, + unit.x, unit.y, half.x, 0, half.x, half.y, + 16, 2, 1, use_zb, r->zero_block); + + SET_BLOCK(vb + 12, mb->cbp, mb->mbx, mb->mby, + unit.x, unit.y, 0, half.y, half.x, half.y, + 8, 2, 1, use_zb, r->zero_block); + + SET_BLOCK(vb + 18, mb->cbp, mb->mbx, mb->mby, + unit.x, unit.y, half.x, half.y, half.x, half.y, + 4, 2, 1, use_zb, r->zero_block); + + break; + } + default: + assert(0); + } +} + +static void +gen_macroblock_stream(struct vl_mpeg12_mc_renderer *r, + unsigned *num_macroblocks) +{ + unsigned offset[NUM_MACROBLOCK_TYPES]; + struct vert_stream_0 *ycbcr_vb; + struct vertex2f *ref_vb[2]; + unsigned i; + + assert(r); + assert(num_macroblocks); + + for (i = 0; i < r->num_macroblocks; ++i) { + enum MACROBLOCK_TYPE mb_type = get_macroblock_type(&r->macroblock_buf[i]); + ++num_macroblocks[mb_type]; + } + + offset[0] = 0; + + for (i = 1; i < NUM_MACROBLOCK_TYPES; ++i) + offset[i] = offset[i - 1] + num_macroblocks[i - 1]; + + ycbcr_vb = (struct vert_stream_0 *)pipe_buffer_map + ( + r->pipe->screen, + r->vertex_bufs.individual.ycbcr.buffer, + PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD + ); + + for (i = 0; i < 2; ++i) + ref_vb[i] = (struct vertex2f *)pipe_buffer_map + ( + r->pipe->screen, + r->vertex_bufs.individual.ref[i].buffer, + PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD + ); + + for (i = 0; i < r->num_macroblocks; ++i) { + enum MACROBLOCK_TYPE mb_type = get_macroblock_type(&r->macroblock_buf[i]); + + gen_macroblock_verts(r, &r->macroblock_buf[i], offset[mb_type], + ycbcr_vb, ref_vb); + + ++offset[mb_type]; + } + + pipe_buffer_unmap(r->pipe->screen, r->vertex_bufs.individual.ycbcr.buffer); + for (i = 0; i < 2; ++i) + pipe_buffer_unmap(r->pipe->screen, r->vertex_bufs.individual.ref[i].buffer); +} + +static void +flush(struct vl_mpeg12_mc_renderer *r) +{ + unsigned num_macroblocks[NUM_MACROBLOCK_TYPES] = { 0 }; + unsigned vb_start = 0; + struct vertex_shader_consts *vs_consts; + unsigned i; + + assert(r); + assert(r->num_macroblocks == r->macroblocks_per_batch); + + gen_macroblock_stream(r, num_macroblocks); + + r->fb_state.cbufs[0] = r->pipe->screen->get_tex_surface + ( + r->pipe->screen, r->surface, + 0, 0, 0, PIPE_BUFFER_USAGE_GPU_WRITE + ); + + r->pipe->set_framebuffer_state(r->pipe, &r->fb_state); + r->pipe->set_viewport_state(r->pipe, &r->viewport); + + vs_consts = pipe_buffer_map + ( + r->pipe->screen, r->vs_const_buf.buffer, + PIPE_BUFFER_USAGE_CPU_WRITE | PIPE_BUFFER_USAGE_DISCARD + ); + + vs_consts->denorm.x = r->surface->width[0]; + vs_consts->denorm.y = r->surface->height[0]; + + pipe_buffer_unmap(r->pipe->screen, r->vs_const_buf.buffer); + + r->pipe->set_constant_buffer(r->pipe, PIPE_SHADER_VERTEX, 0, + &r->vs_const_buf); + r->pipe->set_constant_buffer(r->pipe, PIPE_SHADER_FRAGMENT, 0, + &r->fs_const_buf); + + if (num_macroblocks[MACROBLOCK_TYPE_INTRA] > 0) { + r->pipe->set_vertex_buffers(r->pipe, 1, r->vertex_bufs.all); + r->pipe->set_vertex_elements(r->pipe, 4, r->vertex_elems); + r->pipe->set_sampler_textures(r->pipe, 3, r->textures.all); + r->pipe->bind_sampler_states(r->pipe, 3, r->samplers.all); + r->pipe->bind_vs_state(r->pipe, r->i_vs); + r->pipe->bind_fs_state(r->pipe, r->i_fs); + + r->pipe->draw_arrays(r->pipe, PIPE_PRIM_TRIANGLES, vb_start, + num_macroblocks[MACROBLOCK_TYPE_INTRA] * 24); + vb_start += num_macroblocks[MACROBLOCK_TYPE_INTRA] * 24; + } + + if (num_macroblocks[MACROBLOCK_TYPE_FWD_FRAME_PRED] > 0) { + r->pipe->set_vertex_buffers(r->pipe, 2, 
r->vertex_bufs.all); + r->pipe->set_vertex_elements(r->pipe, 6, r->vertex_elems); + r->textures.individual.ref[0] = r->past; + r->pipe->set_sampler_textures(r->pipe, 4, r->textures.all); + r->pipe->bind_sampler_states(r->pipe, 4, r->samplers.all); + r->pipe->bind_vs_state(r->pipe, r->p_vs[0]); + r->pipe->bind_fs_state(r->pipe, r->p_fs[0]); + + r->pipe->draw_arrays(r->pipe, PIPE_PRIM_TRIANGLES, vb_start, + num_macroblocks[MACROBLOCK_TYPE_FWD_FRAME_PRED] * 24); + vb_start += num_macroblocks[MACROBLOCK_TYPE_FWD_FRAME_PRED] * 24; + } + + if (false /*num_macroblocks[MACROBLOCK_TYPE_FWD_FIELD_PRED] > 0 */ ) { + r->pipe->set_vertex_buffers(r->pipe, 2, r->vertex_bufs.all); + r->pipe->set_vertex_elements(r->pipe, 6, r->vertex_elems); + r->textures.individual.ref[0] = r->past; + r->pipe->set_sampler_textures(r->pipe, 4, r->textures.all); + r->pipe->bind_sampler_states(r->pipe, 4, r->samplers.all); + r->pipe->bind_vs_state(r->pipe, r->p_vs[1]); + r->pipe->bind_fs_state(r->pipe, r->p_fs[1]); + + r->pipe->draw_arrays(r->pipe, PIPE_PRIM_TRIANGLES, vb_start, + num_macroblocks[MACROBLOCK_TYPE_FWD_FIELD_PRED] * 24); + vb_start += num_macroblocks[MACROBLOCK_TYPE_FWD_FIELD_PRED] * 24; + } + + if (num_macroblocks[MACROBLOCK_TYPE_BKWD_FRAME_PRED] > 0) { + r->pipe->set_vertex_buffers(r->pipe, 2, r->vertex_bufs.all); + r->pipe->set_vertex_elements(r->pipe, 6, r->vertex_elems); + r->textures.individual.ref[0] = r->future; + r->pipe->set_sampler_textures(r->pipe, 4, r->textures.all); + r->pipe->bind_sampler_states(r->pipe, 4, r->samplers.all); + r->pipe->bind_vs_state(r->pipe, r->p_vs[0]); + r->pipe->bind_fs_state(r->pipe, r->p_fs[0]); + + r->pipe->draw_arrays(r->pipe, PIPE_PRIM_TRIANGLES, vb_start, + num_macroblocks[MACROBLOCK_TYPE_BKWD_FRAME_PRED] * 24); + vb_start += num_macroblocks[MACROBLOCK_TYPE_BKWD_FRAME_PRED] * 24; + } + + if (false /*num_macroblocks[MACROBLOCK_TYPE_BKWD_FIELD_PRED] > 0 */ ) { + r->pipe->set_vertex_buffers(r->pipe, 2, r->vertex_bufs.all); + r->pipe->set_vertex_elements(r->pipe, 6, r->vertex_elems); + r->textures.individual.ref[0] = r->future; + r->pipe->set_sampler_textures(r->pipe, 4, r->textures.all); + r->pipe->bind_sampler_states(r->pipe, 4, r->samplers.all); + r->pipe->bind_vs_state(r->pipe, r->p_vs[1]); + r->pipe->bind_fs_state(r->pipe, r->p_fs[1]); + + r->pipe->draw_arrays(r->pipe, PIPE_PRIM_TRIANGLES, vb_start, + num_macroblocks[MACROBLOCK_TYPE_BKWD_FIELD_PRED] * 24); + vb_start += num_macroblocks[MACROBLOCK_TYPE_BKWD_FIELD_PRED] * 24; + } + + if (num_macroblocks[MACROBLOCK_TYPE_BI_FRAME_PRED] > 0) { + r->pipe->set_vertex_buffers(r->pipe, 3, r->vertex_bufs.all); + r->pipe->set_vertex_elements(r->pipe, 8, r->vertex_elems); + r->textures.individual.ref[0] = r->past; + r->textures.individual.ref[1] = r->future; + r->pipe->set_sampler_textures(r->pipe, 5, r->textures.all); + r->pipe->bind_sampler_states(r->pipe, 5, r->samplers.all); + r->pipe->bind_vs_state(r->pipe, r->b_vs[0]); + r->pipe->bind_fs_state(r->pipe, r->b_fs[0]); + + r->pipe->draw_arrays(r->pipe, PIPE_PRIM_TRIANGLES, vb_start, + num_macroblocks[MACROBLOCK_TYPE_BI_FRAME_PRED] * 24); + vb_start += num_macroblocks[MACROBLOCK_TYPE_BI_FRAME_PRED] * 24; + } + + if (false /*num_macroblocks[MACROBLOCK_TYPE_BI_FIELD_PRED] > 0 */ ) { + r->pipe->set_vertex_buffers(r->pipe, 3, r->vertex_bufs.all); + r->pipe->set_vertex_elements(r->pipe, 8, r->vertex_elems); + r->textures.individual.ref[0] = r->past; + r->textures.individual.ref[1] = r->future; + r->pipe->set_sampler_textures(r->pipe, 5, r->textures.all); + 
r->pipe->bind_sampler_states(r->pipe, 5, r->samplers.all); + r->pipe->bind_vs_state(r->pipe, r->b_vs[1]); + r->pipe->bind_fs_state(r->pipe, r->b_fs[1]); + + r->pipe->draw_arrays(r->pipe, PIPE_PRIM_TRIANGLES, vb_start, + num_macroblocks[MACROBLOCK_TYPE_BI_FIELD_PRED] * 24); + vb_start += num_macroblocks[MACROBLOCK_TYPE_BI_FIELD_PRED] * 24; + } + + r->pipe->flush(r->pipe, PIPE_FLUSH_RENDER_CACHE, r->fence); + pipe_surface_reference(&r->fb_state.cbufs[0], NULL); + + if (r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE) + for (i = 0; i < 3; ++i) + r->zero_block[i].x = ZERO_BLOCK_NIL; + + r->num_macroblocks = 0; +} + +static void +grab_frame_coded_block(short *src, short *dst, unsigned dst_pitch) +{ + unsigned y; + + assert(src); + assert(dst); + + for (y = 0; y < BLOCK_HEIGHT; ++y) + memcpy(dst + y * dst_pitch, src + y * BLOCK_WIDTH, BLOCK_WIDTH * 2); +} + +static void +grab_field_coded_block(short *src, short *dst, unsigned dst_pitch) +{ + unsigned y; + + assert(src); + assert(dst); + + for (y = 0; y < BLOCK_HEIGHT; ++y) + memcpy(dst + y * dst_pitch * 2, src + y * BLOCK_WIDTH, BLOCK_WIDTH * 2); +} + +static void +fill_zero_block(short *dst, unsigned dst_pitch) +{ + unsigned y; + + assert(dst); + + for (y = 0; y < BLOCK_HEIGHT; ++y) + memset(dst + y * dst_pitch, 0, BLOCK_WIDTH * 2); +} + +static void +grab_blocks(struct vl_mpeg12_mc_renderer *r, unsigned mbx, unsigned mby, + enum pipe_mpeg12_dct_type dct_type, unsigned cbp, short *blocks) +{ + unsigned tex_pitch; + short *texels; + unsigned tb = 0, sb = 0; + unsigned mbpx = mbx * MACROBLOCK_WIDTH, mbpy = mby * MACROBLOCK_HEIGHT; + unsigned x, y; + + assert(r); + assert(blocks); + + tex_pitch = r->tex_transfer[0]->stride / r->tex_transfer[0]->block.size; + texels = r->texels[0] + mbpy * tex_pitch + mbpx; + + for (y = 0; y < 2; ++y) { + for (x = 0; x < 2; ++x, ++tb) { + if ((cbp >> (5 - tb)) & 1) { + if (dct_type == PIPE_MPEG12_DCT_TYPE_FRAME) { + grab_frame_coded_block(blocks + sb * BLOCK_WIDTH * BLOCK_HEIGHT, + texels + y * tex_pitch * BLOCK_WIDTH + + x * BLOCK_WIDTH, tex_pitch); + } + else { + grab_field_coded_block(blocks + sb * BLOCK_WIDTH * BLOCK_HEIGHT, + texels + y * tex_pitch + x * BLOCK_WIDTH, + tex_pitch); + } + + ++sb; + } + else if (r->eb_handling != VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_NONE) { + if (r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ALL || + ZERO_BLOCK_IS_NIL(r->zero_block[0])) { + fill_zero_block(texels + y * tex_pitch * BLOCK_WIDTH + x * BLOCK_WIDTH, tex_pitch); + if (r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE) { + r->zero_block[0].x = (mbpx + x * 8) * r->surface_tex_inv_size.x; + r->zero_block[0].y = (mbpy + y * 8) * r->surface_tex_inv_size.y; + } + } + } + } + } + + /* TODO: Implement 422, 444 */ + assert(r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420); + + mbpx /= 2; + mbpy /= 2; + + for (tb = 0; tb < 2; ++tb) { + tex_pitch = r->tex_transfer[tb + 1]->stride / r->tex_transfer[tb + 1]->block.size; + texels = r->texels[tb + 1] + mbpy * tex_pitch + mbpx; + + if ((cbp >> (1 - tb)) & 1) { + grab_frame_coded_block(blocks + sb * BLOCK_WIDTH * BLOCK_HEIGHT, texels, tex_pitch); + ++sb; + } + else if (r->eb_handling != VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_NONE) { + if (r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ALL || + ZERO_BLOCK_IS_NIL(r->zero_block[tb + 1])) { + fill_zero_block(texels, tex_pitch); + if (r->eb_handling == VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE) { + r->zero_block[tb + 1].x = (mbpx << 1) * r->surface_tex_inv_size.x; + r->zero_block[tb + 1].y = 
(mbpy << 1) * r->surface_tex_inv_size.y; + } + } + } + } +} + +static void +grab_macroblock(struct vl_mpeg12_mc_renderer *r, + struct pipe_mpeg12_macroblock *mb) +{ + assert(r); + assert(mb); + assert(r->num_macroblocks < r->macroblocks_per_batch); + + memcpy(&r->macroblock_buf[r->num_macroblocks], mb, + sizeof(struct pipe_mpeg12_macroblock)); + + grab_blocks(r, mb->mbx, mb->mby, mb->dct_type, mb->cbp, mb->blocks); + + ++r->num_macroblocks; +} + +bool +vl_mpeg12_mc_renderer_init(struct vl_mpeg12_mc_renderer *renderer, + struct pipe_context *pipe, + unsigned picture_width, + unsigned picture_height, + enum pipe_video_chroma_format chroma_format, + enum VL_MPEG12_MC_RENDERER_BUFFER_MODE bufmode, + enum VL_MPEG12_MC_RENDERER_EMPTY_BLOCK eb_handling, + bool pot_buffers) +{ + unsigned i; + + assert(renderer); + assert(pipe); + /* TODO: Implement other policies */ + assert(bufmode == VL_MPEG12_MC_RENDERER_BUFFER_PICTURE); + /* TODO: Implement this */ + /* XXX: XFER_ALL sampling issue at block edges when using bilinear filtering */ + assert(eb_handling != VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_NONE); + /* TODO: Non-pot buffers untested, probably doesn't work without changes to texcoord generation, vert shader, etc */ + assert(pot_buffers); + + memset(renderer, 0, sizeof(struct vl_mpeg12_mc_renderer)); + + renderer->pipe = pipe; + renderer->picture_width = picture_width; + renderer->picture_height = picture_height; + renderer->chroma_format = chroma_format; + renderer->bufmode = bufmode; + renderer->eb_handling = eb_handling; + renderer->pot_buffers = pot_buffers; + + if (!init_pipe_state(renderer)) + return false; + if (!init_shaders(renderer)) { + cleanup_pipe_state(renderer); + return false; + } + if (!init_buffers(renderer)) { + cleanup_shaders(renderer); + cleanup_pipe_state(renderer); + return false; + } + + renderer->surface = NULL; + renderer->past = NULL; + renderer->future = NULL; + for (i = 0; i < 3; ++i) + renderer->zero_block[i].x = ZERO_BLOCK_NIL; + renderer->num_macroblocks = 0; + + xfer_buffers_map(renderer); + + return true; +} + +void +vl_mpeg12_mc_renderer_cleanup(struct vl_mpeg12_mc_renderer *renderer) +{ + assert(renderer); + + xfer_buffers_unmap(renderer); + + cleanup_pipe_state(renderer); + cleanup_shaders(renderer); + cleanup_buffers(renderer); +} + +void +vl_mpeg12_mc_renderer_render_macroblocks(struct vl_mpeg12_mc_renderer + *renderer, + struct pipe_texture *surface, + struct pipe_texture *past, + struct pipe_texture *future, + unsigned num_macroblocks, + struct pipe_mpeg12_macroblock + *mpeg12_macroblocks, + struct pipe_fence_handle **fence) +{ + bool new_surface = false; + + assert(renderer); + assert(surface); + assert(num_macroblocks); + assert(mpeg12_macroblocks); + + if (renderer->surface) { + if (surface != renderer->surface) { + if (renderer->num_macroblocks > 0) { + xfer_buffers_unmap(renderer); + flush(renderer); + } + + new_surface = true; + } + + /* If the surface we're rendering hasn't changed the ref frames shouldn't change. 
*/ + assert(surface != renderer->surface || renderer->past == past); + assert(surface != renderer->surface || renderer->future == future); + } + else + new_surface = true; + + if (new_surface) { + renderer->surface = surface; + renderer->past = past; + renderer->future = future; + renderer->fence = fence; + renderer->surface_tex_inv_size.x = 1.0f / surface->width[0]; + renderer->surface_tex_inv_size.y = 1.0f / surface->height[0]; + } + + while (num_macroblocks) { + unsigned left_in_batch = renderer->macroblocks_per_batch - renderer->num_macroblocks; + unsigned num_to_submit = MIN2(num_macroblocks, left_in_batch); + unsigned i; + + for (i = 0; i < num_to_submit; ++i) { + assert(mpeg12_macroblocks[i].base.codec == PIPE_VIDEO_CODEC_MPEG12); + grab_macroblock(renderer, &mpeg12_macroblocks[i]); + } + + num_macroblocks -= num_to_submit; + + if (renderer->num_macroblocks == renderer->macroblocks_per_batch) { + xfer_buffers_unmap(renderer); + flush(renderer); + xfer_buffers_map(renderer); + /* Next time we get this surface it may have new ref frames */ + renderer->surface = NULL; + } + } +} diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.h b/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.h new file mode 100644 index 00000000000..5d2c1273ee3 --- /dev/null +++ b/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.h @@ -0,0 +1,120 @@ +/************************************************************************** + * + * Copyright 2009 Younes Manton. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ + +#ifndef vl_mpeg12_mc_renderer_h +#define vl_mpeg12_mc_renderer_h + +#include <pipe/p_compiler.h> +#include <pipe/p_state.h> +#include <pipe/p_video_state.h> + +struct pipe_context; +struct pipe_video_surface; +struct pipe_macroblock; + +/* A slice is video-width (rounded up to a multiple of macroblock width) x macroblock height */ +enum VL_MPEG12_MC_RENDERER_BUFFER_MODE +{ + VL_MPEG12_MC_RENDERER_BUFFER_SLICE, /* Saves memory at the cost of smaller batches */ + VL_MPEG12_MC_RENDERER_BUFFER_PICTURE /* Larger batches, more memory */ +}; + +enum VL_MPEG12_MC_RENDERER_EMPTY_BLOCK +{ + VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ALL, /* Waste of memory bandwidth */ + VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE, /* Can only do point-filtering when interpolating subsampled chroma channels */ + VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_NONE /* Needs conditional texel fetch! */ +}; + +struct vl_mpeg12_mc_renderer +{ + struct pipe_context *pipe; + unsigned picture_width; + unsigned picture_height; + enum pipe_video_chroma_format chroma_format; + enum VL_MPEG12_MC_RENDERER_BUFFER_MODE bufmode; + enum VL_MPEG12_MC_RENDERER_EMPTY_BLOCK eb_handling; + bool pot_buffers; + unsigned macroblocks_per_batch; + + struct pipe_viewport_state viewport; + struct pipe_constant_buffer vs_const_buf; + struct pipe_constant_buffer fs_const_buf; + struct pipe_framebuffer_state fb_state; + struct pipe_vertex_element vertex_elems[8]; + + union + { + void *all[5]; + struct { void *y, *cb, *cr, *ref[2]; } individual; + } samplers; + + void *i_vs, *p_vs[2], *b_vs[2]; + void *i_fs, *p_fs[2], *b_fs[2]; + + union + { + struct pipe_texture *all[5]; + struct { struct pipe_texture *y, *cb, *cr, *ref[2]; } individual; + } textures; + + union + { + struct pipe_vertex_buffer all[3]; + struct { struct pipe_vertex_buffer ycbcr, ref[2]; } individual; + } vertex_bufs; + + struct pipe_texture *surface, *past, *future; + struct pipe_fence_handle **fence; + unsigned num_macroblocks; + struct pipe_mpeg12_macroblock *macroblock_buf; + struct pipe_transfer *tex_transfer[3]; + short *texels[3]; + struct { float x, y; } surface_tex_inv_size; + struct { float x, y; } zero_block[3]; +}; + +bool vl_mpeg12_mc_renderer_init(struct vl_mpeg12_mc_renderer *renderer, + struct pipe_context *pipe, + unsigned picture_width, + unsigned picture_height, + enum pipe_video_chroma_format chroma_format, + enum VL_MPEG12_MC_RENDERER_BUFFER_MODE bufmode, + enum VL_MPEG12_MC_RENDERER_EMPTY_BLOCK eb_handling, + bool pot_buffers); + +void vl_mpeg12_mc_renderer_cleanup(struct vl_mpeg12_mc_renderer *renderer); + +void vl_mpeg12_mc_renderer_render_macroblocks(struct vl_mpeg12_mc_renderer *renderer, + struct pipe_texture *surface, + struct pipe_texture *past, + struct pipe_texture *future, + unsigned num_macroblocks, + struct pipe_mpeg12_macroblock *mpeg12_macroblocks, + struct pipe_fence_handle **fence); + +#endif /* vl_mpeg12_mc_renderer_h */ diff --git a/src/gallium/auxiliary/vl/vl_shader_build.c b/src/gallium/auxiliary/vl/vl_shader_build.c new file mode 100644 index 00000000000..faa20a903cd --- /dev/null +++ b/src/gallium/auxiliary/vl/vl_shader_build.c @@ -0,0 +1,242 @@ +/************************************************************************** + * + * Copyright 2009 Younes Manton. + * All Rights Reserved. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vl_shader_build.h"
+#include <assert.h>
+#include <tgsi/tgsi_parse.h>
+#include <tgsi/tgsi_build.h>
+
+struct tgsi_full_declaration vl_decl_input(unsigned int name, unsigned int index, unsigned int first, unsigned int last)
+{
+   struct tgsi_full_declaration decl = tgsi_default_full_declaration();
+
+   decl.Declaration.File = TGSI_FILE_INPUT;
+   decl.Declaration.Semantic = 1;
+   decl.Semantic.SemanticName = name;
+   decl.Semantic.SemanticIndex = index;
+   decl.DeclarationRange.First = first;
+   decl.DeclarationRange.Last = last;
+
+   return decl;
+}
+
+struct tgsi_full_declaration vl_decl_interpolated_input
+(
+   unsigned int name,
+   unsigned int index,
+   unsigned int first,
+   unsigned int last,
+   int interpolation
+)
+{
+   struct tgsi_full_declaration decl = tgsi_default_full_declaration();
+
+   assert
+   (
+      interpolation == TGSI_INTERPOLATE_CONSTANT ||
+      interpolation == TGSI_INTERPOLATE_LINEAR ||
+      interpolation == TGSI_INTERPOLATE_PERSPECTIVE
+   );
+
+   decl.Declaration.File = TGSI_FILE_INPUT;
+   decl.Declaration.Semantic = 1;
+   decl.Semantic.SemanticName = name;
+   decl.Semantic.SemanticIndex = index;
+   decl.Declaration.Interpolate = interpolation;
+   decl.DeclarationRange.First = first;
+   decl.DeclarationRange.Last = last;
+
+   return decl;
+}
+
+struct tgsi_full_declaration vl_decl_constants(unsigned int name, unsigned int index, unsigned int first, unsigned int last)
+{
+   struct tgsi_full_declaration decl = tgsi_default_full_declaration();
+
+   decl.Declaration.File = TGSI_FILE_CONSTANT;
+   decl.Declaration.Semantic = 1;
+   decl.Semantic.SemanticName = name;
+   decl.Semantic.SemanticIndex = index;
+   decl.DeclarationRange.First = first;
+   decl.DeclarationRange.Last = last;
+
+   return decl;
+}
+
+struct tgsi_full_declaration vl_decl_output(unsigned int name, unsigned int index, unsigned int first, unsigned int last)
+{
+   struct tgsi_full_declaration decl = tgsi_default_full_declaration();
+
+   decl.Declaration.File = TGSI_FILE_OUTPUT;
+   decl.Declaration.Semantic = 1;
+   decl.Semantic.SemanticName = name;
+   decl.Semantic.SemanticIndex = index;
+   decl.DeclarationRange.First = first;
+   decl.DeclarationRange.Last = last;
+
+   return decl;
+}
+
+struct tgsi_full_declaration vl_decl_temps(unsigned int first, unsigned int last)
+{
+   struct tgsi_full_declaration decl = tgsi_default_full_declaration();
+
+   decl.Declaration.File = TGSI_FILE_TEMPORARY;
+   decl.DeclarationRange.First = first;
+   decl.DeclarationRange.Last = last;
+
+   return decl;
+}
+
+struct tgsi_full_declaration vl_decl_samplers(unsigned int first, unsigned int last)
+{
+   struct tgsi_full_declaration decl = tgsi_default_full_declaration();
+
+   decl.Declaration.File = TGSI_FILE_SAMPLER;
+   decl.DeclarationRange.First = first;
+   decl.DeclarationRange.Last = last;
+
+   return decl;
+}
+
+struct tgsi_full_instruction vl_inst2
+(
+   int opcode,
+   enum tgsi_file_type dst_file,
+   unsigned int dst_index,
+   enum tgsi_file_type src_file,
+   unsigned int src_index
+)
+{
+   struct tgsi_full_instruction inst = tgsi_default_full_instruction();
+
+   inst.Instruction.Opcode = opcode;
+   inst.Instruction.NumDstRegs = 1;
+   inst.FullDstRegisters[0].DstRegister.File = dst_file;
+   inst.FullDstRegisters[0].DstRegister.Index = dst_index;
+   inst.Instruction.NumSrcRegs = 1;
+   inst.FullSrcRegisters[0].SrcRegister.File = src_file;
+   inst.FullSrcRegisters[0].SrcRegister.Index = src_index;
+
+   return inst;
+}
+
+struct tgsi_full_instruction vl_inst3
+(
+   int opcode,
+   enum tgsi_file_type dst_file,
+   unsigned int dst_index,
+   enum tgsi_file_type src1_file,
+   unsigned int src1_index,
+   enum tgsi_file_type src2_file,
+   unsigned int src2_index
+)
+{
+   struct tgsi_full_instruction inst = tgsi_default_full_instruction();
+
+   inst.Instruction.Opcode = opcode;
+   inst.Instruction.NumDstRegs = 1;
+   inst.FullDstRegisters[0].DstRegister.File = dst_file;
+   inst.FullDstRegisters[0].DstRegister.Index = dst_index;
+   inst.Instruction.NumSrcRegs = 2;
+   inst.FullSrcRegisters[0].SrcRegister.File = src1_file;
+   inst.FullSrcRegisters[0].SrcRegister.Index = src1_index;
+   inst.FullSrcRegisters[1].SrcRegister.File = src2_file;
+   inst.FullSrcRegisters[1].SrcRegister.Index = src2_index;
+
+   return inst;
+}
+
+struct tgsi_full_instruction vl_tex
+(
+   int tex,
+   enum tgsi_file_type dst_file,
+   unsigned int dst_index,
+   enum tgsi_file_type src1_file,
+   unsigned int src1_index,
+   enum tgsi_file_type src2_file,
+   unsigned int src2_index
+)
+{
+   struct tgsi_full_instruction inst = tgsi_default_full_instruction();
+
+   inst.Instruction.Opcode = TGSI_OPCODE_TEX;
+   inst.Instruction.NumDstRegs = 1;
+   inst.FullDstRegisters[0].DstRegister.File = dst_file;
+   inst.FullDstRegisters[0].DstRegister.Index = dst_index;
+   inst.Instruction.NumSrcRegs = 2;
+   inst.InstructionExtTexture.Texture = tex;
+   inst.FullSrcRegisters[0].SrcRegister.File = src1_file;
+   inst.FullSrcRegisters[0].SrcRegister.Index = src1_index;
+   inst.FullSrcRegisters[1].SrcRegister.File = src2_file;
+   inst.FullSrcRegisters[1].SrcRegister.Index = src2_index;
+
+   return inst;
+}
+
+struct tgsi_full_instruction vl_inst4
+(
+   int opcode,
+   enum tgsi_file_type dst_file,
+   unsigned int dst_index,
+   enum tgsi_file_type src1_file,
+   unsigned int src1_index,
+   enum tgsi_file_type src2_file,
+   unsigned int src2_index,
+   enum tgsi_file_type src3_file,
+   unsigned int src3_index
+)
+{
+   struct tgsi_full_instruction inst = tgsi_default_full_instruction();
+
+   inst.Instruction.Opcode = opcode;
+   inst.Instruction.NumDstRegs = 1;
+   inst.FullDstRegisters[0].DstRegister.File = dst_file;
+   inst.FullDstRegisters[0].DstRegister.Index = dst_index;
+   inst.Instruction.NumSrcRegs = 3;
+   inst.FullSrcRegisters[0].SrcRegister.File = src1_file;
+   inst.FullSrcRegisters[0].SrcRegister.Index = src1_index;
+   inst.FullSrcRegisters[1].SrcRegister.File = src2_file;
+
inst.FullSrcRegisters[1].SrcRegister.Index = src2_index; + inst.FullSrcRegisters[2].SrcRegister.File = src3_file; + inst.FullSrcRegisters[2].SrcRegister.Index = src3_index; + + return inst; +} + +struct tgsi_full_instruction vl_end(void) +{ + struct tgsi_full_instruction inst = tgsi_default_full_instruction(); + + inst.Instruction.Opcode = TGSI_OPCODE_END; + inst.Instruction.NumDstRegs = 0; + inst.Instruction.NumSrcRegs = 0; + + return inst; +} diff --git a/src/gallium/auxiliary/vl/vl_shader_build.h b/src/gallium/auxiliary/vl/vl_shader_build.h new file mode 100644 index 00000000000..5da71f8e136 --- /dev/null +++ b/src/gallium/auxiliary/vl/vl_shader_build.h @@ -0,0 +1,88 @@ +/************************************************************************** + * + * Copyright 2009 Younes Manton. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ **************************************************************************/
+
+#ifndef vl_shader_build_h
+#define vl_shader_build_h
+
+#include <pipe/p_shader_tokens.h>
+
+struct tgsi_full_declaration vl_decl_input(unsigned int name, unsigned int index, unsigned int first, unsigned int last);
+struct tgsi_full_declaration vl_decl_interpolated_input
+(
+   unsigned int name,
+   unsigned int index,
+   unsigned int first,
+   unsigned int last,
+   int interpolation
+);
+struct tgsi_full_declaration vl_decl_constants(unsigned int name, unsigned int index, unsigned int first, unsigned int last);
+struct tgsi_full_declaration vl_decl_output(unsigned int name, unsigned int index, unsigned int first, unsigned int last);
+struct tgsi_full_declaration vl_decl_temps(unsigned int first, unsigned int last);
+struct tgsi_full_declaration vl_decl_samplers(unsigned int first, unsigned int last);
+struct tgsi_full_instruction vl_inst2
+(
+   int opcode,
+   enum tgsi_file_type dst_file,
+   unsigned int dst_index,
+   enum tgsi_file_type src_file,
+   unsigned int src_index
+);
+struct tgsi_full_instruction vl_inst3
+(
+   int opcode,
+   enum tgsi_file_type dst_file,
+   unsigned int dst_index,
+   enum tgsi_file_type src1_file,
+   unsigned int src1_index,
+   enum tgsi_file_type src2_file,
+   unsigned int src2_index
+);
+struct tgsi_full_instruction vl_tex
+(
+   int tex,
+   enum tgsi_file_type dst_file,
+   unsigned int dst_index,
+   enum tgsi_file_type src1_file,
+   unsigned int src1_index,
+   enum tgsi_file_type src2_file,
+   unsigned int src2_index
+);
+struct tgsi_full_instruction vl_inst4
+(
+   int opcode,
+   enum tgsi_file_type dst_file,
+   unsigned int dst_index,
+   enum tgsi_file_type src1_file,
+   unsigned int src1_index,
+   enum tgsi_file_type src2_file,
+   unsigned int src2_index,
+   enum tgsi_file_type src3_file,
+   unsigned int src3_index
+);
+struct tgsi_full_instruction vl_end(void);
+
+#endif /* vl_shader_build_h */
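
The vl_shader_build helpers above only fill in tgsi_full_declaration / tgsi_full_instruction structs; serializing them into an actual struct tgsi_token stream is left to the caller (vl_shader_build.c already pulls in tgsi_build.h for that purpose). The sketch below is not part of the patch: the function name and the caller-provided arrays are made up, and it simply shows how the builders could be composed into the declarations and instructions of a trivial "sample one 2D texture into the color output" fragment shader.

    #include "vl_shader_build.h"

    /* Hypothetical composition example, not part of the patch. */
    static void
    sketch_trivial_frag_shader(struct tgsi_full_declaration decls[3],
                               struct tgsi_full_instruction insts[2])
    {
       /* DCL IN[0], GENERIC[0], LINEAR */
       decls[0] = vl_decl_interpolated_input(TGSI_SEMANTIC_GENERIC, 0, 0, 0,
                                             TGSI_INTERPOLATE_LINEAR);
       /* DCL OUT[0], COLOR */
       decls[1] = vl_decl_output(TGSI_SEMANTIC_COLOR, 0, 0, 0);
       /* DCL SAMP[0] */
       decls[2] = vl_decl_samplers(0, 0);

       /* TEX OUT[0], IN[0], SAMP[0], 2D */
       insts[0] = vl_tex(TGSI_TEXTURE_2D,
                         TGSI_FILE_OUTPUT, 0,
                         TGSI_FILE_INPUT, 0,
                         TGSI_FILE_SAMPLER, 0);
       /* END */
       insts[1] = vl_end();
    }

Feeding decls[] and insts[] through the tgsi_build serialization helpers to produce a token buffer is assumed to happen elsewhere and is omitted here.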
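For the motion-compensation interface declared in vl_mpeg12_mc_renderer.h earlier in this diff, a hedged usage sketch follows. The function name, the example picture size, and the sources of the pipe context, textures and decoded macroblocks are all assumptions rather than anything taken from the patch. Note also that in this version the renderer only flushes internally when a batch fills up or when a new target surface is submitted, so a real caller would keep one renderer alive across many pictures instead of tearing it down per picture as this compressed example does.

    #include "vl_mpeg12_mc_renderer.h"

    /* Hypothetical driver-side helper, not part of the patch. */
    static bool
    sketch_render_one_picture(struct pipe_context *pipe,
                              struct pipe_texture *dst,
                              struct pipe_texture *past,
                              struct pipe_texture *future,
                              struct pipe_mpeg12_macroblock *mbs,
                              unsigned num_mbs,
                              struct pipe_fence_handle **fence)
    {
       struct vl_mpeg12_mc_renderer mc;

       /* Whole-picture batching, one zero block transferred per channel and
        * power-of-two backing textures: a combination the init-time asserts
        * in vl_mpeg12_mc_renderer_init() currently accept. 720x480 is an
        * arbitrary example picture size. */
       if (!vl_mpeg12_mc_renderer_init(&mc, pipe, 720, 480,
                                       PIPE_VIDEO_CHROMA_FORMAT_420,
                                       VL_MPEG12_MC_RENDERER_BUFFER_PICTURE,
                                       VL_MPEG12_MC_RENDERER_EMPTY_BLOCK_XFER_ONE,
                                       true))
          return false;

       /* Submit the decoded macroblocks; the renderer batches them and draws
        * when a batch fills up or when a new target surface is seen. */
       vl_mpeg12_mc_renderer_render_macroblocks(&mc, dst, past, future,
                                                num_mbs, mbs, fence);

       vl_mpeg12_mc_renderer_cleanup(&mc);

       return true;
    }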