Diffstat (limited to 'src/gallium/auxiliary')
-rw-r--r--  src/gallium/auxiliary/Makefile | 16
-rw-r--r--  src/gallium/auxiliary/SConscript | 10
-rw-r--r--  src/gallium/auxiliary/util/u_format.csv | 8
-rw-r--r--  src/gallium/auxiliary/util/u_format_yuv.c | 135
-rw-r--r--  src/gallium/auxiliary/util/u_format_yuv.h | 135
-rw-r--r--  src/gallium/auxiliary/util/u_video.h | 71
-rw-r--r--  src/gallium/auxiliary/vl/vl_bitstream_parser.c | 208
-rw-r--r--  src/gallium/auxiliary/vl/vl_bitstream_parser.h | 67
-rw-r--r--  src/gallium/auxiliary/vl/vl_compositor.c | 640
-rw-r--r--  src/gallium/auxiliary/vl/vl_compositor.h | 98
-rw-r--r--  src/gallium/auxiliary/vl/vl_csc.c | 206
-rw-r--r--  src/gallium/auxiliary/vl/vl_csc.h | 53
-rw-r--r--  src/gallium/auxiliary/vl/vl_idct.c | 766
-rw-r--r--  src/gallium/auxiliary/vl/vl_idct.h | 124
-rw-r--r--  src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.c | 1093
-rw-r--r--  src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.h | 141
-rw-r--r--  src/gallium/auxiliary/vl/vl_types.h | 51
-rw-r--r--  src/gallium/auxiliary/vl/vl_vertex_buffers.c | 183
-rw-r--r--  src/gallium/auxiliary/vl/vl_vertex_buffers.h | 78
19 files changed, 4069 insertions, 14 deletions
diff --git a/src/gallium/auxiliary/Makefile b/src/gallium/auxiliary/Makefile
index e48063166ff..cc16bcfb539 100644
--- a/src/gallium/auxiliary/Makefile
+++ b/src/gallium/auxiliary/Makefile
@@ -144,14 +144,13 @@ C_SOURCES = \
util/u_transfer.c \
util/u_resource.c \
util/u_upload_mgr.c \
- util/u_vbuf_mgr.c
-
- # Disabling until pipe-video branch gets merged in
- #vl/vl_bitstream_parser.c \
- #vl/vl_mpeg12_mc_renderer.c \
- #vl/vl_compositor.c \
- #vl/vl_csc.c \
- #vl/vl_shader_build.c \
+ util/u_vbuf_mgr.c \
+ vl/vl_bitstream_parser.c \
+ vl/vl_mpeg12_mc_renderer.c \
+ vl/vl_compositor.c \
+ vl/vl_csc.c \
+ vl/vl_idct.c \
+ vl/vl_vertex_buffers.c
GALLIVM_SOURCES = \
gallivm/lp_bld_arit.c \
@@ -222,3 +221,4 @@ util/u_format_table.c: util/u_format_table.py util/u_format_pack.py util/u_forma
util/u_half.c: util/u_half.py
$(PYTHON2) util/u_half.py > $@
+# DO NOT DELETE
diff --git a/src/gallium/auxiliary/SConscript b/src/gallium/auxiliary/SConscript
index ef0b15f06ce..f4ab8a50da4 100644
--- a/src/gallium/auxiliary/SConscript
+++ b/src/gallium/auxiliary/SConscript
@@ -192,12 +192,10 @@ source = [
'util/u_transfer.c',
'util/u_upload_mgr.c',
'util/u_vbuf_mgr.c',
- # Disabling until pipe-video branch gets merged in
- #'vl/vl_bitstream_parser.c',
- #'vl/vl_mpeg12_mc_renderer.c',
- #'vl/vl_compositor.c',
- #'vl/vl_csc.c',
- #'vl/vl_shader_build.c',
+ 'vl/vl_bitstream_parser.c',
+ 'vl/vl_mpeg12_mc_renderer.c',
+ 'vl/vl_compositor.c',
+ 'vl/vl_csc.c',
]
if env['llvm']:
diff --git a/src/gallium/auxiliary/util/u_format.csv b/src/gallium/auxiliary/util/u_format.csv
index 26a0eebc544..0ae15c9cc0a 100644
--- a/src/gallium/auxiliary/util/u_format.csv
+++ b/src/gallium/auxiliary/util/u_format.csv
@@ -237,3 +237,11 @@ PIPE_FORMAT_R32G32B32A32_FIXED , plain, 1, 1, h32 , h32 , h32 , h32 , xyzw, r
PIPE_FORMAT_R10G10B10X2_USCALED , plain, 1, 1, u10 , u10 , u10 , x2 , xyz1, rgb
# A.k.a. D3DDECLTYPE_DEC3N
PIPE_FORMAT_R10G10B10X2_SNORM , plain, 1, 1, sn10, sn10, sn10 , x2 , xyz1, rgb
+
+PIPE_FORMAT_YV12 , subsampled, 1, 1, x8 , x8 , x8 , x8 , xyzw, yuv
+PIPE_FORMAT_YV16 , subsampled, 1, 1, x8 , x8 , x8 , x8 , xyzw, yuv
+PIPE_FORMAT_IYUV , subsampled, 1, 1, x8 , x8 , x8 , x8 , xyzw, yuv
+PIPE_FORMAT_NV12 , subsampled, 1, 1, x8 , x8 , x8 , x8 , xyzw, yuv
+PIPE_FORMAT_NV21 , subsampled, 1, 1, x8 , x8 , x8 , x8 , xyzw, yuv
+PIPE_FORMAT_IA44 , subsampled, 1, 1, x8 , x8 , x8 , x8 , xyzw, yuv
+PIPE_FORMAT_AI44 , subsampled, 1, 1, x8 , x8 , x8 , x8 , xyzw, yuv
diff --git a/src/gallium/auxiliary/util/u_format_yuv.c b/src/gallium/auxiliary/util/u_format_yuv.c
index ab8bf29c97b..64ea0b35347 100644
--- a/src/gallium/auxiliary/util/u_format_yuv.c
+++ b/src/gallium/auxiliary/util/u_format_yuv.c
@@ -1045,3 +1045,138 @@ util_format_yuyv_fetch_rgba_float(float *dst, const uint8_t *src,
dst[3] = 1.0f;
}
+
+/* XXX: Stubbed for now */
+void
+util_format_yv12_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_yv12_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_yv12_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_yv12_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_yv12_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j) {}
+void
+util_format_yv16_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_yv16_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_yv16_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_yv16_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_yv16_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j) {}
+void
+util_format_iyuv_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_iyuv_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_iyuv_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_iyuv_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_iyuv_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j) {}
+void
+util_format_nv12_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_nv12_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_nv12_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_nv12_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_nv12_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j) {}
+void
+util_format_nv21_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_nv21_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_nv21_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_nv21_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_nv21_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j) {}
+void
+util_format_ia44_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_ia44_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_ia44_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_ia44_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_ia44_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j) {}
+void
+util_format_ai44_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_ai44_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_ai44_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_ai44_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height) {}
+void
+util_format_ai44_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j) {}
diff --git a/src/gallium/auxiliary/util/u_format_yuv.h b/src/gallium/auxiliary/util/u_format_yuv.h
index dc9632346d1..9f2365a5266 100644
--- a/src/gallium/auxiliary/util/u_format_yuv.h
+++ b/src/gallium/auxiliary/util/u_format_yuv.h
@@ -169,6 +169,141 @@ void
util_format_yuyv_fetch_rgba_float(float *dst, const uint8_t *src,
unsigned i, unsigned j);
+/* XXX: Stubbed for now */
+void
+util_format_yv12_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_yv12_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_yv12_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_yv12_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_yv12_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j);
+void
+util_format_yv16_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_yv16_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_yv16_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_yv16_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_yv16_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j);
+void
+util_format_iyuv_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_iyuv_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_iyuv_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_iyuv_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_iyuv_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j);
+void
+util_format_nv12_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_nv12_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_nv12_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_nv12_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_nv12_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j);
+void
+util_format_nv21_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_nv21_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_nv21_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_nv21_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_nv21_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j);
+void
+util_format_ia44_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_ia44_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_ia44_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_ia44_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_ia44_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j);
+void
+util_format_ai44_unpack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_ai44_pack_rgba_8unorm(uint8_t *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_ai44_unpack_rgba_float(float *dst_row, unsigned dst_stride,
+ const uint8_t *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_ai44_pack_rgba_float(uint8_t *dst_row, unsigned dst_stride,
+ const float *src_row, unsigned src_stride,
+ unsigned width, unsigned height);
+void
+util_format_ai44_fetch_rgba_float(float *dst, const uint8_t *src,
+ unsigned i, unsigned j);
+
void
util_format_r8g8_b8g8_unorm_unpack_rgba_float(float *dst_row, unsigned dst_stride,
diff --git a/src/gallium/auxiliary/util/u_video.h b/src/gallium/auxiliary/util/u_video.h
new file mode 100644
index 00000000000..78cceb6bcf2
--- /dev/null
+++ b/src/gallium/auxiliary/util/u_video.h
@@ -0,0 +1,71 @@
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef U_VIDEO_H
+#define U_VIDEO_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <pipe/p_defines.h>
+
+/* u_reduce_video_profile() needs these */
+#include <pipe/p_compiler.h>
+#include <util/u_debug.h>
+
+static INLINE enum pipe_video_codec
+u_reduce_video_profile(enum pipe_video_profile profile)
+{
+ switch (profile)
+ {
+ case PIPE_VIDEO_PROFILE_MPEG1:
+ case PIPE_VIDEO_PROFILE_MPEG2_SIMPLE:
+ case PIPE_VIDEO_PROFILE_MPEG2_MAIN:
+ return PIPE_VIDEO_CODEC_MPEG12;
+
+ case PIPE_VIDEO_PROFILE_MPEG4_SIMPLE:
+ case PIPE_VIDEO_PROFILE_MPEG4_ADVANCED_SIMPLE:
+ return PIPE_VIDEO_CODEC_MPEG4;
+
+ case PIPE_VIDEO_PROFILE_VC1_SIMPLE:
+ case PIPE_VIDEO_PROFILE_VC1_MAIN:
+ case PIPE_VIDEO_PROFILE_VC1_ADVANCED:
+ return PIPE_VIDEO_CODEC_VC1;
+
+ case PIPE_VIDEO_PROFILE_MPEG4_AVC_BASELINE:
+ case PIPE_VIDEO_PROFILE_MPEG4_AVC_MAIN:
+ case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH:
+ return PIPE_VIDEO_CODEC_MPEG4_AVC;
+
+ default:
+ assert(0);
+ return PIPE_VIDEO_CODEC_UNKNOWN;
+ }
+}
+
+#endif /* U_VIDEO_H */
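
A quick sketch of how u_reduce_video_profile() above is meant to be used by a driver's capability check; the supports_profile() wrapper is hypothetical and not part of this commit:

    #include "util/u_video.h"

    /* Illustrative only: collapse the many profiles down to the codec
     * family the decoder actually has to implement, then report support. */
    static bool
    supports_profile(enum pipe_video_profile profile)
    {
       switch (u_reduce_video_profile(profile)) {
       case PIPE_VIDEO_CODEC_MPEG12:
          return true;   /* only the MPEG-1/2 path is wired up so far */
       default:
          return false;
       }
    }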
diff --git a/src/gallium/auxiliary/vl/vl_bitstream_parser.c b/src/gallium/auxiliary/vl/vl_bitstream_parser.c
new file mode 100644
index 00000000000..f07b3443b92
--- /dev/null
+++ b/src/gallium/auxiliary/vl/vl_bitstream_parser.c
@@ -0,0 +1,208 @@
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vl_bitstream_parser.h"
+#include <assert.h>
+#include <limits.h>
+#include <util/u_memory.h>
+#include <stdio.h>
+
+inline void endian_swap_ushort(unsigned short *x)
+{
+ x[0] = (x[0]>>8) |
+ (x[0]<<8);
+}
+
+inline void endian_swap_uint(unsigned int *x)
+{
+ x[0] = (x[0]>>24) |
+ ((x[0]<<8) & 0x00FF0000) |
+ ((x[0]>>8) & 0x0000FF00) |
+ (x[0]<<24);
+}
+
+inline void endian_swap_ulonglong(unsigned long long *x)
+{
+ x[0] = (x[0]>>56) |
+ ((x[0]<<40) & 0x00FF000000000000) |
+ ((x[0]<<24) & 0x0000FF0000000000) |
+ ((x[0]<<8) & 0x000000FF00000000) |
+ ((x[0]>>8) & 0x00000000FF000000) |
+ ((x[0]>>24) & 0x0000000000FF0000) |
+ ((x[0]>>40) & 0x000000000000FF00) |
+ (x[0]<<56);
+}
+
+static unsigned
+grab_bits(unsigned cursor, unsigned how_many_bits, unsigned bitstream_elt)
+{
+ unsigned excess_bits = sizeof(unsigned) * CHAR_BIT - how_many_bits;
+
+ assert(cursor < sizeof(unsigned) * CHAR_BIT);
+ assert(how_many_bits > 0 && how_many_bits <= sizeof(unsigned) * CHAR_BIT);
+ assert(cursor + how_many_bits <= sizeof(unsigned) * CHAR_BIT);
+
+ #ifndef PIPE_ARCH_BIG_ENDIAN
+ switch (sizeof(unsigned)) {
+ case 2:
+ endian_swap_ushort(&bitstream_elt);
+ break;
+ case 4:
+ endian_swap_uint(&bitstream_elt);
+ break;
+ case 8:
+ endian_swap_ulonglong(&bitstream_elt);
+ break;
+ }
+ #endif // !PIPE_ARCH_BIG_ENDIAN
+
+ return (bitstream_elt << cursor) >> (excess_bits);
+}
+
+static unsigned
+show_bits(unsigned cursor, unsigned how_many_bits, const unsigned *bitstream)
+{
+ unsigned cur_int = cursor / (sizeof(unsigned) * CHAR_BIT);
+ unsigned cur_bit = cursor % (sizeof(unsigned) * CHAR_BIT);
+
+ assert(bitstream);
+
+ if (cur_bit + how_many_bits > sizeof(unsigned) * CHAR_BIT) {
+ unsigned lower = grab_bits(cur_bit, sizeof(unsigned) * CHAR_BIT - cur_bit,
+ bitstream[cur_int]);
+ unsigned upper = grab_bits(0, cur_bit + how_many_bits - sizeof(unsigned) * CHAR_BIT,
+ bitstream[cur_int + 1]);
+ return lower | upper << (sizeof(unsigned) * CHAR_BIT - cur_bit);
+ }
+ else
+ return grab_bits(cur_bit, how_many_bits, bitstream[cur_int]);
+}
+
+bool vl_bitstream_parser_init(struct vl_bitstream_parser *parser,
+ unsigned num_bitstreams,
+ const void **bitstreams,
+ const unsigned *sizes)
+{
+ assert(parser);
+ assert(num_bitstreams);
+ assert(bitstreams);
+ assert(sizes);
+
+ parser->num_bitstreams = num_bitstreams;
+ parser->bitstreams = (const unsigned**)bitstreams;
+ parser->sizes = sizes;
+ parser->cur_bitstream = 0;
+ parser->cursor = 0;
+
+ return true;
+}
+
+void vl_bitstream_parser_cleanup(struct vl_bitstream_parser *parser)
+{
+ assert(parser);
+}
+
+unsigned
+vl_bitstream_parser_get_bits(struct vl_bitstream_parser *parser,
+ unsigned how_many_bits)
+{
+ unsigned bits;
+
+ assert(parser);
+
+ bits = vl_bitstream_parser_show_bits(parser, how_many_bits);
+
+ vl_bitstream_parser_forward(parser, how_many_bits);
+
+ return bits;
+}
+
+unsigned
+vl_bitstream_parser_show_bits(struct vl_bitstream_parser *parser,
+ unsigned how_many_bits)
+{
+ unsigned bits = 0;
+ unsigned shift = 0;
+ unsigned cursor;
+ unsigned cur_bitstream;
+
+ assert(parser);
+
+ cursor = parser->cursor;
+ cur_bitstream = parser->cur_bitstream;
+
+ while (1) {
+ unsigned bits_left = parser->sizes[cur_bitstream] * CHAR_BIT - cursor;
+ unsigned bits_to_show = how_many_bits > bits_left ? bits_left : how_many_bits;
+
+ bits |= show_bits(cursor, bits_to_show,
+ parser->bitstreams[cur_bitstream]) << shift;
+
+ if (how_many_bits > bits_to_show) {
+ how_many_bits -= bits_to_show;
+ cursor = 0;
+ ++cur_bitstream;
+ shift += bits_to_show;
+ }
+ else
+ break;
+ }
+
+ return bits;
+}
+
+void vl_bitstream_parser_forward(struct vl_bitstream_parser *parser,
+ unsigned how_many_bits)
+{
+ assert(parser);
+ assert(how_many_bits);
+
+ parser->cursor += how_many_bits;
+
+ while (parser->cursor > parser->sizes[parser->cur_bitstream] * CHAR_BIT) {
+ parser->cursor -= parser->sizes[parser->cur_bitstream++] * CHAR_BIT;
+ assert(parser->cur_bitstream < parser->num_bitstreams);
+ }
+}
+
+void vl_bitstream_parser_rewind(struct vl_bitstream_parser *parser,
+ unsigned how_many_bits)
+{
+ signed c;
+
+ assert(parser);
+ assert(how_many_bits);
+
+ c = parser->cursor - how_many_bits;
+
+ while (c < 0) {
+ c += parser->sizes[parser->cur_bitstream--] * CHAR_BIT;
+ assert(parser->cur_bitstream < parser->num_bitstreams);
+ }
+
+ parser->cursor = (unsigned)c;
+}
diff --git a/src/gallium/auxiliary/vl/vl_bitstream_parser.h b/src/gallium/auxiliary/vl/vl_bitstream_parser.h
new file mode 100644
index 00000000000..eeb51dd4295
--- /dev/null
+++ b/src/gallium/auxiliary/vl/vl_bitstream_parser.h
@@ -0,0 +1,67 @@
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef vl_bitstream_parser_h
+#define vl_bitstream_parser_h
+
+#include "pipe/p_compiler.h"
+
+struct vl_bitstream_parser
+{
+ unsigned num_bitstreams;
+ const unsigned **bitstreams;
+ const unsigned *sizes;
+ unsigned cur_bitstream;
+ unsigned cursor;
+};
+
+inline void endian_swap_ushort(unsigned short *x);
+inline void endian_swap_uint(unsigned int *x);
+inline void endian_swap_ulonglong(unsigned long long *x);
+
+bool vl_bitstream_parser_init(struct vl_bitstream_parser *parser,
+ unsigned num_bitstreams,
+ const void **bitstreams,
+ const unsigned *sizes);
+
+void vl_bitstream_parser_cleanup(struct vl_bitstream_parser *parser);
+
+unsigned
+vl_bitstream_parser_get_bits(struct vl_bitstream_parser *parser,
+ unsigned how_many_bits);
+
+unsigned
+vl_bitstream_parser_show_bits(struct vl_bitstream_parser *parser,
+ unsigned how_many_bits);
+
+void vl_bitstream_parser_forward(struct vl_bitstream_parser *parser,
+ unsigned how_many_bits);
+
+void vl_bitstream_parser_rewind(struct vl_bitstream_parser *parser,
+ unsigned how_many_bits);
+
+#endif /* vl_bitstream_parser_h */
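
For reference, a minimal sketch of how the parser above is intended to be driven; the read_first_word() helper and the start-code check are illustrative, not part of this commit, and assume the buffer is word-aligned and at least 4 bytes long (the parser walks it as an array of unsigned):

    #include "vl_bitstream_parser.h"

    static unsigned
    read_first_word(const void *data, unsigned size_in_bytes)
    {
       struct vl_bitstream_parser parser;
       const void *bitstreams[1] = { data };
       unsigned sizes[1] = { size_in_bytes };
       unsigned val = 0;

       if (!vl_bitstream_parser_init(&parser, 1, bitstreams, sizes))
          return 0;

       /* Peek at the first 24 bits without moving the cursor... */
       if (vl_bitstream_parser_show_bits(&parser, 24) == 0x000001)
          /* ...then read and consume a whole 32 bit start code. */
          val = vl_bitstream_parser_get_bits(&parser, 32);

       vl_bitstream_parser_cleanup(&parser);
       return val;
    }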
diff --git a/src/gallium/auxiliary/vl/vl_compositor.c b/src/gallium/auxiliary/vl/vl_compositor.c
new file mode 100644
index 00000000000..d1ba5faf788
--- /dev/null
+++ b/src/gallium/auxiliary/vl/vl_compositor.c
@@ -0,0 +1,640 @@
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vl_compositor.h"
+#include "util/u_draw.h"
+#include <assert.h>
+#include <pipe/p_context.h>
+#include <util/u_inlines.h>
+#include <util/u_memory.h>
+#include <util/u_keymap.h>
+#include <util/u_draw.h>
+#include <util/u_sampler.h>
+#include <tgsi/tgsi_ureg.h>
+#include "vl_csc.h"
+
+struct vertex_shader_consts
+{
+ struct vertex4f dst_scale;
+ struct vertex4f dst_trans;
+ struct vertex4f src_scale;
+ struct vertex4f src_trans;
+};
+
+struct fragment_shader_consts
+{
+ float matrix[16];
+};
+
+static bool
+u_video_rects_equal(struct pipe_video_rect *a, struct pipe_video_rect *b)
+{
+ assert(a && b);
+
+ if (a->x != b->x)
+ return false;
+ if (a->y != b->y)
+ return false;
+ if (a->w != b->w)
+ return false;
+ if (a->h != b->h)
+ return false;
+
+ return true;
+}
+
+static bool
+create_vert_shader(struct vl_compositor *c)
+{
+ struct ureg_program *shader;
+ struct ureg_src vpos, vtex;
+ struct ureg_dst o_vpos, o_vtex;
+
+ shader = ureg_create(TGSI_PROCESSOR_VERTEX);
+ if (!shader)
+ return false;
+
+ vpos = ureg_DECL_vs_input(shader, 0);
+ vtex = ureg_DECL_vs_input(shader, 1);
+ o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, 0);
+ o_vtex = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, 1);
+
+ /*
+ * o_vpos = vpos
+ * o_vtex = vtex
+ */
+ ureg_MOV(shader, o_vpos, vpos);
+ ureg_MOV(shader, o_vtex, vtex);
+
+ ureg_END(shader);
+
+ c->vertex_shader = ureg_create_shader_and_destroy(shader, c->pipe);
+ if (!c->vertex_shader)
+ return false;
+
+ return true;
+}
+
+static bool
+create_frag_shader_ycbcr_2_rgb(struct vl_compositor *c)
+{
+ struct ureg_program *shader;
+ struct ureg_src tc;
+ struct ureg_src csc[4];
+ struct ureg_src sampler;
+ struct ureg_dst texel;
+ struct ureg_dst fragment;
+ unsigned i;
+
+ shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
+ if (!shader)
+ return false;
+
+ tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, 1, TGSI_INTERPOLATE_LINEAR);
+ for (i = 0; i < 4; ++i)
+ csc[i] = ureg_DECL_constant(shader, i);
+ sampler = ureg_DECL_sampler(shader, 0);
+ texel = ureg_DECL_temporary(shader);
+ fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+
+ /*
+ * texel = tex(tc, sampler)
+ * fragment = csc * texel
+ */
+ ureg_TEX(shader, texel, TGSI_TEXTURE_2D, tc, sampler);
+ for (i = 0; i < 4; ++i)
+ ureg_DP4(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X << i), csc[i], ureg_src(texel));
+
+ ureg_release_temporary(shader, texel);
+ ureg_END(shader);
+
+ c->fragment_shader.ycbcr_2_rgb = ureg_create_shader_and_destroy(shader, c->pipe);
+ if (!c->fragment_shader.ycbcr_2_rgb)
+ return false;
+
+ return true;
+}
+
+static bool
+create_frag_shader_rgb_2_rgb(struct vl_compositor *c)
+{
+ struct ureg_program *shader;
+ struct ureg_src tc;
+ struct ureg_src sampler;
+ struct ureg_dst fragment;
+
+ shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
+ if (!shader)
+ return false;
+
+ tc = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, 1, TGSI_INTERPOLATE_LINEAR);
+ sampler = ureg_DECL_sampler(shader, 0);
+ fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+
+ /*
+ * fragment = tex(tc, sampler)
+ */
+ ureg_TEX(shader, fragment, TGSI_TEXTURE_2D, tc, sampler);
+ ureg_END(shader);
+
+ c->fragment_shader.rgb_2_rgb = ureg_create_shader_and_destroy(shader, c->pipe);
+ if (!c->fragment_shader.rgb_2_rgb)
+ return false;
+
+ return true;
+}
+
+static bool
+init_pipe_state(struct vl_compositor *c)
+{
+ struct pipe_sampler_state sampler;
+
+ assert(c);
+
+ c->fb_state.nr_cbufs = 1;
+ c->fb_state.zsbuf = NULL;
+
+ memset(&sampler, 0, sizeof(sampler));
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
+ sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
+ sampler.compare_func = PIPE_FUNC_ALWAYS;
+ sampler.normalized_coords = 1;
+ /*sampler.lod_bias = ;*/
+ /*sampler.min_lod = ;*/
+ /*sampler.max_lod = ;*/
+ /*sampler.border_color[i] = ;*/
+ /*sampler.max_anisotropy = ;*/
+ c->sampler = c->pipe->create_sampler_state(c->pipe, &sampler);
+
+ return true;
+}
+
+static void cleanup_pipe_state(struct vl_compositor *c)
+{
+ assert(c);
+
+ c->pipe->delete_sampler_state(c->pipe, c->sampler);
+}
+
+static bool
+init_shaders(struct vl_compositor *c)
+{
+ assert(c);
+
+ if (!create_vert_shader(c)) {
+ debug_printf("Unable to create vertex shader.\n");
+ return false;
+ }
+ if (!create_frag_shader_ycbcr_2_rgb(c)) {
+ debug_printf("Unable to create YCbCr-to-RGB fragment shader.\n");
+ return false;
+ }
+ if (!create_frag_shader_rgb_2_rgb(c)) {
+ debug_printf("Unable to create RGB-to-RGB fragment shader.\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void cleanup_shaders(struct vl_compositor *c)
+{
+ assert(c);
+
+ c->pipe->delete_vs_state(c->pipe, c->vertex_shader);
+ c->pipe->delete_fs_state(c->pipe, c->fragment_shader.ycbcr_2_rgb);
+ c->pipe->delete_fs_state(c->pipe, c->fragment_shader.rgb_2_rgb);
+}
+
+static bool
+init_buffers(struct vl_compositor *c)
+{
+ struct fragment_shader_consts fsc;
+ struct pipe_vertex_element vertex_elems[2];
+
+ assert(c);
+
+ /*
+ * Create our vertex buffer and vertex buffer elements
+ */
+ c->vertex_buf.stride = sizeof(struct vertex4f);
+ c->vertex_buf.buffer_offset = 0;
+ /* XXX: Create with DYNAMIC or STREAM */
+ c->vertex_buf.buffer = pipe_buffer_create
+ (
+ c->pipe->screen,
+ PIPE_BIND_VERTEX_BUFFER,
+ PIPE_USAGE_STATIC,
+ sizeof(struct vertex4f) * (VL_COMPOSITOR_MAX_LAYERS + 2) * 6
+ );
+
+ vertex_elems[0].src_offset = 0;
+ vertex_elems[0].instance_divisor = 0;
+ vertex_elems[0].vertex_buffer_index = 0;
+ vertex_elems[0].src_format = PIPE_FORMAT_R32G32_FLOAT;
+ vertex_elems[1].src_offset = sizeof(struct vertex2f);
+ vertex_elems[1].instance_divisor = 0;
+ vertex_elems[1].vertex_buffer_index = 0;
+ vertex_elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;
+ c->vertex_elems_state = c->pipe->create_vertex_elements_state(c->pipe, 2, vertex_elems);
+
+ /*
+ * Create our fragment shader's constant buffer
+ * Const buffer contains the color conversion matrix and bias vectors
+ */
+ /* XXX: Create with IMMUTABLE/STATIC... although it does change every once in a long while... */
+ c->fs_const_buf = pipe_buffer_create
+ (
+ c->pipe->screen,
+ PIPE_BIND_CONSTANT_BUFFER,
+ PIPE_USAGE_STATIC,
+ sizeof(struct fragment_shader_consts)
+ );
+
+ vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_IDENTITY, NULL, true, fsc.matrix);
+
+ vl_compositor_set_csc_matrix(c, fsc.matrix);
+
+ return true;
+}
+
+static void
+cleanup_buffers(struct vl_compositor *c)
+{
+ assert(c);
+
+ c->pipe->delete_vertex_elements_state(c->pipe, c->vertex_elems_state);
+ pipe_resource_reference(&c->vertex_buf.buffer, NULL);
+ pipe_resource_reference(&c->fs_const_buf, NULL);
+}
+
+static void
+texview_map_delete(const struct keymap *map,
+ const void *key, void *data,
+ void *user)
+{
+ struct pipe_sampler_view *sv = (struct pipe_sampler_view*)data;
+
+ assert(map);
+ assert(key);
+ assert(data);
+ assert(user);
+
+ pipe_sampler_view_reference(&sv, NULL);
+}
+
+bool vl_compositor_init(struct vl_compositor *compositor, struct pipe_context *pipe)
+{
+ unsigned i;
+
+ assert(compositor);
+
+ memset(compositor, 0, sizeof(struct vl_compositor));
+
+ compositor->pipe = pipe;
+
+ compositor->texview_map = util_new_keymap(sizeof(struct pipe_surface*), -1,
+ texview_map_delete);
+ if (!compositor->texview_map)
+ return false;
+
+ if (!init_pipe_state(compositor)) {
+ util_delete_keymap(compositor->texview_map, compositor->pipe);
+ return false;
+ }
+ if (!init_shaders(compositor)) {
+ util_delete_keymap(compositor->texview_map, compositor->pipe);
+ cleanup_pipe_state(compositor);
+ return false;
+ }
+ if (!init_buffers(compositor)) {
+ util_delete_keymap(compositor->texview_map, compositor->pipe);
+ cleanup_shaders(compositor);
+ cleanup_pipe_state(compositor);
+ return false;
+ }
+
+ compositor->fb_state.width = 0;
+ compositor->fb_state.height = 0;
+ compositor->bg = NULL;
+ compositor->dirty_bg = false;
+ for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; ++i)
+ compositor->layers[i] = NULL;
+ compositor->dirty_layers = 0;
+
+ return true;
+}
+
+void vl_compositor_cleanup(struct vl_compositor *compositor)
+{
+ assert(compositor);
+
+ util_delete_keymap(compositor->texview_map, compositor->pipe);
+ cleanup_buffers(compositor);
+ cleanup_shaders(compositor);
+ cleanup_pipe_state(compositor);
+}
+
+void vl_compositor_set_background(struct vl_compositor *compositor,
+ struct pipe_surface *bg, struct pipe_video_rect *bg_src_rect)
+{
+ assert(compositor);
+ assert((bg && bg_src_rect) || (!bg && !bg_src_rect));
+
+ if (compositor->bg != bg ||
+ !u_video_rects_equal(&compositor->bg_src_rect, bg_src_rect)) {
+ pipe_surface_reference(&compositor->bg, bg);
+ /*if (!u_video_rects_equal(&compositor->bg_src_rect, bg_src_rect))*/
+ compositor->bg_src_rect = *bg_src_rect;
+ compositor->dirty_bg = true;
+ }
+}
+
+void vl_compositor_set_layers(struct vl_compositor *compositor,
+ struct pipe_surface *layers[],
+ struct pipe_video_rect *src_rects[],
+ struct pipe_video_rect *dst_rects[],
+ unsigned num_layers)
+{
+ unsigned i;
+
+ assert(compositor);
+ assert(num_layers <= VL_COMPOSITOR_MAX_LAYERS);
+
+ for (i = 0; i < num_layers; ++i)
+ {
+ assert((layers[i] && src_rects[i] && dst_rects[i]) ||
+ (!layers[i] && !src_rects[i] && !dst_rects[i]));
+
+ if (compositor->layers[i] != layers[i] ||
+ !u_video_rects_equal(&compositor->layer_src_rects[i], src_rects[i]) ||
+ !u_video_rects_equal(&compositor->layer_dst_rects[i], dst_rects[i]))
+ {
+ pipe_surface_reference(&compositor->layers[i], layers[i]);
+ /*if (!u_video_rects_equal(&compositor->layer_src_rects[i], src_rects[i]))*/
+ compositor->layer_src_rects[i] = *src_rects[i];
+ /*if (!u_video_rects_equal(&compositor->layer_dst_rects[i], dst_rects[i]))*/
+ compositor->layer_dst_rects[i] = *dst_rects[i];
+ compositor->dirty_layers |= 1 << i;
+ }
+
+ if (layers[i])
+ compositor->dirty_layers |= 1 << i;
+ }
+
+ for (; i < VL_COMPOSITOR_MAX_LAYERS; ++i)
+ pipe_surface_reference(&compositor->layers[i], NULL);
+}
+
+static void gen_rect_verts(unsigned pos,
+ struct pipe_video_rect *src_rect,
+ struct vertex2f *src_inv_size,
+ struct pipe_video_rect *dst_rect,
+ struct vertex2f *dst_inv_size,
+ struct vertex4f *vb)
+{
+ assert(pos < VL_COMPOSITOR_MAX_LAYERS + 2);
+ assert(src_rect);
+ assert(src_inv_size);
+ assert((dst_rect && dst_inv_size) /*|| (!dst_rect && !dst_inv_size)*/);
+ assert(vb);
+
+ vb[pos * 6 + 0].x = dst_rect->x * dst_inv_size->x;
+ vb[pos * 6 + 0].y = dst_rect->y * dst_inv_size->y;
+ vb[pos * 6 + 0].z = src_rect->x * src_inv_size->x;
+ vb[pos * 6 + 0].w = src_rect->y * src_inv_size->y;
+
+ vb[pos * 6 + 1].x = dst_rect->x * dst_inv_size->x;
+ vb[pos * 6 + 1].y = (dst_rect->y + dst_rect->h) * dst_inv_size->y;
+ vb[pos * 6 + 1].z = src_rect->x * src_inv_size->x;
+ vb[pos * 6 + 1].w = (src_rect->y + src_rect->h) * src_inv_size->y;
+
+ vb[pos * 6 + 2].x = (dst_rect->x + dst_rect->w) * dst_inv_size->x;
+ vb[pos * 6 + 2].y = dst_rect->y * dst_inv_size->y;
+ vb[pos * 6 + 2].z = (src_rect->x + src_rect->w) * src_inv_size->x;
+ vb[pos * 6 + 2].w = src_rect->y * src_inv_size->y;
+
+ vb[pos * 6 + 3].x = (dst_rect->x + dst_rect->w) * dst_inv_size->x;
+ vb[pos * 6 + 3].y = dst_rect->y * dst_inv_size->y;
+ vb[pos * 6 + 3].z = (src_rect->x + src_rect->w) * src_inv_size->x;
+ vb[pos * 6 + 3].w = src_rect->y * src_inv_size->y;
+
+ vb[pos * 6 + 4].x = dst_rect->x * dst_inv_size->x;
+ vb[pos * 6 + 4].y = (dst_rect->y + dst_rect->h) * dst_inv_size->y;
+ vb[pos * 6 + 4].z = src_rect->x * src_inv_size->x;
+ vb[pos * 6 + 4].w = (src_rect->y + src_rect->h) * src_inv_size->y;
+
+ vb[pos * 6 + 5].x = (dst_rect->x + dst_rect->w) * dst_inv_size->x;
+ vb[pos * 6 + 5].y = (dst_rect->y + dst_rect->h) * dst_inv_size->y;
+ vb[pos * 6 + 5].z = (src_rect->x + src_rect->w) * src_inv_size->x;
+ vb[pos * 6 + 5].w = (src_rect->y + src_rect->h) * src_inv_size->y;
+}
+
+static unsigned gen_data(struct vl_compositor *c,
+ struct pipe_surface *src_surface,
+ struct pipe_video_rect *src_rect,
+ struct pipe_video_rect *dst_rect,
+ struct pipe_surface **textures,
+ void **frag_shaders)
+{
+ void *vb;
+ struct pipe_transfer *buf_transfer;
+ unsigned num_rects = 0;
+ unsigned i;
+
+ assert(c);
+ assert(src_surface);
+ assert(src_rect);
+ assert(dst_rect);
+ assert(textures);
+
+ vb = pipe_buffer_map(c->pipe, c->vertex_buf.buffer,
+ PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
+ &buf_transfer);
+
+ if (!vb)
+ return 0;
+
+ if (c->dirty_bg) {
+ struct vertex2f bg_inv_size = {1.0f / c->bg->width, 1.0f / c->bg->height};
+ gen_rect_verts(num_rects, &c->bg_src_rect, &bg_inv_size, NULL, NULL, vb);
+ textures[num_rects] = c->bg;
+ /* XXX: Hack */
+ frag_shaders[num_rects] = c->fragment_shader.rgb_2_rgb;
+ ++num_rects;
+ c->dirty_bg = false;
+ }
+
+ {
+ struct vertex2f src_inv_size = { 1.0f / src_surface->width, 1.0f / src_surface->height};
+ gen_rect_verts(num_rects, src_rect, &src_inv_size, dst_rect, &c->fb_inv_size, vb);
+ textures[num_rects] = src_surface;
+ /* XXX: Hack, sort of */
+ frag_shaders[num_rects] = c->fragment_shader.ycbcr_2_rgb;
+ ++num_rects;
+ }
+
+ for (i = 0; c->dirty_layers > 0; i++) {
+ assert(i < VL_COMPOSITOR_MAX_LAYERS);
+
+ if (c->dirty_layers & (1 << i)) {
+ struct vertex2f layer_inv_size = {1.0f / c->layers[i]->width, 1.0f / c->layers[i]->height};
+ gen_rect_verts(num_rects, &c->layer_src_rects[i], &layer_inv_size,
+ &c->layer_dst_rects[i], &c->fb_inv_size, vb);
+ textures[num_rects] = c->layers[i];
+ /* XXX: Hack */
+ frag_shaders[num_rects] = c->fragment_shader.rgb_2_rgb;
+ ++num_rects;
+ c->dirty_layers &= ~(1 << i);
+ }
+ }
+
+ pipe_buffer_unmap(c->pipe, buf_transfer);
+
+ return num_rects;
+}
+
+static void draw_layers(struct vl_compositor *c,
+ struct pipe_surface *src_surface,
+ struct pipe_video_rect *src_rect,
+ struct pipe_video_rect *dst_rect)
+{
+ unsigned num_rects;
+ struct pipe_surface *src_surfaces[VL_COMPOSITOR_MAX_LAYERS + 2];
+ void *frag_shaders[VL_COMPOSITOR_MAX_LAYERS + 2];
+ unsigned i;
+
+ assert(c);
+ assert(src_surface);
+ assert(src_rect);
+ assert(dst_rect);
+
+ num_rects = gen_data(c, src_surface, src_rect, dst_rect, src_surfaces, frag_shaders);
+
+ for (i = 0; i < num_rects; ++i) {
+ boolean delete_view = FALSE;
+ struct pipe_sampler_view *surface_view = (struct pipe_sampler_view*)util_keymap_lookup(c->texview_map,
+ &src_surfaces[i]);
+ if (!surface_view) {
+ struct pipe_sampler_view templat;
+ u_sampler_view_default_template(&templat, src_surfaces[i]->texture,
+ src_surfaces[i]->texture->format);
+ surface_view = c->pipe->create_sampler_view(c->pipe, src_surfaces[i]->texture,
+ &templat);
+ if (!surface_view)
+ return;
+
+ delete_view = !util_keymap_insert(c->texview_map, &src_surfaces[i],
+ surface_view, c->pipe);
+ }
+
+ c->pipe->bind_fs_state(c->pipe, frag_shaders[i]);
+ c->pipe->set_fragment_sampler_views(c->pipe, 1, &surface_view);
+
+ util_draw_arrays(c->pipe, PIPE_PRIM_TRIANGLES, i * 6, 6);
+
+ if (delete_view) {
+ pipe_sampler_view_reference(&surface_view, NULL);
+ }
+ }
+}
+
+void vl_compositor_render(struct vl_compositor *compositor,
+ struct pipe_surface *src_surface,
+ enum pipe_mpeg12_picture_type picture_type,
+ /*unsigned num_past_surfaces,
+ struct pipe_surface *past_surfaces,
+ unsigned num_future_surfaces,
+ struct pipe_surface *future_surfaces,*/
+ struct pipe_video_rect *src_area,
+ struct pipe_surface *dst_surface,
+ struct pipe_video_rect *dst_area,
+ struct pipe_fence_handle **fence)
+{
+ assert(compositor);
+ assert(src_surface);
+ assert(src_area);
+ assert(dst_surface);
+ assert(dst_area);
+ assert(picture_type == PIPE_MPEG12_PICTURE_TYPE_FRAME);
+
+ if (compositor->fb_state.width != dst_surface->width) {
+ compositor->fb_inv_size.x = 1.0f / dst_surface->width;
+ compositor->fb_state.width = dst_surface->width;
+ }
+ if (compositor->fb_state.height != dst_surface->height) {
+ compositor->fb_inv_size.y = 1.0f / dst_surface->height;
+ compositor->fb_state.height = dst_surface->height;
+ }
+
+ compositor->fb_state.cbufs[0] = dst_surface;
+
+ compositor->viewport.scale[0] = compositor->fb_state.width;
+ compositor->viewport.scale[1] = compositor->fb_state.height;
+ compositor->viewport.scale[2] = 1;
+ compositor->viewport.scale[3] = 1;
+ compositor->viewport.translate[0] = 0;
+ compositor->viewport.translate[1] = 0;
+ compositor->viewport.translate[2] = 0;
+ compositor->viewport.translate[3] = 0;
+
+ compositor->pipe->set_framebuffer_state(compositor->pipe, &compositor->fb_state);
+ compositor->pipe->set_viewport_state(compositor->pipe, &compositor->viewport);
+ compositor->pipe->bind_fragment_sampler_states(compositor->pipe, 1, &compositor->sampler);
+ compositor->pipe->bind_vs_state(compositor->pipe, compositor->vertex_shader);
+ compositor->pipe->set_vertex_buffers(compositor->pipe, 1, &compositor->vertex_buf);
+ compositor->pipe->bind_vertex_elements_state(compositor->pipe, compositor->vertex_elems_state);
+ compositor->pipe->set_constant_buffer(compositor->pipe, PIPE_SHADER_FRAGMENT, 0, compositor->fs_const_buf);
+
+ draw_layers(compositor, src_surface, src_area, dst_area);
+
+ assert(!compositor->dirty_bg && !compositor->dirty_layers);
+ compositor->pipe->flush(compositor->pipe, PIPE_FLUSH_RENDER_CACHE, fence);
+}
+
+void vl_compositor_set_csc_matrix(struct vl_compositor *compositor, const float *mat)
+{
+ struct pipe_transfer *buf_transfer;
+
+ assert(compositor);
+
+ memcpy
+ (
+ pipe_buffer_map(compositor->pipe, compositor->fs_const_buf,
+ PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
+ &buf_transfer),
+ mat,
+ sizeof(struct fragment_shader_consts)
+ );
+
+ pipe_buffer_unmap(compositor->pipe, buf_transfer);
+}
diff --git a/src/gallium/auxiliary/vl/vl_compositor.h b/src/gallium/auxiliary/vl/vl_compositor.h
new file mode 100644
index 00000000000..820c9ef6ddb
--- /dev/null
+++ b/src/gallium/auxiliary/vl/vl_compositor.h
@@ -0,0 +1,98 @@
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef vl_compositor_h
+#define vl_compositor_h
+
+#include <pipe/p_compiler.h>
+#include <pipe/p_state.h>
+#include <pipe/p_video_state.h>
+#include "vl_types.h"
+
+struct pipe_context;
+struct keymap;
+
+#define VL_COMPOSITOR_MAX_LAYERS 16
+
+struct vl_compositor
+{
+ struct pipe_context *pipe;
+
+ struct pipe_framebuffer_state fb_state;
+ struct vertex2f fb_inv_size;
+ void *sampler;
+ struct pipe_sampler_view *sampler_view;
+ void *vertex_shader;
+ struct
+ {
+ void *ycbcr_2_rgb;
+ void *rgb_2_rgb;
+ } fragment_shader;
+ struct pipe_viewport_state viewport;
+ struct pipe_vertex_buffer vertex_buf;
+ void *vertex_elems_state;
+ struct pipe_resource *fs_const_buf;
+
+ struct pipe_surface *bg;
+ struct pipe_video_rect bg_src_rect;
+ bool dirty_bg;
+ struct pipe_surface *layers[VL_COMPOSITOR_MAX_LAYERS];
+ struct pipe_video_rect layer_src_rects[VL_COMPOSITOR_MAX_LAYERS];
+ struct pipe_video_rect layer_dst_rects[VL_COMPOSITOR_MAX_LAYERS];
+ unsigned dirty_layers;
+
+ struct keymap *texview_map;
+};
+
+bool vl_compositor_init(struct vl_compositor *compositor, struct pipe_context *pipe);
+
+void vl_compositor_cleanup(struct vl_compositor *compositor);
+
+void vl_compositor_set_background(struct vl_compositor *compositor,
+ struct pipe_surface *bg, struct pipe_video_rect *bg_src_rect);
+
+void vl_compositor_set_layers(struct vl_compositor *compositor,
+ struct pipe_surface *layers[],
+ struct pipe_video_rect *src_rects[],
+ struct pipe_video_rect *dst_rects[],
+ unsigned num_layers);
+
+void vl_compositor_render(struct vl_compositor *compositor,
+ struct pipe_surface *src_surface,
+ enum pipe_mpeg12_picture_type picture_type,
+ /*unsigned num_past_surfaces,
+ struct pipe_surface *past_surfaces,
+ unsigned num_future_surfaces,
+ struct pipe_surface *future_surfaces,*/
+ struct pipe_video_rect *src_area,
+ struct pipe_surface *dst_surface,
+ struct pipe_video_rect *dst_area,
+ struct pipe_fence_handle **fence);
+
+void vl_compositor_set_csc_matrix(struct vl_compositor *compositor, const float *mat);
+
+#endif /* vl_compositor_h */
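
A rough sketch of the intended compositor call sequence, assuming the pipe context, surfaces and rects come from the state tracker; the present_frame() wrapper itself is illustrative (a real user would keep the compositor around rather than rebuild it per frame):

    #include "vl_compositor.h"
    #include "vl_csc.h"

    static void
    present_frame(struct pipe_context *pipe,
                  struct pipe_surface *decoded, struct pipe_video_rect *src,
                  struct pipe_surface *drawable, struct pipe_video_rect *dst)
    {
       struct vl_compositor c;
       float csc[16];

       if (!vl_compositor_init(&c, pipe))
          return;

       /* Decoded MPEG-2 data is BT.601 YCbCr; map it to full-range RGB. */
       vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_BT_601, NULL, true, csc);
       vl_compositor_set_csc_matrix(&c, csc);

       vl_compositor_render(&c, decoded, PIPE_MPEG12_PICTURE_TYPE_FRAME,
                            src, drawable, dst, NULL);

       vl_compositor_cleanup(&c);
    }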
diff --git a/src/gallium/auxiliary/vl/vl_csc.c b/src/gallium/auxiliary/vl/vl_csc.c
new file mode 100644
index 00000000000..5ecc43a5fa3
--- /dev/null
+++ b/src/gallium/auxiliary/vl/vl_csc.c
@@ -0,0 +1,206 @@
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vl_csc.h"
+#include <util/u_math.h>
+#include <util/u_debug.h>
+
+/*
+ * Color space conversion formulas
+ *
+ * To convert YCbCr to RGB,
+ * vec4 ycbcr, rgb
+ * mat44 csc
+ * rgb = csc * ycbcr
+ *
+ * To calculate the color space conversion matrix csc with ProcAmp adjustments,
+ * mat44 csc, cstd, procamp, bias
+ * csc = cstd * (procamp * bias)
+ *
+ * Where cstd is a matrix corresponding to one of the color standards (BT.601, BT.709, etc)
+ * adjusted for the kind of YCbCr -> RGB mapping wanted (1:1, full),
+ * bias is a matrix corresponding to the kind of YCbCr -> RGB mapping wanted (1:1, full)
+ *
+ * To calculate procamp,
+ * mat44 procamp, hue, saturation, brightness, contrast
+ * procamp = brightness * (saturation * (contrast * hue))
+ * Alternatively,
+ * procamp = saturation * (brightness * (contrast * hue))
+ *
+ * contrast
+ * [ c, 0, 0, 0]
+ * [ 0, c, 0, 0]
+ * [ 0, 0, c, 0]
+ * [ 0, 0, 0, 1]
+ *
+ * brightness
+ * [ 1, 0, 0, b]
+ * [ 0, 1, 0, 0]
+ * [ 0, 0, 1, 0]
+ * [ 0, 0, 0, 1]
+ *
+ * saturation
+ * [ 1, 0, 0, 0]
+ * [ 0, s, 0, 0]
+ * [ 0, 0, s, 0]
+ * [ 0, 0, 0, 1]
+ *
+ * hue
+ * [ 1, 0, 0, 0]
+ * [ 0, cos(h), sin(h), 0]
+ * [ 0, -sin(h), cos(h), 0]
+ * [ 0, 0, 0, 1]
+ *
+ * procamp
+ * [ c, 0, 0, b]
+ * [ 0, c*s*cos(h), c*s*sin(h), 0]
+ * [ 0, -c*s*sin(h), c*s*cos(h), 0]
+ * [ 0, 0, 0, 1]
+ *
+ * bias
+ * [ 1, 0, 0, ybias]
+ * [ 0, 1, 0, cbbias]
+ * [ 0, 0, 1, crbias]
+ * [ 0, 0, 0, 1]
+ *
+ * csc
+ * [ c*cstd[ 0], c*cstd[ 1]*s*cos(h) - c*cstd[ 2]*s*sin(h), c*cstd[ 2]*s*cos(h) + c*cstd[ 1]*s*sin(h), cstd[ 3] + cstd[ 0]*(b + c*ybias) + cstd[ 1]*(c*cbbias*s*cos(h) + c*crbias*s*sin(h)) + cstd[ 2]*(c*crbias*s*cos(h) - c*cbbias*s*sin(h))]
+ * [ c*cstd[ 4], c*cstd[ 5]*s*cos(h) - c*cstd[ 6]*s*sin(h), c*cstd[ 6]*s*cos(h) + c*cstd[ 5]*s*sin(h), cstd[ 7] + cstd[ 4]*(b + c*ybias) + cstd[ 5]*(c*cbbias*s*cos(h) + c*crbias*s*sin(h)) + cstd[ 6]*(c*crbias*s*cos(h) - c*cbbias*s*sin(h))]
+ * [ c*cstd[ 8], c*cstd[ 9]*s*cos(h) - c*cstd[10]*s*sin(h), c*cstd[10]*s*cos(h) + c*cstd[ 9]*s*sin(h), cstd[11] + cstd[ 8]*(b + c*ybias) + cstd[ 9]*(c*cbbias*s*cos(h) + c*crbias*s*sin(h)) + cstd[10]*(c*crbias*s*cos(h) - c*cbbias*s*sin(h))]
+ * [ c*cstd[12], c*cstd[13]*s*cos(h) - c*cstd[14]*s*sin(h), c*cstd[14]*s*cos(h) + c*cstd[13]*s*sin(h), cstd[15] + cstd[12]*(b + c*ybias) + cstd[13]*(c*cbbias*s*cos(h) + c*crbias*s*sin(h)) + cstd[14]*(c*crbias*s*cos(h) - c*cbbias*s*sin(h))]
+ */
+
+/*
+ * Converts ITU-R BT.601 YCbCr pixels to RGB pixels where:
+ * Y is in [16,235], Cb and Cr are in [16,240]
+ * R, G, and B are in [16,235]
+ */
+static const float bt_601[16] =
+{
+ 1.0f, 0.0f, 1.371f, 0.0f,
+ 1.0f, -0.336f, -0.698f, 0.0f,
+ 1.0f, 1.732f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f
+};
+
+/*
+ * Converts ITU-R BT.601 YCbCr pixels to RGB pixels where:
+ * Y is in [16,235], Cb and Cr are in [16,240]
+ * R, G, and B are in [0,255]
+ */
+static const float bt_601_full[16] =
+{
+ 1.164f, 0.0f, 1.596f, 0.0f,
+ 1.164f, -0.391f, -0.813f, 0.0f,
+ 1.164f, 2.018f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f
+};
+
+/*
+ * Converts ITU-R BT.709 YCbCr pixels to RGB pixels where:
+ * Y is in [16,235], Cb and Cr are in [16,240]
+ * R, G, and B are in [16,235]
+ */
+static const float bt_709[16] =
+{
+ 1.0f, 0.0f, 1.540f, 0.0f,
+ 1.0f, -0.183f, -0.459f, 0.0f,
+ 1.0f, 1.816f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f
+};
+
+/*
+ * Converts ITU-R BT.709 YCbCr pixels to RGB pixels where:
+ * Y is in [16,235], Cb and Cr are in [16,240]
+ * R, G, and B are in [0,255]
+ */
+static const float bt_709_full[16] =
+{
+ 1.164f, 0.0f, 1.793f, 0.0f,
+ 1.164f, -0.213f, -0.534f, 0.0f,
+ 1.164f, 2.115f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f
+};
+
+static const float identity[16] =
+{
+ 1.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 1.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 1.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f
+};
+
+void vl_csc_get_matrix(enum VL_CSC_COLOR_STANDARD cs,
+ struct vl_procamp *procamp,
+ bool full_range,
+ float *matrix)
+{
+ float ybias = full_range ? -16.0f/255.0f : 0.0f;
+ float cbbias = -128.0f/255.0f;
+ float crbias = -128.0f/255.0f;
+ float c = procamp ? procamp->contrast : 1.0f;
+ float s = procamp ? procamp->saturation : 1.0f;
+ float b = procamp ? procamp->brightness : 0.0f;
+ float h = procamp ? procamp->hue : 0.0f;
+ const float *cstd;
+
+ assert(matrix);
+
+ switch (cs) {
+ case VL_CSC_COLOR_STANDARD_BT_601:
+ cstd = full_range ? &bt_601_full[0] : &bt_601[0];
+ break;
+ case VL_CSC_COLOR_STANDARD_BT_709:
+ cstd = full_range ? &bt_709_full[0] : &bt_709[0];
+ break;
+ case VL_CSC_COLOR_STANDARD_IDENTITY:
+ default:
+ assert(cs == VL_CSC_COLOR_STANDARD_IDENTITY);
+ memcpy(matrix, &identity[0], sizeof(float) * 16);
+ return;
+ }
+
+ matrix[ 0] = c*cstd[ 0];
+ matrix[ 1] = c*cstd[ 1]*s*cosf(h) - c*cstd[ 2]*s*sinf(h);
+ matrix[ 2] = c*cstd[ 2]*s*cosf(h) + c*cstd[ 1]*s*sinf(h);
+ matrix[ 3] = cstd[ 3] + cstd[ 0]*(b + c*ybias) + cstd[ 1]*(c*cbbias*s*cosf(h) + c*crbias*s*sinf(h)) + cstd[ 2]*(c*crbias*s*cosf(h) - c*cbbias*s*sinf(h));
+
+ matrix[ 4] = c*cstd[ 4];
+ matrix[ 5] = c*cstd[ 5]*s*cosf(h) - c*cstd[ 6]*s*sinf(h);
+ matrix[ 6] = c*cstd[ 6]*s*cosf(h) + c*cstd[ 5]*s*sinf(h);
+ matrix[ 7] = cstd[ 7] + cstd[ 4]*(b + c*ybias) + cstd[ 5]*(c*cbbias*s*cosf(h) + c*crbias*s*sinf(h)) + cstd[ 6]*(c*crbias*s*cosf(h) - c*cbbias*s*sinf(h));
+
+ matrix[ 8] = c*cstd[ 8];
+ matrix[ 9] = c*cstd[ 9]*s*cosf(h) - c*cstd[10]*s*sinf(h);
+ matrix[10] = c*cstd[10]*s*cosf(h) + c*cstd[ 9]*s*sinf(h);
+ matrix[11] = cstd[11] + cstd[ 8]*(b + c*ybias) + cstd[ 9]*(c*cbbias*s*cosf(h) + c*crbias*s*sinf(h)) + cstd[10]*(c*crbias*s*cosf(h) - c*cbbias*s*sinf(h));
+
+ matrix[12] = c*cstd[12];
+ matrix[13] = c*cstd[13]*s*cosf(h) - c*cstd[14]*s*sinf(h);
+ matrix[14] = c*cstd[14]*s*cosf(h) + c*cstd[13]*s*sinf(h);
+ matrix[15] = cstd[15] + cstd[12]*(b + c*ybias) + cstd[13]*(c*cbbias*s*cosf(h) + c*crbias*s*sinf(h)) + cstd[14]*(c*crbias*s*cosf(h) - c*cbbias*s*sinf(h));
+}
diff --git a/src/gallium/auxiliary/vl/vl_csc.h b/src/gallium/auxiliary/vl/vl_csc.h
new file mode 100644
index 00000000000..722ca35f339
--- /dev/null
+++ b/src/gallium/auxiliary/vl/vl_csc.h
@@ -0,0 +1,53 @@
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef vl_csc_h
+#define vl_csc_h
+
+#include <pipe/p_compiler.h>
+
+struct vl_procamp
+{
+ float brightness;
+ float contrast;
+ float saturation;
+ float hue;
+};
+
+enum VL_CSC_COLOR_STANDARD
+{
+ VL_CSC_COLOR_STANDARD_IDENTITY,
+ VL_CSC_COLOR_STANDARD_BT_601,
+ VL_CSC_COLOR_STANDARD_BT_709
+};
+
+void vl_csc_get_matrix(enum VL_CSC_COLOR_STANDARD cs,
+ struct vl_procamp *procamp,
+ bool full_range,
+ float *matrix);
+
+#endif /* vl_csc_h */
diff --git a/src/gallium/auxiliary/vl/vl_idct.c b/src/gallium/auxiliary/vl/vl_idct.c
new file mode 100644
index 00000000000..89463a5c75c
--- /dev/null
+++ b/src/gallium/auxiliary/vl/vl_idct.c
@@ -0,0 +1,766 @@
+/**************************************************************************
+ *
+ * Copyright 2010 Christian König
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vl_idct.h"
+#include "vl_vertex_buffers.h"
+#include "util/u_draw.h"
+#include <assert.h>
+#include <pipe/p_context.h>
+#include <pipe/p_screen.h>
+#include <util/u_inlines.h>
+#include <util/u_sampler.h>
+#include <util/u_format.h>
+#include <tgsi/tgsi_ureg.h>
+#include "vl_types.h"
+
+#define BLOCK_WIDTH 8
+#define BLOCK_HEIGHT 8
+
+#define SCALE_FACTOR_16_TO_9 (32768.0f / 256.0f)
+
+#define NR_RENDER_TARGETS 4
+
+enum VS_INPUT
+{
+ VS_I_RECT,
+ VS_I_VPOS,
+
+ NUM_VS_INPUTS
+};
+
+enum VS_OUTPUT
+{
+ VS_O_VPOS,
+ VS_O_L_ADDR0,
+ VS_O_L_ADDR1,
+ VS_O_R_ADDR0,
+ VS_O_R_ADDR1
+};
+
+static const float const_matrix[8][8] = {
+ { 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.3535530f, 0.353553f, 0.3535530f },
+ { 0.4903930f, 0.4157350f, 0.2777850f, 0.0975451f, -0.0975452f, -0.2777850f, -0.415735f, -0.4903930f },
+ { 0.4619400f, 0.1913420f, -0.1913420f, -0.4619400f, -0.4619400f, -0.1913420f, 0.191342f, 0.4619400f },
+ { 0.4157350f, -0.0975452f, -0.4903930f, -0.2777850f, 0.2777850f, 0.4903930f, 0.097545f, -0.4157350f },
+ { 0.3535530f, -0.3535530f, -0.3535530f, 0.3535540f, 0.3535530f, -0.3535540f, -0.353553f, 0.3535530f },
+ { 0.2777850f, -0.4903930f, 0.0975452f, 0.4157350f, -0.4157350f, -0.0975451f, 0.490393f, -0.2777850f },
+ { 0.1913420f, -0.4619400f, 0.4619400f, -0.1913420f, -0.1913410f, 0.4619400f, -0.461940f, 0.1913420f },
+ { 0.0975451f, -0.2777850f, 0.4157350f, -0.4903930f, 0.4903930f, -0.4157350f, 0.277786f, -0.0975458f }
+};
+
+static void
+calc_addr(struct ureg_program *shader, struct ureg_dst addr[2],
+ struct ureg_src tc, struct ureg_src start, bool right_side,
+ bool transposed, float size)
+{
+ unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
+ unsigned sw_start = right_side ? TGSI_SWIZZLE_Y : TGSI_SWIZZLE_X;
+
+ unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;
+ unsigned sw_tc = right_side ? TGSI_SWIZZLE_X : TGSI_SWIZZLE_Y;
+
+ /*
+ * addr[0..1].(start) = start.(right_side ? y : x)
+ * addr[0..1].(tc) = tc.(right_side ? x : y)
+ * addr[0..1].z = tc.z
+ * addr[1].(start) += 1.0f / size
+ */
+ ureg_MOV(shader, ureg_writemask(addr[0], wm_start), ureg_scalar(start, sw_start));
+ ureg_MOV(shader, ureg_writemask(addr[0], wm_tc), ureg_scalar(tc, sw_tc));
+ ureg_MOV(shader, ureg_writemask(addr[0], TGSI_WRITEMASK_Z), tc);
+
+ ureg_ADD(shader, ureg_writemask(addr[1], wm_start), ureg_scalar(start, sw_start), ureg_imm1f(shader, 1.0f / size));
+ ureg_MOV(shader, ureg_writemask(addr[1], wm_tc), ureg_scalar(tc, sw_tc));
+ ureg_MOV(shader, ureg_writemask(addr[1], TGSI_WRITEMASK_Z), tc);
+}
+
+static void *
+create_vert_shader(struct vl_idct *idct, bool matrix_stage)
+{
+ struct ureg_program *shader;
+ struct ureg_src scale;
+ struct ureg_src vrect, vpos;
+ struct ureg_dst t_tex, t_start;
+ struct ureg_dst o_vpos, o_l_addr[2], o_r_addr[2];
+
+ shader = ureg_create(TGSI_PROCESSOR_VERTEX);
+ if (!shader)
+ return NULL;
+
+ t_tex = ureg_DECL_temporary(shader);
+ t_start = ureg_DECL_temporary(shader);
+
+ vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
+ vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);
+
+ o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
+
+ o_l_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0);
+ o_l_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1);
+
+ o_r_addr[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0);
+ o_r_addr[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1);
+
+ /*
+ * scale = (BLOCK_WIDTH, BLOCK_HEIGHT) / (dst.width, dst.height)
+ *
+ * t_vpos = vpos + vrect
+ * o_vpos.xy = t_vpos * scale
+ * o_vpos.zw = vpos
+ *
+ * o_l_addr = calc_addr(...)
+ * o_r_addr = calc_addr(...)
+ *
+ */
+ scale = ureg_imm2f(shader,
+ (float)BLOCK_WIDTH / idct->buffer_width,
+ (float)BLOCK_HEIGHT / idct->buffer_height);
+
+ ureg_ADD(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), vpos, vrect);
+ ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_XY), ureg_src(t_tex), scale);
+ ureg_MUL(shader, ureg_writemask(t_tex, TGSI_WRITEMASK_Z),
+ ureg_scalar(vrect, TGSI_SWIZZLE_X),
+ ureg_imm1f(shader, BLOCK_WIDTH / NR_RENDER_TARGETS));
+
+ ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_tex));
+ ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);
+
+ ureg_MUL(shader, ureg_writemask(t_start, TGSI_WRITEMASK_XY), vpos, scale);
+
+ if(matrix_stage) {
+ calc_addr(shader, o_l_addr, ureg_src(t_tex), ureg_src(t_start), false, false, idct->buffer_width / 4);
+ calc_addr(shader, o_r_addr, vrect, ureg_imm1f(shader, 0.0f), true, true, BLOCK_WIDTH / 4);
+ } else {
+ calc_addr(shader, o_l_addr, vrect, ureg_imm1f(shader, 0.0f), false, false, BLOCK_WIDTH / 4);
+ calc_addr(shader, o_r_addr, ureg_src(t_tex), ureg_src(t_start), true, false, idct->buffer_height / 4);
+ }
+
+ ureg_release_temporary(shader, t_tex);
+ ureg_release_temporary(shader, t_start);
+
+ ureg_END(shader);
+
+ return ureg_create_shader_and_destroy(shader, idct->pipe);
+}
+
+static void
+increment_addr(struct ureg_program *shader, struct ureg_dst daddr[2],
+ struct ureg_src saddr[2], bool right_side, bool transposed,
+ int pos, float size)
+{
+ unsigned wm_start = (right_side == transposed) ? TGSI_WRITEMASK_X : TGSI_WRITEMASK_Y;
+ unsigned wm_tc = (right_side == transposed) ? TGSI_WRITEMASK_Y : TGSI_WRITEMASK_X;
+
+ /*
+ * daddr[0..1].(start) = saddr[0..1].(start)
+ * daddr[0..1].(tc) = saddr[0..1].(tc) + pos / size
+ */
+
+ ureg_MOV(shader, ureg_writemask(daddr[0], wm_start), saddr[0]);
+ ureg_ADD(shader, ureg_writemask(daddr[0], wm_tc), saddr[0], ureg_imm1f(shader, pos / size));
+ ureg_MOV(shader, ureg_writemask(daddr[1], wm_start), saddr[1]);
+ ureg_ADD(shader, ureg_writemask(daddr[1], wm_tc), saddr[1], ureg_imm1f(shader, pos / size));
+}
+
+static void
+fetch_four(struct ureg_program *shader, struct ureg_dst m[2], struct ureg_src addr[2], struct ureg_src sampler)
+{
+ ureg_TEX(shader, m[0], TGSI_TEXTURE_3D, addr[0], sampler);
+ ureg_TEX(shader, m[1], TGSI_TEXTURE_3D, addr[1], sampler);
+}
+
+static void
+matrix_mul(struct ureg_program *shader, struct ureg_dst dst, struct ureg_dst l[2], struct ureg_dst r[2])
+{
+ struct ureg_dst tmp;
+
+ tmp = ureg_DECL_temporary(shader);
+
+ /*
+ * tmp.xy = dot4(l[0..1], r[0..1])
+ * dst = tmp.x + tmp.y
+ */
+ ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_X), ureg_src(l[0]), ureg_src(r[0]));
+ ureg_DP4(shader, ureg_writemask(tmp, TGSI_WRITEMASK_Y), ureg_src(l[1]), ureg_src(r[1]));
+ ureg_ADD(shader, dst,
+ ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_X),
+ ureg_scalar(ureg_src(tmp), TGSI_SWIZZLE_Y));
+
+ ureg_release_temporary(shader, tmp);
+}
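The two DP4s and the final ADD together evaluate a single 8-element dot product whose operands arrive as two 4-component texels. A scalar sketch of the same arithmetic (not part of the patch; dot8 is a name invented for illustration):

   /* Illustrative only: scalar equivalent of matrix_mul() above, where l and r
    * each hold eight coefficients fetched as two 4-component texels. */
   static float dot8(const float l[8], const float r[8])
   {
      float lo = 0.0f, hi = 0.0f;
      int i;
      for (i = 0; i < 4; ++i) {
         lo += l[i] * r[i];         /* DP4 on the first texel pair  */
         hi += l[i + 4] * r[i + 4]; /* DP4 on the second texel pair */
      }
      return lo + hi;               /* the final ADD */
   }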
+
+static void *
+create_matrix_frag_shader(struct vl_idct *idct)
+{
+ struct ureg_program *shader;
+
+ struct ureg_src l_addr[2], r_addr[2];
+
+ struct ureg_dst l[4][2], r[2];
+ struct ureg_dst fragment[NR_RENDER_TARGETS];
+
+ unsigned i, j;
+
+ shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
+ if (!shader)
+ return NULL;
+
+ l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR);
+ l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR);
+
+ r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR);
+ r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR);
+
+ for (i = 0; i < NR_RENDER_TARGETS; ++i)
+ fragment[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, i);
+
+ for (i = 0; i < 4; ++i) {
+ l[i][0] = ureg_DECL_temporary(shader);
+ l[i][1] = ureg_DECL_temporary(shader);
+ }
+
+ r[0] = ureg_DECL_temporary(shader);
+ r[1] = ureg_DECL_temporary(shader);
+
+ for (i = 1; i < 4; ++i) {
+ increment_addr(shader, l[i], l_addr, false, false, i, idct->buffer_height);
+ }
+
+ for (i = 0; i < 4; ++i) {
+ struct ureg_src s_addr[2];
+ s_addr[0] = i == 0 ? l_addr[0] : ureg_src(l[i][0]);
+ s_addr[1] = i == 0 ? l_addr[1] : ureg_src(l[i][1]);
+ fetch_four(shader, l[i], s_addr, ureg_DECL_sampler(shader, 1));
+ }
+
+ for (i = 0; i < NR_RENDER_TARGETS; ++i) {
+ struct ureg_src s_addr[2];
+
+ if(i > 0)
+ increment_addr(shader, r, r_addr, true, true, i, BLOCK_HEIGHT);
+
+ s_addr[0] = i == 0 ? r_addr[0] : ureg_src(r[0]);
+ s_addr[1] = i == 0 ? r_addr[1] : ureg_src(r[1]);
+ fetch_four(shader, r, s_addr, ureg_DECL_sampler(shader, 0));
+
+ for (j = 0; j < 4; ++j) {
+ matrix_mul(shader, ureg_writemask(fragment[i], TGSI_WRITEMASK_X << j), l[j], r);
+ }
+ }
+
+ for (i = 0; i < 4; ++i) {
+ ureg_release_temporary(shader, l[i][0]);
+ ureg_release_temporary(shader, l[i][1]);
+ }
+ ureg_release_temporary(shader, r[0]);
+ ureg_release_temporary(shader, r[1]);
+
+ ureg_END(shader);
+
+ return ureg_create_shader_and_destroy(shader, idct->pipe);
+}
+
+static void *
+create_transpose_frag_shader(struct vl_idct *idct)
+{
+ struct ureg_program *shader;
+
+ struct ureg_src l_addr[2], r_addr[2];
+
+ struct ureg_dst l[2], r[2];
+ struct ureg_dst fragment;
+
+ shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
+ if (!shader)
+ return NULL;
+
+ l_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR0, TGSI_INTERPOLATE_LINEAR);
+ l_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_L_ADDR1, TGSI_INTERPOLATE_LINEAR);
+
+ r_addr[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR0, TGSI_INTERPOLATE_LINEAR);
+ r_addr[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_R_ADDR1, TGSI_INTERPOLATE_LINEAR);
+
+ l[0] = ureg_DECL_temporary(shader);
+ l[1] = ureg_DECL_temporary(shader);
+ r[0] = ureg_DECL_temporary(shader);
+ r[1] = ureg_DECL_temporary(shader);
+
+ fetch_four(shader, l, l_addr, ureg_DECL_sampler(shader, 0));
+ fetch_four(shader, r, r_addr, ureg_DECL_sampler(shader, 1));
+
+ fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+
+ matrix_mul(shader, ureg_writemask(fragment, TGSI_WRITEMASK_X), l, r);
+
+ ureg_release_temporary(shader, l[0]);
+ ureg_release_temporary(shader, l[1]);
+ ureg_release_temporary(shader, r[0]);
+ ureg_release_temporary(shader, r[1]);
+
+ ureg_END(shader);
+
+ return ureg_create_shader_and_destroy(shader, idct->pipe);
+}
+
+static bool
+init_shaders(struct vl_idct *idct)
+{
+ idct->matrix_vs = create_vert_shader(idct, true);
+ idct->matrix_fs = create_matrix_frag_shader(idct);
+
+ idct->transpose_vs = create_vert_shader(idct, false);
+ idct->transpose_fs = create_transpose_frag_shader(idct);
+
+ return
+ idct->matrix_vs != NULL &&
+ idct->matrix_fs != NULL &&
+ idct->transpose_vs != NULL &&
+ idct->transpose_fs != NULL;
+}
+
+static void
+cleanup_shaders(struct vl_idct *idct)
+{
+ idct->pipe->delete_vs_state(idct->pipe, idct->matrix_vs);
+ idct->pipe->delete_fs_state(idct->pipe, idct->matrix_fs);
+ idct->pipe->delete_vs_state(idct->pipe, idct->transpose_vs);
+ idct->pipe->delete_fs_state(idct->pipe, idct->transpose_fs);
+}
+
+static bool
+init_state(struct vl_idct *idct)
+{
+ struct pipe_vertex_element vertex_elems[NUM_VS_INPUTS];
+ struct pipe_sampler_state sampler;
+ struct pipe_rasterizer_state rs_state;
+ unsigned i;
+
+ assert(idct);
+
+ idct->quad = vl_vb_upload_quads(idct->pipe, idct->max_blocks);
+
+ if(idct->quad.buffer == NULL)
+ return false;
+
+ for (i = 0; i < 4; ++i) {
+ memset(&sampler, 0, sizeof(sampler));
+ sampler.wrap_s = PIPE_TEX_WRAP_REPEAT;
+ sampler.wrap_t = PIPE_TEX_WRAP_REPEAT;
+ sampler.wrap_r = PIPE_TEX_WRAP_REPEAT;
+ sampler.min_img_filter = PIPE_TEX_FILTER_NEAREST;
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ sampler.mag_img_filter = PIPE_TEX_FILTER_NEAREST;
+ sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
+ sampler.compare_func = PIPE_FUNC_ALWAYS;
+ sampler.normalized_coords = 1;
+ /*sampler.shadow_ambient = ; */
+ /*sampler.lod_bias = ; */
+ sampler.min_lod = 0;
+ /*sampler.max_lod = ; */
+ /*sampler.border_color[0] = ; */
+ /*sampler.max_anisotropy = ; */
+ idct->samplers.all[i] = idct->pipe->create_sampler_state(idct->pipe, &sampler);
+ }
+
+ memset(&rs_state, 0, sizeof(rs_state));
+ /*rs_state.sprite_coord_enable */
+ rs_state.sprite_coord_mode = PIPE_SPRITE_COORD_UPPER_LEFT;
+ rs_state.point_quad_rasterization = true;
+ rs_state.point_size = BLOCK_WIDTH;
+ rs_state.gl_rasterization_rules = false;
+ idct->rs_state = idct->pipe->create_rasterizer_state(idct->pipe, &rs_state);
+
+ vertex_elems[VS_I_RECT] = vl_vb_get_quad_vertex_element();
+
+ /* Pos element */
+ vertex_elems[VS_I_VPOS].src_format = PIPE_FORMAT_R16G16_SSCALED;
+
+ idct->vertex_buffer_stride = vl_vb_element_helper(&vertex_elems[VS_I_VPOS], 1, 1);
+ idct->vertex_elems_state = idct->pipe->create_vertex_elements_state(idct->pipe, 2, vertex_elems);
+
+ return true;
+}
+
+static void
+cleanup_state(struct vl_idct *idct)
+{
+ unsigned i;
+
+ for (i = 0; i < 4; ++i)
+ idct->pipe->delete_sampler_state(idct->pipe, idct->samplers.all[i]);
+
+ idct->pipe->delete_rasterizer_state(idct->pipe, idct->rs_state);
+ idct->pipe->delete_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
+}
+
+static bool
+init_textures(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+{
+ struct pipe_resource template;
+ struct pipe_sampler_view sampler_view;
+ unsigned i;
+
+ assert(idct && buffer);
+
+ /* create textures */
+ memset(&template, 0, sizeof(struct pipe_resource));
+ template.last_level = 0;
+ template.bind = PIPE_BIND_SAMPLER_VIEW;
+ template.flags = 0;
+
+ template.target = PIPE_TEXTURE_2D;
+ template.format = PIPE_FORMAT_R16G16B16A16_SNORM;
+ template.width0 = idct->buffer_width / 4;
+ template.height0 = idct->buffer_height;
+ template.depth0 = 1;
+ template.array_size = 1;
+ template.usage = PIPE_USAGE_STREAM;
+ buffer->textures.individual.source = idct->pipe->screen->resource_create(idct->pipe->screen, &template);
+
+ template.target = PIPE_TEXTURE_3D;
+ template.format = PIPE_FORMAT_R16G16B16A16_SNORM;
+ template.width0 = idct->buffer_width / NR_RENDER_TARGETS;
+ template.height0 = idct->buffer_height / 4;
+ template.depth0 = NR_RENDER_TARGETS;
+ template.usage = PIPE_USAGE_STATIC;
+ buffer->textures.individual.intermediate = idct->pipe->screen->resource_create(idct->pipe->screen, &template);
+
+ for (i = 0; i < 4; ++i) {
+ if(buffer->textures.all[i] == NULL)
+ return false; /* a texture failed to allocate */
+
+ u_sampler_view_default_template(&sampler_view, buffer->textures.all[i], buffer->textures.all[i]->format);
+ buffer->sampler_views.all[i] = idct->pipe->create_sampler_view(idct->pipe, buffer->textures.all[i], &sampler_view);
+ }
+
+ return true;
+}
+
+static void
+cleanup_textures(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+{
+ unsigned i;
+
+ assert(idct && buffer);
+
+ for (i = 0; i < 4; ++i) {
+ pipe_sampler_view_reference(&buffer->sampler_views.all[i], NULL);
+ pipe_resource_reference(&buffer->textures.all[i], NULL);
+ }
+}
+
+static bool
+init_vertex_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+{
+ assert(idct && buffer);
+
+ buffer->vertex_bufs.individual.quad.stride = idct->quad.stride;
+ buffer->vertex_bufs.individual.quad.buffer_offset = idct->quad.buffer_offset;
+ pipe_resource_reference(&buffer->vertex_bufs.individual.quad.buffer, idct->quad.buffer);
+
+ buffer->vertex_bufs.individual.pos = vl_vb_init(
+ &buffer->blocks, idct->pipe, idct->max_blocks,
+ idct->vertex_buffer_stride);
+
+ if(buffer->vertex_bufs.individual.pos.buffer == NULL)
+ return false;
+
+ return true;
+}
+
+static void
+cleanup_vertex_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+{
+ assert(idct && buffer);
+
+ pipe_resource_reference(&buffer->vertex_bufs.individual.quad.buffer, NULL);
+ pipe_resource_reference(&buffer->vertex_bufs.individual.pos.buffer, NULL);
+
+ vl_vb_cleanup(&buffer->blocks);
+}
+
+struct pipe_resource *
+vl_idct_upload_matrix(struct pipe_context *pipe)
+{
+ struct pipe_resource template, *matrix;
+ struct pipe_transfer *buf_transfer;
+ unsigned i, j, pitch;
+ float *f;
+
+ struct pipe_box rect =
+ {
+ 0, 0, 0,
+ BLOCK_WIDTH / 4,
+ BLOCK_HEIGHT,
+ 1
+ };
+
+ memset(&template, 0, sizeof(struct pipe_resource));
+ template.target = PIPE_TEXTURE_2D;
+ template.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
+ template.last_level = 0;
+ template.width0 = 2;
+ template.height0 = 8;
+ template.depth0 = 1;
+ template.array_size = 1;
+ template.usage = PIPE_USAGE_IMMUTABLE;
+ template.bind = PIPE_BIND_SAMPLER_VIEW;
+ template.flags = 0;
+
+ matrix = pipe->screen->resource_create(pipe->screen, &template);
+
+ /* matrix */
+ buf_transfer = pipe->get_transfer
+ (
+ pipe, matrix,
+ 0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
+ &rect
+ );
+ pitch = buf_transfer->stride / sizeof(float);
+
+ f = pipe->transfer_map(pipe, buf_transfer);
+ for(i = 0; i < BLOCK_HEIGHT; ++i)
+ for(j = 0; j < BLOCK_WIDTH; ++j)
+ // transpose and scale
+ f[i * pitch + j] = const_matrix[j][i] * sqrtf(SCALE_FACTOR_16_TO_9);
+
+ pipe->transfer_unmap(pipe, buf_transfer);
+ pipe->transfer_destroy(pipe, buf_transfer);
+
+ return matrix;
+}
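For reference, the arithmetic the two render passes reproduce is the standard separable 8x8 inverse DCT written as two matrix products, dst = transpose(C) * src * C, with C being const_matrix above; the GPU path folds the transpose and the SCALE_FACTOR_16_TO_9 scaling into the uploaded texture instead. A CPU sketch of that math (not part of the patch; idct_8x8_reference is an illustrative name):

   /* Illustrative only: reference 8x8 IDCT as two matrix multiplications. */
   static void idct_8x8_reference(const float src[8][8], float dst[8][8])
   {
      float tmp[8][8];
      int x, y, k;

      /* tmp = transpose(const_matrix) * src */
      for (y = 0; y < 8; ++y)
         for (x = 0; x < 8; ++x) {
            tmp[y][x] = 0.0f;
            for (k = 0; k < 8; ++k)
               tmp[y][x] += const_matrix[k][y] * src[k][x];
         }

      /* dst = tmp * const_matrix */
      for (y = 0; y < 8; ++y)
         for (x = 0; x < 8; ++x) {
            dst[y][x] = 0.0f;
            for (k = 0; k < 8; ++k)
               dst[y][x] += tmp[y][k] * const_matrix[k][x];
         }
   }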
+
+bool vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe,
+ unsigned buffer_width, unsigned buffer_height,
+ struct pipe_resource *matrix)
+{
+ assert(idct && pipe && matrix);
+
+ idct->pipe = pipe;
+ idct->buffer_width = buffer_width;
+ idct->buffer_height = buffer_height;
+ pipe_resource_reference(&idct->matrix, matrix);
+
+ idct->max_blocks =
+ align(buffer_width, BLOCK_WIDTH) / BLOCK_WIDTH *
+ align(buffer_height, BLOCK_HEIGHT) / BLOCK_HEIGHT;
+
+ if(!init_shaders(idct))
+ return false;
+
+ if(!init_state(idct)) {
+ cleanup_shaders(idct);
+ return false;
+ }
+
+ return true;
+}
+
+void
+vl_idct_cleanup(struct vl_idct *idct)
+{
+ cleanup_shaders(idct);
+ cleanup_state(idct);
+
+ pipe_resource_reference(&idct->matrix, NULL);
+}
+
+bool
+vl_idct_init_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer, struct pipe_resource *dst)
+{
+ struct pipe_surface template;
+
+ unsigned i;
+
+ assert(buffer);
+ assert(idct);
+ assert(dst);
+
+ pipe_resource_reference(&buffer->textures.individual.matrix, idct->matrix);
+ pipe_resource_reference(&buffer->textures.individual.transpose, idct->matrix);
+ pipe_resource_reference(&buffer->destination, dst);
+
+ if (!init_textures(idct, buffer))
+ return false;
+
+ if (!init_vertex_buffers(idct, buffer))
+ return false;
+
+ /* init state */
+ buffer->viewport[0].scale[0] = buffer->textures.individual.intermediate->width0;
+ buffer->viewport[0].scale[1] = buffer->textures.individual.intermediate->height0;
+
+ buffer->viewport[1].scale[0] = buffer->destination->width0;
+ buffer->viewport[1].scale[1] = buffer->destination->height0;
+
+ buffer->fb_state[0].width = buffer->textures.individual.intermediate->width0;
+ buffer->fb_state[0].height = buffer->textures.individual.intermediate->height0;
+
+ buffer->fb_state[0].nr_cbufs = NR_RENDER_TARGETS;
+ for(i = 0; i < NR_RENDER_TARGETS; ++i) {
+ memset(&template, 0, sizeof(template));
+ template.format = buffer->textures.individual.intermediate->format;
+ template.u.tex.first_layer = i;
+ template.u.tex.last_layer = i;
+ template.usage = PIPE_BIND_RENDER_TARGET;
+ buffer->fb_state[0].cbufs[i] = idct->pipe->create_surface(
+ idct->pipe, buffer->textures.individual.intermediate,
+ &template);
+ }
+
+ buffer->fb_state[1].width = buffer->destination->width0;
+ buffer->fb_state[1].height = buffer->destination->height0;
+
+ buffer->fb_state[1].nr_cbufs = 1;
+
+ memset(&template, 0, sizeof(template));
+ template.format = buffer->destination->format;
+ template.usage = PIPE_BIND_RENDER_TARGET;
+ buffer->fb_state[1].cbufs[0] = idct->pipe->create_surface(
+ idct->pipe, buffer->destination, &template);
+
+ for(i = 0; i < 2; ++i) {
+ buffer->viewport[i].scale[2] = 1;
+ buffer->viewport[i].scale[3] = 1;
+ buffer->viewport[i].translate[0] = 0;
+ buffer->viewport[i].translate[1] = 0;
+ buffer->viewport[i].translate[2] = 0;
+ buffer->viewport[i].translate[3] = 0;
+
+ buffer->fb_state[i].zsbuf = NULL;
+ }
+
+ return true;
+}
+
+void
+vl_idct_cleanup_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+{
+ unsigned i;
+
+ assert(buffer);
+
+ for(i = 0; i < NR_RENDER_TARGETS; ++i) {
+ idct->pipe->surface_destroy(idct->pipe, buffer->fb_state[0].cbufs[i]);
+ }
+
+ idct->pipe->surface_destroy(idct->pipe, buffer->fb_state[1].cbufs[0]);
+
+ cleanup_textures(idct, buffer);
+ cleanup_vertex_buffers(idct, buffer);
+}
+
+void
+vl_idct_map_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+{
+ struct pipe_box rect =
+ {
+ 0, 0, 0,
+ buffer->textures.individual.source->width0,
+ buffer->textures.individual.source->height0,
+ 1
+ };
+
+ assert(idct && buffer);
+
+ buffer->tex_transfer = idct->pipe->get_transfer
+ (
+ idct->pipe, buffer->textures.individual.source,
+ 0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
+ &rect
+ );
+
+ buffer->texels = idct->pipe->transfer_map(idct->pipe, buffer->tex_transfer);
+
+ vl_vb_map(&buffer->blocks, idct->pipe);
+}
+
+void
+vl_idct_add_block(struct vl_idct_buffer *buffer, unsigned x, unsigned y, short *block)
+{
+ struct vertex2s v;
+ unsigned tex_pitch;
+ short *texels;
+
+ unsigned i;
+
+ assert(buffer);
+
+ tex_pitch = buffer->tex_transfer->stride / sizeof(short);
+ texels = buffer->texels + y * tex_pitch * BLOCK_HEIGHT + x * BLOCK_WIDTH;
+
+ for (i = 0; i < BLOCK_HEIGHT; ++i)
+ memcpy(texels + i * tex_pitch, block + i * BLOCK_WIDTH, BLOCK_WIDTH * sizeof(short));
+
+ v.x = x;
+ v.y = y;
+ vl_vb_add_block(&buffer->blocks, &v);
+}
+
+void
+vl_idct_unmap_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+{
+ assert(idct && buffer);
+
+ idct->pipe->transfer_unmap(idct->pipe, buffer->tex_transfer);
+ idct->pipe->transfer_destroy(idct->pipe, buffer->tex_transfer);
+ vl_vb_unmap(&buffer->blocks, idct->pipe);
+}
+
+void
+vl_idct_flush(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+{
+ unsigned num_verts;
+
+ assert(idct);
+
+ num_verts = vl_vb_restart(&buffer->blocks);
+
+ if(num_verts > 0) {
+
+ idct->pipe->bind_rasterizer_state(idct->pipe, idct->rs_state);
+ idct->pipe->set_vertex_buffers(idct->pipe, 2, buffer->vertex_bufs.all);
+ idct->pipe->bind_vertex_elements_state(idct->pipe, idct->vertex_elems_state);
+
+ /* first stage */
+ idct->pipe->set_framebuffer_state(idct->pipe, &buffer->fb_state[0]);
+ idct->pipe->set_viewport_state(idct->pipe, &buffer->viewport[0]);
+ idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[0]);
+ idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[0]);
+ idct->pipe->bind_vs_state(idct->pipe, idct->matrix_vs);
+ idct->pipe->bind_fs_state(idct->pipe, idct->matrix_fs);
+ util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, num_verts);
+
+ /* second stage */
+ idct->pipe->set_framebuffer_state(idct->pipe, &buffer->fb_state[1]);
+ idct->pipe->set_viewport_state(idct->pipe, &buffer->viewport[1]);
+ idct->pipe->set_fragment_sampler_views(idct->pipe, 2, buffer->sampler_views.stage[1]);
+ idct->pipe->bind_fragment_sampler_states(idct->pipe, 2, idct->samplers.stage[1]);
+ idct->pipe->bind_vs_state(idct->pipe, idct->transpose_vs);
+ idct->pipe->bind_fs_state(idct->pipe, idct->transpose_fs);
+ util_draw_arrays(idct->pipe, PIPE_PRIM_QUADS, 0, num_verts);
+ }
+}
diff --git a/src/gallium/auxiliary/vl/vl_idct.h b/src/gallium/auxiliary/vl/vl_idct.h
new file mode 100644
index 00000000000..fcba75a7607
--- /dev/null
+++ b/src/gallium/auxiliary/vl/vl_idct.h
@@ -0,0 +1,124 @@
+/**************************************************************************
+ *
+ * Copyright 2010 Christian König
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef vl_idct_h
+#define vl_idct_h
+
+#include <pipe/p_state.h>
+#include "vl_vertex_buffers.h"
+
+struct vl_idct
+{
+ struct pipe_context *pipe;
+
+ unsigned buffer_width;
+ unsigned buffer_height;
+
+ unsigned max_blocks;
+
+ void *rs_state;
+ void *vertex_elems_state;
+
+ union
+ {
+ void *all[4];
+ void *stage[2][2];
+ struct {
+ void *matrix, *source;
+ void *transpose, *intermediate;
+ } individual;
+ } samplers;
+
+ void *matrix_vs, *transpose_vs;
+ void *matrix_fs, *transpose_fs;
+
+ struct pipe_resource *matrix;
+ struct pipe_vertex_buffer quad;
+
+ unsigned vertex_buffer_stride;
+};
+
+struct vl_idct_buffer
+{
+ struct pipe_viewport_state viewport[2];
+ struct pipe_framebuffer_state fb_state[2];
+
+ struct pipe_resource *destination;
+
+ union
+ {
+ struct pipe_sampler_view *all[4];
+ struct pipe_sampler_view *stage[2][2];
+ struct {
+ struct pipe_sampler_view *matrix, *source;
+ struct pipe_sampler_view *transpose, *intermediate;
+ } individual;
+ } sampler_views;
+
+ union
+ {
+ struct pipe_resource *all[4];
+ struct pipe_resource *stage[2][2];
+ struct {
+ struct pipe_resource *matrix, *source;
+ struct pipe_resource *transpose, *intermediate;
+ } individual;
+ } textures;
+
+ union
+ {
+ struct pipe_vertex_buffer all[2];
+ struct { struct pipe_vertex_buffer quad, pos; } individual;
+ } vertex_bufs;
+
+ struct vl_vertex_buffer blocks;
+
+ struct pipe_transfer *tex_transfer;
+ short *texels;
+};
+
+struct pipe_resource *vl_idct_upload_matrix(struct pipe_context *pipe);
+
+bool vl_idct_init(struct vl_idct *idct, struct pipe_context *pipe,
+ unsigned buffer_width, unsigned buffer_height,
+ struct pipe_resource *matrix);
+
+void vl_idct_cleanup(struct vl_idct *idct);
+
+bool vl_idct_init_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer, struct pipe_resource *dst);
+
+void vl_idct_cleanup_buffer(struct vl_idct *idct, struct vl_idct_buffer *buffer);
+
+void vl_idct_map_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer);
+
+void vl_idct_add_block(struct vl_idct_buffer *buffer, unsigned x, unsigned y, short *block);
+
+void vl_idct_unmap_buffers(struct vl_idct *idct, struct vl_idct_buffer *buffer);
+
+void vl_idct_flush(struct vl_idct *idct, struct vl_idct_buffer *buffer);
+
+#endif
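The interface above is meant to be used in a fixed order: upload the shared matrix once, initialize the context and a per-surface buffer, then for each frame map the source texture, feed coefficient blocks, unmap and flush. A condensed sketch of that sequence (not part of the patch; run_idct_once, width, height, dst and block are placeholders, and error handling is omitted):

   /* Illustrative only: the calling order expected by vl_idct. */
   static void run_idct_once(struct pipe_context *pipe, struct pipe_resource *dst,
                             unsigned width, unsigned height,
                             unsigned x, unsigned y, short *block)
   {
      struct vl_idct idct;
      struct vl_idct_buffer buffer;
      struct pipe_resource *matrix = vl_idct_upload_matrix(pipe);

      vl_idct_init(&idct, pipe, width, height, matrix);
      vl_idct_init_buffer(&idct, &buffer, dst);

      /* per frame: upload coefficients, then run both render passes */
      vl_idct_map_buffers(&idct, &buffer);
      vl_idct_add_block(&buffer, x, y, block); /* one call per coded 8x8 block */
      vl_idct_unmap_buffers(&idct, &buffer);
      vl_idct_flush(&idct, &buffer);

      vl_idct_cleanup_buffer(&idct, &buffer);
      vl_idct_cleanup(&idct);
      pipe_resource_reference(&matrix, NULL); /* vl_idct_init holds its own reference */
   }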
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.c b/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.c
new file mode 100644
index 00000000000..484e781f0cb
--- /dev/null
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.c
@@ -0,0 +1,1093 @@
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vl_mpeg12_mc_renderer.h"
+#include "util/u_draw.h"
+#include <assert.h>
+#include <pipe/p_context.h>
+#include <util/u_inlines.h>
+#include <util/u_format.h>
+#include <util/u_math.h>
+#include <util/u_memory.h>
+#include <util/u_keymap.h>
+#include <util/u_sampler.h>
+#include <util/u_draw.h>
+#include <tgsi/tgsi_ureg.h>
+
+#define DEFAULT_BUF_ALIGNMENT 1
+#define MACROBLOCK_WIDTH 16
+#define MACROBLOCK_HEIGHT 16
+#define BLOCK_WIDTH 8
+#define BLOCK_HEIGHT 8
+
+struct vertex_stream
+{
+ struct vertex2s pos;
+ struct vertex2s mv[4];
+ struct {
+ int8_t y;
+ int8_t cr;
+ int8_t cb;
+ int8_t flag;
+ } eb[2][2];
+};
+
+enum VS_INPUT
+{
+ VS_I_RECT,
+ VS_I_VPOS,
+ VS_I_MV0,
+ VS_I_MV1,
+ VS_I_MV2,
+ VS_I_MV3,
+ VS_I_EB_0_0,
+ VS_I_EB_0_1,
+ VS_I_EB_1_0,
+ VS_I_EB_1_1,
+
+ NUM_VS_INPUTS
+};
+
+enum VS_OUTPUT
+{
+ VS_O_VPOS,
+ VS_O_LINE,
+ VS_O_TEX0,
+ VS_O_TEX1,
+ VS_O_TEX2,
+ VS_O_EB_0,
+ VS_O_EB_1,
+ VS_O_INFO,
+ VS_O_MV0,
+ VS_O_MV1,
+ VS_O_MV2,
+ VS_O_MV3
+};
+
+static const unsigned const_empty_block_mask_420[3][2][2] = {
+ { { 0x20, 0x10 }, { 0x08, 0x04 } },
+ { { 0x02, 0x02 }, { 0x02, 0x02 } },
+ { { 0x01, 0x01 }, { 0x01, 0x01 } }
+};
+
+static void *
+create_vert_shader(struct vl_mpeg12_mc_renderer *r)
+{
+ struct ureg_program *shader;
+ struct ureg_src block_scale, mv_scale;
+ struct ureg_src vrect, vpos, eb[2][2], vmv[4];
+ struct ureg_dst t_vpos, t_vtex, t_vmv;
+ struct ureg_dst o_vpos, o_line, o_vtex[3], o_eb[2], o_vmv[4], o_info;
+ unsigned i, label;
+
+ shader = ureg_create(TGSI_PROCESSOR_VERTEX);
+ if (!shader)
+ return NULL;
+
+ t_vpos = ureg_DECL_temporary(shader);
+ t_vtex = ureg_DECL_temporary(shader);
+ t_vmv = ureg_DECL_temporary(shader);
+
+ vrect = ureg_DECL_vs_input(shader, VS_I_RECT);
+ vpos = ureg_DECL_vs_input(shader, VS_I_VPOS);
+ eb[0][0] = ureg_DECL_vs_input(shader, VS_I_EB_0_0);
+ eb[1][0] = ureg_DECL_vs_input(shader, VS_I_EB_1_0);
+ eb[0][1] = ureg_DECL_vs_input(shader, VS_I_EB_0_1);
+ eb[1][1] = ureg_DECL_vs_input(shader, VS_I_EB_1_1);
+
+ o_vpos = ureg_DECL_output(shader, TGSI_SEMANTIC_POSITION, VS_O_VPOS);
+ o_line = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_LINE);
+ o_vtex[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX0);
+ o_vtex[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX1);
+ o_vtex[2] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX2);
+ o_eb[0] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_0);
+ o_eb[1] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_1);
+ o_info = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_INFO);
+
+ for (i = 0; i < 4; ++i) {
+ vmv[i] = ureg_DECL_vs_input(shader, VS_I_MV0 + i);
+ o_vmv[i] = ureg_DECL_output(shader, TGSI_SEMANTIC_GENERIC, VS_O_MV0 + i);
+ }
+
+ /*
+ * block_scale = (MACROBLOCK_WIDTH, MACROBLOCK_HEIGHT) / (dst.width, dst.height)
+ * mv_scale = 0.5 / (dst.width, dst.height);
+ *
+ * t_vpos = (vpos + vrect) * block_scale
+ * o_vpos.xy = t_vpos
+ * o_vpos.zw = vpos
+ *
+ * o_eb[0..1] = vrect.x ? eb[0..1][1] : eb[0..1][0]
+ *
+ * o_frame_pred = frame_pred
+ * o_info.x = ref_frames
+ * o_info.y = ref_frames > 0
+ * o_info.z = bkwd_pred
+ *
+ * // Apply motion vectors
+ * o_vmv[0..count] = t_vpos + vmv[0..count] * mv_scale
+ *
+ * o_line.xy = vrect * 8
+ * o_line.z = interlaced
+ *
+ * if(eb[0][0].w) { //interlaced
+ * t_vtex.x = vrect.x
+ * t_vtex.y = vrect.y * 0.5
+ * t_vtex += vpos
+ *
+ * o_vtex[0].xy = t_vtex * block_scale
+ *
+ * t_vtex.y += 0.5
+ * o_vtex[1].xy = t_vtex * block_scale
+ * } else {
+ * o_vtex[0..1].xy = t_vpos
+ * }
+ * o_vtex[2].xy = t_vpos
+ *
+ */
+ block_scale = ureg_imm2f(shader,
+ (float)MACROBLOCK_WIDTH / r->buffer_width,
+ (float)MACROBLOCK_HEIGHT / r->buffer_height);
+
+ mv_scale = ureg_imm2f(shader,
+ 0.5f / r->buffer_width,
+ 0.5f / r->buffer_height);
+
+ ureg_ADD(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), vpos, vrect);
+ ureg_MUL(shader, ureg_writemask(t_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos), block_scale);
+ ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_XY), ureg_src(t_vpos));
+ ureg_MOV(shader, ureg_writemask(o_vpos, TGSI_WRITEMASK_ZW), vpos);
+
+ ureg_CMP(shader, ureg_writemask(o_eb[0], TGSI_WRITEMASK_XYZ),
+ ureg_negate(ureg_scalar(vrect, TGSI_SWIZZLE_X)),
+ eb[0][1], eb[0][0]);
+ ureg_CMP(shader, ureg_writemask(o_eb[1], TGSI_WRITEMASK_XYZ),
+ ureg_negate(ureg_scalar(vrect, TGSI_SWIZZLE_X)),
+ eb[1][1], eb[1][0]);
+
+ ureg_MOV(shader, ureg_writemask(o_info, TGSI_WRITEMASK_X),
+ ureg_scalar(eb[1][1], TGSI_SWIZZLE_W));
+ ureg_SGE(shader, ureg_writemask(o_info, TGSI_WRITEMASK_Y),
+ ureg_scalar(eb[1][1], TGSI_SWIZZLE_W), ureg_imm1f(shader, 0.0f));
+ ureg_MOV(shader, ureg_writemask(o_info, TGSI_WRITEMASK_Z),
+ ureg_scalar(eb[1][0], TGSI_SWIZZLE_W));
+
+ ureg_MAD(shader, ureg_writemask(o_vmv[0], TGSI_WRITEMASK_XY), mv_scale, vmv[0], ureg_src(t_vpos));
+ ureg_MAD(shader, ureg_writemask(o_vmv[2], TGSI_WRITEMASK_XY), mv_scale, vmv[2], ureg_src(t_vpos));
+
+ ureg_CMP(shader, ureg_writemask(t_vmv, TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(eb[0][1], TGSI_SWIZZLE_W)),
+ vmv[0], vmv[1]);
+ ureg_MAD(shader, ureg_writemask(o_vmv[1], TGSI_WRITEMASK_XY), mv_scale, ureg_src(t_vmv), ureg_src(t_vpos));
+
+ ureg_CMP(shader, ureg_writemask(t_vmv, TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(eb[0][1], TGSI_SWIZZLE_W)),
+ vmv[2], vmv[3]);
+ ureg_MAD(shader, ureg_writemask(o_vmv[3], TGSI_WRITEMASK_XY), mv_scale, ureg_src(t_vmv), ureg_src(t_vpos));
+
+ ureg_MOV(shader, ureg_writemask(o_vtex[0], TGSI_WRITEMASK_XY), ureg_src(t_vpos));
+ ureg_MOV(shader, ureg_writemask(o_vtex[1], TGSI_WRITEMASK_XY), ureg_src(t_vpos));
+ ureg_MOV(shader, ureg_writemask(o_vtex[2], TGSI_WRITEMASK_XY), ureg_src(t_vpos));
+
+ ureg_MOV(shader, ureg_writemask(o_line, TGSI_WRITEMASK_X), ureg_scalar(vrect, TGSI_SWIZZLE_Y));
+ ureg_MUL(shader, ureg_writemask(o_line, TGSI_WRITEMASK_Y),
+ vrect, ureg_imm1f(shader, MACROBLOCK_HEIGHT / 2));
+
+ ureg_IF(shader, ureg_scalar(eb[0][0], TGSI_SWIZZLE_W), &label);
+
+ ureg_MOV(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_X), vrect);
+ ureg_MUL(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Y), vrect, ureg_imm1f(shader, 0.5f));
+ ureg_ADD(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_XY), vpos, ureg_src(t_vtex));
+ ureg_MUL(shader, ureg_writemask(o_vtex[0], TGSI_WRITEMASK_XY), ureg_src(t_vtex), block_scale);
+ ureg_ADD(shader, ureg_writemask(t_vtex, TGSI_WRITEMASK_Y), ureg_src(t_vtex), ureg_imm1f(shader, 0.5f));
+ ureg_MUL(shader, ureg_writemask(o_vtex[1], TGSI_WRITEMASK_XY), ureg_src(t_vtex), block_scale);
+
+ ureg_MUL(shader, ureg_writemask(o_line, TGSI_WRITEMASK_X),
+ ureg_scalar(vrect, TGSI_SWIZZLE_Y),
+ ureg_imm1f(shader, MACROBLOCK_HEIGHT / 2));
+
+ ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
+ ureg_ENDIF(shader);
+
+ ureg_release_temporary(shader, t_vtex);
+ ureg_release_temporary(shader, t_vpos);
+ ureg_release_temporary(shader, t_vmv);
+
+ ureg_END(shader);
+
+ return ureg_create_shader_and_destroy(shader, r->pipe);
+}
+
+static struct ureg_dst
+calc_field(struct ureg_program *shader)
+{
+ struct ureg_dst tmp;
+ struct ureg_src line;
+
+ tmp = ureg_DECL_temporary(shader);
+
+ line = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_LINE, TGSI_INTERPOLATE_LINEAR);
+
+ /*
+ * line.x going from 0 to 1 if not interlaced
+ * line.x going from 0 to 8 in steps of 0.5 if interlaced
+ * line.y going from 0 to 8 in steps of 0.5
+ *
+ * tmp.xy = fraction(line)
+ * tmp.xy = tmp.xy >= 0.5 ? 1 : 0
+ */
+ ureg_FRC(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XY), line);
+ ureg_SGE(shader, ureg_writemask(tmp, TGSI_WRITEMASK_XY), ureg_src(tmp), ureg_imm1f(shader, 0.5f));
+
+ return tmp;
+}
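The FRC/SGE pair reduces the interpolated line coordinate to a 0/1 field selector. A scalar sketch (not part of the patch; field_select is an illustrative name):

   /* Illustrative only: scalar equivalent of the FRC + SGE emitted above. */
   static float field_select(float line)
   {
      float frac = line - (float)(int)line; /* FRC; line is non-negative here */
      return frac >= 0.5f ? 1.0f : 0.0f;    /* SGE against 0.5 */
   }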
+
+static struct ureg_dst
+fetch_ycbcr(struct vl_mpeg12_mc_renderer *r, struct ureg_program *shader, struct ureg_dst field)
+{
+ struct ureg_src tc[3], sampler[3], eb[2];
+ struct ureg_dst texel, t_tc, t_eb_info;
+ unsigned i, label;
+
+ texel = ureg_DECL_temporary(shader);
+ t_tc = ureg_DECL_temporary(shader);
+ t_eb_info = ureg_DECL_temporary(shader);
+
+ tc[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX0, TGSI_INTERPOLATE_LINEAR);
+ tc[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX1, TGSI_INTERPOLATE_LINEAR);
+ tc[2] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_TEX2, TGSI_INTERPOLATE_LINEAR);
+
+ eb[0] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_0, TGSI_INTERPOLATE_CONSTANT);
+ eb[1] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_EB_1, TGSI_INTERPOLATE_CONSTANT);
+
+ for (i = 0; i < 3; ++i) {
+ sampler[i] = ureg_DECL_sampler(shader, i);
+ }
+
+ /*
+ * texel.y = tex(field.y ? tc[1] : tc[0], sampler[0])
+ * texel.cb = tex(tc[2], sampler[1])
+ * texel.cr = tex(tc[2], sampler[2])
+ */
+
+ ureg_CMP(shader, ureg_writemask(t_tc, TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_X)),
+ tc[1], tc[0]);
+
+ ureg_CMP(shader, ureg_writemask(t_eb_info, TGSI_WRITEMASK_XYZ),
+ ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_X)),
+ eb[1], eb[0]);
+
+ /* r600g ignores TGSI_INTERPOLATE_CONSTANT; work around that here */
+ ureg_SLT(shader, ureg_writemask(t_eb_info, TGSI_WRITEMASK_XYZ), ureg_src(t_eb_info), ureg_imm1f(shader, 0.5f));
+
+ ureg_MOV(shader, ureg_writemask(texel, TGSI_WRITEMASK_XYZ), ureg_imm1f(shader, 0.0f));
+ for (i = 0; i < 3; ++i) {
+ ureg_IF(shader, ureg_scalar(ureg_src(t_eb_info), TGSI_SWIZZLE_X + i), &label);
+
+ /* Nouveau can't writemask tex dst regs (yet?), so this currently breaks on nvidia hardware */
+ if(i==0 || r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_444) {
+ ureg_TEX(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_3D, ureg_src(t_tc), sampler[i]);
+ } else {
+ ureg_TEX(shader, ureg_writemask(texel, TGSI_WRITEMASK_X << i), TGSI_TEXTURE_3D, tc[2], sampler[i]);
+ }
+
+ ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
+ ureg_ENDIF(shader);
+ }
+
+ ureg_release_temporary(shader, t_tc);
+ ureg_release_temporary(shader, t_eb_info);
+
+ return texel;
+}
+
+static struct ureg_dst
+fetch_ref(struct ureg_program *shader, struct ureg_dst field)
+{
+ struct ureg_src info;
+ struct ureg_src tc[4], sampler[2];
+ struct ureg_dst ref[2], result;
+ unsigned i, intra_label, bi_label, label;
+
+ info = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_INFO, TGSI_INTERPOLATE_CONSTANT);
+
+ for (i = 0; i < 4; ++i)
+ tc[i] = ureg_DECL_fs_input(shader, TGSI_SEMANTIC_GENERIC, VS_O_MV0 + i, TGSI_INTERPOLATE_LINEAR);
+
+ for (i = 0; i < 2; ++i) {
+ sampler[i] = ureg_DECL_sampler(shader, i + 3);
+ ref[i] = ureg_DECL_temporary(shader);
+ }
+
+ result = ureg_DECL_temporary(shader);
+
+ ureg_MOV(shader, ureg_writemask(result, TGSI_WRITEMASK_XYZ), ureg_imm1f(shader, 0.5f));
+
+ ureg_IF(shader, ureg_scalar(info, TGSI_SWIZZLE_Y), &intra_label);
+ ureg_CMP(shader, ureg_writemask(ref[0], TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
+ tc[1], tc[0]);
+
+ ureg_IF(shader, ureg_scalar(info, TGSI_SWIZZLE_X), &bi_label);
+
+ /*
+ * result = tex(field.z ? tc[1] : tc[0], sampler[bkwd_pred ? 1 : 0])
+ */
+ ureg_IF(shader, ureg_scalar(info, TGSI_SWIZZLE_Z), &label);
+ ureg_TEX(shader, result, TGSI_TEXTURE_2D, ureg_src(ref[0]), sampler[1]);
+ ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
+ ureg_ELSE(shader, &label);
+ ureg_TEX(shader, result, TGSI_TEXTURE_2D, ureg_src(ref[0]), sampler[0]);
+ ureg_fixup_label(shader, label, ureg_get_instruction_number(shader));
+ ureg_ENDIF(shader);
+
+ ureg_fixup_label(shader, bi_label, ureg_get_instruction_number(shader));
+ ureg_ELSE(shader, &bi_label);
+
+ /*
+ * if (field.z)
+ * ref[0..1] = tex(tc[0..1], sampler[0..1])
+ * else
+ * ref[0..1] = tex(tc[2..3], sampler[0..1])
+ */
+ ureg_CMP(shader, ureg_writemask(ref[1], TGSI_WRITEMASK_XY),
+ ureg_negate(ureg_scalar(ureg_src(field), TGSI_SWIZZLE_Y)),
+ tc[3], tc[2]);
+ ureg_TEX(shader, ref[0], TGSI_TEXTURE_2D, ureg_src(ref[0]), sampler[0]);
+ ureg_TEX(shader, ref[1], TGSI_TEXTURE_2D, ureg_src(ref[1]), sampler[1]);
+
+ ureg_LRP(shader, ureg_writemask(result, TGSI_WRITEMASK_XYZ), ureg_imm1f(shader, 0.5f),
+ ureg_src(ref[0]), ureg_src(ref[1]));
+
+ ureg_fixup_label(shader, bi_label, ureg_get_instruction_number(shader));
+ ureg_ENDIF(shader);
+ ureg_fixup_label(shader, intra_label, ureg_get_instruction_number(shader));
+ ureg_ENDIF(shader);
+
+ for (i = 0; i < 2; ++i)
+ ureg_release_temporary(shader, ref[i]);
+
+ return result;
+}
+
+static void *
+create_frag_shader(struct vl_mpeg12_mc_renderer *r)
+{
+ struct ureg_program *shader;
+ struct ureg_dst result;
+ struct ureg_dst field, texel;
+ struct ureg_dst fragment;
+
+ shader = ureg_create(TGSI_PROCESSOR_FRAGMENT);
+ if (!shader)
+ return NULL;
+
+ fragment = ureg_DECL_output(shader, TGSI_SEMANTIC_COLOR, 0);
+
+ field = calc_field(shader);
+ texel = fetch_ycbcr(r, shader, field);
+
+ result = fetch_ref(shader, field);
+
+ ureg_ADD(shader, ureg_writemask(fragment, TGSI_WRITEMASK_XYZ), ureg_src(texel), ureg_src(result));
+
+ ureg_release_temporary(shader, field);
+ ureg_release_temporary(shader, texel);
+ ureg_release_temporary(shader, result);
+ ureg_END(shader);
+
+ return ureg_create_shader_and_destroy(shader, r->pipe);
+}
+
+static bool
+init_pipe_state(struct vl_mpeg12_mc_renderer *r)
+{
+ struct pipe_sampler_state sampler;
+ struct pipe_rasterizer_state rs_state;
+ unsigned filters[5];
+ unsigned i;
+
+ assert(r);
+
+ r->viewport.scale[0] = r->buffer_width;
+ r->viewport.scale[1] = r->buffer_height;
+ r->viewport.scale[2] = 1;
+ r->viewport.scale[3] = 1;
+ r->viewport.translate[0] = 0;
+ r->viewport.translate[1] = 0;
+ r->viewport.translate[2] = 0;
+ r->viewport.translate[3] = 0;
+
+ r->fb_state.width = r->buffer_width;
+ r->fb_state.height = r->buffer_height;
+ r->fb_state.nr_cbufs = 1;
+ r->fb_state.zsbuf = NULL;
+
+ /* Luma filter */
+ filters[0] = PIPE_TEX_FILTER_NEAREST;
+ /* Chroma filters */
+ if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_444 || true) { //TODO
+ filters[1] = PIPE_TEX_FILTER_NEAREST;
+ filters[2] = PIPE_TEX_FILTER_NEAREST;
+ }
+ else {
+ filters[1] = PIPE_TEX_FILTER_LINEAR;
+ filters[2] = PIPE_TEX_FILTER_LINEAR;
+ }
+ /* Fwd, bkwd ref filters */
+ filters[3] = PIPE_TEX_FILTER_LINEAR;
+ filters[4] = PIPE_TEX_FILTER_LINEAR;
+
+ for (i = 0; i < 5; ++i) {
+ memset(&sampler, 0, sizeof(sampler));
+ sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
+ sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
+ sampler.min_img_filter = filters[i];
+ sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ sampler.mag_img_filter = filters[i];
+ sampler.compare_mode = PIPE_TEX_COMPARE_NONE;
+ sampler.compare_func = PIPE_FUNC_ALWAYS;
+ sampler.normalized_coords = 1;
+ /*sampler.shadow_ambient = ; */
+ /*sampler.lod_bias = ; */
+ sampler.min_lod = 0;
+ /*sampler.max_lod = ; */
+ sampler.border_color[0] = 0.0f;
+ sampler.border_color[1] = 0.0f;
+ sampler.border_color[2] = 0.0f;
+ sampler.border_color[3] = 0.0f;
+ /*sampler.max_anisotropy = ; */
+ r->samplers.all[i] = r->pipe->create_sampler_state(r->pipe, &sampler);
+ }
+
+ memset(&rs_state, 0, sizeof(rs_state));
+ /*rs_state.sprite_coord_enable */
+ rs_state.sprite_coord_mode = PIPE_SPRITE_COORD_UPPER_LEFT;
+ rs_state.point_quad_rasterization = true;
+ rs_state.point_size = BLOCK_WIDTH;
+ rs_state.gl_rasterization_rules = true;
+ r->rs_state = r->pipe->create_rasterizer_state(r->pipe, &rs_state);
+
+ return true;
+}
+
+static void
+cleanup_pipe_state(struct vl_mpeg12_mc_renderer *r)
+{
+ unsigned i;
+
+ assert(r);
+
+ for (i = 0; i < 5; ++i)
+ r->pipe->delete_sampler_state(r->pipe, r->samplers.all[i]);
+
+ r->pipe->delete_rasterizer_state(r->pipe, r->rs_state);
+}
+
+static bool
+init_buffers(struct vl_mpeg12_mc_renderer *r)
+{
+ struct pipe_resource *idct_matrix;
+ struct pipe_vertex_element vertex_elems[NUM_VS_INPUTS];
+
+ const unsigned mbw =
+ align(r->buffer_width, MACROBLOCK_WIDTH) / MACROBLOCK_WIDTH;
+ const unsigned mbh =
+ align(r->buffer_height, MACROBLOCK_HEIGHT) / MACROBLOCK_HEIGHT;
+
+ unsigned i, chroma_width, chroma_height;
+
+ assert(r);
+
+ r->macroblocks_per_batch =
+ mbw * (r->bufmode == VL_MPEG12_MC_RENDERER_BUFFER_PICTURE ? mbh : 1);
+
+ if (!(idct_matrix = vl_idct_upload_matrix(r->pipe)))
+ return false;
+
+ if (!vl_idct_init(&r->idct_luma, r->pipe, r->buffer_width, r->buffer_height, idct_matrix))
+ return false;
+
+ if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
+ chroma_width = r->buffer_width / 2;
+ chroma_height = r->buffer_height / 2;
+ } else if (r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422) {
+ chroma_width = r->buffer_width;
+ chroma_height = r->buffer_height / 2;
+ } else {
+ chroma_width = r->buffer_width;
+ chroma_height = r->buffer_height;
+ }
+
+ if(!vl_idct_init(&r->idct_chroma, r->pipe, chroma_width, chroma_height, idct_matrix))
+ return false;
+
+ memset(&vertex_elems, 0, sizeof(vertex_elems));
+
+ vertex_elems[VS_I_RECT] = vl_vb_get_quad_vertex_element();
+ r->quad = vl_vb_upload_quads(r->pipe, r->macroblocks_per_batch);
+
+ /* Position element */
+ vertex_elems[VS_I_VPOS].src_format = PIPE_FORMAT_R16G16_SSCALED;
+
+ for (i = 0; i < 4; ++i)
+ /* motion vector 0..4 element */
+ vertex_elems[VS_I_MV0 + i].src_format = PIPE_FORMAT_R16G16_SSCALED;
+
+ /* y, cr, cb empty block element top left block */
+ vertex_elems[VS_I_EB_0_0].src_format = PIPE_FORMAT_R8G8B8A8_SSCALED;
+
+ /* y, cr, cb empty block element top right block */
+ vertex_elems[VS_I_EB_0_1].src_format = PIPE_FORMAT_R8G8B8A8_SSCALED;
+
+ /* y, cr, cb empty block element bottom left block */
+ vertex_elems[VS_I_EB_1_0].src_format = PIPE_FORMAT_R8G8B8A8_SSCALED;
+
+ /* y, cr, cb empty block element bottom right block */
+ vertex_elems[VS_I_EB_1_1].src_format = PIPE_FORMAT_R8G8B8A8_SSCALED;
+
+ r->vertex_stream_stride = vl_vb_element_helper(&vertex_elems[VS_I_VPOS], 9, 1);
+
+ r->vertex_elems_state = r->pipe->create_vertex_elements_state(
+ r->pipe, NUM_VS_INPUTS, vertex_elems);
+
+ if (r->vertex_elems_state == NULL)
+ return false;
+
+ r->vs = create_vert_shader(r);
+ r->fs = create_frag_shader(r);
+
+ if (r->vs == NULL || r->fs == NULL)
+ return false;
+
+ return true;
+}
+
+static void
+cleanup_buffers(struct vl_mpeg12_mc_renderer *r)
+{
+ assert(r);
+
+ r->pipe->delete_vs_state(r->pipe, r->vs);
+ r->pipe->delete_fs_state(r->pipe, r->fs);
+
+ vl_idct_cleanup(&r->idct_luma);
+ vl_idct_cleanup(&r->idct_chroma);
+
+ r->pipe->delete_vertex_elements_state(r->pipe, r->vertex_elems_state);
+}
+
+static struct pipe_sampler_view
+*find_or_create_sampler_view(struct vl_mpeg12_mc_renderer *r, struct pipe_surface *surface)
+{
+ struct pipe_sampler_view *sampler_view;
+ assert(r);
+ assert(surface);
+
+ sampler_view = (struct pipe_sampler_view*)util_keymap_lookup(r->texview_map, &surface);
+ if (!sampler_view) {
+ struct pipe_sampler_view templat;
+ boolean added_to_map;
+
+ u_sampler_view_default_template(&templat, surface->texture,
+ surface->texture->format);
+ sampler_view = r->pipe->create_sampler_view(r->pipe, surface->texture,
+ &templat);
+ if (!sampler_view)
+ return NULL;
+
+ added_to_map = util_keymap_insert(r->texview_map, &surface,
+ sampler_view, r->pipe);
+ assert(added_to_map);
+ }
+
+ return sampler_view;
+}
+
+static void
+get_motion_vectors(struct pipe_mpeg12_macroblock *mb, struct vertex2s mv[4])
+{
+ switch (mb->mb_type) {
+ case PIPE_MPEG12_MACROBLOCK_TYPE_BI:
+ {
+ if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
+ mv[2].x = mb->pmv[0][1][0];
+ mv[2].y = mb->pmv[0][1][1];
+
+ } else {
+ mv[2].x = mb->pmv[0][1][0];
+ mv[2].y = mb->pmv[0][1][1] - (mb->pmv[0][1][1] % 4);
+
+ mv[3].x = mb->pmv[1][1][0];
+ mv[3].y = mb->pmv[1][1][1] - (mb->pmv[1][1][1] % 4);
+
+ if(mb->mvfs[0][1]) mv[2].y += 2;
+ if(!mb->mvfs[1][1]) mv[3].y -= 2;
+ }
+
+ /* fall-through */
+ }
+ case PIPE_MPEG12_MACROBLOCK_TYPE_FWD:
+ case PIPE_MPEG12_MACROBLOCK_TYPE_BKWD:
+ {
+ if (mb->mb_type == PIPE_MPEG12_MACROBLOCK_TYPE_BKWD) {
+
+ if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
+ mv[0].x = mb->pmv[0][1][0];
+ mv[0].y = mb->pmv[0][1][1];
+
+ } else {
+ mv[0].x = mb->pmv[0][1][0];
+ mv[0].y = mb->pmv[0][1][1] - (mb->pmv[0][1][1] % 4);
+
+ mv[1].x = mb->pmv[1][1][0];
+ mv[1].y = mb->pmv[1][1][1] - (mb->pmv[1][1][1] % 4);
+
+ if(mb->mvfs[0][1]) mv[0].y += 2;
+ if(!mb->mvfs[1][1]) mv[1].y -= 2;
+ }
+
+ } else {
+
+ if (mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME) {
+ mv[0].x = mb->pmv[0][0][0];
+ mv[0].y = mb->pmv[0][0][1];
+
+ } else {
+ mv[0].x = mb->pmv[0][0][0];
+ mv[0].y = mb->pmv[0][0][1] - (mb->pmv[0][0][1] % 4);
+
+ mv[1].x = mb->pmv[1][0][0];
+ mv[1].y = mb->pmv[1][0][1] - (mb->pmv[1][0][1] % 4);
+
+ if(mb->mvfs[0][0]) mv[0].y += 2;
+ if(!mb->mvfs[1][0]) mv[1].y -= 2;
+ }
+ }
+ }
+ default:
+ break;
+ }
+}
+
+static void
+grab_vectors(struct vl_mpeg12_mc_renderer *r,
+ struct vl_mpeg12_mc_buffer *buffer,
+ struct pipe_mpeg12_macroblock *mb)
+{
+ struct vertex_stream stream;
+
+ unsigned i, j;
+
+ assert(r);
+ assert(mb);
+
+ stream.pos.x = mb->mbx;
+ stream.pos.y = mb->mby;
+ for ( i = 0; i < 2; ++i) {
+ for ( j = 0; j < 2; ++j) {
+ stream.eb[i][j].y = !(mb->cbp & (*r->empty_block_mask)[0][i][j]);
+ stream.eb[i][j].cr = !(mb->cbp & (*r->empty_block_mask)[1][i][j]);
+ stream.eb[i][j].cb = !(mb->cbp & (*r->empty_block_mask)[2][i][j]);
+ }
+ }
+ stream.eb[0][0].flag = mb->dct_type == PIPE_MPEG12_DCT_TYPE_FIELD;
+ stream.eb[0][1].flag = mb->mo_type == PIPE_MPEG12_MOTION_TYPE_FRAME;
+ stream.eb[1][0].flag = mb->mb_type == PIPE_MPEG12_MACROBLOCK_TYPE_BKWD;
+ switch (mb->mb_type) {
+ case PIPE_MPEG12_MACROBLOCK_TYPE_INTRA:
+ stream.eb[1][1].flag = -1;
+ break;
+
+ case PIPE_MPEG12_MACROBLOCK_TYPE_FWD:
+ case PIPE_MPEG12_MACROBLOCK_TYPE_BKWD:
+ stream.eb[1][1].flag = 1;
+ break;
+
+ case PIPE_MPEG12_MACROBLOCK_TYPE_BI:
+ stream.eb[1][1].flag = 0;
+ break;
+
+ default:
+ assert(0);
+ }
+
+ get_motion_vectors(mb, stream.mv);
+ vl_vb_add_block(&buffer->vertex_stream, &stream);
+}
+
+static void
+grab_blocks(struct vl_mpeg12_mc_renderer *r,
+ struct vl_mpeg12_mc_buffer *buffer,
+ unsigned mbx, unsigned mby,
+ unsigned cbp, short *blocks)
+{
+ unsigned tb = 0;
+ unsigned x, y;
+
+ assert(r);
+ assert(blocks);
+
+ for (y = 0; y < 2; ++y) {
+ for (x = 0; x < 2; ++x, ++tb) {
+ if (cbp & (*r->empty_block_mask)[0][y][x]) {
+ vl_idct_add_block(&buffer->idct_y, mbx * 2 + x, mby * 2 + y, blocks);
+ blocks += BLOCK_WIDTH * BLOCK_HEIGHT;
+ }
+ }
+ }
+
+ /* TODO: Implement 422, 444 */
+ assert(r->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
+
+ for (tb = 1; tb < 3; ++tb) {
+ if (cbp & (*r->empty_block_mask)[tb][0][0]) {
+ if(tb == 1)
+ vl_idct_add_block(&buffer->idct_cb, mbx, mby, blocks);
+ else
+ vl_idct_add_block(&buffer->idct_cr, mbx, mby, blocks);
+ blocks += BLOCK_WIDTH * BLOCK_HEIGHT;
+ }
+ }
+}
+
+static void
+grab_macroblock(struct vl_mpeg12_mc_renderer *r,
+ struct vl_mpeg12_mc_buffer *buffer,
+ struct pipe_mpeg12_macroblock *mb)
+{
+ assert(r);
+ assert(mb);
+ assert(mb->blocks);
+ assert(buffer->num_macroblocks < r->macroblocks_per_batch);
+
+ grab_vectors(r, buffer, mb);
+ grab_blocks(r, buffer, mb->mbx, mb->mby, mb->cbp, mb->blocks);
+
+ ++buffer->num_macroblocks;
+}
+
+static void
+texview_map_delete(const struct keymap *map,
+ const void *key, void *data,
+ void *user)
+{
+ struct pipe_sampler_view *sv = (struct pipe_sampler_view*)data;
+
+ assert(map);
+ assert(key);
+ assert(data);
+ assert(user);
+
+ pipe_sampler_view_reference(&sv, NULL);
+}
+
+bool
+vl_mpeg12_mc_renderer_init(struct vl_mpeg12_mc_renderer *renderer,
+ struct pipe_context *pipe,
+ unsigned buffer_width,
+ unsigned buffer_height,
+ enum pipe_video_chroma_format chroma_format,
+ enum VL_MPEG12_MC_RENDERER_BUFFER_MODE bufmode)
+{
+ assert(renderer);
+ assert(pipe);
+
+ /* TODO: Implement other policies */
+ assert(bufmode == VL_MPEG12_MC_RENDERER_BUFFER_PICTURE);
+
+ memset(renderer, 0, sizeof(struct vl_mpeg12_mc_renderer));
+
+ renderer->pipe = pipe;
+ renderer->buffer_width = buffer_width;
+ renderer->buffer_height = buffer_height;
+ renderer->chroma_format = chroma_format;
+ renderer->bufmode = bufmode;
+
+ /* TODO: Implement 422, 444 */
+ assert(chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
+ renderer->empty_block_mask = &const_empty_block_mask_420;
+
+ renderer->texview_map = util_new_keymap(sizeof(struct pipe_surface*), -1,
+ texview_map_delete);
+ if (!renderer->texview_map)
+ return false;
+
+ if (!init_pipe_state(renderer))
+ goto error_pipe_state;
+
+ if (!init_buffers(renderer))
+ goto error_buffers;
+
+ return true;
+
+error_buffers:
+ cleanup_pipe_state(renderer);
+
+error_pipe_state:
+ util_delete_keymap(renderer->texview_map, renderer->pipe);
+ return false;
+}
+
+void
+vl_mpeg12_mc_renderer_cleanup(struct vl_mpeg12_mc_renderer *renderer)
+{
+ assert(renderer);
+
+ util_delete_keymap(renderer->texview_map, renderer->pipe);
+ cleanup_pipe_state(renderer);
+ cleanup_buffers(renderer);
+}
+
+bool
+vl_mpeg12_mc_init_buffer(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer)
+{
+ struct pipe_resource template;
+ struct pipe_sampler_view sampler_view;
+
+ unsigned i;
+
+ assert(renderer && buffer);
+
+ buffer->surface = NULL;
+ buffer->past = NULL;
+ buffer->future = NULL;
+ buffer->num_macroblocks = 0;
+
+ memset(&template, 0, sizeof(struct pipe_resource));
+ template.target = PIPE_TEXTURE_2D;
+   /* TODO: Accommodate HW that can't do this, and cases where this isn't precise enough */
+ template.format = PIPE_FORMAT_R16_SNORM;
+ template.last_level = 0;
+ template.width0 = renderer->buffer_width;
+ template.height0 = renderer->buffer_height;
+ template.depth0 = 1;
+ template.array_size = 1;
+ template.usage = PIPE_USAGE_STATIC;
+ template.bind = PIPE_BIND_SAMPLER_VIEW;
+ template.flags = 0;
+
+ buffer->textures.individual.y = renderer->pipe->screen->resource_create(renderer->pipe->screen, &template);
+
+ if (!vl_idct_init_buffer(&renderer->idct_luma, &buffer->idct_y, buffer->textures.individual.y))
+ return false;
+
+ if (renderer->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420) {
+ template.width0 = renderer->buffer_width / 2;
+ template.height0 = renderer->buffer_height / 2;
+ }
+   else if (renderer->chroma_format == PIPE_VIDEO_CHROMA_FORMAT_422)
+      template.width0 = renderer->buffer_width / 2;
+
+ buffer->textures.individual.cb =
+ renderer->pipe->screen->resource_create(renderer->pipe->screen, &template);
+ buffer->textures.individual.cr =
+ renderer->pipe->screen->resource_create(renderer->pipe->screen, &template);
+
+ if (!vl_idct_init_buffer(&renderer->idct_chroma, &buffer->idct_cb, buffer->textures.individual.cb))
+ return false;
+
+ if (!vl_idct_init_buffer(&renderer->idct_chroma, &buffer->idct_cr, buffer->textures.individual.cr))
+ return false;
+
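+   /* The Y, Cb and Cr planes are single-component R16 textures; swizzle
+    * their views so that Y, Cb and Cr map to the r, g and b components
+    * respectively, with alpha forced to one.
+    */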
+ for (i = 0; i < 3; ++i) {
+ u_sampler_view_default_template(&sampler_view,
+ buffer->textures.all[i],
+ buffer->textures.all[i]->format);
+ sampler_view.swizzle_r = i == 0 ? PIPE_SWIZZLE_RED : PIPE_SWIZZLE_ZERO;
+ sampler_view.swizzle_g = i == 1 ? PIPE_SWIZZLE_RED : PIPE_SWIZZLE_ZERO;
+ sampler_view.swizzle_b = i == 2 ? PIPE_SWIZZLE_RED : PIPE_SWIZZLE_ZERO;
+ sampler_view.swizzle_a = PIPE_SWIZZLE_ONE;
+ buffer->sampler_views.all[i] = renderer->pipe->create_sampler_view(
+ renderer->pipe, buffer->textures.all[i], &sampler_view);
+ }
+
+ buffer->vertex_bufs.individual.quad.stride = renderer->quad.stride;
+ buffer->vertex_bufs.individual.quad.buffer_offset = renderer->quad.buffer_offset;
+ pipe_resource_reference(&buffer->vertex_bufs.individual.quad.buffer, renderer->quad.buffer);
+
+ buffer->vertex_bufs.individual.stream = vl_vb_init(
+ &buffer->vertex_stream, renderer->pipe, renderer->macroblocks_per_batch,
+ renderer->vertex_stream_stride);
+
+ return true;
+}
+
+void
+vl_mpeg12_mc_cleanup_buffer(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer)
+{
+ unsigned i;
+
+ assert(renderer && buffer);
+
+ for (i = 0; i < 3; ++i) {
+ pipe_sampler_view_reference(&buffer->sampler_views.all[i], NULL);
+ pipe_resource_reference(&buffer->vertex_bufs.all[i].buffer, NULL);
+ pipe_resource_reference(&buffer->textures.all[i], NULL);
+ }
+
+ pipe_resource_reference(&buffer->vertex_bufs.individual.quad.buffer, NULL);
+ vl_vb_cleanup(&buffer->vertex_stream);
+
+ vl_idct_cleanup_buffer(&renderer->idct_luma, &buffer->idct_y);
+ vl_idct_cleanup_buffer(&renderer->idct_chroma, &buffer->idct_cb);
+ vl_idct_cleanup_buffer(&renderer->idct_chroma, &buffer->idct_cr);
+
+ pipe_surface_reference(&buffer->surface, NULL);
+ pipe_surface_reference(&buffer->past, NULL);
+ pipe_surface_reference(&buffer->future, NULL);
+}
+
+void
+vl_mpeg12_mc_map_buffer(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer)
+{
+ assert(renderer && buffer);
+
+ vl_idct_map_buffers(&renderer->idct_luma, &buffer->idct_y);
+ vl_idct_map_buffers(&renderer->idct_chroma, &buffer->idct_cr);
+ vl_idct_map_buffers(&renderer->idct_chroma, &buffer->idct_cb);
+
+ vl_vb_map(&buffer->vertex_stream, renderer->pipe);
+}
+
+void
+vl_mpeg12_mc_renderer_render_macroblocks(struct vl_mpeg12_mc_renderer *renderer,
+ struct vl_mpeg12_mc_buffer *buffer,
+ struct pipe_surface *surface,
+ struct pipe_surface *past,
+ struct pipe_surface *future,
+ unsigned num_macroblocks,
+ struct pipe_mpeg12_macroblock *mpeg12_macroblocks,
+ struct pipe_fence_handle **fence)
+{
+ assert(renderer && buffer);
+ assert(surface);
+ assert(num_macroblocks);
+ assert(mpeg12_macroblocks);
+
+ if (surface != buffer->surface) {
+ pipe_surface_reference(&buffer->surface, surface);
+ pipe_surface_reference(&buffer->past, past);
+ pipe_surface_reference(&buffer->future, future);
+ buffer->fence = fence;
+ } else {
+      /* If the surface we're rendering to hasn't changed, the reference frames shouldn't change either. */
+ assert(buffer->past == past);
+ assert(buffer->future == future);
+ }
+
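+   /* Consume the macroblocks in batch-sized chunks; a full batch is
+    * unmapped, flushed and re-mapped before continuing with the rest.
+    */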
+ while (num_macroblocks) {
+ unsigned left_in_batch = renderer->macroblocks_per_batch - buffer->num_macroblocks;
+ unsigned num_to_submit = MIN2(num_macroblocks, left_in_batch);
+ unsigned i;
+
+ for (i = 0; i < num_to_submit; ++i) {
+ assert(mpeg12_macroblocks[i].base.codec == PIPE_VIDEO_CODEC_MPEG12);
+ grab_macroblock(renderer, buffer, &mpeg12_macroblocks[i]);
+ }
+
+ num_macroblocks -= num_to_submit;
+
+ if (buffer->num_macroblocks == renderer->macroblocks_per_batch) {
+ vl_mpeg12_mc_unmap_buffer(renderer, buffer);
+ vl_mpeg12_mc_renderer_flush(renderer, buffer);
+ pipe_surface_reference(&buffer->surface, surface);
+ pipe_surface_reference(&buffer->past, past);
+ pipe_surface_reference(&buffer->future, future);
+ vl_mpeg12_mc_map_buffer(renderer, buffer);
+ }
+ }
+}
+
+void
+vl_mpeg12_mc_unmap_buffer(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer)
+{
+ assert(renderer && buffer);
+
+ vl_idct_unmap_buffers(&renderer->idct_luma, &buffer->idct_y);
+ vl_idct_unmap_buffers(&renderer->idct_chroma, &buffer->idct_cr);
+ vl_idct_unmap_buffers(&renderer->idct_chroma, &buffer->idct_cb);
+
+ vl_vb_unmap(&buffer->vertex_stream, renderer->pipe);
+}
+
+void
+vl_mpeg12_mc_renderer_flush(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer)
+{
+ assert(renderer && buffer);
+ assert(buffer->num_macroblocks <= renderer->macroblocks_per_batch);
+
+ if (buffer->num_macroblocks == 0)
+ return;
+
+ vl_idct_flush(&renderer->idct_luma, &buffer->idct_y);
+ vl_idct_flush(&renderer->idct_chroma, &buffer->idct_cr);
+ vl_idct_flush(&renderer->idct_chroma, &buffer->idct_cb);
+
+ vl_vb_restart(&buffer->vertex_stream);
+
+ renderer->fb_state.cbufs[0] = buffer->surface;
+ renderer->pipe->bind_rasterizer_state(renderer->pipe, renderer->rs_state);
+ renderer->pipe->set_framebuffer_state(renderer->pipe, &renderer->fb_state);
+ renderer->pipe->set_viewport_state(renderer->pipe, &renderer->viewport);
+ renderer->pipe->set_vertex_buffers(renderer->pipe, 2, buffer->vertex_bufs.all);
+ renderer->pipe->bind_vertex_elements_state(renderer->pipe, renderer->vertex_elems_state);
+
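+   /* If no reference frame was provided, sample the destination surface
+    * itself so that both ref[] sampler slots are still backed by a valid
+    * texture.
+    */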
+ if (buffer->past) {
+ buffer->textures.individual.ref[0] = buffer->past->texture;
+ buffer->sampler_views.individual.ref[0] = find_or_create_sampler_view(renderer, buffer->past);
+ } else {
+ buffer->textures.individual.ref[0] = buffer->surface->texture;
+ buffer->sampler_views.individual.ref[0] = find_or_create_sampler_view(renderer, buffer->surface);
+ }
+
+ if (buffer->future) {
+ buffer->textures.individual.ref[1] = buffer->future->texture;
+ buffer->sampler_views.individual.ref[1] = find_or_create_sampler_view(renderer, buffer->future);
+ } else {
+ buffer->textures.individual.ref[1] = buffer->surface->texture;
+ buffer->sampler_views.individual.ref[1] = find_or_create_sampler_view(renderer, buffer->surface);
+ }
+
+ renderer->pipe->set_fragment_sampler_views(renderer->pipe, 5, buffer->sampler_views.all);
+ renderer->pipe->bind_fragment_sampler_states(renderer->pipe, 5, renderer->samplers.all);
+
+ renderer->pipe->bind_vs_state(renderer->pipe, renderer->vs);
+ renderer->pipe->bind_fs_state(renderer->pipe, renderer->fs);
+ util_draw_arrays(renderer->pipe, PIPE_PRIM_QUADS, 0, buffer->num_macroblocks * 4);
+
+ renderer->pipe->flush(renderer->pipe, PIPE_FLUSH_RENDER_CACHE, buffer->fence);
+
+ /* Next time we get this surface it may have new ref frames */
+ pipe_surface_reference(&buffer->surface, NULL);
+ pipe_surface_reference(&buffer->past, NULL);
+ pipe_surface_reference(&buffer->future, NULL);
+
+ buffer->num_macroblocks = 0;
+}
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.h b/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.h
new file mode 100644
index 00000000000..76d6e25ca36
--- /dev/null
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_mc_renderer.h
@@ -0,0 +1,141 @@
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef vl_mpeg12_mc_renderer_h
+#define vl_mpeg12_mc_renderer_h
+
+#include <pipe/p_compiler.h>
+#include <pipe/p_state.h>
+#include <pipe/p_video_state.h>
+#include "vl_types.h"
+#include "vl_idct.h"
+#include "vl_vertex_buffers.h"
+
+struct pipe_context;
+struct pipe_macroblock;
+struct keymap;
+
+/* A slice is video-width (rounded up to a multiple of macroblock width) x macroblock height */
+enum VL_MPEG12_MC_RENDERER_BUFFER_MODE
+{
+ VL_MPEG12_MC_RENDERER_BUFFER_SLICE, /* Saves memory at the cost of smaller batches */
+ VL_MPEG12_MC_RENDERER_BUFFER_PICTURE /* Larger batches, more memory */
+};
+
+struct vl_mpeg12_mc_renderer
+{
+ struct pipe_context *pipe;
+ unsigned buffer_width;
+ unsigned buffer_height;
+ enum pipe_video_chroma_format chroma_format;
+ const unsigned (*empty_block_mask)[3][2][2];
+ enum VL_MPEG12_MC_RENDERER_BUFFER_MODE bufmode;
+ unsigned macroblocks_per_batch;
+
+ unsigned vertex_stream_stride;
+
+ struct pipe_viewport_state viewport;
+ struct pipe_framebuffer_state fb_state;
+
+ struct vl_idct idct_luma, idct_chroma;
+
+ void *vertex_elems_state;
+ void *rs_state;
+
+ void *vs, *fs;
+
+ struct pipe_vertex_buffer quad;
+
+ union
+ {
+ void *all[5];
+ struct { void *y, *cb, *cr, *ref[2]; } individual;
+ } samplers;
+
+ struct keymap *texview_map;
+};
+
+struct vl_mpeg12_mc_buffer
+{
+ struct vl_idct_buffer idct_y, idct_cb, idct_cr;
+
+ struct vl_vertex_buffer vertex_stream;
+
+ union
+ {
+ struct pipe_sampler_view *all[5];
+ struct { struct pipe_sampler_view *y, *cb, *cr, *ref[2]; } individual;
+ } sampler_views;
+
+ union
+ {
+ struct pipe_resource *all[5];
+ struct { struct pipe_resource *y, *cb, *cr, *ref[2]; } individual;
+ } textures;
+
+ union
+ {
+ struct pipe_vertex_buffer all[2];
+ struct {
+ struct pipe_vertex_buffer quad, stream;
+ } individual;
+ } vertex_bufs;
+
+ struct pipe_surface *surface, *past, *future;
+ struct pipe_fence_handle **fence;
+ unsigned num_macroblocks;
+};
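+
+/* Typical call sequence (sketch; pipe, width, height, surface, past, future,
+ * mbs, num_mbs and fence are provided by the caller):
+ *
+ *    struct vl_mpeg12_mc_renderer mc;
+ *    struct vl_mpeg12_mc_buffer buf;
+ *
+ *    vl_mpeg12_mc_renderer_init(&mc, pipe, width, height,
+ *                               PIPE_VIDEO_CHROMA_FORMAT_420,
+ *                               VL_MPEG12_MC_RENDERER_BUFFER_PICTURE);
+ *    vl_mpeg12_mc_init_buffer(&mc, &buf);
+ *
+ *    vl_mpeg12_mc_map_buffer(&mc, &buf);
+ *    vl_mpeg12_mc_renderer_render_macroblocks(&mc, &buf, surface, past,
+ *                                             future, num_mbs, mbs, &fence);
+ *    vl_mpeg12_mc_unmap_buffer(&mc, &buf);
+ *    vl_mpeg12_mc_renderer_flush(&mc, &buf);
+ *
+ *    vl_mpeg12_mc_cleanup_buffer(&mc, &buf);
+ *    vl_mpeg12_mc_renderer_cleanup(&mc);
+ */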
+
+bool vl_mpeg12_mc_renderer_init(struct vl_mpeg12_mc_renderer *renderer,
+                                struct pipe_context *pipe,
+                                unsigned buffer_width,
+                                unsigned buffer_height,
+ enum pipe_video_chroma_format chroma_format,
+ enum VL_MPEG12_MC_RENDERER_BUFFER_MODE bufmode);
+
+void vl_mpeg12_mc_renderer_cleanup(struct vl_mpeg12_mc_renderer *renderer);
+
+bool vl_mpeg12_mc_init_buffer(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer);
+
+void vl_mpeg12_mc_cleanup_buffer(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer);
+
+void vl_mpeg12_mc_map_buffer(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer);
+
+void vl_mpeg12_mc_renderer_render_macroblocks(struct vl_mpeg12_mc_renderer *renderer,
+ struct vl_mpeg12_mc_buffer *buffer,
+ struct pipe_surface *surface,
+ struct pipe_surface *past,
+ struct pipe_surface *future,
+ unsigned num_macroblocks,
+ struct pipe_mpeg12_macroblock *mpeg12_macroblocks,
+ struct pipe_fence_handle **fence);
+
+void vl_mpeg12_mc_unmap_buffer(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer);
+
+void vl_mpeg12_mc_renderer_flush(struct vl_mpeg12_mc_renderer *renderer, struct vl_mpeg12_mc_buffer *buffer);
+
+#endif /* vl_mpeg12_mc_renderer_h */
diff --git a/src/gallium/auxiliary/vl/vl_types.h b/src/gallium/auxiliary/vl/vl_types.h
new file mode 100644
index 00000000000..9c745d73978
--- /dev/null
+++ b/src/gallium/auxiliary/vl/vl_types.h
@@ -0,0 +1,51 @@
+/**************************************************************************
+ *
+ * Copyright 2009 Younes Manton.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef vl_types_h
+#define vl_types_h
+
+struct vertex2f
+{
+ float x, y;
+};
+
+struct vertex2s
+{
+ short x, y;
+};
+
+struct vertex4f
+{
+ float x, y, z, w;
+};
+
+struct quadf
+{
+ struct vertex2f bl, tl, tr, br;
+};
+
+#endif /* vl_types_h */
diff --git a/src/gallium/auxiliary/vl/vl_vertex_buffers.c b/src/gallium/auxiliary/vl/vl_vertex_buffers.c
new file mode 100644
index 00000000000..552a0451fef
--- /dev/null
+++ b/src/gallium/auxiliary/vl/vl_vertex_buffers.c
@@ -0,0 +1,183 @@
+/**************************************************************************
+ *
+ * Copyright 2010 Christian König
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <assert.h>
+#include <pipe/p_context.h>
+#include <pipe/p_screen.h>
+#include <util/u_memory.h>
+#include <util/u_inlines.h>
+#include <util/u_format.h>
+#include "vl_vertex_buffers.h"
+#include "vl_types.h"
+
+/* vertices for a quad covering a block */
+static const struct quadf const_quad = {
+ {0.0f, 1.0f}, {0.0f, 0.0f}, {1.0f, 0.0f}, {1.0f, 1.0f}
+};
+
+struct pipe_vertex_buffer
+vl_vb_upload_quads(struct pipe_context *pipe, unsigned max_blocks)
+{
+ struct pipe_vertex_buffer quad;
+ struct pipe_transfer *buf_transfer;
+ struct quadf *v;
+
+ unsigned i;
+
+ assert(pipe);
+ assert(max_blocks);
+
+ /* create buffer */
+ quad.stride = sizeof(struct vertex2f);
+ quad.buffer_offset = 0;
+ quad.buffer = pipe_buffer_create
+ (
+ pipe->screen,
+ PIPE_BIND_VERTEX_BUFFER,
+ PIPE_USAGE_STATIC,
+ sizeof(struct vertex2f) * 4 * max_blocks
+ );
+
+   if (!quad.buffer)
+ return quad;
+
+ /* and fill it */
+ v = pipe_buffer_map
+ (
+ pipe,
+ quad.buffer,
+ PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
+ &buf_transfer
+ );
+
+   for (i = 0; i < max_blocks; ++i)
+ memcpy(v + i, &const_quad, sizeof(const_quad));
+
+ pipe_buffer_unmap(pipe, buf_transfer);
+
+ return quad;
+}
+
+struct pipe_vertex_element
+vl_vb_get_quad_vertex_element(void)
+{
+ struct pipe_vertex_element element;
+
+   /* set up the quad vertex element */
+ element.src_offset = 0;
+ element.instance_divisor = 0;
+ element.vertex_buffer_index = 0;
+ element.src_format = PIPE_FORMAT_R32G32_FLOAT;
+
+ return element;
+}
+
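+/* Fill in the offsets of a tightly packed vertex element array, point every
+ * element at the given vertex buffer and return the resulting stride in
+ * bytes; the caller only has to set src_format beforehand.
+ *
+ * Example (sketch, the two stream attribute formats are just placeholders):
+ *
+ *    struct pipe_vertex_element elems[3];
+ *    unsigned stride;
+ *
+ *    elems[0] = vl_vb_get_quad_vertex_element();      // buffer 0: quad
+ *    elems[1].src_format = PIPE_FORMAT_R32G32_FLOAT;
+ *    elems[2].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
+ *    stride = vl_vb_element_helper(&elems[1], 2, 1);  // buffer 1: stream
+ */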
+unsigned
+vl_vb_element_helper(struct pipe_vertex_element* elements, unsigned num_elements,
+ unsigned vertex_buffer_index)
+{
+ unsigned i, offset = 0;
+
+ assert(elements && num_elements);
+
+   for (i = 0; i < num_elements; ++i) {
+ elements[i].src_offset = offset;
+ elements[i].instance_divisor = 0;
+ elements[i].vertex_buffer_index = vertex_buffer_index;
+ offset += util_format_get_blocksize(elements[i].src_format);
+ }
+
+ return offset;
+}
+
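+/* Create the streaming vertex buffer (four vertices per block), keep a
+ * reference in 'buffer' and map it for writing, and return a
+ * pipe_vertex_buffer description the caller can bind.
+ */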
+struct pipe_vertex_buffer
+vl_vb_init(struct vl_vertex_buffer *buffer, struct pipe_context *pipe,
+ unsigned max_blocks, unsigned stride)
+{
+ struct pipe_vertex_buffer buf;
+
+ assert(buffer);
+
+   buffer->num_verts = 0;
+   buffer->stride = stride;
+   buffer->resource = NULL;
+
+ buf.stride = stride;
+ buf.buffer_offset = 0;
+ buf.buffer = pipe_buffer_create
+ (
+ pipe->screen,
+ PIPE_BIND_VERTEX_BUFFER,
+ PIPE_USAGE_STREAM,
+ stride * 4 * max_blocks
+ );
+
+ pipe_resource_reference(&buffer->resource, buf.buffer);
+
+ vl_vb_map(buffer, pipe);
+
+ return buf;
+}
+
+void
+vl_vb_map(struct vl_vertex_buffer *buffer, struct pipe_context *pipe)
+{
+ assert(buffer && pipe);
+
+ buffer->vectors = pipe_buffer_map
+ (
+ pipe,
+ buffer->resource,
+ PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
+ &buffer->transfer
+ );
+}
+
+void
+vl_vb_unmap(struct vl_vertex_buffer *buffer, struct pipe_context *pipe)
+{
+ assert(buffer && pipe);
+
+ pipe_buffer_unmap(pipe, buffer->transfer);
+}
+
+unsigned
+vl_vb_restart(struct vl_vertex_buffer *buffer)
+{
+   unsigned todo;
+
+   assert(buffer);
+
+   todo = buffer->num_verts;
+   buffer->num_verts = 0;
+   return todo;
+}
+
+void
+vl_vb_cleanup(struct vl_vertex_buffer *buffer)
+{
+ assert(buffer);
+
+ pipe_resource_reference(&buffer->resource, NULL);
+}
diff --git a/src/gallium/auxiliary/vl/vl_vertex_buffers.h b/src/gallium/auxiliary/vl/vl_vertex_buffers.h
new file mode 100644
index 00000000000..b8e8766ec50
--- /dev/null
+++ b/src/gallium/auxiliary/vl/vl_vertex_buffers.h
@@ -0,0 +1,78 @@
+/**************************************************************************
+ *
+ * Copyright 2010 Christian König
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef vl_vertex_buffers_h
+#define vl_vertex_buffers_h
+
+#include <assert.h>
+#include <string.h>
+#include <pipe/p_state.h>
+#include "vl_types.h"
+
+struct vl_vertex_buffer
+{
+ unsigned num_verts;
+ unsigned stride;
+ struct pipe_resource *resource;
+ struct pipe_transfer *transfer;
+ void *vectors;
+};
+
+struct pipe_vertex_buffer vl_vb_upload_quads(struct pipe_context *pipe, unsigned max_blocks);
+
+struct pipe_vertex_element vl_vb_get_quad_vertex_element(void);
+
+unsigned vl_vb_element_helper(struct pipe_vertex_element* elements, unsigned num_elements,
+ unsigned vertex_buffer_index);
+
+struct pipe_vertex_buffer vl_vb_init(struct vl_vertex_buffer *buffer,
+ struct pipe_context *pipe,
+ unsigned max_blocks, unsigned stride);
+
+void vl_vb_map(struct vl_vertex_buffer *buffer, struct pipe_context *pipe);
+
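+/* Append one block to the stream: the caller's per-block data ('stride'
+ * bytes at 'elements') is copied once for each of the four quad corners.
+ */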
+static inline void
+vl_vb_add_block(struct vl_vertex_buffer *buffer, void *elements)
+{
+   uint8_t *pos;
+ unsigned i;
+
+ assert(buffer);
+
+   pos = (uint8_t *)buffer->vectors + buffer->num_verts * buffer->stride;
+   for (i = 0; i < 4; ++i) {
+ memcpy(pos, elements, buffer->stride);
+ pos += buffer->stride;
+ buffer->num_verts++;
+ }
+}
+
+void vl_vb_unmap(struct vl_vertex_buffer *buffer, struct pipe_context *pipe);
+
+unsigned vl_vb_restart(struct vl_vertex_buffer *buffer);
+
+void vl_vb_cleanup(struct vl_vertex_buffer *buffer);
+
+#endif