author    Tapani Pälli <[email protected]>  2018-12-17 14:17:15 +0200
committer Tapani Pälli <[email protected]>  2019-01-10 08:02:30 +0200
commit    864cc419eb0a418827620afd42879698cb149088 (patch)
tree      605050e71cb7ff460e88d35f70790169c0892891 /src/intel
parent    406f603b347f554f9f796d22cb74dde48d6551d3 (diff)
intel/isl: move tiled_memcpy static libs from i965 to isl
The patch moves the intel_tiled_memcpy[_sse41] libraries to isl, renames some
functions and types, and makes the required build system changes for Meson,
Automake, and Android. No functional changes are introduced.
v2: code cleanups, move isl_get_memcpy_type to i965 (Jason)
v3: move isl_mem_copy_fn to priv header, cleanups (Jason, Dylan)
Signed-off-by: Tapani Pälli <[email protected]>
Reviewed-by: Jason Ekstrand <[email protected]>
Reviewed-by: Dylan Baker <[email protected]>
Acked-by: Kenneth Graunke <[email protected]>
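
The renamed public entry points land in isl.h as isl_memcpy_linear_to_tiled()
and isl_memcpy_tiled_to_linear(). A minimal sketch of a caller, assuming
hypothetical driver-mapped pointers and an RGBA8 surface — the function
signature, tiling enum, and copy-type enum come from the patch below, but
upload_region(), tiled_map, staging, and the include path are illustrative:

/* Upload a 64x64-pixel region of a 4-byte-per-pixel linear staging
 * buffer into an X-tiled surface. X coordinates are in bytes
 * (pixels * bytes-per-pixel); Y coordinates are in rows. */
#include <stdbool.h>
#include <stdint.h>
#include "isl/isl.h"   /* include path assumed */

static void
upload_region(char *tiled_map, uint32_t tiled_pitch,
              const char *staging, int32_t staging_pitch)
{
   const uint32_t cpp = 4;                  /* RGBA8 */
   uint32_t x1 = 0 * cpp, x2 = 64 * cpp;    /* byte range */
   uint32_t y1 = 0,       y2 = 64;          /* row range */

   /* 'staging' is the address of (x1, y1) in the linear source;
    * 'tiled_map' is the address of (0, 0) in the tiled destination. */
   isl_memcpy_linear_to_tiled(x1, x2, y1, y2,
                              tiled_map, staging,
                              tiled_pitch, staging_pitch,
                              false /* has_swizzling */,
                              ISL_TILING_X,
                              ISL_MEMCPY /* plain memcpy path */);
}

Per v2 of the series, choosing the copy type (e.g. when to ask for
ISL_MEMCPY_STREAMING_LOAD) stays with the caller: isl_get_memcpy_type
moved to i965 rather than living in isl.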
Diffstat (limited to 'src/intel')
-rw-r--r--  src/intel/Android.isl.mk                  |   51
-rw-r--r--  src/intel/Makefile.isl.am                 |   22
-rw-r--r--  src/intel/Makefile.sources                |    9
-rw-r--r--  src/intel/isl/isl.c                       |   46
-rw-r--r--  src/intel/isl/isl.h                       |   32
-rw-r--r--  src/intel/isl/isl_priv.h                  |   39
-rw-r--r--  src/intel/isl/isl_tiled_memcpy.c          | 1005
-rw-r--r--  src/intel/isl/isl_tiled_memcpy_normal.c   |   59
-rw-r--r--  src/intel/isl/isl_tiled_memcpy_sse41.c    |   60
-rw-r--r--  src/intel/isl/meson.build                 |   35
10 files changed, 1353 insertions, 5 deletions
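
Most of the diffstat's weight sits in isl_tiled_memcpy.c, which is never
compiled on its own: it is listed only as a dependency
(ISL_TILED_MEMCPY_DEP_FILES) and is textually #included by the two thin
wrappers, with the SSE4.1 wrapper defining INLINE_SSE41 before the include
and being built with -msse4.1. A stripped-down sketch of that
multi-compilation idiom, using illustrative file and function names rather
than the patch's own:

/* impl.c -- shared implementation; a build dependency, never built directly */
#include <stddef.h>
#include <string.h>

static void
copy_impl(char *dst, const char *src, size_t n)
{
#if defined(INLINE_SSE41)
   /* streaming-load fast path would go here; only legal when this
    * translation unit is compiled with -msse4.1 */
#endif
   memcpy(dst, src, n);
}

/* impl_normal.c -- baseline translation unit */
#include "impl.c"
void copy_normal(char *dst, const char *src, size_t n)
{ copy_impl(dst, src, n); }

/* impl_sse41.c -- compiled with -msse4.1; feature macro set first */
#define INLINE_SSE41
#include "impl.c"
void copy_sse41(char *dst, const char *src, size_t n)
{ copy_impl(dst, src, n); }

Compiling the shared file twice keeps a single copy of the algorithm while
letting only one translation unit use the SSE4.1 intrinsics, mirroring how
isl.c dispatches to _isl_memcpy_*_sse41 under USE_SSE41 in the diff below.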
diff --git a/src/intel/Android.isl.mk b/src/intel/Android.isl.mk index 23cff55d251..07a64b8ed1c 100644 --- a/src/intel/Android.isl.mk +++ b/src/intel/Android.isl.mk @@ -199,6 +199,47 @@ include $(MESA_COMMON_MK) include $(BUILD_STATIC_LIBRARY) # --------------------------------------- +# Build libmesa_isl_tiled_memcpy +# --------------------------------------- + +include $(CLEAR_VARS) + +LOCAL_MODULE := libmesa_isl_tiled_memcpy + +LOCAL_C_INCLUDES := \ + $(MESA_TOP)/src/gallium/include \ + $(MESA_TOP)/src/mapi \ + $(MESA_TOP)/src/mesa + +LOCAL_SRC_FILES := $(ISL_TILED_MEMCPY_FILES) + +include $(MESA_COMMON_MK) +include $(BUILD_STATIC_LIBRARY) + +# --------------------------------------- +# Build libmesa_isl_tiled_memcpy_sse41 +# --------------------------------------- + +ifeq ($(ARCH_X86_HAVE_SSE4_1),true) +include $(CLEAR_VARS) + +LOCAL_MODULE := libmesa_isl_tiled_memcpy_sse41 + +LOCAL_C_INCLUDES := \ + $(MESA_TOP)/src/gallium/include \ + $(MESA_TOP)/src/mapi \ + $(MESA_TOP)/src/mesa + +LOCAL_SRC_FILES := $(ISL_TILED_MEMCPY_SSE41_FILES) + +LOCAL_CFLAGS += \ + -DUSE_SSE41 -msse4.1 -mstackrealign + +include $(MESA_COMMON_MK) +include $(BUILD_STATIC_LIBRARY) +endif + +# --------------------------------------- # Build libmesa_isl # --------------------------------------- @@ -227,7 +268,15 @@ LOCAL_WHOLE_STATIC_LIBRARIES := \ libmesa_isl_gen9 \ libmesa_isl_gen10 \ libmesa_isl_gen11 \ - libmesa_genxml + libmesa_genxml \ + libmesa_isl_tiled_memcpy + +ifeq ($(ARCH_X86_HAVE_SSE4_1),true) +LOCAL_CFLAGS += \ + -DUSE_SSE41 +LOCAL_WHOLE_STATIC_LIBRARIES += \ + libmesa_isl_tiled_memcpy_sse41 +endif # Autogenerated sources diff --git a/src/intel/Makefile.isl.am b/src/intel/Makefile.isl.am index f51294468cd..a6733f3ba8e 100644 --- a/src/intel/Makefile.isl.am +++ b/src/intel/Makefile.isl.am @@ -31,11 +31,26 @@ ISL_GEN_LIBS = \ isl/libisl-gen11.la \ $(NULL) -noinst_LTLIBRARIES += $(ISL_GEN_LIBS) isl/libisl.la +noinst_LTLIBRARIES += $(ISL_GEN_LIBS) \ + isl/libisl.la \ + libisl_tiled_memcpy.la \ + libisl_tiled_memcpy_sse41.la + +isl_libisl_la_LIBADD = $(ISL_GEN_LIBS) \ + libisl_tiled_memcpy.la \ + libisl_tiled_memcpy_sse41.la -isl_libisl_la_LIBADD = $(ISL_GEN_LIBS) isl_libisl_la_SOURCES = $(ISL_FILES) $(ISL_GENERATED_FILES) +libisl_tiled_memcpy_la_SOURCES = $(ISL_TILED_MEMCPY_FILES) +libisl_tiled_memcpy_la_CFLAGS = $(AM_CFLAGS) + +libisl_tiled_memcpy_sse41_la_SOURCES = $(ISL_TILED_MEMCPY_SSE41_FILES) +libisl_tiled_memcpy_sse41_la_CFLAGS = $(AM_CFLAGS) $(SSE41_CFLAGS) + +isl_tiled_memcpy_normal.c: $(ISL_TILED_MEMCPY_DEP_FILES) +isl_tiled_memcpy_sse41.c: $(ISL_TILED_MEMCPY_DEP_FILES) + isl_libisl_gen4_la_SOURCES = $(ISL_GEN4_FILES) isl_libisl_gen4_la_CFLAGS = $(AM_CFLAGS) -DGEN_VERSIONx10=40 @@ -90,4 +105,5 @@ isl_tests_isl_surf_get_image_offset_test_LDADD = \ EXTRA_DIST += \ isl/gen_format_layout.py \ isl/isl_format_layout.csv \ - isl/README + isl/README \ + $(ISL_TILED_MEMCPY_DEP_FILES) diff --git a/src/intel/Makefile.sources b/src/intel/Makefile.sources index fe06a57b42e..94a28d370e8 100644 --- a/src/intel/Makefile.sources +++ b/src/intel/Makefile.sources @@ -219,6 +219,15 @@ ISL_GEN11_FILES = \ ISL_GENERATED_FILES = \ isl/isl_format_layout.c +ISL_TILED_MEMCPY_FILES = \ + isl/isl_tiled_memcpy_normal.c + +ISL_TILED_MEMCPY_SSE41_FILES = \ + isl/isl_tiled_memcpy_sse41.c + +ISL_TILED_MEMCPY_DEP_FILES = \ + isl/isl_tiled_memcpy.c + VULKAN_FILES := \ vulkan/anv_allocator.c \ vulkan/anv_android.h \ diff --git a/src/intel/isl/isl.c b/src/intel/isl/isl.c index 359293cfcb2..7bb0fce3b60 100644 --- 
a/src/intel/isl/isl.c +++ b/src/intel/isl/isl.c @@ -35,6 +35,52 @@ #include "isl_gen9.h" #include "isl_priv.h" +void +isl_memcpy_linear_to_tiled(uint32_t xt1, uint32_t xt2, + uint32_t yt1, uint32_t yt2, + char *dst, const char *src, + uint32_t dst_pitch, int32_t src_pitch, + bool has_swizzling, + enum isl_tiling tiling, + isl_memcpy_type copy_type) +{ +#ifdef USE_SSE41 + if (copy_type == ISL_MEMCPY_STREAMING_LOAD) { + _isl_memcpy_linear_to_tiled_sse41( + xt1, xt2, yt1, yt2, dst, src, dst_pitch, src_pitch, has_swizzling, + tiling, copy_type); + return; + } +#endif + + _isl_memcpy_linear_to_tiled( + xt1, xt2, yt1, yt2, dst, src, dst_pitch, src_pitch, has_swizzling, + tiling, copy_type); +} + +void +isl_memcpy_tiled_to_linear(uint32_t xt1, uint32_t xt2, + uint32_t yt1, uint32_t yt2, + char *dst, const char *src, + int32_t dst_pitch, uint32_t src_pitch, + bool has_swizzling, + enum isl_tiling tiling, + isl_memcpy_type copy_type) +{ +#ifdef USE_SSE41 + if (copy_type == ISL_MEMCPY_STREAMING_LOAD) { + _isl_memcpy_tiled_to_linear_sse41( + xt1, xt2, yt1, yt2, dst, src, dst_pitch, src_pitch, has_swizzling, + tiling, copy_type); + return; + } +#endif + + _isl_memcpy_tiled_to_linear( + xt1, xt2, yt1, yt2, dst, src, dst_pitch, src_pitch, has_swizzling, + tiling, copy_type); +} + void PRINTFLIKE(3, 4) UNUSED __isl_finishme(const char *file, int line, const char *fmt, ...) { diff --git a/src/intel/isl/isl.h b/src/intel/isl/isl.h index d53c69adbde..cfac922a3d2 100644 --- a/src/intel/isl/isl.h +++ b/src/intel/isl/isl.h @@ -949,6 +949,12 @@ enum isl_msaa_layout { ISL_MSAA_LAYOUT_ARRAY, }; +typedef enum { + ISL_MEMCPY = 0, + ISL_MEMCPY_BGRA8, + ISL_MEMCPY_STREAMING_LOAD, + ISL_MEMCPY_INVALID, +} isl_memcpy_type; struct isl_device { const struct gen_device_info *info; @@ -2065,6 +2071,32 @@ uint32_t isl_surf_get_depth_format(const struct isl_device *dev, const struct isl_surf *surf); +/** + * @brief performs a copy from linear to tiled surface + * + */ +void +isl_memcpy_linear_to_tiled(uint32_t xt1, uint32_t xt2, + uint32_t yt1, uint32_t yt2, + char *dst, const char *src, + uint32_t dst_pitch, int32_t src_pitch, + bool has_swizzling, + enum isl_tiling tiling, + isl_memcpy_type copy_type); + +/** + * @brief performs a copy from tiled to linear surface + * + */ +void +isl_memcpy_tiled_to_linear(uint32_t xt1, uint32_t xt2, + uint32_t yt1, uint32_t yt2, + char *dst, const char *src, + int32_t dst_pitch, uint32_t src_pitch, + bool has_swizzling, + enum isl_tiling tiling, + isl_memcpy_type copy_type); + #ifdef __cplusplus } #endif diff --git a/src/intel/isl/isl_priv.h b/src/intel/isl/isl_priv.h index 871518409ee..993ae13473d 100644 --- a/src/intel/isl/isl_priv.h +++ b/src/intel/isl/isl_priv.h @@ -25,6 +25,7 @@ #define ISL_PRIV_H #include <assert.h> +#include <stddef.h> #include <strings.h> #include "dev/gen_device_info.h" @@ -47,6 +48,8 @@ __isl_finishme(const char *file, int line, const char *fmt, ...); #define MIN(a, b) ((a) < (b) ? (a) : (b)) #define MAX(a, b) ((a) > (b) ? 
(a) : (b)) +typedef void *(*isl_mem_copy_fn)(void *dest, const void *src, size_t n); + static inline bool isl_is_pow2(uintmax_t n) { @@ -158,6 +161,42 @@ isl_extent3d_el_to_sa(enum isl_format fmt, struct isl_extent3d extent_el) }; } +void +_isl_memcpy_linear_to_tiled(uint32_t xt1, uint32_t xt2, + uint32_t yt1, uint32_t yt2, + char *dst, const char *src, + uint32_t dst_pitch, int32_t src_pitch, + bool has_swizzling, + enum isl_tiling tiling, + isl_memcpy_type copy_type); + +void +_isl_memcpy_tiled_to_linear(uint32_t xt1, uint32_t xt2, + uint32_t yt1, uint32_t yt2, + char *dst, const char *src, + int32_t dst_pitch, uint32_t src_pitch, + bool has_swizzling, + enum isl_tiling tiling, + isl_memcpy_type copy_type); + +void +_isl_memcpy_linear_to_tiled_sse41(uint32_t xt1, uint32_t xt2, + uint32_t yt1, uint32_t yt2, + char *dst, const char *src, + uint32_t dst_pitch, int32_t src_pitch, + bool has_swizzling, + enum isl_tiling tiling, + isl_memcpy_type copy_type); + +void +_isl_memcpy_tiled_to_linear_sse41(uint32_t xt1, uint32_t xt2, + uint32_t yt1, uint32_t yt2, + char *dst, const char *src, + int32_t dst_pitch, uint32_t src_pitch, + bool has_swizzling, + enum isl_tiling tiling, + isl_memcpy_type copy_type); + /* This is useful for adding the isl_prefix to genX functions */ #define __PASTE2(x, y) x ## y #define __PASTE(x, y) __PASTE2(x, y) diff --git a/src/intel/isl/isl_tiled_memcpy.c b/src/intel/isl/isl_tiled_memcpy.c new file mode 100644 index 00000000000..7df7835f9ab --- /dev/null +++ b/src/intel/isl/isl_tiled_memcpy.c @@ -0,0 +1,1005 @@ +/* + * Mesa 3-D graphics library + * + * Copyright 2012 Intel Corporation + * Copyright 2013 Google + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Chad Versace <[email protected]> + * Frank Henigman <[email protected]> + */ + +#include <string.h> + +#include "util/macros.h" +#include "main/macros.h" + +#include "isl_priv.h" + +#if defined(__SSSE3__) +#include <tmmintrin.h> +#elif defined(__SSE2__) +#include <emmintrin.h> +#endif + +#define FILE_DEBUG_FLAG DEBUG_TEXTURE + +#define ALIGN_DOWN(a, b) ROUND_DOWN_TO(a, b) +#define ALIGN_UP(a, b) ALIGN(a, b) + +/* Tile dimensions. Width and span are in bytes, height is in pixels (i.e. + * unitless). A "span" is the most number of bytes we can copy from linear + * to tiled without needing to calculate a new destination address. 
+ */ +static const uint32_t xtile_width = 512; +static const uint32_t xtile_height = 8; +static const uint32_t xtile_span = 64; +static const uint32_t ytile_width = 128; +static const uint32_t ytile_height = 32; +static const uint32_t ytile_span = 16; + +static inline uint32_t +ror(uint32_t n, uint32_t d) +{ + return (n >> d) | (n << (32 - d)); +} + +static inline uint32_t +bswap32(uint32_t n) +{ +#if defined(HAVE___BUILTIN_BSWAP32) + return __builtin_bswap32(n); +#else + return (n >> 24) | + ((n >> 8) & 0x0000ff00) | + ((n << 8) & 0x00ff0000) | + (n << 24); +#endif +} + +/** + * Copy RGBA to BGRA - swap R and B. + */ +static inline void * +rgba8_copy(void *dst, const void *src, size_t bytes) +{ + uint32_t *d = dst; + uint32_t const *s = src; + + assert(bytes % 4 == 0); + + while (bytes >= 4) { + *d = ror(bswap32(*s), 8); + d += 1; + s += 1; + bytes -= 4; + } + return dst; +} + +#ifdef __SSSE3__ +static const uint8_t rgba8_permutation[16] = + { 2,1,0,3, 6,5,4,7, 10,9,8,11, 14,13,12,15 }; + +static inline void +rgba8_copy_16_aligned_dst(void *dst, const void *src) +{ + _mm_store_si128(dst, + _mm_shuffle_epi8(_mm_loadu_si128(src), + *(__m128i *)rgba8_permutation)); +} + +static inline void +rgba8_copy_16_aligned_src(void *dst, const void *src) +{ + _mm_storeu_si128(dst, + _mm_shuffle_epi8(_mm_load_si128(src), + *(__m128i *)rgba8_permutation)); +} + +#elif defined(__SSE2__) +static inline void +rgba8_copy_16_aligned_dst(void *dst, const void *src) +{ + __m128i srcreg, dstreg, agmask, ag, rb, br; + + agmask = _mm_set1_epi32(0xFF00FF00); + srcreg = _mm_loadu_si128((__m128i *)src); + + rb = _mm_andnot_si128(agmask, srcreg); + ag = _mm_and_si128(agmask, srcreg); + br = _mm_shufflehi_epi16(_mm_shufflelo_epi16(rb, _MM_SHUFFLE(2, 3, 0, 1)), + _MM_SHUFFLE(2, 3, 0, 1)); + dstreg = _mm_or_si128(ag, br); + + _mm_store_si128((__m128i *)dst, dstreg); +} + +static inline void +rgba8_copy_16_aligned_src(void *dst, const void *src) +{ + __m128i srcreg, dstreg, agmask, ag, rb, br; + + agmask = _mm_set1_epi32(0xFF00FF00); + srcreg = _mm_load_si128((__m128i *)src); + + rb = _mm_andnot_si128(agmask, srcreg); + ag = _mm_and_si128(agmask, srcreg); + br = _mm_shufflehi_epi16(_mm_shufflelo_epi16(rb, _MM_SHUFFLE(2, 3, 0, 1)), + _MM_SHUFFLE(2, 3, 0, 1)); + dstreg = _mm_or_si128(ag, br); + + _mm_storeu_si128((__m128i *)dst, dstreg); +} +#endif + +/** + * Copy RGBA to BGRA - swap R and B, with the destination 16-byte aligned. + */ +static inline void * +rgba8_copy_aligned_dst(void *dst, const void *src, size_t bytes) +{ + assert(bytes == 0 || !(((uintptr_t)dst) & 0xf)); + +#if defined(__SSSE3__) || defined(__SSE2__) + if (bytes == 64) { + rgba8_copy_16_aligned_dst(dst + 0, src + 0); + rgba8_copy_16_aligned_dst(dst + 16, src + 16); + rgba8_copy_16_aligned_dst(dst + 32, src + 32); + rgba8_copy_16_aligned_dst(dst + 48, src + 48); + return dst; + } + + while (bytes >= 16) { + rgba8_copy_16_aligned_dst(dst, src); + src += 16; + dst += 16; + bytes -= 16; + } +#endif + + rgba8_copy(dst, src, bytes); + + return dst; +} + +/** + * Copy RGBA to BGRA - swap R and B, with the source 16-byte aligned. 
+ */ +static inline void * +rgba8_copy_aligned_src(void *dst, const void *src, size_t bytes) +{ + assert(bytes == 0 || !(((uintptr_t)src) & 0xf)); + +#if defined(__SSSE3__) || defined(__SSE2__) + if (bytes == 64) { + rgba8_copy_16_aligned_src(dst + 0, src + 0); + rgba8_copy_16_aligned_src(dst + 16, src + 16); + rgba8_copy_16_aligned_src(dst + 32, src + 32); + rgba8_copy_16_aligned_src(dst + 48, src + 48); + return dst; + } + + while (bytes >= 16) { + rgba8_copy_16_aligned_src(dst, src); + src += 16; + dst += 16; + bytes -= 16; + } +#endif + + rgba8_copy(dst, src, bytes); + + return dst; +} + +/** + * Each row from y0 to y1 is copied in three parts: [x0,x1), [x1,x2), [x2,x3). + * These ranges are in bytes, i.e. pixels * bytes-per-pixel. + * The first and last ranges must be shorter than a "span" (the longest linear + * stretch within a tile) and the middle must equal a whole number of spans. + * Ranges may be empty. The region copied must land entirely within one tile. + * 'dst' is the start of the tile and 'src' is the corresponding + * address to copy from, though copying begins at (x0, y0). + * To enable swizzling 'swizzle_bit' must be 1<<6, otherwise zero. + * Swizzling flips bit 6 in the copy destination offset, when certain other + * bits are set in it. + */ +typedef void (*tile_copy_fn)(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, + uint32_t y0, uint32_t y1, + char *dst, const char *src, + int32_t linear_pitch, + uint32_t swizzle_bit, + isl_memcpy_type copy_type); + +/** + * Copy texture data from linear to X tile layout. + * + * \copydoc tile_copy_fn + * + * The mem_copy parameters allow the user to specify an alternative mem_copy + * function that, for instance, may do RGBA -> BGRA swizzling. The first + * function must handle any memory alignment while the second function must + * only handle 16-byte alignment in whichever side (source or destination) is + * tiled. + */ +static inline void +linear_to_xtiled(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, + uint32_t y0, uint32_t y1, + char *dst, const char *src, + int32_t src_pitch, + uint32_t swizzle_bit, + isl_mem_copy_fn mem_copy, + isl_mem_copy_fn mem_copy_align16) +{ + /* The copy destination offset for each range copied is the sum of + * an X offset 'x0' or 'xo' and a Y offset 'yo.' + */ + uint32_t xo, yo; + + src += (ptrdiff_t)y0 * src_pitch; + + for (yo = y0 * xtile_width; yo < y1 * xtile_width; yo += xtile_width) { + /* Bits 9 and 10 of the copy destination offset control swizzling. + * Only 'yo' contributes to those bits in the total offset, + * so calculate 'swizzle' just once per row. + * Move bits 9 and 10 three and four places respectively down + * to bit 6 and xor them. + */ + uint32_t swizzle = ((yo >> 3) ^ (yo >> 4)) & swizzle_bit; + + mem_copy(dst + ((x0 + yo) ^ swizzle), src + x0, x1 - x0); + + for (xo = x1; xo < x2; xo += xtile_span) { + mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + xo, xtile_span); + } + + mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + x2, x3 - x2); + + src += src_pitch; + } +} + +/** + * Copy texture data from linear to Y tile layout. + * + * \copydoc tile_copy_fn + */ +static inline void +linear_to_ytiled(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, + uint32_t y0, uint32_t y3, + char *dst, const char *src, + int32_t src_pitch, + uint32_t swizzle_bit, + isl_mem_copy_fn mem_copy, + isl_mem_copy_fn mem_copy_align16) +{ + /* Y tiles consist of columns that are 'ytile_span' wide (and the same height + * as the tile). 
Thus the destination offset for (x,y) is the sum of: + * (x % column_width) // position within column + * (x / column_width) * bytes_per_column // column number * bytes per column + * y * column_width + * + * The copy destination offset for each range copied is the sum of + * an X offset 'xo0' or 'xo' and a Y offset 'yo.' + */ + const uint32_t column_width = ytile_span; + const uint32_t bytes_per_column = column_width * ytile_height; + + uint32_t y1 = MIN2(y3, ALIGN_UP(y0, 4)); + uint32_t y2 = MAX2(y1, ALIGN_DOWN(y3, 4)); + + uint32_t xo0 = (x0 % ytile_span) + (x0 / ytile_span) * bytes_per_column; + uint32_t xo1 = (x1 % ytile_span) + (x1 / ytile_span) * bytes_per_column; + + /* Bit 9 of the destination offset control swizzling. + * Only the X offset contributes to bit 9 of the total offset, + * so swizzle can be calculated in advance for these X positions. + * Move bit 9 three places down to bit 6. + */ + uint32_t swizzle0 = (xo0 >> 3) & swizzle_bit; + uint32_t swizzle1 = (xo1 >> 3) & swizzle_bit; + + uint32_t x, yo; + + src += (ptrdiff_t)y0 * src_pitch; + + if (y0 != y1) { + for (yo = y0 * column_width; yo < y1 * column_width; yo += column_width) { + uint32_t xo = xo1; + uint32_t swizzle = swizzle1; + + mem_copy(dst + ((xo0 + yo) ^ swizzle0), src + x0, x1 - x0); + + /* Step by spans/columns. As it happens, the swizzle bit flips + * at each step so we don't need to calculate it explicitly. + */ + for (x = x1; x < x2; x += ytile_span) { + mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + x, ytile_span); + xo += bytes_per_column; + swizzle ^= swizzle_bit; + } + + mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + x2, x3 - x2); + + src += src_pitch; + } + } + + for (yo = y1 * column_width; yo < y2 * column_width; yo += 4 * column_width) { + uint32_t xo = xo1; + uint32_t swizzle = swizzle1; + + if (x0 != x1) { + mem_copy(dst + ((xo0 + yo + 0 * column_width) ^ swizzle0), src + x0 + 0 * src_pitch, x1 - x0); + mem_copy(dst + ((xo0 + yo + 1 * column_width) ^ swizzle0), src + x0 + 1 * src_pitch, x1 - x0); + mem_copy(dst + ((xo0 + yo + 2 * column_width) ^ swizzle0), src + x0 + 2 * src_pitch, x1 - x0); + mem_copy(dst + ((xo0 + yo + 3 * column_width) ^ swizzle0), src + x0 + 3 * src_pitch, x1 - x0); + } + + /* Step by spans/columns. As it happens, the swizzle bit flips + * at each step so we don't need to calculate it explicitly. 
+ */ + for (x = x1; x < x2; x += ytile_span) { + mem_copy_align16(dst + ((xo + yo + 0 * column_width) ^ swizzle), src + x + 0 * src_pitch, ytile_span); + mem_copy_align16(dst + ((xo + yo + 1 * column_width) ^ swizzle), src + x + 1 * src_pitch, ytile_span); + mem_copy_align16(dst + ((xo + yo + 2 * column_width) ^ swizzle), src + x + 2 * src_pitch, ytile_span); + mem_copy_align16(dst + ((xo + yo + 3 * column_width) ^ swizzle), src + x + 3 * src_pitch, ytile_span); + xo += bytes_per_column; + swizzle ^= swizzle_bit; + } + + if (x2 != x3) { + mem_copy_align16(dst + ((xo + yo + 0 * column_width) ^ swizzle), src + x2 + 0 * src_pitch, x3 - x2); + mem_copy_align16(dst + ((xo + yo + 1 * column_width) ^ swizzle), src + x2 + 1 * src_pitch, x3 - x2); + mem_copy_align16(dst + ((xo + yo + 2 * column_width) ^ swizzle), src + x2 + 2 * src_pitch, x3 - x2); + mem_copy_align16(dst + ((xo + yo + 3 * column_width) ^ swizzle), src + x2 + 3 * src_pitch, x3 - x2); + } + + src += 4 * src_pitch; + } + + if (y2 != y3) { + for (yo = y2 * column_width; yo < y3 * column_width; yo += column_width) { + uint32_t xo = xo1; + uint32_t swizzle = swizzle1; + + mem_copy(dst + ((xo0 + yo) ^ swizzle0), src + x0, x1 - x0); + + /* Step by spans/columns. As it happens, the swizzle bit flips + * at each step so we don't need to calculate it explicitly. + */ + for (x = x1; x < x2; x += ytile_span) { + mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + x, ytile_span); + xo += bytes_per_column; + swizzle ^= swizzle_bit; + } + + mem_copy_align16(dst + ((xo + yo) ^ swizzle), src + x2, x3 - x2); + + src += src_pitch; + } + } +} + +/** + * Copy texture data from X tile layout to linear. + * + * \copydoc tile_copy_fn + */ +static inline void +xtiled_to_linear(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, + uint32_t y0, uint32_t y1, + char *dst, const char *src, + int32_t dst_pitch, + uint32_t swizzle_bit, + isl_mem_copy_fn mem_copy, + isl_mem_copy_fn mem_copy_align16) +{ + /* The copy destination offset for each range copied is the sum of + * an X offset 'x0' or 'xo' and a Y offset 'yo.' + */ + uint32_t xo, yo; + + dst += (ptrdiff_t)y0 * dst_pitch; + + for (yo = y0 * xtile_width; yo < y1 * xtile_width; yo += xtile_width) { + /* Bits 9 and 10 of the copy destination offset control swizzling. + * Only 'yo' contributes to those bits in the total offset, + * so calculate 'swizzle' just once per row. + * Move bits 9 and 10 three and four places respectively down + * to bit 6 and xor them. + */ + uint32_t swizzle = ((yo >> 3) ^ (yo >> 4)) & swizzle_bit; + + mem_copy(dst + x0, src + ((x0 + yo) ^ swizzle), x1 - x0); + + for (xo = x1; xo < x2; xo += xtile_span) { + mem_copy_align16(dst + xo, src + ((xo + yo) ^ swizzle), xtile_span); + } + + mem_copy_align16(dst + x2, src + ((xo + yo) ^ swizzle), x3 - x2); + + dst += dst_pitch; + } +} + + /** + * Copy texture data from Y tile layout to linear. + * + * \copydoc tile_copy_fn + */ +static inline void +ytiled_to_linear(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, + uint32_t y0, uint32_t y3, + char *dst, const char *src, + int32_t dst_pitch, + uint32_t swizzle_bit, + isl_mem_copy_fn mem_copy, + isl_mem_copy_fn mem_copy_align16) +{ + /* Y tiles consist of columns that are 'ytile_span' wide (and the same height + * as the tile). 
Thus the destination offset for (x,y) is the sum of: + * (x % column_width) // position within column + * (x / column_width) * bytes_per_column // column number * bytes per column + * y * column_width + * + * The copy destination offset for each range copied is the sum of + * an X offset 'xo0' or 'xo' and a Y offset 'yo.' + */ + const uint32_t column_width = ytile_span; + const uint32_t bytes_per_column = column_width * ytile_height; + + uint32_t y1 = MIN2(y3, ALIGN_UP(y0, 4)); + uint32_t y2 = MAX2(y1, ALIGN_DOWN(y3, 4)); + + uint32_t xo0 = (x0 % ytile_span) + (x0 / ytile_span) * bytes_per_column; + uint32_t xo1 = (x1 % ytile_span) + (x1 / ytile_span) * bytes_per_column; + + /* Bit 9 of the destination offset control swizzling. + * Only the X offset contributes to bit 9 of the total offset, + * so swizzle can be calculated in advance for these X positions. + * Move bit 9 three places down to bit 6. + */ + uint32_t swizzle0 = (xo0 >> 3) & swizzle_bit; + uint32_t swizzle1 = (xo1 >> 3) & swizzle_bit; + + uint32_t x, yo; + + dst += (ptrdiff_t)y0 * dst_pitch; + + if (y0 != y1) { + for (yo = y0 * column_width; yo < y1 * column_width; yo += column_width) { + uint32_t xo = xo1; + uint32_t swizzle = swizzle1; + + mem_copy(dst + x0, src + ((xo0 + yo) ^ swizzle0), x1 - x0); + + /* Step by spans/columns. As it happens, the swizzle bit flips + * at each step so we don't need to calculate it explicitly. + */ + for (x = x1; x < x2; x += ytile_span) { + mem_copy_align16(dst + x, src + ((xo + yo) ^ swizzle), ytile_span); + xo += bytes_per_column; + swizzle ^= swizzle_bit; + } + + mem_copy_align16(dst + x2, src + ((xo + yo) ^ swizzle), x3 - x2); + + dst += dst_pitch; + } + } + + for (yo = y1 * column_width; yo < y2 * column_width; yo += 4 * column_width) { + uint32_t xo = xo1; + uint32_t swizzle = swizzle1; + + if (x0 != x1) { + mem_copy(dst + x0 + 0 * dst_pitch, src + ((xo0 + yo + 0 * column_width) ^ swizzle0), x1 - x0); + mem_copy(dst + x0 + 1 * dst_pitch, src + ((xo0 + yo + 1 * column_width) ^ swizzle0), x1 - x0); + mem_copy(dst + x0 + 2 * dst_pitch, src + ((xo0 + yo + 2 * column_width) ^ swizzle0), x1 - x0); + mem_copy(dst + x0 + 3 * dst_pitch, src + ((xo0 + yo + 3 * column_width) ^ swizzle0), x1 - x0); + } + + /* Step by spans/columns. As it happens, the swizzle bit flips + * at each step so we don't need to calculate it explicitly. 
+ */ + for (x = x1; x < x2; x += ytile_span) { + mem_copy_align16(dst + x + 0 * dst_pitch, src + ((xo + yo + 0 * column_width) ^ swizzle), ytile_span); + mem_copy_align16(dst + x + 1 * dst_pitch, src + ((xo + yo + 1 * column_width) ^ swizzle), ytile_span); + mem_copy_align16(dst + x + 2 * dst_pitch, src + ((xo + yo + 2 * column_width) ^ swizzle), ytile_span); + mem_copy_align16(dst + x + 3 * dst_pitch, src + ((xo + yo + 3 * column_width) ^ swizzle), ytile_span); + xo += bytes_per_column; + swizzle ^= swizzle_bit; + } + + if (x2 != x3) { + mem_copy_align16(dst + x2 + 0 * dst_pitch, src + ((xo + yo + 0 * column_width) ^ swizzle), x3 - x2); + mem_copy_align16(dst + x2 + 1 * dst_pitch, src + ((xo + yo + 1 * column_width) ^ swizzle), x3 - x2); + mem_copy_align16(dst + x2 + 2 * dst_pitch, src + ((xo + yo + 2 * column_width) ^ swizzle), x3 - x2); + mem_copy_align16(dst + x2 + 3 * dst_pitch, src + ((xo + yo + 3 * column_width) ^ swizzle), x3 - x2); + } + + dst += 4 * dst_pitch; + } + + if (y2 != y3) { + for (yo = y2 * column_width; yo < y3 * column_width; yo += column_width) { + uint32_t xo = xo1; + uint32_t swizzle = swizzle1; + + mem_copy(dst + x0, src + ((xo0 + yo) ^ swizzle0), x1 - x0); + + /* Step by spans/columns. As it happens, the swizzle bit flips + * at each step so we don't need to calculate it explicitly. + */ + for (x = x1; x < x2; x += ytile_span) { + mem_copy_align16(dst + x, src + ((xo + yo) ^ swizzle), ytile_span); + xo += bytes_per_column; + swizzle ^= swizzle_bit; + } + + mem_copy_align16(dst + x2, src + ((xo + yo) ^ swizzle), x3 - x2); + + dst += dst_pitch; + } + } +} + +#if defined(INLINE_SSE41) +static ALWAYS_INLINE void * +_memcpy_streaming_load(void *dest, const void *src, size_t count) +{ + if (count == 16) { + __m128i val = _mm_stream_load_si128((__m128i *)src); + _mm_storeu_si128((__m128i *)dest, val); + return dest; + } else if (count == 64) { + __m128i val0 = _mm_stream_load_si128(((__m128i *)src) + 0); + __m128i val1 = _mm_stream_load_si128(((__m128i *)src) + 1); + __m128i val2 = _mm_stream_load_si128(((__m128i *)src) + 2); + __m128i val3 = _mm_stream_load_si128(((__m128i *)src) + 3); + _mm_storeu_si128(((__m128i *)dest) + 0, val0); + _mm_storeu_si128(((__m128i *)dest) + 1, val1); + _mm_storeu_si128(((__m128i *)dest) + 2, val2); + _mm_storeu_si128(((__m128i *)dest) + 3, val3); + return dest; + } else { + assert(count < 64); /* and (count < 16) for ytiled */ + return memcpy(dest, src, count); + } +} +#endif + +static isl_mem_copy_fn +choose_copy_function(isl_memcpy_type copy_type) +{ + switch(copy_type) { + case ISL_MEMCPY: + return memcpy; + case ISL_MEMCPY_BGRA8: + return rgba8_copy; + case ISL_MEMCPY_STREAMING_LOAD: +#if defined(INLINE_SSE41) + return _memcpy_streaming_load; +#else + unreachable("ISL_MEMCOPY_STREAMING_LOAD requires sse4.1"); +#endif + case ISL_MEMCPY_INVALID: + unreachable("invalid copy_type"); + } + unreachable("unhandled copy_type"); + return NULL; +} + +/** + * Copy texture data from linear to X tile layout, faster. + * + * Same as \ref linear_to_xtiled but faster, because it passes constant + * parameters for common cases, allowing the compiler to inline code + * optimized for those cases. 
+ * + * \copydoc tile_copy_fn + */ +static FLATTEN void +linear_to_xtiled_faster(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, + uint32_t y0, uint32_t y1, + char *dst, const char *src, + int32_t src_pitch, + uint32_t swizzle_bit, + isl_memcpy_type copy_type) +{ + isl_mem_copy_fn mem_copy = choose_copy_function(copy_type); + + if (x0 == 0 && x3 == xtile_width && y0 == 0 && y1 == xtile_height) { + if (mem_copy == memcpy) + return linear_to_xtiled(0, 0, xtile_width, xtile_width, 0, xtile_height, + dst, src, src_pitch, swizzle_bit, memcpy, memcpy); + else if (mem_copy == rgba8_copy) + return linear_to_xtiled(0, 0, xtile_width, xtile_width, 0, xtile_height, + dst, src, src_pitch, swizzle_bit, + rgba8_copy, rgba8_copy_aligned_dst); + else + unreachable("not reached"); + } else { + if (mem_copy == memcpy) + return linear_to_xtiled(x0, x1, x2, x3, y0, y1, + dst, src, src_pitch, swizzle_bit, + memcpy, memcpy); + else if (mem_copy == rgba8_copy) + return linear_to_xtiled(x0, x1, x2, x3, y0, y1, + dst, src, src_pitch, swizzle_bit, + rgba8_copy, rgba8_copy_aligned_dst); + else + unreachable("not reached"); + } + linear_to_xtiled(x0, x1, x2, x3, y0, y1, + dst, src, src_pitch, swizzle_bit, mem_copy, mem_copy); +} + +/** + * Copy texture data from linear to Y tile layout, faster. + * + * Same as \ref linear_to_ytiled but faster, because it passes constant + * parameters for common cases, allowing the compiler to inline code + * optimized for those cases. + * + * \copydoc tile_copy_fn + */ +static FLATTEN void +linear_to_ytiled_faster(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, + uint32_t y0, uint32_t y1, + char *dst, const char *src, + int32_t src_pitch, + uint32_t swizzle_bit, + isl_memcpy_type copy_type) +{ + isl_mem_copy_fn mem_copy = choose_copy_function(copy_type); + + if (x0 == 0 && x3 == ytile_width && y0 == 0 && y1 == ytile_height) { + if (mem_copy == memcpy) + return linear_to_ytiled(0, 0, ytile_width, ytile_width, 0, ytile_height, + dst, src, src_pitch, swizzle_bit, memcpy, memcpy); + else if (mem_copy == rgba8_copy) + return linear_to_ytiled(0, 0, ytile_width, ytile_width, 0, ytile_height, + dst, src, src_pitch, swizzle_bit, + rgba8_copy, rgba8_copy_aligned_dst); + else + unreachable("not reached"); + } else { + if (mem_copy == memcpy) + return linear_to_ytiled(x0, x1, x2, x3, y0, y1, + dst, src, src_pitch, swizzle_bit, memcpy, memcpy); + else if (mem_copy == rgba8_copy) + return linear_to_ytiled(x0, x1, x2, x3, y0, y1, + dst, src, src_pitch, swizzle_bit, + rgba8_copy, rgba8_copy_aligned_dst); + else + unreachable("not reached"); + } + linear_to_ytiled(x0, x1, x2, x3, y0, y1, + dst, src, src_pitch, swizzle_bit, mem_copy, mem_copy); +} + +/** + * Copy texture data from X tile layout to linear, faster. + * + * Same as \ref xtile_to_linear but faster, because it passes constant + * parameters for common cases, allowing the compiler to inline code + * optimized for those cases. 
+ * + * \copydoc tile_copy_fn + */ +static FLATTEN void +xtiled_to_linear_faster(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, + uint32_t y0, uint32_t y1, + char *dst, const char *src, + int32_t dst_pitch, + uint32_t swizzle_bit, + isl_memcpy_type copy_type) +{ + isl_mem_copy_fn mem_copy = choose_copy_function(copy_type); + + if (x0 == 0 && x3 == xtile_width && y0 == 0 && y1 == xtile_height) { + if (mem_copy == memcpy) + return xtiled_to_linear(0, 0, xtile_width, xtile_width, 0, xtile_height, + dst, src, dst_pitch, swizzle_bit, memcpy, memcpy); + else if (mem_copy == rgba8_copy) + return xtiled_to_linear(0, 0, xtile_width, xtile_width, 0, xtile_height, + dst, src, dst_pitch, swizzle_bit, + rgba8_copy, rgba8_copy_aligned_src); +#if defined(INLINE_SSE41) + else if (mem_copy == _memcpy_streaming_load) + return xtiled_to_linear(0, 0, xtile_width, xtile_width, 0, xtile_height, + dst, src, dst_pitch, swizzle_bit, + memcpy, _memcpy_streaming_load); +#endif + else + unreachable("not reached"); + } else { + if (mem_copy == memcpy) + return xtiled_to_linear(x0, x1, x2, x3, y0, y1, + dst, src, dst_pitch, swizzle_bit, memcpy, memcpy); + else if (mem_copy == rgba8_copy) + return xtiled_to_linear(x0, x1, x2, x3, y0, y1, + dst, src, dst_pitch, swizzle_bit, + rgba8_copy, rgba8_copy_aligned_src); +#if defined(INLINE_SSE41) + else if (mem_copy == _memcpy_streaming_load) + return xtiled_to_linear(x0, x1, x2, x3, y0, y1, + dst, src, dst_pitch, swizzle_bit, + memcpy, _memcpy_streaming_load); +#endif + else + unreachable("not reached"); + } + xtiled_to_linear(x0, x1, x2, x3, y0, y1, + dst, src, dst_pitch, swizzle_bit, mem_copy, mem_copy); +} + +/** + * Copy texture data from Y tile layout to linear, faster. + * + * Same as \ref ytile_to_linear but faster, because it passes constant + * parameters for common cases, allowing the compiler to inline code + * optimized for those cases. 
+ * + * \copydoc tile_copy_fn + */ +static FLATTEN void +ytiled_to_linear_faster(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, + uint32_t y0, uint32_t y1, + char *dst, const char *src, + int32_t dst_pitch, + uint32_t swizzle_bit, + isl_memcpy_type copy_type) +{ + isl_mem_copy_fn mem_copy = choose_copy_function(copy_type); + + if (x0 == 0 && x3 == ytile_width && y0 == 0 && y1 == ytile_height) { + if (mem_copy == memcpy) + return ytiled_to_linear(0, 0, ytile_width, ytile_width, 0, ytile_height, + dst, src, dst_pitch, swizzle_bit, memcpy, memcpy); + else if (mem_copy == rgba8_copy) + return ytiled_to_linear(0, 0, ytile_width, ytile_width, 0, ytile_height, + dst, src, dst_pitch, swizzle_bit, + rgba8_copy, rgba8_copy_aligned_src); +#if defined(INLINE_SSE41) + else if (copy_type == ISL_MEMCPY_STREAMING_LOAD) + return ytiled_to_linear(0, 0, ytile_width, ytile_width, 0, ytile_height, + dst, src, dst_pitch, swizzle_bit, + memcpy, _memcpy_streaming_load); +#endif + else + unreachable("not reached"); + } else { + if (mem_copy == memcpy) + return ytiled_to_linear(x0, x1, x2, x3, y0, y1, + dst, src, dst_pitch, swizzle_bit, memcpy, memcpy); + else if (mem_copy == rgba8_copy) + return ytiled_to_linear(x0, x1, x2, x3, y0, y1, + dst, src, dst_pitch, swizzle_bit, + rgba8_copy, rgba8_copy_aligned_src); +#if defined(INLINE_SSE41) + else if (copy_type == ISL_MEMCPY_STREAMING_LOAD) + return ytiled_to_linear(x0, x1, x2, x3, y0, y1, + dst, src, dst_pitch, swizzle_bit, + memcpy, _memcpy_streaming_load); +#endif + else + unreachable("not reached"); + } + ytiled_to_linear(x0, x1, x2, x3, y0, y1, + dst, src, dst_pitch, swizzle_bit, mem_copy, mem_copy); +} + +/** + * Copy from linear to tiled texture. + * + * Divide the region given by X range [xt1, xt2) and Y range [yt1, yt2) into + * pieces that do not cross tile boundaries and copy each piece with a tile + * copy function (\ref tile_copy_fn). + * The X range is in bytes, i.e. pixels * bytes-per-pixel. + * The Y range is in pixels (i.e. unitless). + * 'dst' is the address of (0, 0) in the destination tiled texture. + * 'src' is the address of (xt1, yt1) in the source linear texture. + */ +static void +intel_linear_to_tiled(uint32_t xt1, uint32_t xt2, + uint32_t yt1, uint32_t yt2, + char *dst, const char *src, + uint32_t dst_pitch, int32_t src_pitch, + bool has_swizzling, + enum isl_tiling tiling, + isl_memcpy_type copy_type) +{ + tile_copy_fn tile_copy; + uint32_t xt0, xt3; + uint32_t yt0, yt3; + uint32_t xt, yt; + uint32_t tw, th, span; + uint32_t swizzle_bit = has_swizzling ? 1<<6 : 0; + + if (tiling == ISL_TILING_X) { + tw = xtile_width; + th = xtile_height; + span = xtile_span; + tile_copy = linear_to_xtiled_faster; + } else if (tiling == ISL_TILING_Y0) { + tw = ytile_width; + th = ytile_height; + span = ytile_span; + tile_copy = linear_to_ytiled_faster; + } else { + unreachable("unsupported tiling"); + } + + /* Round out to tile boundaries. */ + xt0 = ALIGN_DOWN(xt1, tw); + xt3 = ALIGN_UP (xt2, tw); + yt0 = ALIGN_DOWN(yt1, th); + yt3 = ALIGN_UP (yt2, th); + + /* Loop over all tiles to which we have something to copy. + * 'xt' and 'yt' are the origin of the destination tile, whether copying + * copying a full or partial tile. + * tile_copy() copies one tile or partial tile. + * Looping x inside y is the faster memory access pattern. + */ + for (yt = yt0; yt < yt3; yt += th) { + for (xt = xt0; xt < xt3; xt += tw) { + /* The area to update is [x0,x3) x [y0,y1). + * May not want the whole tile, hence the min and max. 
+ */ + uint32_t x0 = MAX2(xt1, xt); + uint32_t y0 = MAX2(yt1, yt); + uint32_t x3 = MIN2(xt2, xt + tw); + uint32_t y1 = MIN2(yt2, yt + th); + + /* [x0,x3) is split into [x0,x1), [x1,x2), [x2,x3) such that + * the middle interval is the longest span-aligned part. + * The sub-ranges could be empty. + */ + uint32_t x1, x2; + x1 = ALIGN_UP(x0, span); + if (x1 > x3) + x1 = x2 = x3; + else + x2 = ALIGN_DOWN(x3, span); + + assert(x0 <= x1 && x1 <= x2 && x2 <= x3); + assert(x1 - x0 < span && x3 - x2 < span); + assert(x3 - x0 <= tw); + assert((x2 - x1) % span == 0); + + /* Translate by (xt,yt) for single-tile copier. */ + tile_copy(x0-xt, x1-xt, x2-xt, x3-xt, + y0-yt, y1-yt, + dst + (ptrdiff_t)xt * th + (ptrdiff_t)yt * dst_pitch, + src + (ptrdiff_t)xt - xt1 + ((ptrdiff_t)yt - yt1) * src_pitch, + src_pitch, + swizzle_bit, + copy_type); + } + } +} + +/** + * Copy from tiled to linear texture. + * + * Divide the region given by X range [xt1, xt2) and Y range [yt1, yt2) into + * pieces that do not cross tile boundaries and copy each piece with a tile + * copy function (\ref tile_copy_fn). + * The X range is in bytes, i.e. pixels * bytes-per-pixel. + * The Y range is in pixels (i.e. unitless). + * 'dst' is the address of (xt1, yt1) in the destination linear texture. + * 'src' is the address of (0, 0) in the source tiled texture. + */ +static void +intel_tiled_to_linear(uint32_t xt1, uint32_t xt2, + uint32_t yt1, uint32_t yt2, + char *dst, const char *src, + int32_t dst_pitch, uint32_t src_pitch, + bool has_swizzling, + enum isl_tiling tiling, + isl_memcpy_type copy_type) +{ + tile_copy_fn tile_copy; + uint32_t xt0, xt3; + uint32_t yt0, yt3; + uint32_t xt, yt; + uint32_t tw, th, span; + uint32_t swizzle_bit = has_swizzling ? 1<<6 : 0; + + if (tiling == ISL_TILING_X) { + tw = xtile_width; + th = xtile_height; + span = xtile_span; + tile_copy = xtiled_to_linear_faster; + } else if (tiling == ISL_TILING_Y0) { + tw = ytile_width; + th = ytile_height; + span = ytile_span; + tile_copy = ytiled_to_linear_faster; + } else { + unreachable("unsupported tiling"); + } + +#if defined(INLINE_SSE41) + if (copy_type == ISL_MEMCPY_STREAMING_LOAD) { + /* The hidden cacheline sized register used by movntdqa can apparently + * give you stale data, so do an mfence to invalidate it. + */ + _mm_mfence(); + } +#endif + + /* Round out to tile boundaries. */ + xt0 = ALIGN_DOWN(xt1, tw); + xt3 = ALIGN_UP (xt2, tw); + yt0 = ALIGN_DOWN(yt1, th); + yt3 = ALIGN_UP (yt2, th); + + /* Loop over all tiles to which we have something to copy. + * 'xt' and 'yt' are the origin of the destination tile, whether copying + * copying a full or partial tile. + * tile_copy() copies one tile or partial tile. + * Looping x inside y is the faster memory access pattern. + */ + for (yt = yt0; yt < yt3; yt += th) { + for (xt = xt0; xt < xt3; xt += tw) { + /* The area to update is [x0,x3) x [y0,y1). + * May not want the whole tile, hence the min and max. + */ + uint32_t x0 = MAX2(xt1, xt); + uint32_t y0 = MAX2(yt1, yt); + uint32_t x3 = MIN2(xt2, xt + tw); + uint32_t y1 = MIN2(yt2, yt + th); + + /* [x0,x3) is split into [x0,x1), [x1,x2), [x2,x3) such that + * the middle interval is the longest span-aligned part. + * The sub-ranges could be empty. + */ + uint32_t x1, x2; + x1 = ALIGN_UP(x0, span); + if (x1 > x3) + x1 = x2 = x3; + else + x2 = ALIGN_DOWN(x3, span); + + assert(x0 <= x1 && x1 <= x2 && x2 <= x3); + assert(x1 - x0 < span && x3 - x2 < span); + assert(x3 - x0 <= tw); + assert((x2 - x1) % span == 0); + + /* Translate by (xt,yt) for single-tile copier. 
*/ + tile_copy(x0-xt, x1-xt, x2-xt, x3-xt, + y0-yt, y1-yt, + dst + (ptrdiff_t)xt - xt1 + ((ptrdiff_t)yt - yt1) * dst_pitch, + src + (ptrdiff_t)xt * th + (ptrdiff_t)yt * src_pitch, + dst_pitch, + swizzle_bit, + copy_type); + } + } +} diff --git a/src/intel/isl/isl_tiled_memcpy_normal.c b/src/intel/isl/isl_tiled_memcpy_normal.c new file mode 100644 index 00000000000..d55b93d04a0 --- /dev/null +++ b/src/intel/isl/isl_tiled_memcpy_normal.c @@ -0,0 +1,59 @@ +/* + * Mesa 3-D graphics library + * + * Copyright 2012 Intel Corporation + * Copyright 2013 Google + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Chad Versace <[email protected]> + * Frank Henigman <[email protected]> + */ + + +#include "isl_tiled_memcpy.c" + +void +_isl_memcpy_linear_to_tiled(uint32_t xt1, uint32_t xt2, + uint32_t yt1, uint32_t yt2, + char *dst, const char *src, + uint32_t dst_pitch, int32_t src_pitch, + bool has_swizzling, + enum isl_tiling tiling, + isl_memcpy_type copy_type) +{ + intel_linear_to_tiled(xt1, xt2, yt1, yt2, dst, src, dst_pitch, src_pitch, + has_swizzling, tiling, copy_type); +} + +void +_isl_memcpy_tiled_to_linear(uint32_t xt1, uint32_t xt2, + uint32_t yt1, uint32_t yt2, + char *dst, const char *src, + int32_t dst_pitch, uint32_t src_pitch, + bool has_swizzling, + enum isl_tiling tiling, + isl_memcpy_type copy_type) +{ + intel_tiled_to_linear(xt1, xt2, yt1, yt2, dst, src, dst_pitch, src_pitch, + has_swizzling, tiling, copy_type); +} diff --git a/src/intel/isl/isl_tiled_memcpy_sse41.c b/src/intel/isl/isl_tiled_memcpy_sse41.c new file mode 100644 index 00000000000..684a8a8dfa6 --- /dev/null +++ b/src/intel/isl/isl_tiled_memcpy_sse41.c @@ -0,0 +1,60 @@ +/* + * Mesa 3-D graphics library + * + * Copyright 2012 Intel Corporation + * Copyright 2013 Google + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Chad Versace <[email protected]> + * Frank Henigman <[email protected]> + */ + +#define INLINE_SSE41 + +#include "isl_tiled_memcpy.c" + +void +_isl_memcpy_linear_to_tiled_sse41(uint32_t xt1, uint32_t xt2, + uint32_t yt1, uint32_t yt2, + char *dst, const char *src, + uint32_t dst_pitch, int32_t src_pitch, + bool has_swizzling, + enum isl_tiling tiling, + isl_memcpy_type copy_type) +{ + intel_linear_to_tiled(xt1, xt2, yt1, yt2, dst, src, dst_pitch, src_pitch, + has_swizzling, tiling, copy_type); +} + +void +_isl_memcpy_tiled_to_linear_sse41(uint32_t xt1, uint32_t xt2, + uint32_t yt1, uint32_t yt2, + char *dst, const char *src, + int32_t dst_pitch, uint32_t src_pitch, + bool has_swizzling, + enum isl_tiling tiling, + isl_memcpy_type copy_type) +{ + intel_tiled_to_linear(xt1, xt2, yt1, yt2, dst, src, dst_pitch, src_pitch, + has_swizzling, tiling, copy_type); +} diff --git a/src/intel/isl/meson.build b/src/intel/isl/meson.build index a6374f6c4f3..79eb6686059 100644 --- a/src/intel/isl/meson.build +++ b/src/intel/isl/meson.build @@ -69,6 +69,39 @@ isl_format_layout_c = custom_target( command : [prog_python, '@INPUT0@', '--csv', '@INPUT1@', '--out', '@OUTPUT@'], ) +files_isl_tiled_memcpy = files( + 'isl_tiled_memcpy_normal.c' +) + +files_isl_tiled_memcpy_sse41 = files( + 'isl_tiled_memcpy_sse41.c', +) + +isl_tiled_memcpy = static_library( + 'isl_tiled_memcpy', + [files_isl_tiled_memcpy], + include_directories : [ + inc_common, inc_intel, inc_drm_uapi, + ], + c_args : [c_vis_args, no_override_init_args, '-msse2'], + extra_files : ['isl_tiled_memcpy.c'] +) + +if with_sse41 + isl_tiled_memcpy_sse41 = static_library( + 'isl_tiled_memcpy_sse41', + [files_isl_tiled_memcpy_sse41], + include_directories : [ + inc_common, inc_intel, inc_drm_uapi, + ], + link_args : ['-Wl,--exclude-libs=ALL'], + c_args : [c_vis_args, no_override_init_args, '-msse2', sse41_args], + extra_files : ['isl_tiled_memcpy.c'] + ) +else + isl_tiled_memcpy_sse41 = [] +endif + libisl_files = files( 'isl.c', 'isl.h', @@ -83,7 +116,7 @@ libisl = static_library( 'isl', [libisl_files, isl_format_layout_c, genX_bits_h], include_directories : [inc_common, inc_intel, inc_drm_uapi], - link_with : isl_gen_libs, + link_with : [isl_gen_libs, isl_tiled_memcpy, isl_tiled_memcpy_sse41], c_args : [c_vis_args, no_override_init_args], ) |
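
The tile walkers in isl_tiled_memcpy.c split each row range [x0,x3) into
[x0,x1), [x1,x2), [x2,x3) so that the middle interval is the longest
span-aligned part. A self-contained sketch of that split and the invariants
the copy loops assert — a hypothetical test harness, not part of the patch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN_DOWN(a, b) ((a) / (b) * (b))
#define ALIGN_UP(a, b)   (((a) + (b) - 1) / (b) * (b))

int main(void)
{
   const uint32_t span = 16;      /* ytile_span, in bytes */
   uint32_t x0 = 5, x3 = 70;      /* arbitrary byte range within one tile */

   uint32_t x1 = ALIGN_UP(x0, span);
   uint32_t x2;
   if (x1 > x3)
      x1 = x2 = x3;               /* range too short: middle is empty */
   else
      x2 = ALIGN_DOWN(x3, span);

   /* Same invariants the tile copy loop asserts. */
   assert(x0 <= x1 && x1 <= x2 && x2 <= x3);
   assert(x1 - x0 < span && x3 - x2 < span);
   assert((x2 - x1) % span == 0);

   printf("[%u,%u) [%u,%u) [%u,%u)\n", x0, x1, x1, x2, x2, x3);
   return 0;
}

With span = 16, x0 = 5, x3 = 70 this prints [5,16) [16,64) [64,70): the
unaligned head and tail go through the general mem_copy while the aligned
middle uses the 16-byte-aligned fast path.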