author    Brian Behlendorf <[email protected]>  2010-08-26 10:58:19 -0700
committer Brian Behlendorf <[email protected]>  2010-08-31 08:38:49 -0700
commit    18a89ba43d3e5e8a31d50838c93ec26d1cb27429
tree      b2961db9af4af3138f29a146a7b14c743b285c6b /module/zfs/lzjb.c
parent    bf701a83c5ec192be6d3afe87ebeee45ce9127f4
Fix stack lzjb
Reduce the kernel stack usage of lzjb_compress() by moving its uint16 array off the stack and onto the heap. I have not measured the exact performance implications, but we absolutely need to keep stack usage to a minimum. If/when this becomes an issue, we can optimize.

Signed-off-by: Brian Behlendorf <[email protected]>
Diffstat (limited to 'module/zfs/lzjb.c')
 module/zfs/lzjb.c | 11 +++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
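For readers unfamiliar with the pattern applied here, the sketch below contrasts the two allocation strategies in isolation. LEMPEL_SIZE is 1024 in lzjb.c, so the old on-stack array consumed 2 KiB of kernel stack; kmem_zalloc()/kmem_free() are the Solaris-style kernel memory interfaces pulled in via sys/zfs_context.h. The function names in this sketch are illustrative only, not part of the commit.

/*
 * Illustrative only -- not the actual lzjb code.  The "before" form
 * places a 2 KiB array on the kernel stack; the "after" form moves it
 * to the heap and must free it on every return path.
 */
#include <sys/zfs_context.h>

#define	LEMPEL_SIZE	1024

static void
hash_table_on_stack(void)
{
	uint16_t lempel[LEMPEL_SIZE] = { 0 };	/* 2 KiB of kernel stack */

	/* ... use lempel ... */
	lempel[0] = 0;
}

static void
hash_table_on_heap(void)
{
	uint16_t *lempel;

	/* KM_SLEEP: the allocation may block, but will not fail */
	lempel = kmem_zalloc(LEMPEL_SIZE * sizeof (uint16_t), KM_SLEEP);

	/* ... use lempel ... */
	lempel[0] = 0;

	kmem_free(lempel, LEMPEL_SIZE * sizeof (uint16_t));
}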
diff --git a/module/zfs/lzjb.c b/module/zfs/lzjb.c
index ad9e423f8..4da30cf17 100644
--- a/module/zfs/lzjb.c
+++ b/module/zfs/lzjb.c
@@ -36,7 +36,7 @@
  * source length if compression would overflow the destination buffer.
  */
 
-#include <sys/types.h>
+#include <sys/zfs_context.h>
 
 #define	MATCH_BITS	6
 #define	MATCH_MIN	3
@@ -54,12 +54,15 @@ lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
 	int copymask = 1 << (NBBY - 1);
 	int mlen, offset, hash;
 	uint16_t *hp;
-	uint16_t lempel[LEMPEL_SIZE] = { 0 };
+	uint16_t *lempel;
 
+	lempel = kmem_zalloc(LEMPEL_SIZE * sizeof (uint16_t), KM_SLEEP);
 	while (src < (uchar_t *)s_start + s_len) {
 		if ((copymask <<= 1) == (1 << NBBY)) {
-			if (dst >= (uchar_t *)d_start + d_len - 1 - 2 * NBBY)
+			if (dst >= (uchar_t *)d_start + d_len - 1 - 2 * NBBY) {
+				kmem_free(lempel, LEMPEL_SIZE*sizeof(uint16_t));
 				return (s_len);
+			}
 			copymask = 1;
 			copymap = dst;
 			*dst++ = 0;
@@ -89,6 +92,8 @@ lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
 		} else {
 			*dst++ = *src++;
 		}
 	}
+
+	kmem_free(lempel, LEMPEL_SIZE * sizeof (uint16_t));
 	return (dst - (uchar_t *)d_start);
 }
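As the header comment above notes, lzjb_compress() returns s_len when the compressed output would overflow the destination buffer, which is exactly the early-return path that now has to free the heap allocation first. A caller-side sketch of how that return convention might be consumed (the wrapper below is a hypothetical example, not ZFS code):

/*
 * Hypothetical caller -- illustrative only.  A return value equal to
 * s_len means the data did not fit in d_len compressed bytes, and the
 * block should be stored uncompressed instead.
 */
static size_t
compress_or_copy(void *src, void *dst, size_t len)
{
	size_t c_len = lzjb_compress(src, dst, len, len, 0);

	if (c_len == len) {
		bcopy(src, dst, len);	/* incompressible: store raw */
		return (len);
	}
	return (c_len);
}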