-rw-r--r--  FIXME                       |  2
-rw-r--r--  include/sys/kmem.h          | 48
-rw-r--r--  include/sys/time.h          | 43
-rw-r--r--  include/sys/types.h         |  1
-rw-r--r--  modules/spl/spl-kmem.c      |  7
-rw-r--r--  modules/spl/spl-time.c      | 28
-rw-r--r--  modules/splat/splat-kmem.c  | 45
7 files changed, 146 insertions(+), 28 deletions(-)
diff --git a/FIXME b/FIXME
index 0785df5f8..bba628c45 100644
--- a/FIXME
+++ b/FIXME
@@ -10,3 +10,5 @@ sys/acl.h _ All borrowed from libsolcompat
sys/acl_impl.h _ All borrowed from libsolcompat
* Implement solaris style atomic interfaces
+
+* Fully implement vnode support for ZPL layer to integrate with VFS.
diff --git a/include/sys/kmem.h b/include/sys/kmem.h
index c5e559cbd..2560b380a 100644
--- a/include/sys/kmem.h
+++ b/include/sys/kmem.h
@@ -5,11 +5,12 @@
extern "C" {
#endif
-#undef DEBUG_KMEM
+#define DEBUG_KMEM
#undef DEBUG_KMEM_UNIMPLEMENTED
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
/*
@@ -23,12 +24,11 @@ extern "C" {
#define KM_FLAGS __GFP_BITS_MASK
#ifdef DEBUG_KMEM
-/* Shim layer memory accounting */
extern atomic_t kmem_alloc_used;
extern unsigned int kmem_alloc_max;
-#endif
+extern atomic_t vmem_alloc_used;
+extern unsigned int vmem_alloc_max;
-#ifdef DEBUG_KMEM
#define __kmem_alloc(size, flags, allocator) \
({ void *_ptr_; \
\
@@ -58,13 +58,40 @@ extern unsigned int kmem_alloc_max;
#define kmem_free(ptr, size) \
({ \
- BUG_ON(!ptr || size < 0); \
+ BUG_ON(!(ptr) || (size) < 0); \
atomic_sub((size), &kmem_alloc_used); \
memset(ptr, 0x5a, (size)); /* Poison */ \
kfree(ptr); \
- (ptr) = (void *)0xdeadbeef; \
})
+#define __vmem_alloc(size, flags) \
+({ void *_ptr_; \
+ \
+ BUG_ON(flags != KM_SLEEP); \
+ \
+ _ptr_ = (void *)vmalloc((size)); \
+ if (_ptr_ == NULL) { \
+ printk("Warning: vmem_alloc(%d, 0x%x) failed at %s:%d " \
+ "(%d/%d)\n", (int)(size), (int)(flags), \
+ __FILE__, __LINE__, \
+ atomic_read(&vmem_alloc_used), vmem_alloc_max); \
+ } else { \
+ /* Only account for the allocation when it succeeds */ \
+ atomic_add((size), &vmem_alloc_used); \
+ if (unlikely(atomic_read(&vmem_alloc_used) > vmem_alloc_max)) \
+ vmem_alloc_max = atomic_read(&vmem_alloc_used); \
+ } \
+ \
+ _ptr_; \
+})
+
+#define vmem_alloc(size, flags) __vmem_alloc(size, flags)
+
+#define vmem_free(ptr, size) \
+({ \
+ BUG_ON(!(ptr) || (size) < 0); \
+ atomic_sub((size), &vmem_alloc_used); \
+ memset(ptr, 0x5a, (size)); /* Poison */ \
+ vfree(ptr); \
+})
#else
@@ -72,10 +99,17 @@ extern unsigned int kmem_alloc_max;
#define kmem_zalloc(size, flags) kzalloc(size, flags)
#define kmem_free(ptr, size) \
({ \
- BUG_ON(!ptr || size < 0); \
+ BUG_ON(!(ptr) || (size) < 0); \
kfree(ptr); \
})
+#define vmem_alloc(size, flags) vmalloc(size)
+#define vmem_free(ptr, size) \
+({ \
+ BUG_ON(!(ptr) || (size) < 0); \
+ vfree(ptr); \
+})
+
#endif /* DEBUG_KMEM */
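
For reference, a minimal sketch of how a caller would use the new vmem_alloc()/vmem_free() interface; the function name and buffer size below are hypothetical, not part of this patch. Note that only KM_SLEEP is accepted, and under DEBUG_KMEM the size passed to vmem_free() must match the allocation since it is subtracted from the accounting counters:

	/* Hypothetical caller -- assumes #include <sys/kmem.h> from this shim */
	static int
	example_vmem_user(void)
	{
		const int size = 64 * 1024;
		void *buf;

		buf = vmem_alloc(size, KM_SLEEP);	/* backed by vmalloc() */
		if (buf == NULL)
			return -ENOMEM;

		/* ... use the virtually contiguous buffer ... */

		vmem_free(buf, size);			/* size must match the alloc */
		return 0;
	}
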
diff --git a/include/sys/time.h b/include/sys/time.h
index 297e6ff74..2f03203e6 100644
--- a/include/sys/time.h
+++ b/include/sys/time.h
@@ -14,11 +14,6 @@ extern "C" {
#include <linux/time.h>
#include <sys/types.h>
-extern unsigned long long monotonic_clock(void);
-extern void __gethrestime(timestruc_t *);
-
-#define gethrestime(ts) __gethrestime(ts)
-
#define TIME32_MAX INT32_MAX
#define TIME32_MIN INT32_MIN
@@ -27,12 +22,37 @@ extern void __gethrestime(timestruc_t *);
#define MICROSEC 1000000
#define NANOSEC 1000000000
+/* Already defined in include/linux/time.h */
+#undef CLOCK_THREAD_CPUTIME_ID
+#undef CLOCK_REALTIME
+#undef CLOCK_MONOTONIC
+#undef CLOCK_PROCESS_CPUTIME_ID
+
+typedef enum clock_type {
+ __CLOCK_REALTIME0 = 0, /* obsolete; same as CLOCK_REALTIME */
+ CLOCK_VIRTUAL = 1, /* thread's user-level CPU clock */
+ CLOCK_THREAD_CPUTIME_ID = 2, /* thread's user+system CPU clock */
+ CLOCK_REALTIME = 3, /* wall clock */
+ CLOCK_MONOTONIC = 4, /* high resolution monotonic clock */
+ CLOCK_PROCESS_CPUTIME_ID = 5, /* process's user+system CPU clock */
+ CLOCK_HIGHRES = CLOCK_MONOTONIC, /* alternate name */
+ CLOCK_PROF = CLOCK_THREAD_CPUTIME_ID,/* alternate name */
+} clock_type_t;
+
#define hz \
({ \
BUG_ON(HZ < 100 || HZ > MICROSEC); \
HZ; \
})
+extern void __gethrestime(timestruc_t *);
+extern int __clock_gettime(clock_type_t, timespec_t *);
+extern hrtime_t __gethrtime(void);
+
+#define gethrestime(ts) __gethrestime(ts)
+#define clock_gettime(fl, tp) __clock_gettime(fl, tp)
+#define gethrtime() __gethrtime()
+
static __inline__ time_t
gethrestime_sec(void)
{
@@ -42,19 +62,6 @@ gethrestime_sec(void)
return now.tv_sec;
}
-static __inline__ hrtime_t
-gethrtime(void) {
- /* BUG_ON(cur_timer == timer_none); */
-
- /* Solaris expects a long long here but monotonic_clock() returns an
- * unsigned long long. Note that monotonic_clock() returns the number
- * of nanoseconds passed since kernel initialization. Even for a signed
- * long long this will not "go negative" for ~292 years.
- */
- return monotonic_clock();
-}
-
-
#ifdef __cplusplus
}
#endif
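
For reference, a minimal sketch of the time interfaces declared above as a caller would see them; the function name is hypothetical and not part of this patch. clock_gettime() currently only accepts CLOCK_REALTIME/__CLOCK_REALTIME0 (see spl-time.c below):

	/* Hypothetical caller -- assumes #include <sys/time.h> from this shim */
	static hrtime_t
	example_time_user(void)
	{
		timestruc_t now;
		timespec_t ts;
		hrtime_t start;

		gethrestime(&now);				/* wall clock, timestruc_t */
		(void)clock_gettime(CLOCK_REALTIME, &ts);	/* wall clock, timespec_t */

		start = gethrtime();				/* monotonic nanoseconds */
		/* ... timed region ... */
		return gethrtime() - start;			/* elapsed nanoseconds */
	}
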
diff --git a/include/sys/types.h b/include/sys/types.h
index 53ba29687..dc660a761 100644
--- a/include/sys/types.h
+++ b/include/sys/types.h
@@ -23,6 +23,7 @@ typedef struct task_struct kthread_t;
typedef struct vmem { } vmem_t;
typedef short pri_t;
typedef struct timespec timestruc_t; /* definition per SVr4 */
+typedef struct timespec timespec_t;
typedef longlong_t hrtime_t;
typedef unsigned short ushort_t;
typedef u_longlong_t len_t;
diff --git a/modules/spl/spl-kmem.c b/modules/spl/spl-kmem.c
index 6442d5824..d3cb2c93a 100644
--- a/modules/spl/spl-kmem.c
+++ b/modules/spl/spl-kmem.c
@@ -7,6 +7,13 @@
/* Shim layer memory accounting */
atomic_t kmem_alloc_used;
unsigned int kmem_alloc_max;
+atomic_t vmem_alloc_used;
+unsigned int vmem_alloc_max;
+
+EXPORT_SYMBOL(kmem_alloc_used);
+EXPORT_SYMBOL(kmem_alloc_max);
+EXPORT_SYMBOL(vmem_alloc_used);
+EXPORT_SYMBOL(vmem_alloc_max);
#endif
/*
diff --git a/modules/spl/spl-time.c b/modules/spl/spl-time.c
index f0ec4c5a0..64b7f9912 100644
--- a/modules/spl/spl-time.c
+++ b/modules/spl/spl-time.c
@@ -7,5 +7,31 @@ __gethrestime(timestruc_t *ts)
{
getnstimeofday((struct timespec *)ts);
}
-
EXPORT_SYMBOL(__gethrestime);
+
+int
+__clock_gettime(clock_type_t type, timespec_t *tp)
+{
+ /* Only CLOCK_REALTIME and __CLOCK_REALTIME0 are supported for now */
+ BUG_ON(!((type == CLOCK_REALTIME) || (type == __CLOCK_REALTIME0)));
+
+ getnstimeofday(tp);
+ return 0;
+}
+EXPORT_SYMBOL(__clock_gettime);
+
+/* This function may not be as fast as using monotonic_clock() but it
+ * should be much more portable; if performance becomes an issue we can
+ * look at using monotonic_clock() for x86_64 and x86 arches.
+ */
+hrtime_t
+__gethrtime(void)
+{
+ timespec_t tv;
+ hrtime_t rc;
+
+ do_posix_clock_monotonic_gettime(&tv);
+ rc = (NSEC_PER_SEC * (hrtime_t)tv.tv_sec) + (hrtime_t)tv.tv_nsec;
+
+ return rc;
+}
+EXPORT_SYMBOL(__gethrtime);
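
As a sanity check on the conversion above: a signed 64-bit hrtime_t holds 2^63 - 1, roughly 9.2 * 10^18 nanoseconds or about 292 years, so folding tv_sec * NSEC_PER_SEC + tv_nsec into a single hrtime_t cannot realistically overflow (the same observation made in the gethrtime() comment removed from sys/time.h above).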
diff --git a/modules/splat/splat-kmem.c b/modules/splat/splat-kmem.c
index 4b798df38..7115ca42c 100644
--- a/modules/splat/splat-kmem.c
+++ b/modules/splat/splat-kmem.c
@@ -20,7 +20,13 @@
#define SPLAT_KMEM_TEST4_NAME "slab_reap"
#define SPLAT_KMEM_TEST4_DESC "Slab reaping test"
+#define SPLAT_KMEM_TEST5_ID 0x0105
+#define SPLAT_KMEM_TEST5_NAME "vmem_alloc"
+#define SPLAT_KMEM_TEST5_DESC "Memory allocation test (vmem_alloc)"
+
#define SPLAT_KMEM_ALLOC_COUNT 10
+#define SPLAT_VMEM_ALLOC_COUNT 10
+
/* XXX - This test may fail under tight memory conditions */
static int
splat_kmem_test1(struct file *file, void *arg)
@@ -29,7 +35,7 @@ splat_kmem_test1(struct file *file, void *arg)
int size = PAGE_SIZE;
int i, count, rc = 0;
- while ((!rc) && (size < (PAGE_SIZE * 16))) {
+ while ((!rc) && (size <= (PAGE_SIZE * 32))) {
count = 0;
for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
@@ -61,7 +67,7 @@ splat_kmem_test2(struct file *file, void *arg)
int size = PAGE_SIZE;
int i, j, count, rc = 0;
- while ((!rc) && (size < (PAGE_SIZE * 16))) {
+ while ((!rc) && (size <= (PAGE_SIZE * 32))) {
count = 0;
for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
@@ -317,6 +323,38 @@ splat_kmem_test4(struct file *file, void *arg)
return rc;
}
+static int
+splat_kmem_test5(struct file *file, void *arg)
+{
+ void *ptr[SPLAT_VMEM_ALLOC_COUNT];
+ int size = PAGE_SIZE;
+ int i, count, rc = 0;
+
+ while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
+ count = 0;
+
+ for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
+ ptr[i] = vmem_alloc(size, KM_SLEEP);
+ if (ptr[i])
+ count++;
+ }
+
+ for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
+ if (ptr[i])
+ vmem_free(ptr[i], size);
+
+ splat_vprint(file, SPLAT_KMEM_TEST5_NAME,
+ "%d byte allocations, %d/%d successful\n",
+ size, count, SPLAT_VMEM_ALLOC_COUNT);
+ if (count != SPLAT_VMEM_ALLOC_COUNT)
+ rc = -ENOMEM;
+
+ size *= 2;
+ }
+
+ return rc;
+}
+
splat_subsystem_t *
splat_kmem_init(void)
{
@@ -342,6 +380,8 @@ splat_kmem_init(void)
SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
+ SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
+ SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
return sub;
}
@@ -350,6 +390,7 @@ void
splat_kmem_fini(splat_subsystem_t *sub)
{
ASSERT(sub);
+ SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
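
A note on the sizing of the new vmem_alloc test: the loop doubles the allocation size from PAGE_SIZE up to PAGE_SIZE * 1024, which on a system with 4 KiB pages is 11 passes from 4 KiB to 4 MiB, and each pass holds SPLAT_VMEM_ALLOC_COUNT = 10 allocations outstanding, so the largest pass briefly consumes about 40 MiB of vmalloc() address space. Like the kmem tests above, it may fail under tight memory conditions.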