author     Richard Yao <[email protected]>    2022-09-30 18:30:12 -0400
committer  GitHub <[email protected]>         2022-09-30 15:30:12 -0700
commit     55d7afa4adbb4ca569e9c4477a7d121f4dc0bfbd (patch)
tree       25df8f99b24895d7fbf8aa44790068fa852600f7 /lib/libspl/include
parent     dee8934e8fcd002eb826cf29aa453632d634b826 (diff)
Reduce false positives from Static Analyzers
Both Clang's Static Analyzer and Synopsys' Coverity would ignore assertions. Following Clang's advice, we annotate our assertions:

https://clang-analyzer.llvm.org/annotations.html#custom_assertions

This makes both Clang's Static Analyzer and Coverity properly identify assertions. This change reduced Clang's reported defects from 246 to 180. It also reduced the false positives reported by Coverity by 10, while enabling Coverity to find 9 more defects that previously were false negatives. A couple of examples of this would be CID-1524417 and CID-1524423. After submitting a build to Coverity with the modified assertions, CID-1524417 disappeared, while the report for CID-1524423 no longer claimed that the assertion tripped.

Coincidentally, it turns out that it is possible to annotate our headers more accurately than the Coverity modelling file permits in the case of format strings. Since we can do that, and this patch annotates headers wherever `__coverity_panic__()` would have been used in the model file, we drop all models that use `__coverity_panic__()` from the model file.

Upon seeing the success in eliminating false positives involving assertions, it occurred to me that we could also modify our headers to eliminate Coverity's false positives involving byte swaps. We now have Coverity-specific byteswap macros, which do nothing, to suppress Coverity's false positives when we do byte swaps. This allowed us to also drop the byteswap definitions from the model file.

Lastly, the model file has been updated beyond the deletions mentioned above:

* The definitions of `umem_alloc_aligned()`, `umem_alloc()` and `umem_zalloc()` were originally written in a way intended to inform Coverity that these functions do not return NULL when KM_SLEEP has been passed to them. A small error in how this was done was found, so we correct it.

* Definitions for `umem_cache_alloc()` and `umem_cache_free()` have been added. In practice, no false positives were avoided by making these changes, but in the interest of correctness of future Coverity builds, we make them anyway.

Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: Ryan Moeller <[email protected]>
Signed-off-by: Richard Yao <[email protected]>
Closes #13902
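For reference, the technique from the Clang annotations page linked above works by declaring the assertion-failure handler so that the analyzer treats it as non-returning; every path on which the assertion fired is then pruned. The sketch below is a minimal, self-contained illustration of that pattern, not the libspl code: MY_ASSERT, my_assert_fail and read_first are hypothetical names, and the actual libspl change is in the diff that follows.

#include <stdio.h>
#include <stdlib.h>

/* Fallback so this also compiles with compilers lacking __has_feature. */
#ifndef __has_feature
#define __has_feature(x) 0
#endif

/*
 * Per the Clang annotations page: mark the assertion handler so the static
 * analyzer prunes any path on which it has been called.
 */
#if __has_feature(attribute_analyzer_noreturn)
#define ANALYZER_NORETURN __attribute__((analyzer_noreturn))
#else
#define ANALYZER_NORETURN
#endif

static void
my_assert_fail(const char *expr, const char *file, int line) ANALYZER_NORETURN;

static void
my_assert_fail(const char *expr, const char *file, int line)
{
        (void) fprintf(stderr, "%s:%d: assertion failed: %s\n", file, line, expr);
        abort();
}

#define MY_ASSERT(expr) \
        ((void) ((expr) || (my_assert_fail(#expr, __FILE__, __LINE__), 0)))

static int
read_first(const int *p)
{
        MY_ASSERT(p != NULL);
        /*
         * With the annotation, the analyzer knows the p == NULL path never
         * reaches this dereference, so it stops reporting a false positive.
         */
        return (*p);
}

int
main(void)
{
        int v = 42;
        return (read_first(&v) == 42 ? 0 : 1);
}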
Diffstat (limited to 'lib/libspl/include')
-rw-r--r--   lib/libspl/include/assert.h                     14
-rw-r--r--   lib/libspl/include/os/freebsd/sys/byteorder.h   14
-rw-r--r--   lib/libspl/include/os/linux/sys/byteorder.h     14
-rw-r--r--   lib/libspl/include/umem.h                        3
4 files changed, 44 insertions, 1 deletion
diff --git a/lib/libspl/include/assert.h b/lib/libspl/include/assert.h
index aaaa0af09..c5bf0f0cc 100644
--- a/lib/libspl/include/assert.h
+++ b/lib/libspl/include/assert.h
@@ -34,12 +34,24 @@
#include <stdarg.h>
#include <sys/types.h>
+/* Workaround for non-Clang compilers */
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+
+/* We need to workaround libspl_set_assert_ok() that we have for zdb */
+#if __has_feature(attribute_analyzer_noreturn) || defined(__COVERITY__)
+#define NORETURN __attribute__((__noreturn__))
+#else
+#define NORETURN
+#endif
+
/* Set to non-zero to avoid abort()ing on an assertion failure */
extern void libspl_set_assert_ok(boolean_t val);
/* printf version of libspl_assert */
extern void libspl_assertf(const char *file, const char *func, int line,
- const char *format, ...);
+ const char *format, ...) NORETURN __attribute__((format(printf, 4, 5)));
static inline int
libspl_assert(const char *buf, const char *file, const char *func, int line)
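Beyond marking libspl_assertf as non-returning for the analyzers, the hunk above also adds a printf-style format attribute: argument 4 is the format string and the variadic arguments start at argument 5, so -Wformat can now type-check assertion messages at each call site. A small sketch of the effect, where check_size is a hypothetical caller, not code from the tree:

#include <stddef.h>

/*
 * Declaration mirroring the annotated prototype above; the caller below only
 * illustrates the compile-time check.
 */
extern void libspl_assertf(const char *file, const char *func, int line,
    const char *format, ...) __attribute__((format(printf, 4, 5)));

void
check_size(size_t got, size_t want)
{
        if (got != want) {
                /*
                 * The format attribute lets -Wformat verify these arguments
                 * against the format string; writing %d instead of %zu here
                 * would now be diagnosed at the call site.
                 */
                libspl_assertf(__FILE__, __func__, __LINE__,
                    "size mismatch: got %zu, want %zu", got, want);
        }
}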
diff --git a/lib/libspl/include/os/freebsd/sys/byteorder.h b/lib/libspl/include/os/freebsd/sys/byteorder.h
index 8de1104dc..bd6728820 100644
--- a/lib/libspl/include/os/freebsd/sys/byteorder.h
+++ b/lib/libspl/include/os/freebsd/sys/byteorder.h
@@ -59,6 +59,18 @@ extern "C" {
*/
#if !defined(_XPG4_2) || defined(__EXTENSIONS__)
+#ifdef __COVERITY__
+/*
+ * Coverity's taint warnings from byteswapping are false positives for us.
+ * Suppress them by hiding byteswapping from Coverity.
+ */
+#define BSWAP_8(x) ((x) & 0xff)
+#define BSWAP_16(x) ((x) & 0xffff)
+#define BSWAP_32(x) ((x) & 0xffffffff)
+#define BSWAP_64(x) (x)
+
+#else /* __COVERITY__ */
+
/*
* Macros to reverse byte order
*/
@@ -67,6 +79,8 @@ extern "C" {
#define BSWAP_32(x) ((BSWAP_16(x) << 16) | BSWAP_16((x) >> 16))
#define BSWAP_64(x) ((BSWAP_32(x) << 32) | BSWAP_32((x) >> 32))
+#endif /* __COVERITY__ */
+
#define BMASK_8(x) ((x) & 0xff)
#define BMASK_16(x) ((x) & 0xffff)
#define BMASK_32(x) ((x) & 0xffffffff)
diff --git a/lib/libspl/include/os/linux/sys/byteorder.h b/lib/libspl/include/os/linux/sys/byteorder.h
index c8413ea76..50f0f1e77 100644
--- a/lib/libspl/include/os/linux/sys/byteorder.h
+++ b/lib/libspl/include/os/linux/sys/byteorder.h
@@ -90,6 +90,18 @@ extern in_port_t ntohs(in_port_t);
#if !defined(_XPG4_2) || defined(__EXTENSIONS__)
+#ifdef __COVERITY__
+/*
+ * Coverity's taint warnings from byteswapping are false positives for us.
+ * Suppress them by hiding byteswapping from Coverity.
+ */
+#define BSWAP_8(x) ((x) & 0xff)
+#define BSWAP_16(x) ((x) & 0xffff)
+#define BSWAP_32(x) ((x) & 0xffffffff)
+#define BSWAP_64(x) (x)
+
+#else /* __COVERITY__ */
+
/*
* Macros to reverse byte order
*/
@@ -98,6 +110,8 @@ extern in_port_t ntohs(in_port_t);
#define BSWAP_32(x) ((BSWAP_16(x) << 16) | BSWAP_16((x) >> 16))
#define BSWAP_64(x) ((BSWAP_32(x) << 32) | BSWAP_32((x) >> 32))
+#endif /* __COVERITY__ */
+
#define BMASK_8(x) ((x) & 0xff)
#define BMASK_16(x) ((x) & 0xffff)
#define BMASK_32(x) ((x) & 0xffffffff)
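The Coverity stand-ins in both byteorder headers replace the real swap with a width-preserving mask, so data read from disk no longer looks "transformed" to Coverity's taint tracking, while ordinary builds keep the genuine byte reversal. A minimal sketch of how the two definitions diverge under the same macro name (illustrative values only):

#include <stdio.h>
#include <stdint.h>

#ifdef __COVERITY__
/* Coverity build: a width-preserving no-op, so taint tracking stays quiet. */
#define BSWAP_16(x)     ((x) & 0xffff)
#else
/* Normal build: a genuine byte reversal, as in the headers above. */
#define BSWAP_8(x)      ((x) & 0xff)
#define BSWAP_16(x)     ((BSWAP_8(x) << 8) | BSWAP_8((x) >> 8))
#endif

int
main(void)
{
        uint16_t ondisk = 0x1234;

        /* Prints 0x3412 in a normal build and 0x1234 under __COVERITY__. */
        (void) printf("0x%04x\n", (unsigned int)BSWAP_16(ondisk));
        return (0);
}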
diff --git a/lib/libspl/include/umem.h b/lib/libspl/include/umem.h
index 2a366e294..82976f756 100644
--- a/lib/libspl/include/umem.h
+++ b/lib/libspl/include/umem.h
@@ -83,6 +83,7 @@ const char *_umem_debug_init(void);
const char *_umem_options_init(void);
const char *_umem_logging_init(void);
+__attribute__((alloc_size(1)))
static inline void *
umem_alloc(size_t size, int flags)
{
@@ -95,6 +96,7 @@ umem_alloc(size_t size, int flags)
return (ptr);
}
+__attribute__((alloc_size(1)))
static inline void *
umem_alloc_aligned(size_t size, size_t align, int flags)
{
@@ -116,6 +118,7 @@ umem_alloc_aligned(size_t size, size_t align, int flags)
return (ptr);
}
+__attribute__((alloc_size(1)))
static inline void *
umem_zalloc(size_t size, int flags)
{
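The alloc_size(1) attribute added to the three umem wrappers tells the compiler and analyzers that the first argument gives the size of the object the returned pointer refers to, which feeds __builtin_object_size-based checks. A small sketch of the idea using a hypothetical stand-in allocator, my_alloc, rather than the umem API; whether a given overflow is actually diagnosed depends on compiler version and optimization level:

#include <stdlib.h>
#include <string.h>

/* The attribute says the first argument is the size of the returned object. */
__attribute__((alloc_size(1)))
static inline void *
my_alloc(size_t size)
{
        return (malloc(size));
}

int
main(void)
{
        char *buf = my_alloc(8);

        if (buf == NULL)
                return (1);
        /*
         * Thanks to alloc_size(1), the compiler and static analyzers can see
         * that buf points to 8 bytes; an out-of-bounds memset(buf, 0, 16)
         * here could be flagged (e.g. by -Wstringop-overflow at -O2) instead
         * of passing silently.
         */
        (void) memset(buf, 0, 8);
        free(buf);
        return (0);
}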