author    Richard Yao <[email protected]>    2022-09-30 18:30:12 -0400
committer GitHub <[email protected]>    2022-09-30 15:30:12 -0700
commit    55d7afa4adbb4ca569e9c4477a7d121f4dc0bfbd (patch)
tree      25df8f99b24895d7fbf8aa44790068fa852600f7 /contrib/coverity
parent    dee8934e8fcd002eb826cf29aa453632d634b826 (diff)
Reduce false positives from Static Analyzers
Both Clang's Static Analyzer and Synopsys' Coverity would ignore assertions. Following Clang's advice, we annotate our assertions:

https://clang-analyzer.llvm.org/annotations.html#custom_assertions

This makes both Clang's Static Analyzer and Coverity properly identify assertions. This change reduced Clang's reported defects from 246 to 180. It also reduced the false positives reported by Coverity by 10, while enabling Coverity to find 9 more defects that previously were false negatives.

A couple of examples of this would be CID-1524417 and CID-1524423. After submitting a build to Coverity with the modified assertions, CID-1524417 disappeared, while the report for CID-1524423 no longer claimed that the assertion tripped.

Coincidentally, it turns out that it is possible to annotate our headers more accurately than the Coverity modelling file permits in the case of format strings. Since we can do that, and this patch annotates the headers wherever `__coverity_panic__()` would have been used in the model file, we drop all models that use `__coverity_panic__()` from the model file.

Upon seeing the success in eliminating false positives involving assertions, it occurred to me that we could also modify our headers to eliminate Coverity's false positives involving byte swaps. We now have Coverity-specific byteswap macros, which do nothing, to disable Coverity's false positives when we do byte swaps. This allowed us to also drop the byteswap definitions from the model file.

Lastly, the model file was updated beyond the mentioned deletions:

* The definitions of `umem_alloc_aligned()`, `umem_alloc()` and `umem_zalloc()` were originally implemented in a way that was intended to inform Coverity that when KM_SLEEP has been passed to these functions, they do not return NULL. A small error in how this was done was found, so we correct it.

* Definitions for umem_cache_alloc() and umem_cache_free() have been added.

In practice, no false positives were avoided by making these changes, but in the interest of correctness in future Coverity builds, we make them anyway.

Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: Ryan Moeller <[email protected]>
Signed-off-by: Richard Yao <[email protected]>
Closes #13902
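As a rough illustration of the annotation approach the message describes (the macro and function names below are hypothetical, not the exact OpenZFS macros): under Coverity the assertion-failure path can call `__coverity_panic__()` directly from the header, and under Clang's Static Analyzer the failure handler is marked `analyzer_noreturn` as the linked documentation recommends, so both tools learn that code after a failed assertion is unreachable.

    #include <stdlib.h>

    #if defined(__COVERITY__)
    /* Coverity primitive: tells the analyzer execution stops here. */
    extern void __coverity_panic__(void);
    #define EXAMPLE_PANIC()     __coverity_panic__()
    #elif defined(__clang_analyzer__)
    /* analyzer_noreturn tells Clang's analyzer this handler never returns. */
    static void example_panic(void) __attribute__((analyzer_noreturn));
    static void example_panic(void) { abort(); }
    #define EXAMPLE_PANIC()     example_panic()
    #else
    #define EXAMPLE_PANIC()     abort()
    #endif

    /* Code following a failed EXAMPLE_ASSERT() is treated as unreachable. */
    #define EXAMPLE_ASSERT(expr) ((void)((expr) || (EXAMPLE_PANIC(), 0)))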
Diffstat (limited to 'contrib/coverity')
-rw-r--r--   contrib/coverity/model.c   121
1 file changed, 29 insertions, 92 deletions
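The "small error" in the umem models that the message mentions, and that the hunks below correct, is an operator-precedence bug: in C, == binds more tightly than &, so `UMEM_NOFAIL & kmflags == UMEM_NOFAIL` parses as `UMEM_NOFAIL & (kmflags == UMEM_NOFAIL)` and evaluates to 0 even when the flag is set. A standalone sketch of the difference (the UMEM_NOFAIL value here is illustrative, not taken from umem.h):

    #include <assert.h>

    #define EXAMPLE_UMEM_NOFAIL 0x0100  /* illustrative flag value */

    int
    main(void)
    {
        int kmflags = EXAMPLE_UMEM_NOFAIL;

        /* Old form: parses as 0x0100 & (kmflags == 0x0100), i.e. 0x0100 & 1, which is 0. */
        assert((EXAMPLE_UMEM_NOFAIL & kmflags == EXAMPLE_UMEM_NOFAIL) == 0);

        /* Fixed form: tests the flag bit as intended and is nonzero here. */
        assert(((EXAMPLE_UMEM_NOFAIL & kmflags) == EXAMPLE_UMEM_NOFAIL) != 0);

        return (0);
    }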
diff --git a/contrib/coverity/model.c b/contrib/coverity/model.c
index d27abd038..8baa3a7e2 100644
--- a/contrib/coverity/model.c
+++ b/contrib/coverity/model.c
@@ -31,66 +31,6 @@
int condition0, condition1;
-void
-abort()
-{
- __coverity_panic__();
-}
-
-void
-exit(int status)
-{
- (void) status;
-
- __coverity_panic__();
-}
-
-void
-_exit(int status)
-{
- (void) status;
-
- __coverity_panic__();
-}
-
-void
-zed_log_die(const char *fmt, ...)
-{
- __coverity_format_string_sink__(fmt);
- __coverity_panic__();
-}
-
-void
-panic(const char *fmt, ...)
-{
- __coverity_format_string_sink__(fmt);
- __coverity_panic__();
-}
-
-void
-vpanic(const char *fmt, va_list adx)
-{
- (void) adx;
-
- __coverity_format_string_sink__(fmt);
- __coverity_panic__();
-}
-
-void
-uu_panic(const char *format, ...)
-{
- __coverity_format_string_sink__(format);
- __coverity_panic__();
-}
-
-int
-libspl_assertf(const char *file, const char *func, int line,
- const char *format, ...)
-{
- __coverity_format_string_sink__(format);
- __coverity_panic__();
-}
-
int
ddi_copyin(const void *from, void *to, size_t len, int flags)
{
@@ -125,7 +65,7 @@ umem_alloc_aligned(size_t size, size_t align, int kmflags)
{
(void) align;
- if (UMEM_NOFAIL & kmflags == UMEM_NOFAIL)
+ if ((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL)
return (__coverity_alloc__(size));
else if (condition0)
return (__coverity_alloc__(size));
@@ -136,7 +76,7 @@ umem_alloc_aligned(size_t size, size_t align, int kmflags)
void *
umem_alloc(size_t size, int kmflags)
{
- if (UMEM_NOFAIL & kmflags == UMEM_NOFAIL)
+ if ((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL)
return (__coverity_alloc__(size));
else if (condition0)
return (__coverity_alloc__(size));
@@ -147,7 +87,7 @@ umem_alloc(size_t size, int kmflags)
void *
umem_zalloc(size_t size, int kmflags)
{
- if (UMEM_NOFAIL & kmflags == UMEM_NOFAIL)
+ if ((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL)
return (__coverity_alloc__(size));
else if (condition0)
return (__coverity_alloc__(size));
@@ -163,6 +103,32 @@ umem_free(void *buf, size_t size)
__coverity_free__(buf);
}
+typedef struct {} umem_cache_t;
+
+void *
+umem_cache_alloc(umem_cache_t *skc, int flags)
+{
+ (void) skc;
+
+ if (condition1)
+ __coverity_sleep__();
+
+ if ((UMEM_NOFAIL & flags) == UMEM_NOFAIL)
+ return (__coverity_alloc_nosize__());
+ else if (condition0)
+ return (__coverity_alloc_nosize__());
+ else
+ return (NULL);
+}
+
+void
+umem_cache_free(umem_cache_t *skc, void *obj)
+{
+ (void) skc;
+
+ __coverity_free__(obj);
+}
+
void *
spl_kmem_alloc(size_t sz, int fl, const char *func, int line)
{
@@ -244,13 +210,6 @@ free(void *buf)
}
int
-spl_panic(const char *file, const char *func, int line, const char *fmt, ...)
-{
- __coverity_format_string_sink__(fmt);
- __coverity_panic__();
-}
-
-int
sched_yield(void)
{
__coverity_sleep__();
@@ -398,25 +357,3 @@ __cond_resched(void)
__coverity_sleep__();
}
}
-
-/*
- * An endian-independent filesystem must support doing byte swaps on data. We
- * attempt to suppress taint warnings, which are false positives for us.
- */
-void
-byteswap_uint64_array(void *vbuf, size_t size)
-{
- __coverity_tainted_data_sanitize__(vbuf);
-}
-
-void
-byteswap_uint32_array(void *vbuf, size_t size)
-{
- __coverity_tainted_data_sanitize__(vbuf);
-}
-
-void
-byteswap_uint16_array(void *vbuf, size_t size)
-{
- __coverity_tainted_data_sanitize__(vbuf);
-}
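The byteswap models above could be dropped because, per the commit message, the headers now carry Coverity-specific byteswap macros that do nothing. A minimal sketch of that pattern (macro names are hypothetical, not the ones OpenZFS uses): when __COVERITY__ is defined the swap collapses to the identity, so Coverity no longer treats the swapped bytes as tainted, while real builds keep the normal byte swap.

    #include <stdint.h>

    #if defined(__COVERITY__)
    /* Identity "swaps" so Coverity does not flag swapped data as tainted. */
    #define EXAMPLE_BSWAP_16(x) ((uint16_t)(x))
    #define EXAMPLE_BSWAP_32(x) ((uint32_t)(x))
    #define EXAMPLE_BSWAP_64(x) ((uint64_t)(x))
    #else
    /* Real builds perform the actual byte swap. */
    #define EXAMPLE_BSWAP_16(x) __builtin_bswap16(x)
    #define EXAMPLE_BSWAP_32(x) __builtin_bswap32(x)
    #define EXAMPLE_BSWAP_64(x) __builtin_bswap64(x)
    #endif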