MAX_FROM_BITS() computes the maximum value representable in a given number
of bits. The expression evaluates to an unsigned value, but we explicitly
cast it to a signed int. It looks like this is because one of its main
users is FD_REF_MAX, which is used to bound fd values, and those are
typically stored as a signed int.
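
As a standalone illustration (a sketch only, not part of this patch, with a
made-up fd value and printf output), this is how the current definitions
expand and how a signed fd is typically checked against FD_REF_MAX:

  /* Sketch of the current state: the cast lives inside MAX_FROM_BITS() */
  #include <stdio.h>

  #define FD_REF_BITS		24
  #define MAX_FROM_BITS(n)	((int)((1U << (n)) - 1))
  #define FD_REF_MAX		MAX_FROM_BITS(FD_REF_BITS)

  int main(void)
  {
  	int fd = 42;	/* stand-in for a real file descriptor */

  	/* FD_REF_MAX is (int)0xffffff == 16777215, so the comparison
  	 * below is between two signed ints */
  	if (fd > FD_REF_MAX)
  		printf("fd %d does not fit in %d bits\n", fd, FD_REF_BITS);
  	else
  		printf("fd %d fits (max %d)\n", fd, FD_REF_MAX);

  	return 0;
  }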
The value MAX_FROM_BITS() calculates is naturally non-negative, though, so
it makes more sense for it to be unsigned, and to move the cast to the
definition of FD_REF_MAX.
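
For comparison, the same sketch with this patch applied (again illustrative
only, values made up): MAX_FROM_BITS() stays unsigned, and only FD_REF_MAX
carries the cast:

  /* Sketch of the state after this patch: unsigned macro, cast at the
   * FD_REF_MAX definition, where the signed bound is actually wanted */
  #include <stdio.h>

  #define MAX_FROM_BITS(n)	(((1U << (n)) - 1))

  #define FD_REF_BITS		24
  #define FD_REF_MAX		((int)MAX_FROM_BITS(FD_REF_BITS))

  int main(void)
  {
  	unsigned mask = MAX_FROM_BITS(8);	/* naturally unsigned: 0xff */
  	int fd = 3;				/* made-up fd value */

  	printf("mask=%#x fd_in_range=%d\n", mask, fd <= FD_REF_MAX);
  	return 0;
  }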
Signed-off-by: David Gibson
---
passt.h | 2 +-
util.h | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/passt.h b/passt.h
index 3f5dfb9..0fce637 100644
--- a/passt.h
+++ b/passt.h
@@ -87,7 +87,7 @@ union epoll_ref {
struct {
enum epoll_type type:8;
#define FD_REF_BITS 24
-#define FD_REF_MAX MAX_FROM_BITS(FD_REF_BITS)
+#define FD_REF_MAX ((int)MAX_FROM_BITS(FD_REF_BITS))
int32_t fd:FD_REF_BITS;
union {
union tcp_epoll_ref tcp;
diff --git a/util.h b/util.h
index 78a8fb2..b1106e8 100644
--- a/util.h
+++ b/util.h
@@ -42,7 +42,7 @@
#define ROUND_DOWN(x, y) ((x) & ~((y) - 1))
#define ROUND_UP(x, y) (((x) + (y) - 1) & ~((y) - 1))

-#define MAX_FROM_BITS(n) ((int)((1U << (n)) - 1))
+#define MAX_FROM_BITS(n) (((1U << (n)) - 1))

#define BIT(n) (1UL << (n))
#define BITMAP_BIT(n) (BIT((n) % (sizeof(long) * 8)))
--
2.43.0