diff --git a/sched/sched/sched_lock.c b/sched/sched/sched_lock.c
index 97ae1e90712f0..10a855ffbea61 100644
--- a/sched/sched/sched_lock.c
+++ b/sched/sched/sched_lock.c
@@ -76,7 +76,7 @@ void sched_lock(void)
    * integer type.
    */
 
-  DEBUGASSERT(rtcb && rtcb->lockcount < MAX_LOCK_COUNT);
+  DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
 
   /* A counter is used to support locking.  This allows nested lock
    * operations on this thread (on any CPU)
diff --git a/sched/sched/sched_unlock.c b/sched/sched/sched_unlock.c
index da75abe6bf116..23bb37271240e 100644
--- a/sched/sched/sched_unlock.c
+++ b/sched/sched/sched_unlock.c
@@ -64,7 +64,7 @@ void sched_unlock(void)
 
   /* rtcb may be NULL only during early boot-up phases */
 
-  DEBUGASSERT(rtcb && rtcb->lockcount > 0);
+  DEBUGASSERT(rtcb->lockcount > 0);
 
   /* Check if the lock counter has decremented to zero.  If so,
    * then pre-emption has been re-enabled.
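
For context, a minimal sketch (not part of the patch) of what the tightened
assertions mean in practice: with the "rtcb &&" guard removed, a NULL task
control block now faults at the dereference instead of being tolerated by the
assertion, encoding the assumption that the running task's TCB always exists
when these functions are called. DEBUGASSERT, MAX_LOCK_COUNT, and tcb_s below
are simplified stand-ins mirroring the NuttX names touched above, not the real
definitions.

/* sketch.c - illustrative only; simplified stand-ins for NuttX types.
 * Assumes DEBUGASSERT() panics on a false condition in debug builds.
 */

#include <assert.h>
#include <limits.h>

#define DEBUGASSERT(c) assert(c)   /* stand-in for the NuttX macro */
#define MAX_LOCK_COUNT INT_MAX     /* stand-in for the real limit */

struct tcb_s                       /* pared-down task control block */
{
  int lockcount;                   /* sched_lock() nesting depth */
};

/* Before the patch each assertion also checked "rtcb &&", so a NULL
 * TCB merely failed the assert.  After the patch the dereference in
 * the condition itself requires a valid TCB.
 */

static void sched_lock_sketch(struct tcb_s *rtcb)
{
  DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
  rtcb->lockcount++;
}

static void sched_unlock_sketch(struct tcb_s *rtcb)
{
  DEBUGASSERT(rtcb->lockcount > 0);
  rtcb->lockcount--;
}

int main(void)
{
  struct tcb_s tcb = { 0 };

  sched_lock_sketch(&tcb);     /* lockcount: 0 -> 1 */
  sched_unlock_sketch(&tcb);   /* lockcount: 1 -> 0 */
  return 0;
}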