	struct {
		u8 blocked;
		u8 need_qs;
-		u8 exp_need_qs;
-
-		/* Otherwise the compiler can store garbage here: */
-		u8 pad;
	} b; /* Bits. */
-	u32 s; /* Set of bits. */
+	u16 s; /* Set of bits. */
};
enum perf_event_task_context {
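With exp_need_qs and the explicit pad byte removed, only the blocked and need_qs bytes remain, so the aggregate view s needs just 16 bits to cover the whole bitfield struct. A minimal sketch of the resulting layout, assuming the enclosing union rcu_special declaration from include/linux/sched.h, which is not shown in the hunk above:

union rcu_special {
	struct {
		u8 blocked;	/* Task blocked in an RCU read-side critical section? */
		u8 need_qs;	/* Quiescent state needed from this task? */
	} b;			/* Bits, addressed individually. */
	u16 s;			/* Same bits as one value, e.g. for a zero test. */
};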
	 * no need to check for a subsequent expedited GP.  (Though we are
	 * still in a quiescent state in any case.)
	 */
-	if (blkd_state & RCU_EXP_BLKD &&
-	    t->rcu_read_unlock_special.b.exp_need_qs) {
-		t->rcu_read_unlock_special.b.exp_need_qs = false;
+	if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs)
		rcu_report_exp_rdp(rdp->rsp, rdp, true);
-	} else {
-		WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs);
-	}
+	else
+		WARN_ON_ONCE(rdp->deferred_qs);
}
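Both call sites now consult rdp->deferred_qs on the per-CPU rcu_data structure instead of the per-task exp_need_qs bit. A sketch of the assumed field, based only on its use in these hunks; the real struct rcu_data declaration lives in kernel/rcu/tree.h and is not part of this excerpt, and the bool type is an assumption:

struct rcu_data {
	/* ... other per-CPU state ... */
	bool deferred_qs;	/* An expedited quiescent state was deferred;
				   report it via rcu_report_exp_rdp() when safe. */
};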
	/*
	 * tasks are handled when removing the task from the
	 * blocked-tasks list below.
	 */
-	if (special.b.exp_need_qs || rdp->deferred_qs) {
-		t->rcu_read_unlock_special.b.exp_need_qs = false;
-		rdp->deferred_qs = false;
+	if (rdp->deferred_qs) {
		rcu_report_exp_rdp(rcu_state_p, rdp, true);
		if (!t->rcu_read_unlock_special.s) {
			local_irq_restore(flags);