return READ_ONCE(rcu_normal) &&
rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
-EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
+EXPORT_SYMBOL(rcu_gp_is_normal);
static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
rcu_scheduler_active == RCU_SCHEDULER_INIT;
}
-EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
+EXPORT_SYMBOL(rcu_gp_is_expedited);
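/*
 * Illustrative sketch, not part of this patch: the synchronize_*()
 * primitives of this era consult rcu_gp_is_expedited() to choose
 * between the expedited and normal paths, roughly as follows:
 */
void example_synchronize_sched(void)
{
        if (rcu_gp_is_expedited())
                synchronize_sched_expedited();  /* IPI-driven fast path. */
        else
                wait_rcu_gp(call_rcu_sched);    /* Normal grace period. */
}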
/**
* rcu_expedite_gp - Expedite future RCU grace periods
{
atomic_inc(&rcu_expedited_nesting);
}
-EXPORT_SYMBOL_GPL(rcu_expedite_gp);
+EXPORT_SYMBOL(rcu_expedite_gp);
/**
* rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
{
atomic_dec(&rcu_expedited_nesting);
}
-EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
+EXPORT_SYMBOL(rcu_unexpedite_gp);
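/*
 * Usage sketch, illustrative only: rcu_expedite_gp() and
 * rcu_unexpedite_gp() nest, so every call to the former must be
 * balanced by a call to the latter.  The ATOMIC_INIT(1) above makes
 * boot run expedited until rcu_end_inkernel_boot() supplies the
 * matching rcu_unexpedite_gp().  A PM notifier can use the same
 * pairing to speed up grace periods across suspend/resume:
 */
static int example_pm_notify(struct notifier_block *self,
                             unsigned long action, void *data)
{
        switch (action) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                rcu_expedite_gp();      /* Expedite GPs while suspending. */
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
                rcu_unexpedite_gp();    /* Balance the call above. */
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}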
/*
* Inform RCU of the end of the in-kernel boot sequence.
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
-EXPORT_SYMBOL_GPL(rcu_lock_map);
+EXPORT_SYMBOL(rcu_lock_map);
static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
-EXPORT_SYMBOL_GPL(rcu_bh_lock_map);
+EXPORT_SYMBOL(rcu_bh_lock_map);
static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
-EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
+EXPORT_SYMBOL(rcu_sched_lock_map);
static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
-EXPORT_SYMBOL_GPL(rcu_callback_map);
+EXPORT_SYMBOL(rcu_callback_map);
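/*
 * Background sketch, not part of this patch: these lockdep maps let
 * lockdep model RCU read-side critical sections as locks.  The
 * rcu_read_lock() family "acquires" and "releases" the matching map,
 * simplified here from include/linux/rcupdate.h of this era:
 */
static inline void example_rcu_lock_acquire(struct lockdep_map *map)
{
        lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
}

static inline void example_rcu_lock_release(struct lockdep_map *map)
{
        lock_release(map, 1, _THIS_IP_);
}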
int notrace debug_lockdep_rcu_enabled(void)
{
return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
current->lockdep_recursion == 0;
}
-EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
+EXPORT_SYMBOL(debug_lockdep_rcu_enabled);
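/*
 * Illustrative sketch: debug_lockdep_rcu_enabled() gates the RCU
 * lockdep splats.  RCU_LOCKDEP_WARN(), simplified below from the
 * header of this era, consults it so that each suspicious call site
 * warns at most once, and only while lockdep output is trustworthy:
 */
#define EXAMPLE_RCU_LOCKDEP_WARN(c, s)                                  \
        do {                                                            \
                static bool __warned;                                   \
                if (debug_lockdep_rcu_enabled() && !__warned && (c)) {  \
                        __warned = true;                                \
                        lockdep_rcu_suspicious(__FILE__, __LINE__, s);  \
                }                                                       \
        } while (0)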
/**
* rcu_read_lock_held() - might we be in RCU read-side critical section?
return 0;
return lock_is_held(&rcu_lock_map);
}
-EXPORT_SYMBOL_GPL(rcu_read_lock_held);
+EXPORT_SYMBOL(rcu_read_lock_held);
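/*
 * Usage sketch with hypothetical names (example_gp, example_lock):
 * rcu_read_lock_held() is intended for lockdep assertions such as
 * rcu_dereference_check(), where a pointer may be accessed either
 * under rcu_read_lock() or with the update-side lock held:
 */
struct example_data { int val; };
static DEFINE_SPINLOCK(example_lock);
static struct example_data __rcu *example_gp;

static struct example_data *example_get(void)
{
        return rcu_dereference_check(example_gp,
                                     rcu_read_lock_held() ||
                                     lockdep_is_held(&example_lock));
}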
/**
* rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
return 0;
return in_softirq() || irqs_disabled();
}
-EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
+EXPORT_SYMBOL(rcu_read_lock_bh_held);
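/*
 * Usage sketch with hypothetical names: rcu_read_lock_bh_held() backs
 * the _bh-flavored lockdep checks, for data that is updated from
 * softirq context:
 */
static struct example_data __rcu *example_bh_gp;

static void example_bh_reader(void)
{
        struct example_data *p;

        rcu_read_lock_bh();
        p = rcu_dereference_bh(example_bh_gp);  /* checked against rcu_read_lock_bh_held() */
        if (p)
                example_consume(p);             /* hypothetical consumer */
        rcu_read_unlock_bh();
}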
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
rcu = container_of(head, struct rcu_synchronize, head);
complete(&rcu->completion);
}
-EXPORT_SYMBOL_GPL(wakeme_after_rcu);
+EXPORT_SYMBOL(wakeme_after_rcu);
void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
struct rcu_synchronize *rs_array)
destroy_rcu_head_on_stack(&rs_array[i].head);
}
}
-EXPORT_SYMBOL_GPL(__wait_rcu_gp);
+EXPORT_SYMBOL(__wait_rcu_gp);
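/*
 * Usage sketch: __wait_rcu_gp() waits for each distinct grace-period
 * flavor in crcu_array[] concurrently.  It is normally reached through
 * the _wait_rcu_gp() wrappers in include/linux/rcupdate.h, e.g.
 * synchronize_rcu_mult(), which overlaps the waits instead of running
 * them back to back:
 */
static void example_wait_two_flavors(void)
{
        /* One combined wait for an RCU and an RCU-sched grace period. */
        synchronize_rcu_mult(call_rcu, call_rcu_sched);
}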
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head_on_stack(struct rcu_head *head)
{
debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
-EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);
+EXPORT_SYMBOL(init_rcu_head_on_stack);
/**
* destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
{
debug_object_free(head, &rcuhead_debug_descr);
}
-EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
+EXPORT_SYMBOL(destroy_rcu_head_on_stack);
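/*
 * Usage sketch: on-stack rcu_head structures must be announced to
 * debugobjects with init_rcu_head_on_stack() and retired with
 * destroy_rcu_head_on_stack(), as in this open-coded equivalent of
 * synchronize_rcu() (mirroring __wait_rcu_gp() above):
 */
static void example_synchronize(void)
{
        struct rcu_synchronize rcu;

        init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        call_rcu(&rcu.head, wakeme_after_rcu);
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);
}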
struct debug_obj_descr rcuhead_debug_descr = {
.name = "rcu_head",
.is_static_object = rcuhead_is_static_object,
};
-EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
+EXPORT_SYMBOL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
{
trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
-EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
+EXPORT_SYMBOL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
do { } while (0)
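/*
 * Usage sketch, paraphrased from rcutorture's reader path: each torture
 * read is recorded through the wrapper above, and the #else stub lets
 * the same call compile away when tracing is configured out.  The
 * parameter names below are illustrative only.
 */
static void example_record_read(const char *name, struct rcu_head *rhp,
                                unsigned long ts, unsigned long started,
                                unsigned long completed)
{
        do_trace_rcu_torture_read(name, rhp, ts, started, completed);
}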
if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
wake_up(&rcu_tasks_cbs_wq);
}
-EXPORT_SYMBOL_GPL(call_rcu_tasks);
+EXPORT_SYMBOL(call_rcu_tasks);
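/*
 * Usage sketch with hypothetical types (example_tramp, free_tramp):
 * call_rcu_tasks() defers its callback until every task has passed
 * through a voluntary context switch or user-mode execution, which is
 * what allows trampoline-style code to be freed even though preempted
 * tasks may still be executing in it:
 */
struct example_tramp {
        struct rcu_head rh;
        /* ... trampoline text ... */
};

static void free_tramp(struct rcu_head *rhp)
{
        struct example_tramp *tp = container_of(rhp, struct example_tramp, rh);

        kfree(tp);      /* No task can still be running inside tp. */
}

static void example_retire_tramp(struct example_tramp *tp)
{
        call_rcu_tasks(&tp->rh, free_tramp);
}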
/**
* synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
/* Wait for the grace period. */
wait_rcu_gp(call_rcu_tasks);
}
-EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
+EXPORT_SYMBOL(synchronize_rcu_tasks);
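/*
 * Usage sketch, hypothetical helpers (example_unpublish_tramp,
 * example_reinit_tramp): the synchronous form blocks in place, so a
 * caller that wants to reuse rather than free the memory can simply
 * wait out any tasks still executing in it:
 */
static void example_recycle_tramp(struct example_tramp *tp)
{
        example_unpublish_tramp(tp);    /* hypothetical: remove all references */
        synchronize_rcu_tasks();        /* wait for tasks still inside tp */
        example_reinit_tramp(tp);       /* hypothetical: now safe to rewrite */
}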
/**
* rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
/* There is only one callback queue, so this is easy. ;-) */
synchronize_rcu_tasks();
}
-EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
+EXPORT_SYMBOL(rcu_barrier_tasks);
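/*
 * Usage sketch: a module that queues callbacks with call_rcu_tasks()
 * must drain them before its callback functions can be unloaded; with
 * the single callback queue noted above, one full grace period
 * suffices:
 */
static void __exit example_module_exit(void)
{
        rcu_barrier_tasks();    /* Wait for pending call_rcu_tasks() callbacks. */
}
module_exit(example_module_exit);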
/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,