@@ ... @@
 restore_irqs:
 	raw_local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(lockdep_register_key);
+EXPORT_SYMBOL(lockdep_register_key);
 
 /* Check whether a key has been registered as a dynamic key. */
 static bool is_dynamic_key(const struct lock_class_key *key)
@@ ... @@
 	__trace_hardirqs_on_caller();
 	lockdep_recursion_finish();
 }
-EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
+EXPORT_SYMBOL(lockdep_hardirqs_on_prepare);
 
 void noinstr lockdep_hardirqs_on(unsigned long ip)
 {
@@ ... @@
 	trace->hardirq_enable_event = ++trace->irq_events;
 	debug_atomic_inc(hardirqs_on_events);
 }
-EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
+EXPORT_SYMBOL(lockdep_hardirqs_on);
 
 /*
  * Hardirqs were disabled:
@@ ... @@
 		debug_atomic_inc(redundant_hardirqs_off);
 	}
 }
-EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
+EXPORT_SYMBOL(lockdep_hardirqs_off);
 
 /*
  * Softirqs will be enabled:
@@ ... @@
 EXPORT_SYMBOL(lockdep_init_map_waits);
 
 struct lock_class_key __lockdep_no_validate__;
-EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
+EXPORT_SYMBOL(__lockdep_no_validate__);
 
 static void
 print_lock_nested_lock_not_held(struct task_struct *curr,
@@ ... @@
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(lock_set_class);
+EXPORT_SYMBOL(lock_set_class);
 
 void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
 {
@@ ... @@
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(lock_downgrade);
+EXPORT_SYMBOL(lock_downgrade);
 
 /* NMI context !!! */
 static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock, int subclass)
@@ ... @@
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(lock_acquire);
+EXPORT_SYMBOL(lock_acquire);
 
 void lock_release(struct lockdep_map *lock, unsigned long ip)
 {
@@ ... @@
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(lock_release);
+EXPORT_SYMBOL(lock_release);
 
 noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
 {
@@ ... @@
 	return ret;
 }
-EXPORT_SYMBOL_GPL(lock_is_held_type);
+EXPORT_SYMBOL(lock_is_held_type);
 NOKPROBE_SYMBOL(lock_is_held_type);
 
 struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
@@ ... @@
 	return cookie;
 }
-EXPORT_SYMBOL_GPL(lock_pin_lock);
+EXPORT_SYMBOL(lock_pin_lock);
 
 void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 {
@@ ... @@
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(lock_repin_lock);
+EXPORT_SYMBOL(lock_repin_lock);
 
 void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 {
@@ ... @@
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(lock_unpin_lock);
+EXPORT_SYMBOL(lock_unpin_lock);
 
 #ifdef CONFIG_LOCK_STAT
 static void print_lock_contention_bug(struct task_struct *curr,
@@ ... @@
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(lock_contended);
+EXPORT_SYMBOL(lock_contended);
 
 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
@@ ... @@
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(lock_acquired);
+EXPORT_SYMBOL(lock_acquired);
 #endif
 
 /*
@@ ... @@
 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
 	synchronize_rcu();
 }
-EXPORT_SYMBOL_GPL(lockdep_unregister_key);
+EXPORT_SYMBOL(lockdep_unregister_key);
 
 void __init lockdep_init(void)
 {
@@ ... @@
 	}
 	raw_local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
+EXPORT_SYMBOL(debug_check_no_locks_freed);
 
 static void print_held_locks_bug(void)
 {
@@ ... @@
 	if (unlikely(current->lockdep_depth > 0))
 		print_held_locks_bug();
 }
-EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
+EXPORT_SYMBOL(debug_check_no_locks_held);
 
 #ifdef __KERNEL__
 void debug_show_all_locks(void)
pr_warn("\n");
pr_warn("=============================================\n\n");
}
-EXPORT_SYMBOL_GPL(debug_show_all_locks);
+EXPORT_SYMBOL(debug_show_all_locks);
#endif
/*
@@ ... @@
 	}
 	lockdep_print_held_locks(task);
 }
-EXPORT_SYMBOL_GPL(debug_show_held_locks);
+EXPORT_SYMBOL(debug_show_held_locks);
 
 asmlinkage __visible void lockdep_sys_exit(void)
 {
pr_warn("\nstack backtrace:\n");
dump_stack();
}
-EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
+EXPORT_SYMBOL(lockdep_rcu_suspicious);
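Below is a minimal sketch (not part of the patch) of what the conversion above enables: a module without a GPL-compatible license can now resolve these symbols at load time, because EXPORT_SYMBOL, unlike EXPORT_SYMBOL_GPL, places no license restriction on importers. The module name, the demo_obj structure, and the license string are illustrative assumptions, not anything taken from this patch.

/*
 * Sketch only: a hypothetical out-of-tree, non-GPL module using the
 * dynamic lock-key API (lockdep_register_key()/lockdep_unregister_key()),
 * two of the symbols re-exported above. All names here are made up.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct demo_obj {			/* hypothetical container */
	spinlock_t lock;
	struct lock_class_key key;	/* dynamic keys must not be static objects */
};

static struct demo_obj *obj;

static int __init demo_init(void)
{
	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* Register the dynamically allocated key before handing it to lockdep. */
	lockdep_register_key(&obj->key);
	spin_lock_init(&obj->lock);
	lockdep_set_class(&obj->lock, &obj->key);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Unregister before freeing; lockdep_unregister_key() waits for RCU. */
	lockdep_unregister_key(&obj->key);
	kfree(obj);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("Proprietary");	/* would fail to link against EXPORT_SYMBOL_GPL */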