EXPORT_SYMBOL(lockdep_init_map);
struct lock_class_key __lockdep_no_validate__;
-EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
+EXPORT_SYMBOL(__lockdep_no_validate__);
static int
print_lock_nested_lock_not_held(struct task_struct *curr,
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(lock_set_class);
+EXPORT_SYMBOL(lock_set_class);
/*
* We are not always called with irqs disabled - do that here,
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(lock_acquire);
+EXPORT_SYMBOL(lock_acquire);
void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip)
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(lock_release);
+EXPORT_SYMBOL(lock_release);
int lock_is_held(struct lockdep_map *lock)
{
return ret;
}
-EXPORT_SYMBOL_GPL(lock_is_held);
+EXPORT_SYMBOL(lock_is_held);
void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
{
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(lock_contended);
+EXPORT_SYMBOL(lock_contended);
void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(lock_acquired);
+EXPORT_SYMBOL(lock_acquired);
#endif
/*
}
local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
+EXPORT_SYMBOL(debug_check_no_locks_freed);
static void print_held_locks_bug(void)
{
if (unlock)
read_unlock(&tasklist_lock);
}
-EXPORT_SYMBOL_GPL(debug_show_all_locks);
+EXPORT_SYMBOL(debug_show_all_locks);
#endif
/*
}
lockdep_print_held_locks(task);
}
-EXPORT_SYMBOL_GPL(debug_show_held_locks);
+EXPORT_SYMBOL(debug_show_held_locks);
asmlinkage __visible void lockdep_sys_exit(void)
{
printk("\nstack backtrace:\n");
dump_stack();
}
-EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
+EXPORT_SYMBOL(lockdep_rcu_suspicious);
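
For context on what the relaxed exports enable: with lock_acquire() and
lock_release() callable from non-GPL code, an out-of-tree module can hook its
own synchronization primitive into lockdep's dependency tracking. The sketch
below is illustrative only and is not part of the patch; the demo_* identifiers
are invented, and the annotations go through the lock_map_acquire()/
lock_map_release() helpers, which expand to the lock_acquire()/lock_release()
symbols changed above (and compile away when lock debugging is not configured).

	#include <linux/module.h>
	#include <linux/lockdep.h>

	/* Hypothetical pseudo-lock; names are illustrative, not from the patch. */
	static struct lock_class_key demo_key;
	static struct lockdep_map demo_map;

	static void demo_critical_section(void)
	{
		/* Expands to lock_acquire(), one of the exports relaxed above. */
		lock_map_acquire(&demo_map);

		/* ... work that must be ordered against other tracked locks ... */

		/* Expands to lock_release(). */
		lock_map_release(&demo_map);
	}

	static int __init demo_init(void)
	{
		/* Register the map; lockdep creates the class lazily on first use. */
		lockdep_init_map(&demo_map, "demo_pseudo_lock", &demo_key, 0);
		demo_critical_section();
		return 0;
	}

	static void __exit demo_exit(void)
	{
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("Proprietary");

Loading such a module still taints the kernel because of the license string,
but with these symbols exported via EXPORT_SYMBOL the annotations resolve at
module load time instead of failing with unknown (GPL-only) symbol errors.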