#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/compiler.h>
#include <asm/processor.h>
__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
#ifdef CONFIG_ITANIUM
- while (__builtin_expect ((__s32) result == -1, 0))
+ while (unlikely ((__s32) result == -1))
__asm__ __volatile__("mov %0=ar.itc" : "=r"(result) :: "memory");
#endif
return result;
#include <linux/mm.h>
#include <linux/threads.h>
+#include <linux/compiler.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
{
unsigned long *ret = pgd_quicklist;
- if (__builtin_expect(ret != NULL, 1)) {
+ if (likely(ret != NULL)) {
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
--pgtable_cache_size;
/* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
pgd_t *pgd = pgd_alloc_one_fast(mm);
- if (__builtin_expect(pgd == NULL, 0)) {
+ if (unlikely(pgd == NULL)) {
pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
- if (__builtin_expect(pgd != NULL, 1))
+ if (likely(pgd != NULL))
clear_page(pgd);
}
return pgd;
{
unsigned long *ret = (unsigned long *)pmd_quicklist;
- if (__builtin_expect(ret != NULL, 1)) {
+ if (likely(ret != NULL)) {
pmd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
--pgtable_cache_size;
{
pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
- if (__builtin_expect(pmd != NULL, 1))
+ if (likely(pmd != NULL))
clear_page(pmd);
return pmd;
}
{
struct page *pte = alloc_pages(GFP_KERNEL, 0);
- if (__builtin_expect(pte != NULL, 1))
+ if (likely(pte != NULL))
clear_page(page_address(pte));
return pte;
}
{
pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL);
- if (__builtin_expect(pte != NULL, 1))
+ if (likely(pte != NULL))
clear_page(pte);
return pte;
}
#include <linux/config.h>
#include <linux/percpu.h>
+#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/kregs.h>
regs->loadrs = 0; \
regs->r8 = current->mm->dumpable; /* set "don't zap registers" flag */ \
regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
- if (!__builtin_expect (current->mm->dumpable, 1)) { \
+ if (!likely (current->mm->dumpable)) { \
/* \
* Zap scratch regs to avoid leaking bits between processes with different \
* uid/privileges. \
#ifndef _ASM_IA64_SOFTIRQ_H
#define _ASM_IA64_SOFTIRQ_H
+#include <linux/compiler.h>
+
/*
* Copyright (C) 1998-2001 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
#define local_bh_enable() \
do { \
__local_bh_enable(); \
- if (__builtin_expect(local_softirq_pending(), 0) && really_local_bh_count() == 0) \
+ if (unlikely(local_softirq_pending()) && really_local_bh_count() == 0) \
do_softirq(); \
} while (0)