* eliminates "false sharing".
 * Note for systems short on memory, removing the alignment will
* allow tighter packing of the smaller caches. */
- sizes->cs_cachep = kmem_cache_create(
- names->name, sizes->cs_size,
- ARCH_KMALLOC_MINALIGN, 0, NULL, NULL);
- if (!sizes->cs_cachep)
- BUG();
+ sizes->cs_cachep = kmem_cache_create(names->name,
+ sizes->cs_size, ARCH_KMALLOC_MINALIGN,
+ SLAB_PANIC, NULL, NULL);
/* Inc off-slab bufctl limit until the ceiling is hit. */
	if (!(OFF_SLAB(sizes->cs_cachep))) {
		offslab_limit = sizes->cs_size - sizeof(struct slab);
		offslab_limit /= sizeof(kmem_bufctl_t);
	}
- sizes->cs_dmacachep = kmem_cache_create(
- names->name_dma, sizes->cs_size,
- ARCH_KMALLOC_MINALIGN, SLAB_CACHE_DMA, NULL, NULL);
- if (!sizes->cs_dmacachep)
- BUG();
+ sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
+ sizes->cs_size, ARCH_KMALLOC_MINALIGN,
+ (SLAB_CACHE_DMA | SLAB_PANIC), NULL, NULL);
sizes++;
names++;
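
The effect of this change is that kmem_cache_create() itself panics when
creation fails and SLAB_PANIC is set, so every call site can drop its
open-coded "if (!cachep) BUG();" check. Below is a minimal userspace
sketch of that pattern, under stated assumptions: the kmem_cache_stub
struct, the do_create() and cache_create() helpers, and the flag values
are illustrative stand-ins, not the kernel's actual implementation.

/*
 * Minimal userspace sketch of the SLAB_PANIC pattern above.  The
 * kmem_cache_stub struct, do_create()/cache_create() helpers and the
 * flag values are illustrative stand-ins, not the kernel's code.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define SLAB_PANIC	0x1UL	/* panic (here: abort) if creation fails */
#define SLAB_CACHE_DMA	0x2UL	/* allocate from DMA-able memory */

struct kmem_cache_stub {
	const char *name;
	size_t size;
	unsigned long flags;
};

/* Stand-in for the allocator's internal creation work. */
static struct kmem_cache_stub *do_create(const char *name, size_t size,
					 unsigned long flags)
{
	struct kmem_cache_stub *c = malloc(sizeof(*c));

	if (c) {
		c->name = name;
		c->size = size;
		c->flags = flags;
	}
	return c;
}

static struct kmem_cache_stub *cache_create(const char *name, size_t size,
					    unsigned long flags)
{
	struct kmem_cache_stub *c = do_create(name, size, flags);

	/*
	 * With SLAB_PANIC the failure check lives here, once, so every
	 * caller can drop its own "if (!cachep) BUG();" boilerplate.
	 */
	if (!c && (flags & SLAB_PANIC)) {
		fprintf(stderr, "cache_create: cannot create cache %s\n", name);
		abort();	/* stands in for the kernel's panic() */
	}
	return c;
}

int main(void)
{
	struct kmem_cache_stub *cachep =
		cache_create("size-32", 32, SLAB_PANIC);
	struct kmem_cache_stub *dmacachep =
		cache_create("size-32(DMA)", 32, SLAB_CACHE_DMA | SLAB_PANIC);

	/* No NULL checks needed: a failed create would not return. */
	printf("created %s and %s\n", cachep->name, dmacachep->name);
	free(dmacachep);
	free(cachep);
	return 0;
}

Centralizing the failure policy behind the flag keeps behaviour
consistent across all the boot-time cache creations, where a failed
create is unrecoverable anyway.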