/* FIXME, who returns -ENOENT? I think nobody */
acl = NULL;
} else {
- acl = ERR_PTR(-EIEIO);
+ acl = ERR_PTR(-EIO);
}
kfree(value);
0);
if (!eb || !extent_buffer_uptodate(eb)) {
free_extent_buffer(eb);
- return -EIEIO;
+ return -EIO;
}
btrfs_tree_read_lock(eb);
if (btrfs_header_level(eb) == 0)
ref->parent, 0);
if (!eb || !extent_buffer_uptodate(eb)) {
free_extent_buffer(eb);
- ret = -EIEIO;
+ ret = -EIO;
goto out;
}
btrfs_tree_read_lock(eb);
return 0;
}
- return -EIEIO;
+ return -EIO;
}
/*
"csum failed ino %llu extent %llu csum %u wanted %u mirror %d",
btrfs_ino(inode), disk_start, csum, *cb_sum,
cb->mirror_num);
- ret = -EIEIO;
+ ret = -EIO;
goto fail;
}
cb_sum++;
PAGE_CACHE_SIZE);
read_unlock(&em_tree->lock);
if (!em)
- return -EIEIO;
+ return -EIO;
compressed_len = em->block_len;
cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
extern struct btrfs_compress_op btrfs_zlib_compress;
extern struct btrfs_compress_op btrfs_lzo_compress;
-#ifndef EIEIO
-static inline int eieio(const char *file, int line, const char *function) {
- printk(KERN_DEBUG "EIO calculated at %s:%d in %s\n", file, line, function);
- return EIO;
-}
-#define EIEIO (eieio(__FILE__, __LINE__, __FUNCTION__))
-#endif
-
#endif
cur = read_tree_block(root, blocknr, gen);
if (!cur || !extent_buffer_uptodate(cur)) {
free_extent_buffer(cur);
- return -EIEIO;
+ return -EIO;
}
} else if (!uptodate) {
err = btrfs_read_buffer(cur, gen);
}
free_extent_buffer(tmp);
btrfs_release_path(p);
- return -EIEIO;
+ return -EIO;
}
/*
if (tmp) {
/*
* If the read above didn't mark this buffer up to date,
- * it will never end up being up to date. Set ret to EIEIO now
+ * it will never end up being up to date. Set ret to EIO now
* and give up so that our caller doesn't loop forever
* on our EAGAINs.
*/
if (!btrfs_buffer_uptodate(tmp, 0, 0))
- ret = -EIEIO;
+ ret = -EIO;
free_extent_buffer(tmp);
}
return ret;
return 0;
}
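
The EAGAIN/EIO contract spelled out in the comment above can be modeled in a few lines of plain C. This is an illustrative userspace sketch, not btrfs code; struct buf and read_buf are invented for the example:

#include <errno.h>
#include <stdio.h>

/* Invented stand-in for a buffer that may need more read passes. */
struct buf {
        int uptodate;
        int reads_left;
};

/*
 * Returns 0 on success, -EAGAIN to ask the caller to retry, or -EIO
 * once the buffer can no longer become up to date.
 */
static int read_buf(struct buf *b)
{
        if (b->uptodate)
                return 0;
        if (b->reads_left-- > 0)
                return -EAGAIN; /* another pass might still help */
        return -EIO; /* never going to be up to date: give up */
}

int main(void)
{
        struct buf b = { .uptodate = 0, .reads_left = 2 };
        int ret;

        while ((ret = read_buf(&b)) == -EAGAIN)
                ; /* the caller loops only while told EAGAIN */
        printf("final ret = %d\n", ret); /* -5 on Linux, i.e. -EIO */
        return 0;
}

Without the EIO conversion in the callee, the caller's retry loop would spin forever on EAGAIN, which is exactly what the comment warns about.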
-#ifndef EIEIO
-static inline int eieio(const char *file, int line, const char *function) {
- printk(KERN_DEBUG "EIO calculated at %s:%d in %s\n", file, line, function);
- return EIO;
-}
-#define EIEIO (eieio(__FILE__, __LINE__, __FUNCTION__))
-#endif
-
#endif
bool count = (nr > 0);
if (trans->aborted)
- return -EIEIO;
+ return -EIO;
path = btrfs_alloc_path();
if (!path)
*/
if (!dev_replace->srcdev &&
!btrfs_test_opt(dev_root, DEGRADED)) {
- ret = -EIEIO;
+ ret = -EIO;
btrfs_warn(fs_info,
"cannot mount because device replace operation is ongoing and");
btrfs_warn(fs_info,
}
if (!dev_replace->tgtdev &&
!btrfs_test_opt(dev_root, DEGRADED)) {
- ret = -EIEIO;
+ ret = -EIO;
btrfs_warn(fs_info,
"cannot mount because device replace operation is ongoing and");
btrfs_warn(fs_info,
free_extent_map(em);
em = lookup_extent_mapping(em_tree, start, len);
if (!em)
- em = ERR_PTR(-EIEIO);
+ em = ERR_PTR(-EIO);
} else if (ret) {
free_extent_map(em);
em = ERR_PTR(ret);
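
ERR_PTR(-EIO) here relies on the kernel's pointer-error encoding from include/linux/err.h: errno values live in the top page of the address space, so one return value can carry either a valid pointer or an error. A minimal userspace rendition (em_lookup is a made-up stand-in):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Userspace copies of the include/linux/err.h helpers used above. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Invented lookup that reports failure as an encoded -EIO. */
static void *em_lookup(int fail)
{
        static int em = 42;
        return fail ? ERR_PTR(-EIO) : &em;
}

int main(void)
{
        void *em = em_lookup(1);

        if (IS_ERR(em))
                printf("lookup failed: %ld\n", PTR_ERR(em)); /* -5 == -EIO */
        return 0;
}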
parent_transid, 0))
break;
else
- ret = -EIEIO;
+ ret = -EIO;
}
/*
if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
BTRFS_LEAF_DATA_SIZE(root)) {
CORRUPT("invalid item offset size pair", leaf, root, 0);
- return -EIEIO;
+ return -EIO;
}
/*
/* Make sure the keys are in the right order */
if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
CORRUPT("bad key order", leaf, root, slot);
- return -EIEIO;
+ return -EIO;
}
/*
if (btrfs_item_offset_nr(leaf, slot) !=
btrfs_item_end_nr(leaf, slot + 1)) {
CORRUPT("slot offset bad", leaf, root, slot);
- return -EIEIO;
+ return -EIO;
}
/*
if (btrfs_item_end_nr(leaf, slot) >
BTRFS_LEAF_DATA_SIZE(root)) {
CORRUPT("slot end outside of leaf", leaf, root, slot);
- return -EIEIO;
+ return -EIO;
}
}
eb->read_mirror = mirror;
if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
- ret = -EIEIO;
+ ret = -EIO;
goto err;
}
printk_ratelimited(KERN_ERR "BTRFS (device %s): bad tree block start "
"%llu %llu\n",
eb->fs_info->sb->s_id, found_start, eb->start);
- ret = -EIEIO;
+ ret = -EIO;
goto err;
}
if (check_tree_block_fsid(root, eb)) {
printk_ratelimited(KERN_ERR "BTRFS (device %s): bad fsid on block %llu\n",
eb->fs_info->sb->s_id, eb->start);
- ret = -EIEIO;
+ ret = -EIO;
goto err;
}
found_level = btrfs_header_level(eb);
if (found_level >= BTRFS_MAX_LEVEL) {
btrfs_err(root->fs_info, "bad tree block level %d",
(int)btrfs_header_level(eb));
- ret = -EIEIO;
+ ret = -EIO;
goto err;
}
ret = csum_tree_block(root, eb, 1);
if (ret) {
- ret = -EIEIO;
+ ret = -EIO;
goto err;
}
/*
* If this is a leaf block and it is corrupt, set the corrupt bit so
* that we don't try and read the other copies of this block, just
- * return -EIEIO.
+ * return -EIO.
*/
if (found_level == 0 && check_leaf(root, eb)) {
set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
- ret = -EIEIO;
+ ret = -EIO;
}
if (!ret)
eb->read_mirror = failed_mirror;
atomic_dec(&eb->io_pages);
if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
- btree_readahead_hook(root, eb, eb->start, -EIEIO);
- return -EIEIO; /* we fixed nothing */
+ btree_readahead_hook(root, eb, eb->start, -EIO);
+ return -EIO; /* we fixed nothing */
}
static void end_workqueue_bio(struct bio *bio, int err)
if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
free_extent_buffer(buf);
- return -EIEIO;
+ return -EIO;
} else if (extent_buffer_uptodate(buf)) {
*eb = buf;
} else {
ret = -ENOMEM;
goto find_fail;
} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
- ret = -EIEIO;
+ ret = -EIO;
goto read_fail;
}
root->commit_root = btrfs_root_node(root);
if (fs_devices->rw_devices == 0) {
printk(KERN_WARNING "BTRFS: log replay required "
"on RO media\n");
- err = -EIEIO;
+ err = -EIO;
goto fail_qgroup;
}
rcu_str_deref(device->name));
device->nobarriers = 1;
} else if (!bio_flagged(bio, BIO_UPTODATE)) {
- ret = -EIEIO;
+ ret = -EIO;
btrfs_dev_stat_inc_and_print(device,
BTRFS_DEV_STAT_FLUSH_ERRS);
}
}
if (errors_send > info->num_tolerated_disk_barrier_failures ||
errors_wait > info->num_tolerated_disk_barrier_failures)
- return -EIEIO;
+ return -EIO;
return 0;
}
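
In other words, individual flush failures are tolerated up to the redundancy of the profile; only past that threshold does the whole barrier pass fail. A hedged sketch of just the policy check, with num_tolerated standing in for num_tolerated_disk_barrier_failures:

#include <errno.h>

/*
 * Illustrative only: fail the barrier pass with -EIO when more devices
 * erred (on send or on wait) than the redundancy profile can absorb.
 */
static int check_barrier_errors(int errors_send, int errors_wait,
                                int num_tolerated)
{
        if (errors_send > num_tolerated || errors_wait > num_tolerated)
                return -EIO;
        return 0;
}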
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
/* FUA is masked off if unsupported and can't be the reason */
- btrfs_error(root->fs_info, -EIEIO,
+ btrfs_error(root->fs_info, -EIO,
"%d errors while writing supers", total_errors);
- return -EIEIO;
+ return -EIO;
}
total_errors = 0;
}
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
if (total_errors > max_errors) {
- btrfs_error(root->fs_info, -EIEIO,
+ btrfs_error(root->fs_info, -EIO,
"%d errors while writing supers", total_errors);
- return -EIEIO;
+ return -EIO;
}
return 0;
}
err = -ENOENT;
goto out;
} else if (WARN_ON(ret)) {
- err = -EIEIO;
+ err = -EIO;
goto out;
}
if (!ret)
discarded_bytes += stripe->length;
else if (ret != -EOPNOTSUPP)
- break; /* Logic errors or -ENOMEM, or -EIEIO but I don't know how that could happen JDM */
+ break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
/*
* Just in case we get back EOPNOTSUPP for some reason,
goto again;
}
} else {
- err = -EIEIO;
+ err = -EIO;
goto out;
}
}
/*
* Returns 0 on success or if called with an already aborted transaction.
- * Returns -ENOMEM or -EIEIO on failure and will abort the transaction.
+ * Returns -ENOMEM or -EIO on failure and will abort the transaction.
*/
static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
caching_ctl = get_caching_control(cache);
if (!caching_ctl)
- return (cache->cached == BTRFS_CACHE_ERROR) ? -EIEIO : 0;
+ return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
wait_event(caching_ctl->wait, block_group_cache_done(cache));
if (cache->cached == BTRFS_CACHE_ERROR)
- ret = -EIEIO;
+ ret = -EIO;
put_caching_control(caching_ctl);
return ret;
}
eb = read_tree_block(root, child_bytenr, child_gen);
if (!eb || !extent_buffer_uptodate(eb)) {
- ret = -EIEIO;
+ ret = -EIO;
goto out;
}
next = read_tree_block(root, bytenr, generation);
if (!next || !extent_buffer_uptodate(next)) {
free_extent_buffer(next);
- return -EIEIO;
+ return -EIO;
}
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0)
- ret = -EIEIO;
+ ret = -EIO;
if (ret < 0)
goto out;
bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
if (!bio)
- return -EIEIO;
+ return -EIO;
bio->bi_iter.bi_size = 0;
map_length = length;
&map_length, &bbio, mirror_num);
if (ret) {
bio_put(bio);
- return -EIEIO;
+ return -EIO;
}
BUG_ON(mirror_num != bbio->mirror_num);
sector = bbio->stripes[mirror_num-1].physical >> 9;
btrfs_put_bbio(bbio);
if (!dev || !dev->bdev || !dev->writeable) {
bio_put(bio);
- return -EIEIO;
+ return -EIO;
}
bio->bi_bdev = dev->bdev;
bio_add_page(bio, page, length, pg_offset);
/* try to remap that extent elsewhere? */
bio_put(bio);
btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
- return -EIEIO;
+ return -EIO;
}
printk_ratelimited_in_rcu(KERN_INFO
if (!em) {
read_unlock(&em_tree->lock);
kfree(failrec);
- return -EIEIO;
+ return -EIO;
}
if (em->start > start || em->start + em->len <= start) {
read_unlock(&em_tree->lock);
if (!em) {
kfree(failrec);
- return -EIEIO;
+ return -EIO;
}
logical = start - em->start;
ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror);
if (!ret) {
free_io_failure(inode, failrec);
- return -EIEIO;
+ return -EIO;
}
if (failed_bio->bi_vcnt > 1)
NULL);
if (!bio) {
free_io_failure(inode, failrec);
- return -EIEIO;
+ return -EIO;
}
pr_debug("Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d\n",
if (!uptodate) {
ClearPageUptodate(page);
SetPageError(page);
- ret = ret < 0 ? ret : -EIEIO;
+ ret = ret < 0 ? ret : -EIO;
mapping_set_error(page->mapping, ret);
}
return 0;
* end_bio_extent_readpage as well (if we're lucky, not
* in the !uptodate case). In that case it returns 0 and
* we just go on with the next page in our bio. If it
- * can't handle the error it will return -EIEIO and we
+ * can't handle the error it will return -EIO and we
* remain responsible for that page.
*/
ret = bio_readpage_error(bio, offset, page, start, end,
* IO is started, so we don't want to return > 0
* unless things are going well.
*/
- ret = ret < 0 ? ret : -EIEIO;
+ ret = ret < 0 ? ret : -EIO;
goto done;
}
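
The contract the comment describes, where the repair path returns 0 when it has taken over the page and -EIO when the caller stays responsible, reduces to something like this sketch (all names invented for illustration):

#include <errno.h>
#include <stdbool.h>

/*
 * Returning 0 means the retry bio was submitted and the page is no
 * longer this completion handler's problem; -EIO means the caller
 * must still fail the page itself.
 */
static int try_repair(bool have_other_mirror)
{
        if (have_other_mirror)
                return 0; /* resubmitted to another mirror */
        return -EIO; /* nothing left to try */
}

static void end_read(bool uptodate, bool have_other_mirror, bool *page_error)
{
        if (uptodate || try_repair(have_other_mirror) == 0)
                *page_error = false; /* handled, or being retried */
        else
                *page_error = true; /* we remain responsible */
}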
/*
end_page_writeback(page);
}
if (PageError(page)) {
- ret = ret < 0 ? ret : -EIEIO;
+ ret = ret < 0 ? ret : -EIO;
end_extent_writepage(page, ret, start, page_end);
}
unlock_page(page);
end_page_writeback(p);
if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
end_extent_buffer_writeback(eb);
- ret = -EIEIO;
+ ret = -EIO;
break;
}
offset += PAGE_CACHE_SIZE;
page = eb->pages[i];
wait_on_page_locked(page);
if (!PageUptodate(page))
- ret = -EIEIO;
+ ret = -EIO;
}
return ret;
int mirror_num);
/*
- * When IO fails, either with EIEIO or csum verification fails, we
+ * When IO fails, either with EIO or csum verification fails, we
* try other mirrors that might have a good copy of the data. This
* io_failure_record is used to record state as we go through all the
* mirrors. If another mirror has good data, the page is set up to date
lock_page(page);
if (!PageUptodate(page)) {
unlock_page(page);
- return -EIEIO;
+ return -EIO;
}
}
return 0;
* successful. And we also want to make sure our log doesn't contain
* file extent items pointing to extents that weren't fully written to -
* just like in the non fast fsync path, where we check for the ordered
- * operation's error flag before writing to the log tree and return -EIEIO
+ * operation's error flag before writing to the log tree and return -EIO
* if any of them had this flag set (btrfs_wait_ordered_range) -
* therefore we need to check for errors in the ordered operations,
* which are indicated by ctx.io_err.
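
Schematically, the writeback side records the first error into the context and the fsync side checks it before logging. A rough model, with log_ctx standing in for struct btrfs_log_ctx:

#include <errno.h>

/* Stand-in for struct btrfs_log_ctx; only the error field is modeled. */
struct log_ctx {
        int io_err;
};

/* Writeback completion records the first error it observed. */
static void ordered_done(struct log_ctx *ctx, int err)
{
        if (err && !ctx->io_err)
                ctx->io_err = err; /* e.g. -EIO */
}

/* fsync consults the recorded error before trusting the log tree. */
static int fsync_check(const struct log_ctx *ctx)
{
        return ctx->io_err;
}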
ret = btrfs_end_transaction(trans, root);
}
out:
- return ret > 0 ? -EIEIO : ret;
+ return ret > 0 ? -EIO : ret;
}
static const struct vm_operations_struct btrfs_file_vm_ops = {
btrfs_err(BTRFS_I(inode)->root->fs_info,
"error reading free space cache");
io_ctl_drop_pages(io_ctl);
- return -EIEIO;
+ return -EIO;
}
}
}
"(%Lu) does not match inode (%Lu)\n", *gen,
generation);
io_ctl_unmap_page(io_ctl);
- return -EIEIO;
+ return -EIO;
}
io_ctl->cur += sizeof(u64);
return 0;
printk_ratelimited(KERN_ERR "BTRFS: csum mismatch on free "
"space cache\n");
io_ctl_unmap_page(io_ctl);
- return -EIEIO;
+ return -EIO;
}
return 0;
nolock = btrfs_is_free_space_inode(inode);
if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
- ret = -EIEIO;
+ ret = -EIO;
goto out;
}
kunmap_atomic(kaddr);
if (csum_expected == 0)
return 0;
- return -EIEIO;
+ return -EIO;
}
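
The check above treats a stored checksum of zero as "nothing to verify" and any other mismatch as -EIO. A compressed sketch of that policy, assuming (as the code suggests) that an expected csum of zero means none was recorded:

#include <errno.h>
#include <stdint.h>

/* Sketch of the policy only; check_csum is an invented helper. */
static int check_csum(uint32_t computed, uint32_t expected)
{
        if (computed == expected)
                return 0;
        if (expected == 0)
                return 0; /* no recorded csum, nothing to compare */
        return -EIO; /* genuine mismatch */
}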
/*
goto again;
}
if (!PageUptodate(page)) {
- ret = -EIEIO;
+ ret = -EIO;
goto out_unlock;
}
}
if (em->start > start || extent_map_end(em) <= start) {
btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
em->start, em->len, start, len);
- err = -EIEIO;
+ err = -EIO;
goto out;
}
failed_mirror);
if (!ret) {
free_io_failure(inode, failrec);
- return -EIEIO;
+ return -EIO;
}
if (failed_bio->bi_vcnt > 1)
0, isector, repair_endio, repair_arg);
if (!bio) {
free_io_failure(inode, failrec);
- return -EIEIO;
+ return -EIO;
}
btrfs_debug(BTRFS_I(inode)->root->fs_info,
ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
&map_length, NULL, 0);
if (ret)
- return -EIEIO;
+ return -EIO;
if (map_length >= orig_bio->bi_iter.bi_size) {
bio = orig_bio;
if (ret) {
if (ret == -ENOMEM)
ret = VM_FAULT_OOM;
- else /* -ENOSPC, -EIEIO, etc */
+ else /* -ENOSPC, -EIO, etc */
ret = VM_FAULT_SIGBUS;
if (reserved)
goto out;
if (!PageUptodate(page)) {
unlock_page(page);
page_cache_release(page);
- ret = -EIEIO;
+ ret = -EIO;
break;
}
}
if (ret != LZO_E_OK) {
printk(KERN_DEBUG "BTRFS: deflate in loop returned %d\n",
ret);
- ret = -EIEIO;
+ ret = -EIO;
goto out;
}
break;
if (page_in_index + 1 >= total_pages_in) {
- ret = -EIEIO;
+ ret = -EIO;
goto done;
}
kunmap(pages_in[page_in_index - 1]);
if (ret != LZO_E_OK) {
printk(KERN_WARNING "BTRFS: decompress failed\n");
- ret = -EIEIO;
+ ret = -EIO;
break;
}
ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
if (ret != LZO_E_OK) {
printk(KERN_WARNING "BTRFS: decompress failed!\n");
- ret = -EIEIO;
+ ret = -EIO;
goto out;
}
if (out_len < start_byte) {
- ret = -EIEIO;
+ ret = -EIO;
goto out;
}
btrfs_start_ordered_extent(inode, ordered, 1);
end = ordered->file_offset;
if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
- ret = -EIEIO;
+ ret = -EIO;
btrfs_put_ordered_extent(ordered);
if (ret || end == 0 || end == start)
break;
/* OK, we have read all the stripes we need to. */
if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
- err = -EIEIO;
+ err = -EIO;
rbio_orig_end_io(rbio, err, 0);
return;
return;
cleanup:
- rbio_orig_end_io(rbio, -EIEIO, 0);
+ rbio_orig_end_io(rbio, -EIO, 0);
}
/*
}
/*
- * returns -EIEIO if we had too many failures
+ * returns -EIO if we had too many failures
*/
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
rbio->failb = failed;
atomic_inc(&rbio->error);
} else {
- ret = -EIEIO;
+ ret = -EIO;
}
out:
spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
int failed = find_bio_stripe(rbio, bio);
if (failed < 0)
- return -EIEIO;
+ return -EIO;
return fail_rbio_index(rbio, failed);
}
cleanup:
- rbio_orig_end_io(rbio, -EIEIO, 0);
+ rbio_orig_end_io(rbio, -EIO, 0);
}
static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
return 0;
cleanup:
- rbio_orig_end_io(rbio, -EIEIO, 0);
- return -EIEIO;
+ rbio_orig_end_io(rbio, -EIO, 0);
+ return -EIO;
finish:
validate_rbio_for_rmw(rbio);
* a bad data or Q stripe.
* TODO, we should redo the xor here.
*/
- err = -EIEIO;
+ err = -EIO;
goto cleanup;
}
/*
if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
if (rbio->bbio->raid_map[faila] ==
RAID5_P_STRIPE) {
- err = -EIEIO;
+ err = -EIO;
goto cleanup;
}
/*
return;
if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
- rbio_orig_end_io(rbio, -EIEIO, 0);
+ rbio_orig_end_io(rbio, -EIO, 0);
else
__raid_recover_end_io(rbio);
}
cleanup:
if (rbio->operation == BTRFS_RBIO_READ_REBUILD)
- rbio_orig_end_io(rbio, -EIEIO, 0);
- return -EIEIO;
+ rbio_orig_end_io(rbio, -EIO, 0);
+ return -EIO;
}
/*
if (generic_io)
btrfs_put_bbio(bbio);
kfree(rbio);
- return -EIEIO;
+ return -EIO;
}
if (generic_io) {
eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
if (!eb || !extent_buffer_uptodate(eb)) {
- ret = (!eb) ? -ENOMEM : -EIEIO;
+ ret = (!eb) ? -ENOMEM : -EIO;
free_extent_buffer(eb);
break;
}
eb = read_tree_block(root, bytenr, ptr_gen);
if (!eb || !extent_buffer_uptodate(eb)) {
free_extent_buffer(eb);
- return -EIEIO;
+ return -EIO;
}
BUG_ON(btrfs_header_level(eb) != i - 1);
path->nodes[i - 1] = eb;
eb = read_tree_block(root, bytenr, generation);
if (!eb || !extent_buffer_uptodate(eb)) {
free_extent_buffer(eb);
- err = -EIEIO;
+ err = -EIO;
goto next;
}
btrfs_tree_lock(eb);
block->key.offset);
if (!eb || !extent_buffer_uptodate(eb)) {
free_extent_buffer(eb);
- return -EIEIO;
+ return -EIO;
}
WARN_ON(btrfs_header_level(eb) != block->level);
if (block->level == 0)
page_cache_release(page);
btrfs_delalloc_release_metadata(inode,
PAGE_CACHE_SIZE);
- ret = -EIEIO;
+ ret = -EIO;
goto out;
}
}
* later scrub will find the bad sector again and that
* there's no dirty page in memory, then.
*/
- ret = -EIEIO;
+ ret = -EIO;
goto out;
}
ret = repair_io_failure(inode, offset, PAGE_SIZE,
return 1;
}
- return -EIEIO;
+ return -EIO;
}
static void scrub_fixup_nodatasum(struct btrfs_work *work)
/*
* scrub_handle_errored_block gets called when either verification of the
- * pages failed or the bio failed to read, e.g. with EIEIO. In the latter
+ * pages failed or the bio failed to read, e.g. with EIO. In the latter
* case, this function handles all pages in the bio, even though only one
* may be bad.
* The goal of this function is to repair the errored block by using the
&mapped_length, &bbio, 0, 1);
if (ret || !bbio || mapped_length < sublen) {
btrfs_put_bbio(bbio);
- return -EIEIO;
+ return -EIO;
}
recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
printk_ratelimited(KERN_WARNING "BTRFS: "
"scrub_repair_page_from_good_copy(bdev == NULL) "
"is unexpected!\n");
- return -EIEIO;
+ return -EIO;
}
bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
if (!bio)
- return -EIEIO;
+ return -EIO;
bio->bi_bdev = page_bad->dev->bdev;
bio->bi_iter.bi_sector = page_bad->physical >> 9;
ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
if (PAGE_SIZE != ret) {
bio_put(bio);
- return -EIEIO;
+ return -EIO;
}
if (btrfsic_submit_bio_wait(WRITE, bio)) {
&sblock_bad->sctx->dev_root->fs_info->
dev_replace.num_write_errors);
bio_put(bio);
- return -EIEIO;
+ return -EIO;
}
bio_put(bio);
}
bio_put(sbio->bio);
sbio->bio = NULL;
mutex_unlock(&wr_ctx->wr_lock);
- return -EIEIO;
+ return -EIO;
}
scrub_wr_submit(sctx);
goto again;
*/
printk_ratelimited(KERN_WARNING
"BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
- bio_endio(sbio->bio, -EIEIO);
+ bio_endio(sbio->bio, -EIO);
} else {
btrfsic_submit_bio(READ, sbio->bio);
}
if (sbio->page_count < 1) {
bio_put(sbio->bio);
sbio->bio = NULL;
- return -EIEIO;
+ return -EIO;
}
scrub_submit(sctx);
goto again;
break;
if (is_dev_replace &&
atomic64_read(&dev_replace->num_write_errors) > 0) {
- ret = -EIEIO;
+ ret = -EIO;
break;
}
if (sctx->stat.malloc_errors > 0) {
struct btrfs_root *root = sctx->dev_root;
if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
- return -EIEIO;
+ return -EIO;
/* Seed devices of a new filesystem have their own generation. */

if (scrub_dev->fs_devices != root->fs_info->fs_devices)
if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
mutex_unlock(&fs_info->scrub_lock);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- return -EIEIO;
+ return -EIO;
}
btrfs_dev_replace_lock(&fs_info->dev_replace);
goto again;
}
if (!PageUptodate(page)) {
- ret = -EIEIO;
+ ret = -EIO;
goto next_page;
}
}
dev = sctx->wr_ctx.tgtdev;
if (!dev)
- return -EIEIO;
+ return -EIO;
if (!dev->bdev) {
printk_ratelimited(KERN_WARNING
"BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
- return -EIEIO;
+ return -EIO;
}
bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
if (!bio) {
leave_with_eio:
bio_put(bio);
btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
- return -EIEIO;
+ return -EIO;
}
if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
if (ret < 0)
goto out;
if (ret == 0) {
- ret = -EIEIO;
+ ret = -EIO;
goto out;
}
pos += ret;
if (ret < 0)
goto out;
if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
- ret = -EIEIO;
+ ret = -EIO;
goto out;
}
if (!backref_ctx->found_itself) {
/* found a bug in backref code? */
- ret = -EIEIO;
+ ret = -EIO;
btrfs_err(sctx->send_root->fs_info, "did not find backref in "
"send_root. inode=%llu, offset=%llu, "
"disk_byte=%llu found extent=%llu",
if (!PageUptodate(page)) {
unlock_page(page);
page_cache_release(page);
- ret = -EIEIO;
+ ret = -EIO;
break;
}
}
switch (errno) {
case -EIO:
- EIEIO;
errstr = "IO failure";
break;
case -ENOMEM:
* This means that error recovery at the call site is limited to freeing
* any local memory allocations and passing the error code up without
* further cleanup. The transaction should complete as it normally would
- * in the call path but will return -EIEIO.
+ * in the call path but will return -EIO.
*
* We'll complete the cleanup in btrfs_end_transaction and
* btrfs_commit_transaction.
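
As a rough model of that contract: once the transaction is marked aborted, each later step just observes the flag and returns -EIO without doing its own cleanup, leaving the real teardown to the end/commit path. Sketch only:

#include <errno.h>

struct trans {
        int aborted;
};

/*
 * After an abort every subsequent step short-circuits to -EIO; the
 * actual cleanup is centralized in the end/commit path rather than
 * duplicated at each call site.
 */
static int do_step(struct trans *t)
{
        if (t->aborted)
                return -EIO;
        /* ... normal work; on failure set t->aborted and return ... */
        return 0;
}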
if (trans->aborted ||
test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
wake_up_process(info->transaction_kthread);
- err = -EIEIO;
+ err = -EIO;
}
assert_qgroups_uptodate(trans);
}
if (errors && !werr)
- werr = -EIEIO;
+ werr = -EIO;
return werr;
}
inode = read_one_inode(root, key->objectid);
if (!inode) {
- ret = -EIEIO;
+ ret = -EIO;
goto out;
}
inode = read_one_inode(root, location.objectid);
if (!inode) {
- ret = -EIEIO;
+ ret = -EIO;
goto out;
}
inode = read_one_inode(root, inode_objectid);
if (!inode) {
- ret = -EIEIO;
+ ret = -EIO;
goto out;
}
btrfs_release_path(path);
inode = read_one_inode(root, key.offset);
if (!inode)
- return -EIEIO;
+ return -EIO;
ret = fixup_inode_link_count(trans, root, inode);
iput(inode);
inode = read_one_inode(root, objectid);
if (!inode)
- return -EIEIO;
+ return -EIO;
key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
key.type = BTRFS_ORPHAN_ITEM_KEY;
dir = read_one_inode(root, dirid);
if (!dir) {
iput(inode);
- return -EIEIO;
+ return -EIO;
}
ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
dir = read_one_inode(root, key->objectid);
if (!dir)
- return -EIEIO;
+ return -EIO;
name_len = btrfs_dir_name_len(eb, di);
name = kmalloc(name_len, GFP_NOFS);
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
if (verify_dir_item(root, eb, di))
- return -EIEIO;
+ return -EIO;
name_len = btrfs_dir_name_len(eb, di);
ret = replay_one_name(trans, root, path, eb, di, key);
if (ret)
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
if (verify_dir_item(root, eb, di)) {
- ret = -EIEIO;
+ ret = -EIO;
goto out;
}
inode = read_one_inode(root, location.objectid);
if (!inode) {
kfree(name);
- return -EIEIO;
+ return -EIO;
}
ret = link_to_fixup_dir(trans, root,
return ret;
if (ordered_io_err) {
- ctx->io_err = -EIEIO;
+ ctx->io_err = -EIO;
return 0;
}
num_stripes = btrfs_stack_chunk_num_stripes(chunk);
len += btrfs_chunk_item_size(num_stripes);
} else {
- ret = -EIEIO;
+ ret = -EIO;
break;
}
if (key.objectid == chunk_objectid &&
* the last one to the array of stripes. For READ, it also
* needs to be supported using the same mirror number.
* If the requested block is not left of the left cursor,
- * EIEIO is returned. This can happen because btrfs_num_copies()
+ * EIO is returned. This can happen because btrfs_num_copies()
* returns one more in the dev-replace case.
*/
u64 tmp_length = *length;
* mirror, that means that the requested area
* is not left of the left cursor
*/
- ret = -EIEIO;
+ ret = -EIO;
btrfs_put_bbio(tmp_bbio);
goto out;
}
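
My reading of the comment above, sketched loosely: while a replace runs, the target device only holds data the copy cursor has already passed, so a mapping that lands at or beyond the cursor cannot be served from the target and fails with -EIO (cursor_left is an invented name):

#include <errno.h>

/* Illustrative sketch, not the btrfs mapping code. */
static int map_to_replace_target(unsigned long long block,
                                 unsigned long long cursor_left)
{
        if (block < cursor_left)
                return 0; /* already copied: safe to read from target */
        return -EIO; /* not yet copied to the target device */
}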
physical_to_patch_in_first_stripe = physical_of_found;
} else {
WARN_ON(1);
- ret = -EIEIO;
+ ret = -EIO;
btrfs_put_bbio(tmp_bbio);
goto out;
}
if (!em) {
printk(KERN_ERR "BTRFS: couldn't find em for chunk %Lu\n",
chunk_start);
- return -EIEIO;
+ return -EIO;
}
if (em->start != chunk_start) {
printk(KERN_ERR "BTRFS: bad chunk start, em=%Lu, wanted=%Lu\n",
em->start, chunk_start);
free_extent_map(em);
- return -EIEIO;
+ return -EIO;
}
map = (struct map_lookup *)em->bdev;
if (err) {
atomic_inc(&bbio->error);
- if (err == -EIEIO || err == -EREMOTEIO) {
+ if (err == -EIO || err == -EREMOTEIO) {
unsigned int stripe_index =
btrfs_io_bio(bio)->stripe_index;
* beyond the tolerance of the btrfs bio
*/
if (atomic_read(&bbio->error) > bbio->max_errors) {
- err = -EIEIO;
+ err = -EIO;
} else {
/*
* this bio is actually up to date, we didn't
struct btrfs_pending_bios *pending_bios;
if (device->missing || !device->bdev) {
- bio_endio(bio, -EIEIO);
+ bio_endio(bio, -EIO);
return;
}
btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
bio->bi_iter.bi_sector = logical >> 9;
- btrfs_end_bbio(bbio, bio, -EIEIO);
+ btrfs_end_bbio(bbio, bio, -EIO);
}
}
uuid, NULL);
if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
free_extent_map(em);
- return -EIEIO;
+ return -EIO;
}
if (!map->stripes[i].dev) {
map->stripes[i].dev =
devid, uuid);
if (!map->stripes[i].dev) {
free_extent_map(em);
- return -EIEIO;
+ return -EIO;
}
}
map->stripes[i].dev->in_fs_metadata = 1;
device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
if (!device) {
if (!btrfs_test_opt(root, DEGRADED))
- return -EIEIO;
+ return -EIO;
btrfs_warn(root->fs_info, "devid %llu missing", devid);
device = add_missing_dev(root, fs_devices, devid, dev_uuid);
return -ENOMEM;
} else {
if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
- return -EIEIO;
+ return -EIO;
if(!device->bdev && !device->missing) {
/*
if (ret)
break;
} else {
- ret = -EIEIO;
+ ret = -EIO;
break;
}
array_ptr += len;
printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
len, cur_offset);
free_extent_buffer(sb);
- return -EIEIO;
+ return -EIO;
}
int btrfs_read_chunk_tree(struct btrfs_root *root)
mutex_unlock(&root->fs_info->chunk_mutex);
}
-#ifndef EIEIO
-static inline int eieio(const char *file, int line, const char *function) {
- printk(KERN_DEBUG "EIO calculated at %s:%d in %s\n", file, line, function);
- return EIO;
-}
-#define EIEIO (eieio(__FILE__, __LINE__, __FUNCTION__))
-#endif
#endif
if (Z_OK != zlib_deflateInit(&workspace->strm, 3)) {
printk(KERN_WARNING "BTRFS: deflateInit failed\n");
- ret = -EIEIO;
+ ret = -EIO;
goto out;
}
printk(KERN_DEBUG "BTRFS: deflate in loop returned %d\n",
ret);
zlib_deflateEnd(&workspace->strm);
- ret = -EIEIO;
+ ret = -EIO;
goto out;
}
zlib_deflateEnd(&workspace->strm);
if (ret != Z_STREAM_END) {
- ret = -EIEIO;
+ ret = -EIO;
goto out;
}
if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
printk(KERN_WARNING "BTRFS: inflateInit failed\n");
- return -EIEIO;
+ return -EIO;
}
while (workspace->strm.total_in < srclen) {
ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
}
}
if (ret != Z_STREAM_END)
- ret = -EIEIO;
+ ret = -EIO;
else
ret = 0;
done:
if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
printk(KERN_WARNING "BTRFS: inflateInit failed\n");
- return -EIEIO;
+ return -EIO;
}
while (bytes_left > 0) {
total_out = workspace->strm.total_out;
if (total_out == buf_start) {
- ret = -EIEIO;
+ ret = -EIO;
break;
}
}
if (ret != Z_STREAM_END && bytes_left != 0)
- ret = -EIEIO;
+ ret = -EIO;
else
ret = 0;
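
All of the zlib hunks follow one mapping: a clean Z_STREAM_END finish is success, anything else becomes -EIO (the decompress paths additionally require that no requested bytes remain). In userspace zlib terms, the core conversion is just:

#include <errno.h>
#include <zlib.h>

/* Userspace zlib uses the same status codes: only a clean stream end
 * counts as success; every other outcome is surfaced as -EIO. */
static int zlib_status_to_errno(int zret)
{
        return zret == Z_STREAM_END ? 0 : -EIO;
}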