req->ki_ctx = ctx;
req->ki_cancel = NULL;
req->ki_retry = NULL;
- req->ki_user_obj = NULL;
+ req->ki_obj.user = NULL;
/* Check if the completion queue has enough free space to
 * accept an event from this io.
 */
{
req->ki_ctx = NULL;
req->ki_filp = NULL;
- req->ki_user_obj = NULL;
+ req->ki_obj.user = NULL;
kmem_cache_free(kiocb_cachep, req);
ctx->reqs_active--;
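
The two hunks above are the request setup and teardown paths (apparently __aio_get_req() and really_put_req() in trees of this vintage), which initialize and clear the renamed field. The companion include/linux/aio.h hunk is not part of this excerpt, but from the two member names the code uses, ki_obj.user and ki_obj.tsk, the struct kiocb side of the patch is presumably a union along these lines:

    /* Presumed include/linux/aio.h counterpart (not shown in this
     * excerpt): the old void *ki_user_obj slot becomes a union, since
     * it holds a userspace iocb pointer for async requests but a task
     * pointer for synchronous kiocbs. */
    union {
        void                    *user;  /* async: userland's struct iocb */
        struct task_struct      *tsk;   /* sync: submitter, woken on completion */
    } ki_obj;

The rename buys no new behavior; it documents the dual use that the old untyped ki_user_obj hid, and the .user/.tsk accessors make each call site say which case it is handling.
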
/* sync iocbs are easy: they can only ever be executing from a
 * single context. */
if (is_sync_kiocb(iocb)) {
kiocbSetKicked(iocb);
- wake_up_process(iocb->ki_user_obj);
+ wake_up_process(iocb->ki_obj.tsk);
return;
}
spin_unlock_irq(&ctx->ctx_lock);
}
/* sync iocbs put the task here for us */
- wake_up_process(iocb->ki_user_obj);
+ wake_up_process(iocb->ki_obj.tsk);
return ret;
}
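
Both wake_up_process() calls above, in kick_iocb() and in aio_complete()'s synchronous path, depend on the submitter having parked its task pointer in the kiocb beforehand ("sync iocbs put the task here for us"). The storing side would be init_sync_kiocb(), which this excerpt does not show; for context, an abridged sketch of the matching wait side as it looked in fs/aio.c of this era:

    /* Abridged sketch of the sync-iocb wait path: the submitting task
     * sleeps until ki_users drops to zero; the wake_up_process() on
     * iocb->ki_obj.tsk above is what makes schedule() return. */
    ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
    {
        while (iocb->ki_users) {
            set_current_state(TASK_UNINTERRUPTIBLE);
            if (!iocb->ki_users)
                break;
            schedule();
        }
        __set_current_state(TASK_RUNNING);
        return iocb->ki_user_data;
    }
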
event = aio_ring_event(info, tail, KM_IRQ0);
tail = (tail + 1) % info->nr;
- event->obj = (u64)(unsigned long)iocb->ki_user_obj;
+ event->obj = (u64)(unsigned long)iocb->ki_obj.user;
event->data = iocb->ki_user_data;
event->res = res;
event->res2 = res2;
dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
- ctx, tail, iocb, iocb->ki_user_obj, iocb->ki_user_data,
+ ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
res, res2);
/* after flagging the request as done, we
 * must never even look at it again
 */
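
In the event-fill lines above, the (u64)(unsigned long) double cast is dictated by the completion-ring ABI: io_event uses fixed-width fields so 32-bit and 64-bit userspace see one layout, and the pointer is widened through unsigned long first to avoid a pointer-to-wider-integer cast warning on 32-bit builds. For reference, the layout from include/linux/aio_abi.h:

    /* Completion ring entry: obj carries the userspace address of the
     * submitted iocb (ki_obj.user), data carries the caller's aio_data
     * cookie (ki_user_data); both are 64 bits regardless of the native
     * pointer size. */
    struct io_event {
        __u64   data;   /* the data field from the iocb */
        __u64   obj;    /* what iocb this event came from */
        __s64   res;    /* result code for this event */
        __s64   res2;   /* secondary result */
    };
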
goto out_put_req;
}
- req->ki_user_obj = user_iocb;
+ req->ki_obj.user = user_iocb;
req->ki_user_data = iocb->aio_data;
req->ki_pos = iocb->aio_offset;
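
This hunk is in the submission path (io_submit_one()): ki_obj.user records the userspace address of the iocb the request was built from, and ki_user_data snapshots the caller's aio_data cookie. The userspace structure these come from, abridged from include/linux/aio_abi.h (endianness padding around aio_key omitted):

    /* Abridged submission ABI: aio_data is opaque to the kernel and is
     * echoed back in io_event.data; aio_key is written back to the
     * user's iocb at submit time and later identifies the request,
     * together with the iocb's own address, for cancellation. */
    struct iocb {
        __u64   aio_data;       /* returned verbatim in io_event.data */
        __u32   aio_key;        /* kernel-assigned request id */
        __u16   aio_lio_opcode; /* IOCB_CMD_* */
        __s16   aio_reqprio;
        __u32   aio_fildes;
        __u64   aio_buf;
        __u64   aio_nbytes;
        __s64   aio_offset;
        /* reserved fields omitted */
    };
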
/* TODO: use a hash or array, this sucks. */
list_for_each(pos, &ctx->active_reqs) {
struct kiocb *kiocb = list_kiocb(pos);
- if (kiocb->ki_user_obj == iocb && kiocb->ki_key == key)
+ if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
return kiocb;
}
return NULL;
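
lookup_kiocb() matches a request by the pair (userspace iocb address, key): the address is what io_cancel(2) callers pass in, and the key is read back out of the iocb's aio_key field, where io_submit_one() stored it. An abridged sketch of the calling sequence in sys_io_cancel(), reconstructed from same-era sources (elided parts marked):

    /* Abridged sketch, not the verbatim function: recover the key the
     * kernel planted in the user's iocb at submit time, then search the
     * context's active list with lookup_kiocb() above. */
    asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb *iocb,
                                  struct io_event *result)
    {
        u32 key;
        ...
        if (get_user(key, &iocb->aio_key))
            return -EFAULT;
        ...
        kiocb = lookup_kiocb(ctx, iocb, key);
        ...
    }
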
struct io_event tmp;
pr_debug("calling cancel\n");
memset(&tmp, 0, sizeof(tmp));
- tmp.obj = (u64)(unsigned long)kiocb->ki_user_obj;
+ tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
tmp.data = kiocb->ki_user_data;
ret = cancel(kiocb, &tmp);
if (!ret) {
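
Seen from userspace, the effect of ki_obj.user is a pointer round trip: the address of the submitted struct iocb comes back verbatim in io_event.obj, both for normal completions and for the synthesized cancellation event built in tmp above, so callers can map events back to requests by address. A hypothetical demonstration against the raw syscalls (file path, sizes, and cookie value are illustrative; error checking omitted):

    /* Hypothetical round-trip demo: ev.obj should equal &cb, the very
     * value the kernel stored in ki_obj.user, and ev.data should equal
     * the aio_data cookie stored in ki_user_data. */
    #include <linux/aio_abi.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <fcntl.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
        aio_context_t ctx = 0;
        struct iocb cb;
        struct iocb *cbs[1] = { &cb };
        struct io_event ev;
        char buf[64];
        int fd = open("/etc/hostname", O_RDONLY);   /* any readable file */

        syscall(SYS_io_setup, 128, &ctx);

        memset(&cb, 0, sizeof(cb));
        cb.aio_lio_opcode = IOCB_CMD_PREAD;
        cb.aio_fildes = fd;
        cb.aio_buf = (unsigned long)buf;
        cb.aio_nbytes = sizeof(buf);
        cb.aio_data = 0x1234;   /* opaque cookie, echoed in ev.data */

        syscall(SYS_io_submit, ctx, 1, cbs);
        syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL);

        printf("obj=%#llx (&cb=%p) data=%#llx res=%lld\n",
               (unsigned long long)ev.obj, (void *)&cb,
               (unsigned long long)ev.data, (long long)ev.res);

        syscall(SYS_io_destroy, ctx);
        return 0;
    }
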