Sets the "nice" value of task p to the given value.
int setscheduler(pid_t pid, int policy, struct sched_param *param)
Sets the scheduling policy and parameters for the given pid.
-void set_cpus_allowed(task_t *p, unsigned long new_mask)
+int set_cpus_allowed(task_t *p, unsigned long new_mask)
Sets a given task's CPU affinity and migrates it to a proper cpu.
Callers must hold a valid reference to the task and ensure that the
task does not exit prematurely. No locks may be held during the call.
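As a concrete illustration of those rules, the following is a minimal caller sketch (a hypothetical helper, not part of this patch): it takes a task reference under tasklist_lock, drops the lock before the call, and checks the new return value. It assumes find_task_by_pid(), get_task_struct() and put_task_struct() as they exist in this tree.

static int bind_pid_to_mask(pid_t pid, unsigned long mask)
{
	task_t *p;
	int ret;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (!p) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	get_task_struct(p);		/* keep the task from exiting under us */
	read_unlock(&tasklist_lock);	/* no locks held across the call */

	ret = set_cpus_allowed(p, mask);	/* -EINVAL if no online cpu in mask */
	put_task_struct(p);
	return ret;
}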
#define PF_LESS_THROTTLE 0x01000000 /* Throttle me less: I clean memory */
#ifdef CONFIG_SMP
-extern void set_cpus_allowed(task_t *p, unsigned long new_mask);
+extern int set_cpus_allowed(task_t *p, unsigned long new_mask);
#else
-# define set_cpus_allowed(p, new_mask) do { } while (0)
+static inline int set_cpus_allowed(task_t *p, unsigned long new_mask)
+{
+ return 0;
+}
#endif
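Turning the UP stub from a statement macro into a static inline is what makes the new return value usable on both configurations; with the old do { } while (0) macro, a caller fragment like the following (hypothetical, shown only to illustrate the point) would not even compile on UP:

	if (set_cpus_allowed(p, new_mask) < 0)
		printk(KERN_WARNING "could not bind task %d\n", p->pid);

Returning 0 unconditionally is correct on UP, since there is only one CPU for the task to run on.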
#ifdef CONFIG_NUMA
if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
return -EFAULT;
- new_mask &= cpu_online_map;
- if (!new_mask)
- return -EINVAL;
-
read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
!capable(CAP_SYS_NICE))
goto out_unlock;
- retval = 0;
- set_cpus_allowed(p, new_mask);
+ retval = set_cpus_allowed(p, new_mask);
out_unlock:
put_task_struct(p);
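With the mask validation moved into set_cpus_allowed(), the error still reaches user space through retval. A hypothetical user-space probe of that path, using the raw syscall to match the (pid, len, unsigned long *) interface visible in this hunk (glibc wrappers of this era differ, and the example assumes __NR_sched_setaffinity is exported to user space), might look like:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned long mask = 0;	/* empty mask: cannot contain an online cpu */

	if (syscall(__NR_sched_setaffinity, 0, sizeof(mask), &mask) < 0)
		perror("sched_setaffinity");	/* expected: Invalid argument */
	return 0;
}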
* task must not exit() & deallocate itself prematurely. The
* call is not atomic; no spinlocks may be held.
*/
-void set_cpus_allowed(task_t *p, unsigned long new_mask)
+int set_cpus_allowed(task_t *p, unsigned long new_mask)
{
unsigned long flags;
migration_req_t req;
runqueue_t *rq;
-#if 0 /* FIXME: Grab cpu_lock, return error on this case. --RR */
- new_mask &= cpu_online_map;
- if (!new_mask)
- BUG();
-#endif
+ if (any_online_cpu(new_mask) == NR_CPUS)
+ return -EINVAL;
rq = task_rq_lock(p, &flags);
p->cpus_allowed = new_mask;
*/
if (new_mask & (1UL << task_cpu(p))) {
task_rq_unlock(rq, &flags);
- return;
+ return 0;
}
/*
* If the task is not on a runqueue (and not running), then
if (!p->array && !task_running(rq, p)) {
set_task_cpu(p, any_online_cpu(p->cpus_allowed));
task_rq_unlock(rq, &flags);
- return;
+ return 0;
}
init_completion(&req.done);
req.task = p;
wake_up_process(rq->migration_thread);
wait_for_completion(&req.done);
+ return 0;
}
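The check that used to sit disabled behind #if 0 now returns -EINVAL, so in-kernel callers can detect a mask containing no online CPUs and recover rather than oops. A sketch of such a caller (hypothetical helper, not part of this patch; it assumes the caller already holds a task reference and holds no locks):

static void pin_or_fallback(task_t *p, int cpu)
{
	if (set_cpus_allowed(p, 1UL << cpu) < 0) {
		printk(KERN_INFO "cpu %d not online, leaving task %d unbound\n",
		       cpu, p->pid);
		set_cpus_allowed(p, cpu_online_map);
	}
}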
/* Move (not current) task off this cpu, onto dest cpu. */