cgroup/cpuset: Fix error handling in remote_partition_disable()
[ Upstream commit 8bf450f3aec3d1bbd725d179502c64b8992588e4 ]
When remote_partition_disable() is called to disable a remote partition,
it always sets the partition to an invalid partition state. It should
only do so if an error code (prs_err) has been set. Correct that and
add proper error code in places where remote_partition_disable() is
called due to error.
Fixes: 181c8e091a ("cgroup/cpuset: Introduce remote partition")
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Committed by Greg Kroah-Hartman · parent 40bc55e4fc · commit 9701dcbf5f
@@ -1383,6 +1383,7 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
 	list_add(&cs->remote_sibling, &remote_children);
 	spin_unlock_irq(&callback_lock);
 	update_unbound_workqueue_cpumask(isolcpus_updated);
+	cs->prs_err = 0;
 
 	/*
 	 * Proprogate changes in top_cpuset's effective_cpus down the hierarchy.
@@ -1413,9 +1414,11 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
 	list_del_init(&cs->remote_sibling);
 	isolcpus_updated = partition_xcpus_del(cs->partition_root_state,
 					       NULL, tmp->new_cpus);
-	cs->partition_root_state = -cs->partition_root_state;
-	if (!cs->prs_err)
-		cs->prs_err = PERR_INVCPUS;
+	if (cs->prs_err)
+		cs->partition_root_state = -cs->partition_root_state;
+	else
+		cs->partition_root_state = PRS_MEMBER;
+
 	reset_partition_data(cs);
 	spin_unlock_irq(&callback_lock);
 	update_unbound_workqueue_cpumask(isolcpus_updated);
@@ -1448,8 +1451,10 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
 
 	WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
 
-	if (cpumask_empty(newmask))
+	if (cpumask_empty(newmask)) {
+		cs->prs_err = PERR_CPUSEMPTY;
 		goto invalidate;
+	}
 
 	adding = cpumask_andnot(tmp->addmask, newmask, cs->effective_xcpus);
 	deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, newmask);
@@ -1459,10 +1464,15 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
 	 * not allocated to other partitions and there are effective_cpus
 	 * left in the top cpuset.
 	 */
-	if (adding && (!capable(CAP_SYS_ADMIN) ||
-		       cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
-		       cpumask_subset(top_cpuset.effective_cpus, tmp->addmask)))
-		goto invalidate;
+	if (adding) {
+		if (!capable(CAP_SYS_ADMIN))
+			cs->prs_err = PERR_ACCESS;
+		else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
+			 cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
+			cs->prs_err = PERR_NOCPUS;
+		if (cs->prs_err)
+			goto invalidate;
+	}
 
 	spin_lock_irq(&callback_lock);
 	if (adding)
@@ -1578,7 +1588,7 @@ static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
 * The partcmd_update command is used by update_cpumasks_hier() with newmask
 * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
 * by update_cpumask() with NULL newmask. In both cases, the callers won't
- * check for error and so partition_root_state and prs_error will be updated
+ * check for error and so partition_root_state and prs_err will be updated
 * directly.
 */
 static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
@@ -3726,6 +3736,7 @@ retry:
 
 	if (remote && cpumask_empty(&new_cpus) &&
 	    partition_is_populated(cs, NULL)) {
+		cs->prs_err = PERR_HOTPLUG;
 		remote_partition_disable(cs, tmp);
 		compute_effective_cpumask(&new_cpus, cs, parent);
 		remote = false;
|
|||||||
Reference in New Issue
Block a user