sched: Fix double_rq_lock() compile warning
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 0cc6d77e55
commit b78bb868c5

1 changed file with 2 additions and 2 deletions
@@ -119,8 +119,6 @@
  */
 #define RUNTIME_INF ((u64)~0ULL)
 
-static void double_rq_lock(struct rq *rq1, struct rq *rq2);
-
 static inline int rt_policy(int policy)
 {
 	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
@@ -1695,6 +1693,8 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 #ifdef CONFIG_PREEMPT
 
+static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
 /*
  * fair double_lock_balance: Safely acquires both rq->locks in a fair
  * way at the expense of forcing extra atomic operations in all
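The hunks above move the forward declaration of double_rq_lock() from the top of the file into the #ifdef CONFIG_PREEMPT block, just ahead of the "fair double_lock_balance" code shown in the context. A plausible reading, not spelled out in the commit message, is that the unconditional declaration left a static function declared but never defined on configurations that compile out its definition, which gcc -Wall flags. The standalone sketch below uses hypothetical names and a made-up MY_OPTION macro to illustrate that warning class and how scoping the declaration to the same conditional as its user avoids it:

/*
 * Standalone sketch, not kernel code: hypothetical helper()/caller()
 * and a made-up MY_OPTION config macro.  If the forward declaration of
 * helper() sat above the #ifdef while its definition and only caller
 * lived inside it, a build without -DMY_OPTION would be left with a
 * static declaration that is never defined, and gcc -Wall reports
 * "'helper' declared 'static' but never defined".  Declaring it inside
 * the same conditional, next to its first use, avoids that.
 */
#ifdef MY_OPTION

static void helper(void);	/* declaration kept with its only user */

static void caller(void)
{
	helper();		/* first use comes before the definition */
}

static void helper(void)
{
	/* real work would go here */
}

#endif /* MY_OPTION */

int main(void)
{
#ifdef MY_OPTION
	caller();
#endif
	return 0;
}

With MY_OPTION undefined the translation unit contains no dangling static declaration, and with it defined the declaration still precedes the call in caller(), so both configurations build cleanly with -Wall.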