
Commit 61c62f5

arter97 authored and hoang1007 committed
block: import ssg from dc994bf8fcc71419426e3f88d2908aa990b9d3ab
squash of:
  block: import ssg from G998USQU5CVDB
  block: update ssg from S908BXXU2AVF1
  block: ssg: return ELEVATOR_DISCARD_MERGE if possible
  block: ssg: Fix lock inversion between ioc lock and bfqd lock

Change-Id: Ie0542c166bdc7df65d3f0527f881c56758a42921
Signed-off-by: Divyanshu-Modi <divyan.m05@gmail.com>
1 parent 66567c0 commit 61c62f5

5 files changed: +1395 -0 lines changed


block/Kconfig.iosched (+14)

@@ -80,6 +80,20 @@ config MQ_IOSCHED_KYBER
 	  synchronous writes, it will self-tune queue depths to achieve that
 	  goal.
 
+config MQ_IOSCHED_SSG
+	tristate "SamSung Generic I/O scheduler"
+	default n
+	---help---
+	  SamSung Generic IO scheduler.
+
+config MQ_IOSCHED_SSG_CGROUP
+	tristate "Control Group for SamSung Generic I/O scheduler"
+	default n
+	depends on BLK_CGROUP
+	depends on MQ_IOSCHED_SSG
+	---help---
+	  Control Group for SamSung Generic IO scheduler.
+
 config IOSCHED_BFQ
 	tristate "BFQ I/O scheduler"
 	default n

block/Makefile (+3)

@@ -24,6 +24,9 @@ obj-$(CONFIG_IOSCHED_DEADLINE)	+= deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
 obj-$(CONFIG_MQ_IOSCHED_DEADLINE)	+= mq-deadline.o
 obj-$(CONFIG_MQ_IOSCHED_KYBER)	+= kyber-iosched.o
+ssg-$(CONFIG_MQ_IOSCHED_SSG)	:= ssg-iosched.o
+ssg-$(CONFIG_MQ_IOSCHED_SSG_CGROUP)	+= ssg-cgroup.o
+obj-$(CONFIG_MQ_IOSCHED_SSG)	+= ssg.o
 bfq-y				:= bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
 obj-$(CONFIG_IOSCHED_BFQ)	+= bfq.o

block/ssg-cgroup.c (new file, +263)

@@ -0,0 +1,263 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Control Group of SamSung Generic I/O scheduler
 *
 * Copyright (C) 2021 Changheun Lee <nanich.lee@samsung.com>
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "ssg-cgroup.h"


static struct blkcg_policy ssg_blkcg_policy;


#define CPD_TO_SSG_BLKCG(_cpd) \
	container_of_safe((_cpd), struct ssg_blkcg, cpd)
#define BLKCG_TO_SSG_BLKCG(_blkcg) \
	CPD_TO_SSG_BLKCG(blkcg_to_cpd((_blkcg), &ssg_blkcg_policy))

#define PD_TO_SSG_BLKG(_pd) \
	container_of_safe((_pd), struct ssg_blkg, pd)
#define BLKG_TO_SSG_BLKG(_blkg) \
	PD_TO_SSG_BLKG(blkg_to_pd((_blkg), &ssg_blkcg_policy))

#define CSS_TO_SSG_BLKCG(css) BLKCG_TO_SSG_BLKCG(css_to_blkcg(css))


static struct blkcg_policy_data *ssg_blkcg_cpd_alloc(gfp_t gfp)
{
	struct ssg_blkcg *ssg_blkcg;

	ssg_blkcg = kzalloc(sizeof(struct ssg_blkcg), gfp);
	if (ZERO_OR_NULL_PTR(ssg_blkcg))
		return NULL;

	return &ssg_blkcg->cpd;
}

static void ssg_blkcg_cpd_init(struct blkcg_policy_data *cpd)
{
	struct ssg_blkcg *ssg_blkcg = CPD_TO_SSG_BLKCG(cpd);

	if (IS_ERR_OR_NULL(ssg_blkcg))
		return;

	ssg_blkcg->max_available_ratio = 100;
}

static void ssg_blkcg_cpd_free(struct blkcg_policy_data *cpd)
{
	struct ssg_blkcg *ssg_blkcg = CPD_TO_SSG_BLKCG(cpd);

	if (IS_ERR_OR_NULL(ssg_blkcg))
		return;

	kfree(ssg_blkcg);
}

static void ssg_blkcg_set_shallow_depth(struct ssg_blkcg *ssg_blkcg,
		struct ssg_blkg *ssg_blkg, struct blk_mq_tags *tags)
{
	unsigned int depth = tags->bitmap_tags.sb.depth;
	unsigned int map_nr = tags->bitmap_tags.sb.map_nr;

	ssg_blkg->max_available_rqs =
		depth * ssg_blkcg->max_available_ratio / 100U;
	ssg_blkg->shallow_depth =
		max_t(unsigned int, 1, ssg_blkg->max_available_rqs / map_nr);
}

static struct blkg_policy_data *ssg_blkcg_pd_alloc(gfp_t gfp,
		struct request_queue *q, struct blkcg *blkcg)
{
	struct ssg_blkg *ssg_blkg;

	ssg_blkg = kzalloc_node(sizeof(struct ssg_blkg), gfp, q->node);
	if (ZERO_OR_NULL_PTR(ssg_blkg))
		return NULL;

	return &ssg_blkg->pd;
}

static void ssg_blkcg_pd_init(struct blkg_policy_data *pd)
{
	struct ssg_blkg *ssg_blkg;
	struct ssg_blkcg *ssg_blkcg;

	ssg_blkg = PD_TO_SSG_BLKG(pd);
	if (IS_ERR_OR_NULL(ssg_blkg))
		return;

	ssg_blkcg = BLKCG_TO_SSG_BLKCG(pd->blkg->blkcg);
	if (IS_ERR_OR_NULL(ssg_blkcg))
		return;

	atomic_set(&ssg_blkg->current_rqs, 0);
	ssg_blkcg_set_shallow_depth(ssg_blkcg, ssg_blkg,
			pd->blkg->q->queue_hw_ctx[0]->sched_tags);
}

static void ssg_blkcg_pd_free(struct blkg_policy_data *pd)
{
	struct ssg_blkg *ssg_blkg = PD_TO_SSG_BLKG(pd);

	if (IS_ERR_OR_NULL(ssg_blkg))
		return;

	kfree(ssg_blkg);
}

unsigned int ssg_blkcg_shallow_depth(struct request_queue *q)
{
	struct blkcg_gq *blkg;
	struct ssg_blkg *ssg_blkg;

	rcu_read_lock();
	blkg = blkg_lookup(css_to_blkcg(blkcg_css()), q);
	ssg_blkg = BLKG_TO_SSG_BLKG(blkg);
	rcu_read_unlock();

	if (IS_ERR_OR_NULL(ssg_blkg))
		return 0;

	if (atomic_read(&ssg_blkg->current_rqs) < ssg_blkg->max_available_rqs)
		return 0;

	return ssg_blkg->shallow_depth;
}

void ssg_blkcg_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;
	struct ssg_blkg *ssg_blkg;
	struct ssg_blkcg *ssg_blkcg;

	rcu_read_lock();
	blkg_for_each_descendant_pre(blkg, pos_css, q->root_blkg) {
		ssg_blkg = BLKG_TO_SSG_BLKG(blkg);
		if (IS_ERR_OR_NULL(ssg_blkg))
			continue;

		ssg_blkcg = BLKCG_TO_SSG_BLKCG(blkg->blkcg);
		if (IS_ERR_OR_NULL(ssg_blkcg))
			continue;

		atomic_set(&ssg_blkg->current_rqs, 0);
		ssg_blkcg_set_shallow_depth(ssg_blkcg, ssg_blkg, hctx->sched_tags);
	}
	rcu_read_unlock();
}

void ssg_blkcg_inc_rq(struct blkcg_gq *blkg)
{
	struct ssg_blkg *ssg_blkg = BLKG_TO_SSG_BLKG(blkg);

	if (IS_ERR_OR_NULL(ssg_blkg))
		return;

	atomic_inc(&ssg_blkg->current_rqs);
}

void ssg_blkcg_dec_rq(struct blkcg_gq *blkg)
{
	struct ssg_blkg *ssg_blkg = BLKG_TO_SSG_BLKG(blkg);

	if (IS_ERR_OR_NULL(ssg_blkg))
		return;

	atomic_dec(&ssg_blkg->current_rqs);
}

static int ssg_blkcg_show_max_available_ratio(struct seq_file *sf, void *v)
{
	struct ssg_blkcg *ssg_blkcg = CSS_TO_SSG_BLKCG(seq_css(sf));

	if (IS_ERR_OR_NULL(ssg_blkcg))
		return -EINVAL;

	seq_printf(sf, "%d\n", ssg_blkcg->max_available_ratio);

	return 0;
}

static int ssg_blkcg_set_max_available_ratio(struct cgroup_subsys_state *css,
		struct cftype *cftype, u64 ratio)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct ssg_blkcg *ssg_blkcg = CSS_TO_SSG_BLKCG(css);
	struct blkcg_gq *blkg;
	struct ssg_blkg *ssg_blkg;

	if (IS_ERR_OR_NULL(ssg_blkcg))
		return -EINVAL;

	if (ratio > 100)
		return -EINVAL;

	spin_lock_irq(&blkcg->lock);
	ssg_blkcg->max_available_ratio = ratio;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		ssg_blkg = BLKG_TO_SSG_BLKG(blkg);
		if (IS_ERR_OR_NULL(ssg_blkg))
			continue;

		ssg_blkcg_set_shallow_depth(ssg_blkcg, ssg_blkg,
				blkg->q->queue_hw_ctx[0]->sched_tags);
	}
	spin_unlock_irq(&blkcg->lock);

	return 0;
}

struct cftype ssg_blkg_files[] = {
	{
		.name = "ssg.max_available_ratio",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = ssg_blkcg_show_max_available_ratio,
		.write_u64 = ssg_blkcg_set_max_available_ratio,
	},

	{} /* terminate */
};

static struct blkcg_policy ssg_blkcg_policy = {
	.legacy_cftypes = ssg_blkg_files,

	.cpd_alloc_fn = ssg_blkcg_cpd_alloc,
	.cpd_init_fn = ssg_blkcg_cpd_init,
	.cpd_free_fn = ssg_blkcg_cpd_free,

	.pd_alloc_fn = ssg_blkcg_pd_alloc,
	.pd_init_fn = ssg_blkcg_pd_init,
	.pd_free_fn = ssg_blkcg_pd_free,
};

int ssg_blkcg_activate(struct request_queue *q)
{
	return blkcg_activate_policy(q, &ssg_blkcg_policy);
}

void ssg_blkcg_deactivate(struct request_queue *q)
{
	blkcg_deactivate_policy(q, &ssg_blkcg_policy);
}

int ssg_blkcg_init(void)
{
	return blkcg_policy_register(&ssg_blkcg_policy);
}

void ssg_blkcg_exit(void)
{
	blkcg_policy_unregister(&ssg_blkcg_policy);
}

block/ssg-cgroup.h (new file, +65)

@@ -0,0 +1,65 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SSG_CGROUP_H
#define SSG_CGROUP_H
#include <linux/blk-cgroup.h>

#if IS_ENABLED(CONFIG_MQ_IOSCHED_SSG_CGROUP)
struct ssg_blkcg {
	struct blkcg_policy_data cpd; /* must be the first member */

	int max_available_ratio;
};

struct ssg_blkg {
	struct blkg_policy_data pd; /* must be the first member */

	atomic_t current_rqs;
	int max_available_rqs;
	unsigned int shallow_depth; /* shallow depth for each tag map to get sched tag */
};

extern int ssg_blkcg_init(void);
extern void ssg_blkcg_exit(void);
extern int ssg_blkcg_activate(struct request_queue *q);
extern void ssg_blkcg_deactivate(struct request_queue *q);
extern unsigned int ssg_blkcg_shallow_depth(struct request_queue *q);
extern void ssg_blkcg_depth_updated(struct blk_mq_hw_ctx *hctx);
extern void ssg_blkcg_inc_rq(struct blkcg_gq *blkg);
extern void ssg_blkcg_dec_rq(struct blkcg_gq *blkg);
#else
int ssg_blkcg_init(void)
{
	return 0;
}
void ssg_blkcg_exit(void)
{
}

int ssg_blkcg_activate(struct request_queue *q)
{
	return 0;
}

void ssg_blkcg_deactivate(struct request_queue *q)
{
}

unsigned int ssg_blkcg_shallow_depth(struct request_queue *q)
{
	return 0;
}

void ssg_blkcg_depth_updated(struct blk_mq_hw_ctx *hctx)
{
}

void ssg_blkcg_inc_rq(struct blkcg_gq *blkg)
{
}

void ssg_blkcg_dec_rq(struct blkcg_gq *blkg)
{
}
#endif

#endif
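
ssg-iosched.c itself is not part of this excerpt, so the following is only a hypothetical sketch of how an mq elevator typically wires up the hooks declared in this header: the allocation-side hook caps data->shallow_depth with ssg_blkcg_shallow_depth(), and the depth-update hook forwards to ssg_blkcg_depth_updated() so every group's budget is recomputed against the new tag bitmap. The names ssg_limit_depth_example() and ssg_depth_updated_example() are invented for the sketch and are not the actual SSG implementation.

/*
 * Hypothetical in-tree sketch only; the real ssg-iosched.c may differ.
 * Shows the usual way an mq elevator consumes the ssg-cgroup.h hooks.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

#include "blk-mq.h"
#include "ssg-cgroup.h"

/* Called before a scheduler tag is allocated: a non-zero value from
 * ssg_blkcg_shallow_depth() means the current task's cgroup is over its
 * budget, so restrict it to its per-word shallow depth. */
static void ssg_limit_depth_example(unsigned int op,
		struct blk_mq_alloc_data *data)
{
	unsigned int shallow_depth = ssg_blkcg_shallow_depth(data->q);

	if (shallow_depth)
		data->shallow_depth = shallow_depth;
}

/* Called when the sched tag depth changes (e.g. nr_requests is rewritten):
 * recompute every group's budget for the new bitmap geometry. */
static void ssg_depth_updated_example(struct blk_mq_hw_ctx *hctx)
{
	ssg_blkcg_depth_updated(hctx);
}

The scheduler's setup and teardown paths would pair ssg_blkcg_activate()/ssg_blkcg_deactivate() per request queue and ssg_blkcg_init()/ssg_blkcg_exit() at module load/unload, while ssg_blkcg_inc_rq()/ssg_blkcg_dec_rq() bracket each request's lifetime so current_rqs stays accurate.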
