|
| 1 | +// SPDX-License-Identifier: GPL-2.0 |
| 2 | +/* |
| 3 | + * Control Group of SamSung Generic I/O scheduler |
| 4 | + * |
| 5 | + * Copyright (C) 2021 Changheun Lee <nanich.lee@samsung.com> |
| 6 | + */ |
| 7 | + |
| 8 | +#include <linux/blkdev.h> |
| 9 | +#include <linux/blk-mq.h> |
| 10 | + |
| 11 | +#include "blk-mq.h" |
| 12 | +#include "blk-mq-tag.h" |
| 13 | +#include "ssg-cgroup.h" |
| 14 | + |
| 15 | + |
| 16 | + |
| 17 | +static struct blkcg_policy ssg_blkcg_policy; |
| 18 | + |
| 19 | + |
| 20 | + |
/*
 * Converters between the generic blkcg policy objects and the SSG
 * wrappers that embed them. container_of_safe() propagates a
 * NULL/ERR pointer instead of computing a bogus offset, so every
 * caller checks the result with IS_ERR_OR_NULL().
 */
#define CPD_TO_SSG_BLKCG(_cpd) \
	container_of_safe((_cpd), struct ssg_blkcg, cpd)
#define BLKCG_TO_SSG_BLKCG(_blkcg) \
	CPD_TO_SSG_BLKCG(blkcg_to_cpd((_blkcg), &ssg_blkcg_policy))

#define PD_TO_SSG_BLKG(_pd) \
	container_of_safe((_pd), struct ssg_blkg, pd)
#define BLKG_TO_SSG_BLKG(_blkg) \
	PD_TO_SSG_BLKG(blkg_to_pd((_blkg), &ssg_blkcg_policy))

/* cgroup css -> ssg_blkcg, for the cftype show/store handlers */
#define CSS_TO_SSG_BLKCG(css)	BLKCG_TO_SSG_BLKCG(css_to_blkcg(css))
| 33 | + |
| 34 | + |
| 35 | +static struct blkcg_policy_data *ssg_blkcg_cpd_alloc(gfp_t gfp) |
| 36 | +{ |
| 37 | + struct ssg_blkcg *ssg_blkcg; |
| 38 | + |
| 39 | + ssg_blkcg = kzalloc(sizeof(struct ssg_blkcg), gfp); |
| 40 | + if (ZERO_OR_NULL_PTR(ssg_blkcg)) |
| 41 | + return NULL; |
| 42 | + |
| 43 | + return &ssg_blkcg->cpd; |
| 44 | +} |
| 45 | + |
| 46 | +static void ssg_blkcg_cpd_init(struct blkcg_policy_data *cpd) |
| 47 | +{ |
| 48 | + struct ssg_blkcg *ssg_blkcg = CPD_TO_SSG_BLKCG(cpd); |
| 49 | + |
| 50 | + if (IS_ERR_OR_NULL(ssg_blkcg)) |
| 51 | + return; |
| 52 | + |
| 53 | + ssg_blkcg->max_available_ratio = 100; |
| 54 | +} |
| 55 | + |
/* Release the per-blkcg policy data allocated by ssg_blkcg_cpd_alloc(). */
static void ssg_blkcg_cpd_free(struct blkcg_policy_data *cpd)
{
	struct ssg_blkcg *blkcg_data = CPD_TO_SSG_BLKCG(cpd);

	if (!IS_ERR_OR_NULL(blkcg_data))
		kfree(blkcg_data);
}
| 65 | + |
/*
 * Recompute this blkg's request budget and sbitmap shallow depth from the
 * cgroup's max_available_ratio:
 *
 *   max_available_rqs = sched-tag depth * ratio / 100
 *   shallow_depth     = max(1, max_available_rqs / map_nr)
 *
 * The division by map_nr suggests shallow_depth is a per-word allowance
 * for the tag sbitmap — TODO confirm against this kernel's
 * sbitmap_get_shallow() semantics. The clamp to 1 keeps the group able to
 * allocate at least one tag per word.
 *
 * NOTE(review): this pokes at sbitmap internals (sb.depth / sb.map_nr)
 * directly, so it is tied to the kernel version's sbitmap layout.
 */
static void ssg_blkcg_set_shallow_depth(struct ssg_blkcg *ssg_blkcg,
		struct ssg_blkg *ssg_blkg, struct blk_mq_tags *tags)
{
	unsigned int depth = tags->bitmap_tags.sb.depth;
	unsigned int map_nr = tags->bitmap_tags.sb.map_nr;

	ssg_blkg->max_available_rqs =
		depth * ssg_blkcg->max_available_ratio / 100U;
	ssg_blkg->shallow_depth =
		max_t(unsigned int, 1, ssg_blkg->max_available_rqs / map_nr);
}
| 77 | + |
| 78 | +static struct blkg_policy_data *ssg_blkcg_pd_alloc(gfp_t gfp, |
| 79 | + struct request_queue *q, struct blkcg *blkcg) |
| 80 | +{ |
| 81 | + struct ssg_blkg *ssg_blkg; |
| 82 | + |
| 83 | + ssg_blkg = kzalloc_node(sizeof(struct ssg_blkg), gfp, q->node); |
| 84 | + if (ZERO_OR_NULL_PTR(ssg_blkg)) |
| 85 | + return NULL; |
| 86 | + |
| 87 | + return &ssg_blkg->pd; |
| 88 | +} |
| 89 | + |
| 90 | +static void ssg_blkcg_pd_init(struct blkg_policy_data *pd) |
| 91 | +{ |
| 92 | + struct ssg_blkg *ssg_blkg; |
| 93 | + struct ssg_blkcg *ssg_blkcg; |
| 94 | + |
| 95 | + ssg_blkg = PD_TO_SSG_BLKG(pd); |
| 96 | + if (IS_ERR_OR_NULL(ssg_blkg)) |
| 97 | + return; |
| 98 | + |
| 99 | + ssg_blkcg = BLKCG_TO_SSG_BLKCG(pd->blkg->blkcg); |
| 100 | + if (IS_ERR_OR_NULL(ssg_blkcg)) |
| 101 | + return; |
| 102 | + |
| 103 | + atomic_set(&ssg_blkg->current_rqs, 0); |
| 104 | + ssg_blkcg_set_shallow_depth(ssg_blkcg, ssg_blkg, |
| 105 | + pd->blkg->q->queue_hw_ctx[0]->sched_tags); |
| 106 | +} |
| 107 | + |
/* Release the per-(blkcg, queue) data allocated by ssg_blkcg_pd_alloc(). */
static void ssg_blkcg_pd_free(struct blkg_policy_data *pd)
{
	struct ssg_blkg *blkg_data = PD_TO_SSG_BLKG(pd);

	if (!IS_ERR_OR_NULL(blkg_data))
		kfree(blkg_data);
}
| 117 | + |
| 118 | +unsigned int ssg_blkcg_shallow_depth(struct request_queue *q) |
| 119 | +{ |
| 120 | + struct blkcg_gq *blkg; |
| 121 | + struct ssg_blkg *ssg_blkg; |
| 122 | + |
| 123 | + rcu_read_lock(); |
| 124 | + blkg = blkg_lookup(css_to_blkcg(blkcg_css()), q); |
| 125 | + ssg_blkg = BLKG_TO_SSG_BLKG(blkg); |
| 126 | + rcu_read_unlock(); |
| 127 | + |
| 128 | + if (IS_ERR_OR_NULL(ssg_blkg)) |
| 129 | + return 0; |
| 130 | + |
| 131 | + if (atomic_read(&ssg_blkg->current_rqs) < ssg_blkg->max_available_rqs) |
| 132 | + return 0; |
| 133 | + |
| 134 | + return ssg_blkg->shallow_depth; |
| 135 | +} |
| 136 | + |
| 137 | +void ssg_blkcg_depth_updated(struct blk_mq_hw_ctx *hctx) |
| 138 | +{ |
| 139 | + struct request_queue *q = hctx->queue; |
| 140 | + struct cgroup_subsys_state *pos_css; |
| 141 | + struct blkcg_gq *blkg; |
| 142 | + struct ssg_blkg *ssg_blkg; |
| 143 | + struct ssg_blkcg *ssg_blkcg; |
| 144 | + |
| 145 | + rcu_read_lock(); |
| 146 | + blkg_for_each_descendant_pre(blkg, pos_css, q->root_blkg) { |
| 147 | + ssg_blkg = BLKG_TO_SSG_BLKG(blkg); |
| 148 | + if (IS_ERR_OR_NULL(ssg_blkg)) |
| 149 | + continue; |
| 150 | + |
| 151 | + ssg_blkcg = BLKCG_TO_SSG_BLKCG(blkg->blkcg); |
| 152 | + if (IS_ERR_OR_NULL(ssg_blkcg)) |
| 153 | + continue; |
| 154 | + |
| 155 | + atomic_set(&ssg_blkg->current_rqs, 0); |
| 156 | + ssg_blkcg_set_shallow_depth(ssg_blkcg, ssg_blkg, hctx->sched_tags); |
| 157 | + } |
| 158 | + rcu_read_unlock(); |
| 159 | +} |
| 160 | + |
| 161 | +void ssg_blkcg_inc_rq(struct blkcg_gq *blkg) |
| 162 | +{ |
| 163 | + struct ssg_blkg *ssg_blkg = BLKG_TO_SSG_BLKG(blkg); |
| 164 | + |
| 165 | + if (IS_ERR_OR_NULL(ssg_blkg)) |
| 166 | + return; |
| 167 | + |
| 168 | + atomic_inc(&ssg_blkg->current_rqs); |
| 169 | +} |
| 170 | + |
| 171 | +void ssg_blkcg_dec_rq(struct blkcg_gq *blkg) |
| 172 | +{ |
| 173 | + struct ssg_blkg *ssg_blkg = BLKG_TO_SSG_BLKG(blkg); |
| 174 | + |
| 175 | + if (IS_ERR_OR_NULL(ssg_blkg)) |
| 176 | + return; |
| 177 | + |
| 178 | + atomic_dec(&ssg_blkg->current_rqs); |
| 179 | +} |
| 180 | + |
| 181 | +static int ssg_blkcg_show_max_available_ratio(struct seq_file *sf, void *v) |
| 182 | +{ |
| 183 | + struct ssg_blkcg *ssg_blkcg = CSS_TO_SSG_BLKCG(seq_css(sf)); |
| 184 | + |
| 185 | + if (IS_ERR_OR_NULL(ssg_blkcg)) |
| 186 | + return -EINVAL; |
| 187 | + |
| 188 | + seq_printf(sf, "%d\n", ssg_blkcg->max_available_ratio); |
| 189 | + |
| 190 | + return 0; |
| 191 | +} |
| 192 | + |
| 193 | +static int ssg_blkcg_set_max_available_ratio(struct cgroup_subsys_state *css, |
| 194 | + struct cftype *cftype, u64 ratio) |
| 195 | +{ |
| 196 | + struct blkcg *blkcg = css_to_blkcg(css); |
| 197 | + struct ssg_blkcg *ssg_blkcg = CSS_TO_SSG_BLKCG(css); |
| 198 | + struct blkcg_gq *blkg; |
| 199 | + struct ssg_blkg *ssg_blkg; |
| 200 | + |
| 201 | + if (IS_ERR_OR_NULL(ssg_blkcg)) |
| 202 | + return -EINVAL; |
| 203 | + |
| 204 | + if (ratio > 100) |
| 205 | + return -EINVAL; |
| 206 | + |
| 207 | + spin_lock_irq(&blkcg->lock); |
| 208 | + ssg_blkcg->max_available_ratio = ratio; |
| 209 | + hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { |
| 210 | + ssg_blkg = BLKG_TO_SSG_BLKG(blkg); |
| 211 | + if (IS_ERR_OR_NULL(ssg_blkg)) |
| 212 | + continue; |
| 213 | + |
| 214 | + ssg_blkcg_set_shallow_depth(ssg_blkcg, ssg_blkg, |
| 215 | + blkg->q->queue_hw_ctx[0]->sched_tags); |
| 216 | + } |
| 217 | + spin_unlock_irq(&blkcg->lock); |
| 218 | + |
| 219 | + return 0; |
| 220 | +} |
| 221 | + |
/* cgroup interface files exposed by the ssg scheduler (legacy cftypes). */
struct cftype ssg_blkg_files[] = {
	{
		.name = "ssg.max_available_ratio",
		.flags = CFTYPE_NOT_ON_ROOT,	/* not created in the root cgroup */
		.seq_show = ssg_blkcg_show_max_available_ratio,
		.write_u64 = ssg_blkcg_set_max_available_ratio,
	},

	{}	/* terminate */
};
| 232 | + |
/* blkcg policy definition: per-blkcg (cpd) and per-blkg (pd) callbacks. */
static struct blkcg_policy ssg_blkcg_policy = {
	.legacy_cftypes = ssg_blkg_files,

	.cpd_alloc_fn = ssg_blkcg_cpd_alloc,
	.cpd_init_fn = ssg_blkcg_cpd_init,
	.cpd_free_fn = ssg_blkcg_cpd_free,

	.pd_alloc_fn = ssg_blkcg_pd_alloc,
	.pd_init_fn = ssg_blkcg_pd_init,
	.pd_free_fn = ssg_blkcg_pd_free,
};
| 244 | + |
/* Activate the ssg blkcg policy on @q; returns 0 or a negative errno. */
int ssg_blkcg_activate(struct request_queue *q)
{
	return blkcg_activate_policy(q, &ssg_blkcg_policy);
}
| 249 | + |
/* Deactivate the ssg blkcg policy on @q, freeing its per-blkg data. */
void ssg_blkcg_deactivate(struct request_queue *q)
{
	blkcg_deactivate_policy(q, &ssg_blkcg_policy);
}
| 254 | + |
/* Register the ssg blkcg policy globally; returns 0 or a negative errno. */
int ssg_blkcg_init(void)
{
	return blkcg_policy_register(&ssg_blkcg_policy);
}
| 259 | + |
/* Unregister the ssg blkcg policy (module exit / cleanup path). */
void ssg_blkcg_exit(void)
{
	blkcg_policy_unregister(&ssg_blkcg_policy);
}
0 commit comments