// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018-2022 ARM Ltd.
 */
8 #include <linux/module.h>
9 #include <linux/limits.h>
10 #include <linux/sort.h>
12 #include "protocols.h"
/* Updated only after ALL the mandatory features for that version are merged */
#define SCMI_PROTOCOL_SUPPORTED_VERSION		0x20000

/*
 * Clock protocol command identifiers used in SCMI message headers.
 * NOTE(review): this list looks truncated in this extraction - the rate
 * set/get and name-get command IDs referenced elsewhere in this file
 * (CLOCK_RATE_SET/CLOCK_RATE_GET/CLOCK_NAME_GET) are not visible here;
 * confirm against the full source.
 */
enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,		/* per-clock attributes query */
	CLOCK_DESCRIBE_RATES = 0x4,	/* enumerate supported rates */
	CLOCK_CONFIG_SET = 0x7,		/* set state and/or OEM config */
	CLOCK_RATE_NOTIFY = 0x9,	/* (un)subscribe rate-changed events */
	CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA,
	CLOCK_CONFIG_GET = 0xB,		/* v2.1+: read back configuration */
	CLOCK_POSSIBLE_PARENTS_GET = 0xC,	/* v2.1+: list parent clocks */
	CLOCK_PARENT_SET = 0xD,
	CLOCK_PARENT_GET = 0xE,
/*
 * Response payload for PROTOCOL_ATTRIBUTES.
 * NOTE(review): field list elided in this extraction; the driver reads
 * num_clocks (__le16) and max_async_req from it - see
 * scmi_clock_protocol_attributes_get().
 */
struct scmi_msg_resp_clock_protocol_attributes {

/*
 * Response payload for CLOCK_ATTRIBUTES: feature bitmask, short name and
 * (SCMI v3.1+) the clock enable latency.
 * NOTE(review): the leading attributes word appears elided in this view.
 */
struct scmi_msg_resp_clock_attributes {
	/* Decoders for the feature bits of the attributes word */
#define SUPPORTS_RATE_CHANGED_NOTIF(x)		((x) & BIT(31))
#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x)	((x) & BIT(30))
#define SUPPORTS_EXTENDED_NAMES(x)		((x) & BIT(29))
#define SUPPORTS_PARENT_CLOCK(x)		((x) & BIT(28))
	u8 name[SCMI_SHORT_NAME_MAX_SIZE];
	/* presumably microseconds per SCMI spec - TODO confirm units */
	__le32 clock_enable_latency;

/* Request payload for CLOCK_POSSIBLE_PARENTS_GET (fields elided here) */
struct scmi_msg_clock_possible_parents {

/* Response payload for CLOCK_POSSIBLE_PARENTS_GET */
struct scmi_msg_resp_clock_possible_parents {
	__le32 num_parent_flags;	/* packed returned/remaining counts */
#define NUM_PARENTS_RETURNED(x)		((x) & 0xff)
#define NUM_PARENTS_REMAINING(x)	((x) >> 24)
	__le32 possible_parents[];	/* flexible array of parent clock IDs */
/* Request payload for CLOCK_PARENT_SET (fields elided in this view) */
struct scmi_msg_clock_set_parent {

/* Request payload for CLOCK_CONFIG_SET, pre-v2.1 layout (fields elided) */
struct scmi_msg_clock_config_set {

/* Valid only from SCMI clock v2.1 */
struct scmi_msg_clock_config_set_v2 {
	/* NULL_OEM_TYPE: no OEM-specific configuration is being set/read */
#define NULL_OEM_TYPE			0
	/* attributes word: OEM config type in bits [23:16], state in [1:0] */
#define REGMASK_OEM_TYPE_SET		GENMASK(23, 16)
#define REGMASK_CLK_STATE		GENMASK(1, 0)
	__le32 oem_config_val;

/* Request payload for CLOCK_CONFIG_GET (v2.1+) */
struct scmi_msg_clock_config_get {
	/* flags word: requested OEM config type in bits [7:0] */
#define REGMASK_OEM_TYPE_GET		GENMASK(7, 0)

/* Response payload for CLOCK_CONFIG_GET (v2.1+) */
struct scmi_msg_resp_clock_config_get {
	/* BIT(0) of the config word reports the enabled state */
#define IS_CLK_ENABLED(x)		le32_get_bits((x), BIT(0))
	__le32 oem_config_val;
/* Request payload for CLOCK_DESCRIBE_RATES (fields elided in this view) */
struct scmi_msg_clock_describe_rates {

/* Response payload for CLOCK_DESCRIBE_RATES */
struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;	/* packed counts + discrete/range flag */
#define NUM_RETURNED(x)		((x) & 0xfff)
	/* BIT(12) set means a min/max/step triplet, not discrete rates */
#define RATE_DISCRETE(x)	!((x) & BIT(12))
#define NUM_REMAINING(x)	((x) >> 16)

/*
 * Assemble a 64-bit rate from the low/high __le32 halves of a rate entry.
 * NOTE(review): the statement-expression wrapper of this macro appears
 * elided here - the parameter is X but the visible body uses x, which only
 * makes sense inside a ({ typeof(X) x = (X); ... }) construct; confirm
 * against the full source.
 */
#define RATE_TO_U64(X) \
	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \

/* Request payload for CLOCK_RATE_SET (fields partly elided) */
struct scmi_clock_set_rate {
	/* flags word bits */
#define CLOCK_SET_ASYNC		BIT(0)	/* complete via delayed response */
#define CLOCK_SET_IGNORE_RESP	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
/* Delayed response payload for an async CLOCK_RATE_SET (fields elided) */
struct scmi_msg_resp_set_rate_complete {

/* Request payload for the rate-notification (un)subscribe commands */
struct scmi_msg_clock_rate_notify {
	__le32 notify_enable;	/* BIT(0) set enables notifications */

/* Notification payload delivered with rate events (fields elided here) */
struct scmi_clock_rate_notify_payld {

/*
 * Per-instance clock protocol private state.
 * NOTE(review): the opening of struct clock_info and its leading fields
 * (version, num_clocks, max_async_req are read elsewhere in this file)
 * appear elided in this extraction.
 */
	atomic_t cur_async_req;		/* in-flight async rate-set requests */
	struct scmi_clock_info *clk;	/* array of per-clock descriptors */
	/* Version-dependent CONFIG_SET/GET implementations (v1 vs v2.1+) */
	int (*clock_config_set)(const struct scmi_protocol_handle *ph,
				u32 clk_id, enum clk_state state,
				u8 oem_type, u32 oem_val, bool atomic);
	int (*clock_config_get)(const struct scmi_protocol_handle *ph,
				u32 clk_id, u8 oem_type, u32 *attributes,
				bool *enabled, u32 *oem_val, bool atomic);
/*
 * Map notification event index to the matching (un)subscribe command;
 * indexed by evt_id in scmi_clk_set_notify_enabled().
 * NOTE(review): the first entry (presumably CLOCK_RATE_NOTIFY) appears
 * elided in this extraction - confirm against the full source.
 */
static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
	CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
171 scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
172 struct clock_info *ci)
176 struct scmi_msg_resp_clock_protocol_attributes *attr;
178 ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
179 0, sizeof(*attr), &t);
185 ret = ph->xops->do_xfer(ph, t);
187 ci->num_clocks = le16_to_cpu(attr->num_clocks);
188 ci->max_async_req = attr->max_async_req;
191 ph->xops->xfer_put(ph, t);
/*
 * Private context threaded through the iterator helpers while enumerating
 * rates or possible parents of a single clock.
 * NOTE(review): leading fields (dev and clk_id, both referenced by the
 * iterator callbacks below) are elided in this extraction.
 */
struct scmi_clk_ipriv {
	struct scmi_clock_info *clk;	/* descriptor being filled in */
201 static void iter_clk_possible_parents_prepare_message(void *message, unsigned int desc_index,
204 struct scmi_msg_clock_possible_parents *msg = message;
205 const struct scmi_clk_ipriv *p = priv;
207 msg->id = cpu_to_le32(p->clk_id);
208 /* Set the number of OPPs to be skipped/already read */
209 msg->skip_parents = cpu_to_le32(desc_index);
212 static int iter_clk_possible_parents_update_state(struct scmi_iterator_state *st,
213 const void *response, void *priv)
215 const struct scmi_msg_resp_clock_possible_parents *r = response;
216 struct scmi_clk_ipriv *p = priv;
217 struct device *dev = ((struct scmi_clk_ipriv *)p)->dev;
220 flags = le32_to_cpu(r->num_parent_flags);
221 st->num_returned = NUM_PARENTS_RETURNED(flags);
222 st->num_remaining = NUM_PARENTS_REMAINING(flags);
225 * num parents is not declared previously anywhere so we
226 * assume it's returned+remaining on first call.
228 if (!st->max_resources) {
229 p->clk->num_parents = st->num_returned + st->num_remaining;
230 p->clk->parents = devm_kcalloc(dev, p->clk->num_parents,
231 sizeof(*p->clk->parents),
233 if (!p->clk->parents) {
234 p->clk->num_parents = 0;
237 st->max_resources = st->num_returned + st->num_remaining;
243 static int iter_clk_possible_parents_process_response(const struct scmi_protocol_handle *ph,
244 const void *response,
245 struct scmi_iterator_state *st,
248 const struct scmi_msg_resp_clock_possible_parents *r = response;
249 struct scmi_clk_ipriv *p = priv;
251 u32 *parent = &p->clk->parents[st->desc_index + st->loop_idx];
253 *parent = le32_to_cpu(r->possible_parents[st->loop_idx]);
258 static int scmi_clock_possible_parents(const struct scmi_protocol_handle *ph, u32 clk_id,
259 struct scmi_clock_info *clk)
261 struct scmi_iterator_ops ops = {
262 .prepare_message = iter_clk_possible_parents_prepare_message,
263 .update_state = iter_clk_possible_parents_update_state,
264 .process_response = iter_clk_possible_parents_process_response,
267 struct scmi_clk_ipriv ppriv = {
275 iter = ph->hops->iter_response_init(ph, &ops, 0,
276 CLOCK_POSSIBLE_PARENTS_GET,
277 sizeof(struct scmi_msg_clock_possible_parents),
280 return PTR_ERR(iter);
282 ret = ph->hops->iter_response_run(iter);
/*
 * scmi_clock_attributes_get - fetch CLOCK_ATTRIBUTES for one clock and fill
 * in its scmi_clock_info: name, enable latency and advertised features
 * (notifications, extended name, parent clocks).
 *
 * NOTE(review): several lines (the trailing 'version' parameter, local
 * declarations, error checks and braces, and the tail arguments of the
 * extended_name_get() call) appear elided in this extraction; only the
 * visible code is annotated below.
 */
static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
				     u32 clk_id, struct scmi_clock_info *clk,
	struct scmi_msg_resp_clock_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
				      sizeof(clk_id), sizeof(*attr), &t);

	/* TX payload is just the clock identifier */
	put_unaligned_le32(clk_id, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	attributes = le32_to_cpu(attr->attributes);
	strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
	/* clock_enable_latency field is present only since SCMI v3.1 */
	if (PROTOCOL_REV_MAJOR(version) >= 0x2)
		latency = le32_to_cpu(attr->clock_enable_latency);
	/* A zero reply is remapped to U32_MAX (worst-case latency) */
	clk->enable_latency = latency ? : U32_MAX;

	ph->xops->xfer_put(ph, t);

	/*
	 * If supported overwrite short name with the extended one;
	 * on error just carry on and use already provided short name.
	 */
	if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) {
		if (SUPPORTS_EXTENDED_NAMES(attributes))
			ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id,

		/* Cache advertised capabilities on the clock descriptor */
		if (SUPPORTS_RATE_CHANGED_NOTIF(attributes))
			clk->rate_changed_notifications = true;
		if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
			clk->rate_change_requested_notifications = true;
		/* Parent enumeration failure is not treated as fatal here */
		if (SUPPORTS_PARENT_CLOCK(attributes))
			scmi_clock_possible_parents(ph, clk_id, clk);
338 static int rate_cmp_func(const void *_r1, const void *_r2)
340 const u64 *r1 = _r1, *r2 = _r2;
350 static void iter_clk_describe_prepare_message(void *message,
351 const unsigned int desc_index,
354 struct scmi_msg_clock_describe_rates *msg = message;
355 const struct scmi_clk_ipriv *p = priv;
357 msg->id = cpu_to_le32(p->clk_id);
358 /* Set the number of rates to be skipped/already read */
359 msg->rate_index = cpu_to_le32(desc_index);
/*
 * Iterator state-update callback for CLOCK_DESCRIBE_RATES: decode the
 * returned/remaining counts and the discrete-vs-range flag, then sanity
 * check range-mode replies (which must carry exactly one min/max/step
 * triplet) and patch up a known firmware quirk when possible.
 *
 * NOTE(review): the function opening (return type/brace), the dev_warn()
 * call sites and the closing branches appear elided in this extraction;
 * only the visible code is annotated below.
 */
iter_clk_describe_update_state(struct scmi_iterator_state *st,
			       const void *response, void *priv)
	struct scmi_clk_ipriv *p = priv;
	const struct scmi_msg_resp_clock_describe_rates *r = response;

	flags = le32_to_cpu(r->num_rates_flags);
	st->num_remaining = NUM_REMAINING(flags);
	st->num_returned = NUM_RETURNED(flags);
	p->clk->rate_discrete = RATE_DISCRETE(flags);

	/* Warn about out of spec replies ... */
	if (!p->clk->rate_discrete &&
	    (st->num_returned != 3 || st->num_remaining != 0)) {
		"Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n",
		p->clk->name, st->num_returned, st->num_remaining,

		 * A known quirk: a triplet is returned but num_returned != 3
		 * Check for a safe payload size and fix.
		/* Payload holds exactly three (lo,hi) 32-bit pairs: trust it */
		if (st->num_returned != 3 && st->num_remaining == 0 &&
		    st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {
			st->num_returned = 3;
			st->num_remaining = 0;
			/* Reply cannot be safely reinterpreted: give up */
				"Cannot fix out-of-spec reply !\n");
/*
 * Iterator per-entry callback for CLOCK_DESCRIBE_RATES: store either the
 * min/max/step triplet (range mode) or the next discrete rate.
 *
 * NOTE(review): the function opening, the switch case labels/breaks and
 * the closing braces appear elided in this extraction.
 */
iter_clk_describe_process_response(const struct scmi_protocol_handle *ph,
				   const void *response,
				   struct scmi_iterator_state *st, void *priv)
	struct scmi_clk_ipriv *p = priv;
	const struct scmi_msg_resp_clock_describe_rates *r = response;

	if (!p->clk->rate_discrete) {
		/* Range mode: entries 0/1/2 are min/max/step respectively */
		switch (st->desc_index + st->loop_idx) {
			p->clk->range.min_rate = RATE_TO_U64(r->rate[0]);
			p->clk->range.max_rate = RATE_TO_U64(r->rate[1]);
			p->clk->range.step_size = RATE_TO_U64(r->rate[2]);
		/* Discrete mode: append the rate and bump the running count */
		u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx];

		*rate = RATE_TO_U64(r->rate[st->loop_idx]);
		p->clk->list.num_rates++;
436 scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
437 struct scmi_clock_info *clk)
441 struct scmi_iterator_ops ops = {
442 .prepare_message = iter_clk_describe_prepare_message,
443 .update_state = iter_clk_describe_update_state,
444 .process_response = iter_clk_describe_process_response,
446 struct scmi_clk_ipriv cpriv = {
452 iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
453 CLOCK_DESCRIBE_RATES,
454 sizeof(struct scmi_msg_clock_describe_rates),
457 return PTR_ERR(iter);
459 ret = ph->hops->iter_response_run(iter);
463 if (!clk->rate_discrete) {
464 dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
465 clk->range.min_rate, clk->range.max_rate,
466 clk->range.step_size);
467 } else if (clk->list.num_rates) {
468 sort(clk->list.rates, clk->list.num_rates,
469 sizeof(clk->list.rates[0]), rate_cmp_func, NULL);
476 scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
477 u32 clk_id, u64 *value)
482 ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
483 sizeof(__le32), sizeof(u64), &t);
487 put_unaligned_le32(clk_id, t->tx.buf);
489 ret = ph->xops->do_xfer(ph, t);
491 *value = get_unaligned_le64(t->rx.buf);
493 ph->xops->xfer_put(ph, t);
/*
 * scmi_clock_rate_set - request a new rate for @clk_id via CLOCK_RATE_SET.
 *
 * Uses the asynchronous flavour of the command when the platform advertised
 * a non-zero max_async_req budget and there is still room in it; otherwise
 * a synchronous transfer is issued.
 *
 * NOTE(review): local declarations, error checks, some braces and the
 * failure/else paths of the async branch appear elided in this extraction;
 * only the visible code is annotated below.
 */
static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
			       u32 clk_id, u64 rate)
	struct scmi_clock_set_rate *cfg;
	struct clock_info *ci = ph->get_priv(ph);

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);

	/* Go async only while below the platform's async request budget */
	if (ci->max_async_req &&
	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
		flags |= CLOCK_SET_ASYNC;

	cfg->flags = cpu_to_le32(flags);
	cfg->id = cpu_to_le32(clk_id);
	/* 64-bit rate travels as two little-endian 32-bit words */
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	if (flags & CLOCK_SET_ASYNC) {
		/* Block until the delayed response arrives */
		ret = ph->xops->do_xfer_with_response(ph, t);
			struct scmi_msg_resp_set_rate_complete *resp;

			/* Delayed response must refer to the requested clock */
			if (le32_to_cpu(resp->id) == clk_id)
				"Clk ID %d set async to %llu\n", clk_id,
				get_unaligned_le64(&resp->rate_low));

		ret = ph->xops->do_xfer(ph, t);

	/* Release the async budget slot taken above */
	if (ci->max_async_req)
		atomic_dec(&ci->cur_async_req);

	ph->xops->xfer_put(ph, t);
545 scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
546 enum clk_state state, u8 __unused0, u32 __unused1,
551 struct scmi_msg_clock_config_set *cfg;
553 if (state >= CLK_STATE_RESERVED)
556 ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
557 sizeof(*cfg), 0, &t);
561 t->hdr.poll_completion = atomic;
564 cfg->id = cpu_to_le32(clk_id);
565 cfg->attributes = cpu_to_le32(state);
567 ret = ph->xops->do_xfer(ph, t);
569 ph->xops->xfer_put(ph, t);
574 scmi_clock_set_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
579 struct scmi_msg_clock_set_parent *cfg;
580 struct clock_info *ci = ph->get_priv(ph);
581 struct scmi_clock_info *clk;
583 if (clk_id >= ci->num_clocks)
586 clk = ci->clk + clk_id;
588 if (parent_id >= clk->num_parents)
591 ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_SET,
592 sizeof(*cfg), 0, &t);
596 t->hdr.poll_completion = false;
599 cfg->id = cpu_to_le32(clk_id);
600 cfg->parent_id = cpu_to_le32(clk->parents[parent_id]);
602 ret = ph->xops->do_xfer(ph, t);
604 ph->xops->xfer_put(ph, t);
610 scmi_clock_get_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
616 ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_GET,
617 sizeof(__le32), sizeof(u32), &t);
621 put_unaligned_le32(clk_id, t->tx.buf);
623 ret = ph->xops->do_xfer(ph, t);
625 *parent_id = get_unaligned_le32(t->rx.buf);
627 ph->xops->xfer_put(ph, t);
631 /* For SCMI clock v2.1 and onwards */
633 scmi_clock_config_set_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
634 enum clk_state state, u8 oem_type, u32 oem_val,
640 struct scmi_msg_clock_config_set_v2 *cfg;
642 if (state == CLK_STATE_RESERVED ||
643 (!oem_type && state == CLK_STATE_UNCHANGED))
646 ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
647 sizeof(*cfg), 0, &t);
651 t->hdr.poll_completion = atomic;
653 attrs = FIELD_PREP(REGMASK_OEM_TYPE_SET, oem_type) |
654 FIELD_PREP(REGMASK_CLK_STATE, state);
657 cfg->id = cpu_to_le32(clk_id);
658 cfg->attributes = cpu_to_le32(attrs);
659 /* Clear in any case */
660 cfg->oem_config_val = cpu_to_le32(0);
662 cfg->oem_config_val = cpu_to_le32(oem_val);
664 ret = ph->xops->do_xfer(ph, t);
666 ph->xops->xfer_put(ph, t);
670 static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id,
673 struct clock_info *ci = ph->get_priv(ph);
675 return ci->clock_config_set(ph, clk_id, CLK_STATE_ENABLE,
676 NULL_OEM_TYPE, 0, atomic);
679 static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id,
682 struct clock_info *ci = ph->get_priv(ph);
684 return ci->clock_config_set(ph, clk_id, CLK_STATE_DISABLE,
685 NULL_OEM_TYPE, 0, atomic);
688 /* For SCMI clock v2.1 and onwards */
690 scmi_clock_config_get_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
691 u8 oem_type, u32 *attributes, bool *enabled,
692 u32 *oem_val, bool atomic)
697 struct scmi_msg_clock_config_get *cfg;
699 ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_GET,
700 sizeof(*cfg), 0, &t);
704 t->hdr.poll_completion = atomic;
706 flags = FIELD_PREP(REGMASK_OEM_TYPE_GET, oem_type);
709 cfg->id = cpu_to_le32(clk_id);
710 cfg->flags = cpu_to_le32(flags);
712 ret = ph->xops->do_xfer(ph, t);
714 struct scmi_msg_resp_clock_config_get *resp = t->rx.buf;
717 *attributes = le32_to_cpu(resp->attributes);
720 *enabled = IS_CLK_ENABLED(resp->config);
722 if (oem_val && oem_type)
723 *oem_val = le32_to_cpu(resp->oem_config_val);
726 ph->xops->xfer_put(ph, t);
732 scmi_clock_config_get(const struct scmi_protocol_handle *ph, u32 clk_id,
733 u8 oem_type, u32 *attributes, bool *enabled,
734 u32 *oem_val, bool atomic)
738 struct scmi_msg_resp_clock_attributes *resp;
743 ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
744 sizeof(clk_id), sizeof(*resp), &t);
748 t->hdr.poll_completion = atomic;
749 put_unaligned_le32(clk_id, t->tx.buf);
752 ret = ph->xops->do_xfer(ph, t);
754 *enabled = IS_CLK_ENABLED(resp->attributes);
756 ph->xops->xfer_put(ph, t);
761 static int scmi_clock_state_get(const struct scmi_protocol_handle *ph,
762 u32 clk_id, bool *enabled, bool atomic)
764 struct clock_info *ci = ph->get_priv(ph);
766 return ci->clock_config_get(ph, clk_id, NULL_OEM_TYPE, NULL,
767 enabled, NULL, atomic);
770 static int scmi_clock_config_oem_set(const struct scmi_protocol_handle *ph,
771 u32 clk_id, u8 oem_type, u32 oem_val,
774 struct clock_info *ci = ph->get_priv(ph);
776 return ci->clock_config_set(ph, clk_id, CLK_STATE_UNCHANGED,
777 oem_type, oem_val, atomic);
780 static int scmi_clock_config_oem_get(const struct scmi_protocol_handle *ph,
781 u32 clk_id, u8 oem_type, u32 *oem_val,
782 u32 *attributes, bool atomic)
784 struct clock_info *ci = ph->get_priv(ph);
786 return ci->clock_config_get(ph, clk_id, oem_type, attributes,
787 NULL, oem_val, atomic);
790 static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
792 struct clock_info *ci = ph->get_priv(ph);
794 return ci->num_clocks;
797 static const struct scmi_clock_info *
798 scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
800 struct scmi_clock_info *clk;
801 struct clock_info *ci = ph->get_priv(ph);
803 if (clk_id >= ci->num_clocks)
806 clk = ci->clk + clk_id;
813 static const struct scmi_clk_proto_ops clk_proto_ops = {
814 .count_get = scmi_clock_count_get,
815 .info_get = scmi_clock_info_get,
816 .rate_get = scmi_clock_rate_get,
817 .rate_set = scmi_clock_rate_set,
818 .enable = scmi_clock_enable,
819 .disable = scmi_clock_disable,
820 .state_get = scmi_clock_state_get,
821 .config_oem_get = scmi_clock_config_oem_get,
822 .config_oem_set = scmi_clock_config_oem_set,
823 .parent_set = scmi_clock_set_parent,
824 .parent_get = scmi_clock_get_parent,
827 static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
828 u32 clk_id, int message_id, bool enable)
832 struct scmi_msg_clock_rate_notify *notify;
834 ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
839 notify->clk_id = cpu_to_le32(clk_id);
840 notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
842 ret = ph->xops->do_xfer(ph, t);
844 ph->xops->xfer_put(ph, t);
848 static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph,
849 u8 evt_id, u32 src_id, bool enable)
853 if (evt_id >= ARRAY_SIZE(evt_2_cmd))
856 cmd_id = evt_2_cmd[evt_id];
857 ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable);
859 pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
860 evt_id, src_id, ret);
865 static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph,
866 u8 evt_id, ktime_t timestamp,
867 const void *payld, size_t payld_sz,
868 void *report, u32 *src_id)
870 const struct scmi_clock_rate_notify_payld *p = payld;
871 struct scmi_clock_rate_notif_report *r = report;
873 if (sizeof(*p) != payld_sz ||
874 (evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED &&
875 evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED))
878 r->timestamp = timestamp;
879 r->agent_id = le32_to_cpu(p->agent_id);
880 r->clock_id = le32_to_cpu(p->clock_id);
881 r->rate = get_unaligned_le64(&p->rate_low);
882 *src_id = r->clock_id;
887 static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph)
889 struct clock_info *ci = ph->get_priv(ph);
894 return ci->num_clocks;
897 static const struct scmi_event clk_events[] = {
899 .id = SCMI_EVENT_CLOCK_RATE_CHANGED,
900 .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
901 .max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
904 .id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED,
905 .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
906 .max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
910 static const struct scmi_event_ops clk_event_ops = {
911 .get_num_sources = scmi_clk_get_num_sources,
912 .set_notify_enabled = scmi_clk_set_notify_enabled,
913 .fill_custom_report = scmi_clk_fill_custom_report,
916 static const struct scmi_protocol_events clk_protocol_events = {
917 .queue_sz = SCMI_PROTO_QUEUE_SZ,
918 .ops = &clk_event_ops,
920 .num_events = ARRAY_SIZE(clk_events),
923 static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
927 struct clock_info *cinfo;
929 ret = ph->xops->version_get(ph, &version);
933 dev_dbg(ph->dev, "Clock Version %d.%d\n",
934 PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
936 cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
940 ret = scmi_clock_protocol_attributes_get(ph, cinfo);
944 cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
945 sizeof(*cinfo->clk), GFP_KERNEL);
949 for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
950 struct scmi_clock_info *clk = cinfo->clk + clkid;
952 ret = scmi_clock_attributes_get(ph, clkid, clk, version);
954 scmi_clock_describe_rates_get(ph, clkid, clk);
957 if (PROTOCOL_REV_MAJOR(version) >= 0x3) {
958 cinfo->clock_config_set = scmi_clock_config_set_v2;
959 cinfo->clock_config_get = scmi_clock_config_get_v2;
961 cinfo->clock_config_set = scmi_clock_config_set;
962 cinfo->clock_config_get = scmi_clock_config_get;
965 cinfo->version = version;
966 return ph->set_priv(ph, cinfo, version);
969 static const struct scmi_protocol scmi_clock = {
970 .id = SCMI_PROTOCOL_CLOCK,
971 .owner = THIS_MODULE,
972 .instance_init = &scmi_clock_protocol_init,
973 .ops = &clk_proto_ops,
974 .events = &clk_protocol_events,
975 .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
978 DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)