diff mbox

[RFC,1/2] occ: Add support for OPAL-OCC command/response interface

Message ID 1496637615-11396-2-git-send-email-shilpa.bhat@linux.vnet.ibm.com
State Superseded
Headers show

Commit Message

Shilpasri G Bhat June 5, 2017, 4:40 a.m. UTC
This patch adds support for a shared memory based command/response
interface between OCC and OPAL. In HOMER, there is an OPAL command
buffer and an OCC response buffer which is used to send inband
commands to OCC.

The OPAL-OCC command/response sequence is as follows:

1. Check if both 'OCC Progress' bit in OCC response flag and 'Cmd Ready'
   bit in OPAL command flag are set to zero. If yes then proceed with
   below steps to send a command to OCC.
2. Write the command value, request ID and command specific data
   to the OPAL command buffer.
3. Clear the response flag and set the 'Cmd Ready' bit in OPAL command
   flag to indicate command is ready.
4. OCC will poll the command flag every 4ms to check if 'Cmd Ready' bit
   is set by OPAL. If the bit is set then OCC will set the 'OCC Progress'
   bit.
5. OCC will process the command and write the response to the OCC response
   buffer and set the 'Rsp Ready' bit in the response flag and sends an
   interrupt.
6. OPAL will receive the interrupt and queue the response to the host.

Signed-off-by: Shilpasri G Bhat <shilpa.bhat@linux.vnet.ibm.com>
---
 hw/occ.c           | 453 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
 include/opal-api.h |  68 +++++++-
 2 files changed, 516 insertions(+), 5 deletions(-)

Comments

Cyril Bur June 6, 2017, 5:41 a.m. UTC | #1
On Mon, 2017-06-05 at 10:10 +0530, Shilpasri G Bhat wrote:
> This patch adds support for a shared memory based command/response
> interface between OCC and OPAL. In HOMER, there is an OPAL command
> buffer and an OCC response buffer which is used to send inband
> commands to OCC.
> 
> The OPAL-OCC command/response sequence is as follows:
> 
> 1. Check if both 'OCC Progress' bit in OCC response flag and 'Cmd Ready'
>    bit in OPAL command flag are set to zero. If yes then proceed with
>    below steps to send a command to OCC.
> 2. Write the command value, request ID and command specific data
>    to the OPAL command buffer.
> 3. Clear the response flag and set the 'Cmd Ready' bit in OPAL command
>    flag to indicate command is ready.
> 4. OCC will poll the command flag every 4ms to check if 'Cmd Ready' bit
>    is set by OPAL. If the bit is set then OCC will set the 'OCC Progress'
>    bit.
> 5. OCC will process the command and write the response to the OCC response
>    buffer and set the 'Rsp Ready' bit in the response flag and sends an
>    interrupt.
> 6. OPAL will receive the interrupt and queue the response to the host.
> 

Hi Shilpasri,

Looks good! 

I have a question about retries. From what I can see, this allows
write_occ_cmd() to memcpy() no matter what, which sounds good; however,
you've called it 'retry' — does that mean there's an expectation that the
caller wants to retry the same command? As far as I can tell the caller
could 'retry' anything.

Other comments inline.

> Signed-off-by: Shilpasri G Bhat <shilpa.bhat@linux.vnet.ibm.com>
> ---
>  hw/occ.c           | 453 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
>  include/opal-api.h |  68 +++++++-
>  2 files changed, 516 insertions(+), 5 deletions(-)
> 
> diff --git a/hw/occ.c b/hw/occ.c
> index 4be4df4..aa01bba 100644
> --- a/hw/occ.c
> +++ b/hw/occ.c
> @@ -119,6 +119,87 @@ struct occ_pstate_table {
>  } __packed;
>  
>  /**
> + * OPAL-OCC Command Response Interface
> + *
> + * OPAL-OCC Command Buffer
> + *
> + * ---------------------------------------------------------------------
> + * | OPAL  |  Cmd    | OPAL |	       | Cmd Data | Cmd Data | OPAL    |
> + * | Cmd   | Request | OCC  | Reserved | Length   | Length   | Cmd     |
> + * | Flags |   ID    | Cmd  |	       | (MSB)    | (LSB)    | Data... |
> + * ---------------------------------------------------------------------
> + * |  ….OPAL Command Data up to max of Cmd Data Length 4090 bytes      |
> + * |								       |
> + * ---------------------------------------------------------------------
> + *
> + * OPAL Command Flag
> + *
> + * -----------------------------------------------------------------
> + * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
> + * | (msb) |	   |	   |	   |	   |	   |	   | (lsb) |
> + * -----------------------------------------------------------------
> + * |Cmd    |       |       |       |       |       |       |       |
> + * |Ready  |	   |	   |	   |	   |	   |	   |	   |
> + * -----------------------------------------------------------------
> + *
> + * struct opal_command_buffer -	Defines the layout of OPAL command buffer
> + * @flag:			Provides general status of the command
> + * @request_id:			Token to identify request
> + * @cmd:			Command sent
> + * @data_size:			Command data length
> + * @data:			Command specific data
> + * @spare:			Unused byte
> + */
> +struct opal_command_buffer {
> +	u8 flag;
> +	u8 request_id;
> +	u8 cmd;
> +	u8 spare;
> +	u16 data_size;
> +	u8 data[MAX_OPAL_CMD_DATA_LENGTH];
> +} __packed;
> +
> +/**
> + * OPAL-OCC Response Buffer
> + *
> + * ---------------------------------------------------------------------
> + * | OCC   |  Cmd    | OPAL | Response | Rsp Data | Rsp Data | OPAL    |
> + * | Rsp   | Request | OCC  |  Status  | Length   | Length   | Rsp     |
> + * | Flags |   ID    | Cmd  |	       | (MSB)    | (LSB)    | Data... |
> + * ---------------------------------------------------------------------
> + * |  ….OPAL Response Data up to max of Rsp Data Length 8698 bytes     |
> + * |								       |
> + * ---------------------------------------------------------------------
> + *
> + * OCC Response Flag
> + *
> + * -----------------------------------------------------------------
> + * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
> + * | (msb) |	   |	   |	   |	   |	   |	   | (lsb) |
> + * -----------------------------------------------------------------
> + * |       |       |       |       |       |       |OCC in  | Rsp  |
> + * |       |	   |	   |	   |	   |	   |progress|Ready |
> + * -----------------------------------------------------------------
> + *
> + * struct occ_response_buffer -	Defines the layout of OCC response buffer
> + * @flag:			Provides general status of the response
> + * @request_id:			Token to identify request
> + * @cmd:			Command requested
> + * @status:			Indicates success/failure status of
> + *				the command
> + * @data_size:			Response data length
> + * @data:			Response specific data
> + */
> +struct occ_response_buffer {
> +	u8 flag;
> +	u8 request_id;
> +	u8 cmd;
> +	u8 status;
> +	u16 data_size;
> +	u8 data[MAX_OCC_RSP_DATA_LENGTH];
> +} __packed;
> +
> +/**
>   * OCC-OPAL Shared Memory Interface Dynamic Data Vx90
>   *
>   * struct occ_dynamic_data -	Contains runtime attributes
> @@ -135,6 +216,8 @@ struct occ_pstate_table {
>   * @max_pwr_cap:		Maximum allowed system power cap in Watts
>   * @cur_pwr_cap:		Current system power cap
>   * @spare/reserved:		Unused data
> + * @cmd:			Opal Command Buffer
> + * @rsp:			OCC Response Buffer
>   */
>  struct occ_dynamic_data {
>  	u8 occ_state;
> @@ -150,7 +233,9 @@ struct occ_dynamic_data {
>  	u16 min_pwr_cap;
>  	u16 max_pwr_cap;
>  	u16 cur_pwr_cap;
> -	u64 reserved;
> +	u8 pad[112];
> +	struct opal_command_buffer cmd;
> +	struct occ_response_buffer rsp;
>  } __packed;
>  
>  static bool occ_reset;
> @@ -842,6 +927,363 @@ done:
>  	unlock(&occ_lock);
>  }
>  
> +#define OCC_RSP_READY		0x01
> +#define OCC_IN_PROGRESS		0x02
> +#define OPAL_CMD_READY		0x80
> +
> +enum occ_state {
> +	OCC_STATE_NOT_RUNNING	= 0x00,
> +	OCC_STATE_STANDBY	= 0x01,
> +	OCC_STATE_OBSERVATION	= 0x02,
> +	OCC_STATE_ACTIVE	= 0x03,
> +	OCC_STATE_SAFE		= 0x04,
> +	OCC_STATE_CHAR		= 0x05, /* Characterization */

Not to be confused with the type... I can't think of a better name...

> +};
> +
> +enum occ_role {
> +	OCC_ROLE_SLAVE		= 0x0,
> +	OCC_ROLE_MASTER		= 0x1,
> +};
> +
> +enum occ_cmd_value {
> +	OCC_CMD_VALUE_AMESTER_PASS_THRU		= 0x41,
> +	OCC_CMD_VALUE_CLEAR_SENSOR_DATA		= 0xD0,
> +	OCC_CMD_VALUE_SET_POWER_CAP		= 0xD1,
> +	OCC_CMD_VALUE_SET_POWER_SHIFTING_RATIO	= 0xD2,
> +};
> +
> +struct opal_occ_cmd_info {
> +	enum	occ_cmd_value value;
> +	int	timeout_ms;
> +	u16	state_mask;
> +	u8	role_mask;
> +};
> +
> +static struct opal_occ_cmd_info occ_cmds[] = {
> +	{	OCC_CMD_VALUE_AMESTER_PASS_THRU,
> +		1000,
> +		PPC_BIT16(OCC_STATE_OBSERVATION) |
> +		PPC_BIT16(OCC_STATE_ACTIVE) |
> +		PPC_BIT16(OCC_STATE_CHAR),
> +		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
> +	},
> +	{	OCC_CMD_VALUE_CLEAR_SENSOR_DATA,
> +		1000,
> +		PPC_BIT16(OCC_STATE_OBSERVATION) |
> +		PPC_BIT16(OCC_STATE_ACTIVE) |
> +		PPC_BIT16(OCC_STATE_CHAR),
> +		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
> +	},
> +	{	OCC_CMD_VALUE_SET_POWER_CAP,
> +		1000,
> +		PPC_BIT16(OCC_STATE_OBSERVATION) |
> +		PPC_BIT16(OCC_STATE_ACTIVE) |
> +		PPC_BIT16(OCC_STATE_CHAR),
> +		PPC_BIT8(OCC_ROLE_MASTER)
> +	},
> +	{	OCC_CMD_VALUE_SET_POWER_SHIFTING_RATIO,
> +		1000,
> +		PPC_BIT16(OCC_STATE_OBSERVATION) |
> +		PPC_BIT16(OCC_STATE_ACTIVE) |
> +		PPC_BIT16(OCC_STATE_CHAR),
> +		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
> +	},
> +};
> +
> +static struct cmd_interface {
> +	struct lock queue_lock;
> +	struct timer timeout;
> +	struct opal_command_buffer *cmd;
> +	struct occ_response_buffer *rsp;
> +	struct opal_occ_cmd_rsp_msg *msg;
> +	u32 id;
> +	u8 occ_role;
> +	u8 *occ_state;
> +	bool cmd_in_progress;
> +} *chips;
> +
> +struct occ_rsp_msg {
> +	struct list_node link;
> +	u8 token;
> +	int rc;
> +};
> +
> +static LIST_HEAD(rsp_msg_list);
> +static struct lock rsp_msg_lock = LOCK_UNLOCKED;
> +static bool rsp_msg_in_use;
> +
> +static inline struct cmd_interface *get_chip_cmd_interface(int chip_id)
> +{
> +	struct proc_chip *chip;
> +	int i = 0;
> +
> +	for_each_chip(chip) {
> +		if (chips[i].id == chip_id)
> +			return &chips[i];
> +		i++;
> +	}
> +
> +	return NULL;
> +}

You seem to have abused the for_each_chip() macro to loop on your chips
structure. It should work, in the init your chips->id is equal to each
chip->id.
Would it not make more sense to store the size of your chips array so
that you can 
for (i = 0; i < nr_chips; i++) { 
instead of
for_each_chip(chip) {




Cyril


> +
> +static inline bool occ_in_progress(struct cmd_interface *chip)
> +{
> +	return (chip->rsp->flag == OCC_IN_PROGRESS);
> +}
> +
> +static int write_occ_cmd(struct cmd_interface *chip, bool retry)
> +{
> +	struct opal_command_buffer *cmd = chip->cmd;
> +	struct opal_occ_cmd_rsp_msg *msg = chip->msg;
> +
> +	if (!retry && occ_in_progress(chip)) {
> +		chip->cmd_in_progress = false;
> +		return OPAL_OCC_BUSY;
> +	}
> +
> +	cmd->flag = chip->rsp->flag = 0;
> +	cmd->cmd = occ_cmds[msg->cmd].value;
> +	cmd->request_id = msg->request_id;
> +	cmd->data_size = msg->cdata_size;
> +	memcpy(&cmd->data, msg->cdata, msg->cdata_size);
> +	cmd->flag = OPAL_CMD_READY;
> +
> +	schedule_timer(&chip->timeout,
> +		       msecs_to_tb(occ_cmds[msg->cmd].timeout_ms));
> +
> +	return OPAL_ASYNC_COMPLETION;
> +}
> +
> +static int64_t opal_occ_command(int chip_id, struct opal_occ_cmd_rsp_msg *msg,
> +				bool retry)
> +{
> +	struct cmd_interface *chip;
> +	int rc;
> +
> +	if (!msg || !opal_addr_valid(msg->cdata) ||
> +	    !opal_addr_valid(msg->rdata))
> +		return OPAL_PARAMETER;
> +
> +	if (msg->cmd >= OCC_CMD_LAST)
> +		return OPAL_UNSUPPORTED;
> +
> +	if (msg->cdata_size > MAX_OPAL_CMD_DATA_LENGTH)
> +		return OPAL_PARAMETER;
> +
> +	chip = get_chip_cmd_interface(chip_id);
> +	if (!chip)
> +		return OPAL_PARAMETER;
> +
> +	if (!(PPC_BIT8(chip->occ_role) & occ_cmds[msg->cmd].role_mask))
> +		return OPAL_PARAMETER;
> +
> +	if (!(PPC_BIT16(*chip->occ_state) & occ_cmds[msg->cmd].state_mask))
> +		return OPAL_OCC_INVALID_STATE;
> +
> +	lock(&chip->queue_lock);
> +	if (chip->cmd_in_progress) {
> +		rc = OPAL_OCC_BUSY;
> +		goto out;
> +	}
> +
> +	chip->msg = msg;
> +	chip->cmd_in_progress = true;
> +	rc = write_occ_cmd(chip, retry);
> +out:
> +	unlock(&chip->queue_lock);
> +	return rc;
> +}
> +
> +static inline bool sanity_check_opal_cmd(struct opal_command_buffer *cmd,
> +					 struct opal_occ_cmd_rsp_msg *msg)
> +{
> +	return ((cmd->cmd == occ_cmds[msg->cmd].value) &&
> +		(cmd->request_id == msg->request_id) &&
> +		(cmd->data_size == msg->cdata_size));
> +}
> +
> +static inline bool check_occ_rsp(struct opal_command_buffer *cmd,
> +				 struct occ_response_buffer *rsp)
> +{
> +	if (cmd->cmd != rsp->cmd) {
> +		prlog(PR_WARNING, "OCC: Command value mismatch in OCC response"
> +		      "rsp->cmd = %d cmd->cmd = %d\n", rsp->cmd, cmd->cmd);
> +		return false;
> +	}
> +
> +	if (cmd->request_id != rsp->request_id) {
> +		prlog(PR_WARNING, "OCC: Request ID mismatch in OCC response"
> +		      "rsp->request_id = %d cmd->request_id = %d\n",
> +		      rsp->request_id, cmd->request_id);
> +		return false;
> +	}
> +
> +	return true;
> +}
> +
> +static void occ_rsp_msg_consumed(void *data __unused);
> +
> +static inline int queue_occ_rsp_msg(int token, int rc)
> +{
> +	return opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, occ_rsp_msg_consumed,
> +			      token, rc);
> +}
> +
> +static void occ_rsp_msg_consumed(void *data __unused)
> +{
> +	struct occ_rsp_msg *item;
> +
> +	lock(&rsp_msg_lock);
> +
> +	/* Queue next message */
> +	item = list_pop(&rsp_msg_list, struct occ_rsp_msg, link);
> +	if (!item) {
> +		rsp_msg_in_use = false;
> +		goto exit;
> +	}
> +
> +	if (!queue_occ_rsp_msg(item->token, item->rc)) {
> +		rsp_msg_in_use = true;
> +		free(item);
> +	} else {
> +		rsp_msg_in_use = false;
> +		list_add(&rsp_msg_list, &item->link);
> +	}
> +exit:
> +	unlock(&rsp_msg_lock);
> +}
> +
> +static void send_occ_rsp_msg(u8 token, int rc)
> +{
> +	struct occ_rsp_msg *item;
> +
> +	lock(&rsp_msg_lock);
> +	if (!rsp_msg_in_use && !queue_occ_rsp_msg(token, rc)) {
> +		rsp_msg_in_use = true;
> +		goto out;
> +	}
> +
> +	item = malloc(sizeof(*item));
> +	if (!item)
> +		goto out;
> +
> +	item->token = token;
> +	item->rc = rc;
> +	item->link.next = item->link.prev = NULL;
> +	list_add_tail(&rsp_msg_list, &item->link);
> +out:
> +	unlock(&rsp_msg_lock);
> +}
> +
> +static void occ_cmd_timeout_handler(struct timer *t __unused, void *data,
> +				    uint64_t now __unused)
> +{
> +	struct cmd_interface *chip = data;
> +
> +	lock(&chip->queue_lock);
> +	if (!chip->cmd_in_progress)
> +		goto exit;
> +
> +	chip->cmd_in_progress = false;
> +	send_occ_rsp_msg(chip->msg->request_id, OPAL_OCC_CMD_TIMEOUT);
> +exit:
> +	unlock(&chip->queue_lock);
> +}
> +
> +static void read_occ_rsp(struct occ_response_buffer *rsp,
> +			 struct opal_occ_cmd_rsp_msg *msg)
> +{
> +	/* Copy response to host buffer */
> +	msg->status = rsp->status;
> +	msg->rdata_size = rsp->data_size;
> +	memcpy(msg->rdata, rsp->data, rsp->data_size);
> +
> +	/* Clear the OCC response flag */
> +	rsp->flag = 0;
> +}
> +
> +static void handle_occ_rsp(uint32_t chip_id)
> +{
> +	struct cmd_interface *chip;
> +	struct opal_command_buffer *cmd;
> +	struct occ_response_buffer *rsp;
> +	struct opal_occ_cmd_rsp_msg *msg;
> +
> +	chip = get_chip_cmd_interface(chip_id);
> +	if (!chip)
> +		return;
> +
> +	cmd = chip->cmd;
> +	rsp = chip->rsp;
> +	msg = chip->msg;
> +
> +	/*Read rsp*/
> +	if (rsp->flag != OCC_RSP_READY)
> +		return;
> +	lock(&chip->queue_lock);
> +	if (!chip->cmd_in_progress)
> +		goto exit;
> +
> +	chip->cmd_in_progress = false;
> +	cancel_timer(&chip->timeout);
> +	if (!sanity_check_opal_cmd(cmd, chip->msg) ||
> +	    !check_occ_rsp(cmd, rsp)) {
> +		send_occ_rsp_msg(msg->request_id, OPAL_OCC_CMD_MISMATCH);
> +		goto exit;
> +	}
> +
> +	read_occ_rsp(chip->rsp, msg);
> +	send_occ_rsp_msg(msg->request_id, OPAL_SUCCESS);
> +exit:
> +	unlock(&chip->queue_lock);
> +}
> +
> +static void occ_cmd_interface_init(void)
> +{
> +	struct dt_node *power_mgt;
> +	struct occ_dynamic_data *data;
> +	struct occ_pstate_table *pdata;
> +	struct proc_chip *chip;
> +	int i = 0, nr_chips = 0;
> +
> +	chip = next_chip(NULL);
> +	pdata = get_occ_pstate_table(chip);
> +	if (pdata->version != 0x90)
> +		return;
> +
> +	for_each_chip(chip)
> +		nr_chips++;
> +
> +	chips = malloc(sizeof(*chips) * nr_chips);
> +	assert(chips);
> +
> +	for_each_chip(chip) {
> +		pdata = get_occ_pstate_table(chip);
> +		data = get_occ_dynamic_data(chip);
> +		chips[i].id = chip->id;
> +		chips[i].occ_role = pdata->v9.occ_role;
> +		chips[i].occ_state = &data->occ_state;
> +		chips[i].cmd = &data->cmd;
> +		chips[i].rsp = &data->rsp;
> +		init_lock(&chips[i].queue_lock);
> +		chips[i].cmd_in_progress = false;
> +		init_timer(&chips[i].timeout, occ_cmd_timeout_handler,
> +			   &chips[i]);
> +		i++;
> +	}
> +
> +	power_mgt = dt_find_by_path(dt_root, "/ibm,opal/power-mgt");
> +	if (!power_mgt) {
> +		prerror("OCC: dt node /ibm,opal/power-mgt not found\n");
> +		free(chips);
> +		return;
> +	}
> +
> +	dt_add_property_string(power_mgt, "compatible",
> +			       "ibm,opal-occ-cmd-rsp-interface");
> +	opal_register(OPAL_OCC_COMMAND, opal_occ_command, 3);
> +}
> +
>  /* CPU-OCC PState init */
>  /* Called after OCC init on P8 and P9 */
>  void occ_pstates_init(void)
> @@ -909,6 +1351,9 @@ void occ_pstates_init(void)
>  		chip->throttle = 0;
>  	opal_add_poller(occ_throttle_poll, NULL);
>  	occ_pstates_initialized = true;
> +
> +	/* Init OPAL-OCC command-response interface */
> +	occ_cmd_interface_init();
>  }
>  
>  struct occ_load_req {
> @@ -1394,8 +1839,10 @@ void occ_p9_interrupt(uint32_t chip_id)
>  	if (ireg & OCB_OCI_OCIMISC_IRQ_TMGT)
>  		prd_tmgt_interrupt(chip_id);
>  
> -	if (ireg & OCB_OCI_OCIMISC_IRQ_SHMEM)
> +	if (ireg & OCB_OCI_OCIMISC_IRQ_SHMEM) {
>  		occ_throttle_poll(NULL);
> +		handle_occ_rsp(chip_id);
> +	}
>  
>  	/* We may have masked-out OCB_OCI_OCIMISC_IRQ in the previous
>  	 * OCCMISC_AND write. Check if there are any new source bits set,
> @@ -1417,5 +1864,3 @@ void occ_fsp_init(void)
>  	if (fsp_present())
>  		fsp_register_client(&fsp_occ_client, FSP_MCLASS_OCC);
>  }
> -
> -
> diff --git a/include/opal-api.h b/include/opal-api.h
> index 80033c6..4acbd44 100644
> --- a/include/opal-api.h
> +++ b/include/opal-api.h
> @@ -55,6 +55,10 @@
>  #define OPAL_XSCOM_CTR_OFFLINED	-30
>  #define OPAL_XIVE_PROVISIONING	-31
>  #define OPAL_XIVE_FREE_ACTIVE	-32
> +#define OPAL_OCC_INVALID_STATE	-33
> +#define OPAL_OCC_BUSY		-34
> +#define OPAL_OCC_CMD_TIMEOUT	-35
> +#define OPAL_OCC_CMD_MISMATCH -36
>  
>  /* API Tokens (in r0) */
>  #define OPAL_INVALID_CALL		       -1
> @@ -204,7 +208,8 @@
>  #define OPAL_NPU_INIT_CONTEXT			146
>  #define OPAL_NPU_DESTROY_CONTEXT		147
>  #define OPAL_NPU_MAP_LPAR			148
> -#define OPAL_LAST				148
> +#define OPAL_OCC_COMMAND			149
> +#define OPAL_LAST				149
>  
>  /* Device tree flags */
>  
> @@ -1021,6 +1026,67 @@ typedef struct oppanel_line {
>  	__be64 line_len;
>  } oppanel_line_t;
>  
> +enum occ_cmd {
> +	OCC_CMD_AMESTER_PASS_THRU = 0,
> +	OCC_CMD_CLEAR_SENSOR_DATA,
> +	OCC_CMD_SET_POWER_CAP,
> +	OCC_CMD_SET_POWER_SHIFTING_RATIO,
> +	OCC_CMD_LAST
> +};
> +
> +enum occ_cmd_data_length {
> +	OCC_CMD_DL_AMESTER_PASS_THRU		= 0, /* Variable data length */
> +	OCC_CMD_DL_CLEAR_SENSOR_DATA		= 4,
> +	OCC_CMD_DL_SET_POWER_CAP		= 2,
> +	OCC_CMD_DL_SET_POWER_SHIFTING_RATIO	= 1,
> +};
> +
> +enum occ_rsp_data_length {
> +	OCC_RSP_DL_AMESTER_PASS_THRU		= 0, /* Variable data length */
> +	OCC_RSP_DL_CLEAR_SENSOR_DATA		= 4,
> +	OCC_RSP_DL_SET_POWER_CAP		= 2,
> +	OCC_RSP_DL_SET_POWER_SHIFTING_RATIO	= 1,
> +};
> +
> +enum occ_sensor_limit_group {
> +	OCC_SENSOR_LIMIT_GROUP_CSM		= 0x10,
> +	OCC_SENSOR_LIMIT_GROUP_PROFILER		= 0x20,
> +	OCC_SENSOR_LIMIT_GROUP_JOB_SCHED	= 0x30,
> +};
> +
> +#define MAX_OPAL_CMD_DATA_LENGTH	4090
> +#define MAX_OCC_RSP_DATA_LENGTH		8698
> +
> +enum occ_response_status {
> +	OCC_SUCCESS			= 0x00,
> +	OCC_INVALID_COMMAND		= 0x11,
> +	OCC_INVALID_CMD_DATA_LENGTH	= 0x12,
> +	OCC_INVALID_DATA		= 0x13,
> +	OCC_INTERNAL_ERROR		= 0x15,
> +};
> +
> +struct opal_occ_cmd_rsp_msg {
> +	u8 *cdata;
> +	u8 *rdata;
> +	__be16 cdata_size;
> +	__be16 rdata_size;
> +	u8 cmd;
> +	u8 request_id;
> +	u8 status;
> +};
> +
> +struct opal_occ_cmd_data {
> +	u16 size;
> +	u8 cmd;
> +	u8 data[];
> +};
> +
> +struct opal_occ_rsp_data {
> +	u16 size;
> +	u8 status;
> +	u8 data[];
> +};
> +
>  enum opal_prd_msg_type {
>  	OPAL_PRD_MSG_TYPE_INIT = 0,	/* HBRT --> OPAL */
>  	OPAL_PRD_MSG_TYPE_FINI,		/* HBRT/kernel --> OPAL */
Oliver O'Halloran June 6, 2017, 6:31 a.m. UTC | #2
> diff --git a/include/opal-api.h b/include/opal-api.h
> index 80033c6..4acbd44 100644
> --- a/include/opal-api.h
> +++ b/include/opal-api.h
> @@ -55,6 +55,10 @@
>  #define OPAL_XSCOM_CTR_OFFLINED        -30
>  #define OPAL_XIVE_PROVISIONING -31
>  #define OPAL_XIVE_FREE_ACTIVE  -32
> +#define OPAL_OCC_INVALID_STATE -33
> +#define OPAL_OCC_BUSY          -34
> +#define OPAL_OCC_CMD_TIMEOUT   -35
> +#define OPAL_OCC_CMD_MISMATCH -36
>
>  /* API Tokens (in r0) */
>  #define OPAL_INVALID_CALL                     -1
> @@ -204,7 +208,8 @@
>  #define OPAL_NPU_INIT_CONTEXT                  146
>  #define OPAL_NPU_DESTROY_CONTEXT               147
>  #define OPAL_NPU_MAP_LPAR                      148
> -#define OPAL_LAST                              148
> +#define OPAL_OCC_COMMAND                       149
> +#define OPAL_LAST                              149
>
>  /* Device tree flags */
>
> @@ -1021,6 +1026,67 @@ typedef struct oppanel_line {
>         __be64 line_len;
>  } oppanel_line_t;
>
> +enum occ_cmd {
> +       OCC_CMD_AMESTER_PASS_THRU = 0,
> +       OCC_CMD_CLEAR_SENSOR_DATA,
> +       OCC_CMD_SET_POWER_CAP,
> +       OCC_CMD_SET_POWER_SHIFTING_RATIO,
> +       OCC_CMD_LAST
> +};
> +
> +enum occ_cmd_data_length {
> +       OCC_CMD_DL_AMESTER_PASS_THRU            = 0, /* Variable data length */
> +       OCC_CMD_DL_CLEAR_SENSOR_DATA            = 4,
> +       OCC_CMD_DL_SET_POWER_CAP                = 2,
> +       OCC_CMD_DL_SET_POWER_SHIFTING_RATIO     = 1,
> +};
> +
> +enum occ_rsp_data_length {
> +       OCC_RSP_DL_AMESTER_PASS_THRU            = 0, /* Variable data length */
> +       OCC_RSP_DL_CLEAR_SENSOR_DATA            = 4,
> +       OCC_RSP_DL_SET_POWER_CAP                = 2,
> +       OCC_RSP_DL_SET_POWER_SHIFTING_RATIO     = 1,
> +};
> +
> +enum occ_sensor_limit_group {
> +       OCC_SENSOR_LIMIT_GROUP_CSM              = 0x10,
> +       OCC_SENSOR_LIMIT_GROUP_PROFILER         = 0x20,
> +       OCC_SENSOR_LIMIT_GROUP_JOB_SCHED        = 0x30,
> +};
> +
> +#define MAX_OPAL_CMD_DATA_LENGTH       4090
> +#define MAX_OCC_RSP_DATA_LENGTH                8698
> +
> +enum occ_response_status {
> +       OCC_SUCCESS                     = 0x00,
> +       OCC_INVALID_COMMAND             = 0x11,
> +       OCC_INVALID_CMD_DATA_LENGTH     = 0x12,
> +       OCC_INVALID_DATA                = 0x13,
> +       OCC_INTERNAL_ERROR              = 0x15,
> +};
> +
> +struct opal_occ_cmd_rsp_msg {
> +       u8 *cdata;
> +       u8 *rdata;

I don't think this is safe to have pointers in OPAL API structures. As
far as I know there's no way to annotate a pointer as big endian so
they've always been re-encoded as a __be64 or passed as function
arguments since registers are endian independent. Re-encoding them as
an integer is probably a good idea since we should be using __pa() on
pointers passed from linux to skiboot anyway.

> +       __be16 cdata_size;
> +       __be16 rdata_size;
> +       u8 cmd;
> +       u8 request_id;
> +       u8 status;
> +};

> +
> +struct opal_occ_cmd_data {
> +       u16 size;
should this be __be16?

> +       u8 cmd;
> +       u8 data[];
> +};
> +
> +struct opal_occ_rsp_data {
> +       u16 size;
same here

> +       u8 status;
> +       u8 data[];
> +};
> +

Looks fine otherwise
Shilpasri G Bhat June 7, 2017, 3:49 a.m. UTC | #3
Hi,

On 06/06/2017 11:11 AM, Cyril Bur wrote:
> On Mon, 2017-06-05 at 10:10 +0530, Shilpasri G Bhat wrote:
>> This patch adds support for a shared memory based command/response
>> interface between OCC and OPAL. In HOMER, there is an OPAL command
>> buffer and an OCC response buffer which is used to send inband
>> commands to OCC.
>>
>> The OPAL-OCC command/response sequence is as follows:
>>
>> 1. Check if both 'OCC Progress' bit in OCC response flag and 'Cmd Ready'
>>    bit in OPAL command flag are set to zero. If yes then proceed with
>>    below steps to send a command to OCC.
>> 2. Write the command value, request ID and command specific data
>>    to the OPAL command buffer.
>> 3. Clear the response flag and set the 'Cmd Ready' bit in OPAL command
>>    flag to indicate command is ready.
>> 4. OCC will poll the command flag every 4ms to check if 'Cmd Ready' bit
>>    is set by OPAL. If the bit is set then OCC will set the 'OCC Progress'
>>    bit.
>> 5. OCC will process the command and write the response to the OCC response
>>    buffer and set the 'Rsp Ready' bit in the response flag and sends an
>>    interrupt.
>> 6. OPAL will receive the interrupt and queue the response to the host.
>>
> 
> Hi Shilpasri,
> 
> Looks good! 
> 
> I have a question about retries. From what I can see, this allows
> write_occ_cmd() to memcpy() no matter what, which sounds good; however,
> you've called it 'retry' — does that mean there's an expectation that the
> caller wants to retry the same command? As far as I can tell the caller
> could 'retry' anything.

The caller retries the same command one more time on command failures due to
timeout and command response mismatch. When retry=true OCC_PROGRESS_BIT check is
ignored while writing the same command.

Thanks for the review. I will take care of the rest of the comments.

Regards,
Shilpa

> 
> Other comments inline.
> 
>> Signed-off-by: Shilpasri G Bhat <shilpa.bhat@linux.vnet.ibm.com>
>> ---
>>  hw/occ.c           | 453 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
>>  include/opal-api.h |  68 +++++++-
>>  2 files changed, 516 insertions(+), 5 deletions(-)
>>
>> diff --git a/hw/occ.c b/hw/occ.c
>> index 4be4df4..aa01bba 100644
>> --- a/hw/occ.c
>> +++ b/hw/occ.c
>> @@ -119,6 +119,87 @@ struct occ_pstate_table {
>>  } __packed;
>>  
>>  /**
>> + * OPAL-OCC Command Response Interface
>> + *
>> + * OPAL-OCC Command Buffer
>> + *
>> + * ---------------------------------------------------------------------
>> + * | OPAL  |  Cmd    | OPAL |	       | Cmd Data | Cmd Data | OPAL    |
>> + * | Cmd   | Request | OCC  | Reserved | Length   | Length   | Cmd     |
>> + * | Flags |   ID    | Cmd  |	       | (MSB)    | (LSB)    | Data... |
>> + * ---------------------------------------------------------------------
>> + * |  ….OPAL Command Data up to max of Cmd Data Length 4090 bytes      |
>> + * |								       |
>> + * ---------------------------------------------------------------------
>> + *
>> + * OPAL Command Flag
>> + *
>> + * -----------------------------------------------------------------
>> + * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
>> + * | (msb) |	   |	   |	   |	   |	   |	   | (lsb) |
>> + * -----------------------------------------------------------------
>> + * |Cmd    |       |       |       |       |       |       |       |
>> + * |Ready  |	   |	   |	   |	   |	   |	   |	   |
>> + * -----------------------------------------------------------------
>> + *
>> + * struct opal_command_buffer -	Defines the layout of OPAL command buffer
>> + * @flag:			Provides general status of the command
>> + * @request_id:			Token to identify request
>> + * @cmd:			Command sent
>> + * @data_size:			Command data length
>> + * @data:			Command specific data
>> + * @spare:			Unused byte
>> + */
>> +struct opal_command_buffer {
>> +	u8 flag;
>> +	u8 request_id;
>> +	u8 cmd;
>> +	u8 spare;
>> +	u16 data_size;
>> +	u8 data[MAX_OPAL_CMD_DATA_LENGTH];
>> +} __packed;
>> +
>> +/**
>> + * OPAL-OCC Response Buffer
>> + *
>> + * ---------------------------------------------------------------------
>> + * | OCC   |  Cmd    | OPAL | Response | Rsp Data | Rsp Data | OPAL    |
>> + * | Rsp   | Request | OCC  |  Status  | Length   | Length   | Rsp     |
>> + * | Flags |   ID    | Cmd  |	       | (MSB)    | (LSB)    | Data... |
>> + * ---------------------------------------------------------------------
>> + * |  ….OPAL Response Data up to max of Rsp Data Length 8698 bytes     |
>> + * |								       |
>> + * ---------------------------------------------------------------------
>> + *
>> + * OCC Response Flag
>> + *
>> + * -----------------------------------------------------------------
>> + * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
>> + * | (msb) |	   |	   |	   |	   |	   |	   | (lsb) |
>> + * -----------------------------------------------------------------
>> + * |       |       |       |       |       |       |OCC in  | Rsp  |
>> + * |       |	   |	   |	   |	   |	   |progress|Ready |
>> + * -----------------------------------------------------------------
>> + *
>> + * struct occ_response_buffer -	Defines the layout of OCC response buffer
>> + * @flag:			Provides general status of the response
>> + * @request_id:			Token to identify request
>> + * @cmd:			Command requested
>> + * @status:			Indicates success/failure status of
>> + *				the command
>> + * @data_size:			Response data length
>> + * @data:			Response specific data
>> + */
>> +struct occ_response_buffer {
>> +	u8 flag;
>> +	u8 request_id;
>> +	u8 cmd;
>> +	u8 status;
>> +	u16 data_size;
>> +	u8 data[MAX_OCC_RSP_DATA_LENGTH];
>> +} __packed;
>> +
>> +/**
>>   * OCC-OPAL Shared Memory Interface Dynamic Data Vx90
>>   *
>>   * struct occ_dynamic_data -	Contains runtime attributes
>> @@ -135,6 +216,8 @@ struct occ_pstate_table {
>>   * @max_pwr_cap:		Maximum allowed system power cap in Watts
>>   * @cur_pwr_cap:		Current system power cap
>>   * @spare/reserved:		Unused data
>> + * @cmd:			Opal Command Buffer
>> + * @rsp:			OCC Response Buffer
>>   */
>>  struct occ_dynamic_data {
>>  	u8 occ_state;
>> @@ -150,7 +233,9 @@ struct occ_dynamic_data {
>>  	u16 min_pwr_cap;
>>  	u16 max_pwr_cap;
>>  	u16 cur_pwr_cap;
>> -	u64 reserved;
>> +	u8 pad[112];
>> +	struct opal_command_buffer cmd;
>> +	struct occ_response_buffer rsp;
>>  } __packed;
>>  
>>  static bool occ_reset;
>> @@ -842,6 +927,363 @@ done:
>>  	unlock(&occ_lock);
>>  }
>>  
>> +#define OCC_RSP_READY		0x01
>> +#define OCC_IN_PROGRESS		0x02
>> +#define OPAL_CMD_READY		0x80
>> +
>> +enum occ_state {
>> +	OCC_STATE_NOT_RUNNING	= 0x00,
>> +	OCC_STATE_STANDBY	= 0x01,
>> +	OCC_STATE_OBSERVATION	= 0x02,
>> +	OCC_STATE_ACTIVE	= 0x03,
>> +	OCC_STATE_SAFE		= 0x04,
>> +	OCC_STATE_CHAR		= 0x05, /* Characterization */
> 
> Not to be confused with the type... I can't think of a better name...
> 
>> +};
>> +
>> +enum occ_role {
>> +	OCC_ROLE_SLAVE		= 0x0,
>> +	OCC_ROLE_MASTER		= 0x1,
>> +};
>> +
>> +enum occ_cmd_value {
>> +	OCC_CMD_VALUE_AMESTER_PASS_THRU		= 0x41,
>> +	OCC_CMD_VALUE_CLEAR_SENSOR_DATA		= 0xD0,
>> +	OCC_CMD_VALUE_SET_POWER_CAP		= 0xD1,
>> +	OCC_CMD_VALUE_SET_POWER_SHIFTING_RATIO	= 0xD2,
>> +};
>> +
>> +struct opal_occ_cmd_info {
>> +	enum	occ_cmd_value value;
>> +	int	timeout_ms;
>> +	u16	state_mask;
>> +	u8	role_mask;
>> +};
>> +
>> +static struct opal_occ_cmd_info occ_cmds[] = {
>> +	{	OCC_CMD_VALUE_AMESTER_PASS_THRU,
>> +		1000,
>> +		PPC_BIT16(OCC_STATE_OBSERVATION) |
>> +		PPC_BIT16(OCC_STATE_ACTIVE) |
>> +		PPC_BIT16(OCC_STATE_CHAR),
>> +		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
>> +	},
>> +	{	OCC_CMD_VALUE_CLEAR_SENSOR_DATA,
>> +		1000,
>> +		PPC_BIT16(OCC_STATE_OBSERVATION) |
>> +		PPC_BIT16(OCC_STATE_ACTIVE) |
>> +		PPC_BIT16(OCC_STATE_CHAR),
>> +		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
>> +	},
>> +	{	OCC_CMD_VALUE_SET_POWER_CAP,
>> +		1000,
>> +		PPC_BIT16(OCC_STATE_OBSERVATION) |
>> +		PPC_BIT16(OCC_STATE_ACTIVE) |
>> +		PPC_BIT16(OCC_STATE_CHAR),
>> +		PPC_BIT8(OCC_ROLE_MASTER)
>> +	},
>> +	{	OCC_CMD_VALUE_SET_POWER_SHIFTING_RATIO,
>> +		1000,
>> +		PPC_BIT16(OCC_STATE_OBSERVATION) |
>> +		PPC_BIT16(OCC_STATE_ACTIVE) |
>> +		PPC_BIT16(OCC_STATE_CHAR),
>> +		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
>> +	},
>> +};
>> +
>> +static struct cmd_interface {
>> +	struct lock queue_lock;
>> +	struct timer timeout;
>> +	struct opal_command_buffer *cmd;
>> +	struct occ_response_buffer *rsp;
>> +	struct opal_occ_cmd_rsp_msg *msg;
>> +	u32 id;
>> +	u8 occ_role;
>> +	u8 *occ_state;
>> +	bool cmd_in_progress;
>> +} *chips;
>> +
>> +struct occ_rsp_msg {
>> +	struct list_node link;
>> +	u8 token;
>> +	int rc;
>> +};
>> +
>> +static LIST_HEAD(rsp_msg_list);
>> +static struct lock rsp_msg_lock = LOCK_UNLOCKED;
>> +static bool rsp_msg_in_use;
>> +
>> +static inline struct cmd_interface *get_chip_cmd_interface(int chip_id)
>> +{
>> +	struct proc_chip *chip;
>> +	int i = 0;
>> +
>> +	for_each_chip(chip) {
>> +		if (chips[i].id == chip_id)
>> +			return &chips[i];
>> +		i++;
>> +	}
>> +
>> +	return NULL;
>> +}
> 
> You seem to have abused the for_each_chip() macro to loop on your chips
> structure. It should work, in the init your chips->id is equal to each
> chip->id.
> Would it not make more sense to store the size of your chips array so
> that you can 
> for (i = 0; i < nr_chips; i++) { 
> instead of
> for_each_chip(chip) {
> 
> 
> 
> 
> Cyril
> 
> 
>> +
>> +static inline bool occ_in_progress(struct cmd_interface *chip)
>> +{
>> +	return (chip->rsp->flag == OCC_IN_PROGRESS);
>> +}
>> +
>> +static int write_occ_cmd(struct cmd_interface *chip, bool retry)
>> +{
>> +	struct opal_command_buffer *cmd = chip->cmd;
>> +	struct opal_occ_cmd_rsp_msg *msg = chip->msg;
>> +
>> +	if (!retry && occ_in_progress(chip)) {
>> +		chip->cmd_in_progress = false;
>> +		return OPAL_OCC_BUSY;
>> +	}
>> +
>> +	cmd->flag = chip->rsp->flag = 0;
>> +	cmd->cmd = occ_cmds[msg->cmd].value;
>> +	cmd->request_id = msg->request_id;
>> +	cmd->data_size = msg->cdata_size;
>> +	memcpy(&cmd->data, msg->cdata, msg->cdata_size);
>> +	cmd->flag = OPAL_CMD_READY;
>> +
>> +	schedule_timer(&chip->timeout,
>> +		       msecs_to_tb(occ_cmds[msg->cmd].timeout_ms));
>> +
>> +	return OPAL_ASYNC_COMPLETION;
>> +}
>> +
>> +static int64_t opal_occ_command(int chip_id, struct opal_occ_cmd_rsp_msg *msg,
>> +				bool retry)
>> +{
>> +	struct cmd_interface *chip;
>> +	int rc;
>> +
>> +	if (!msg || !opal_addr_valid(msg->cdata) ||
>> +	    !opal_addr_valid(msg->rdata))
>> +		return OPAL_PARAMETER;
>> +
>> +	if (msg->cmd >= OCC_CMD_LAST)
>> +		return OPAL_UNSUPPORTED;
>> +
>> +	if (msg->cdata_size > MAX_OPAL_CMD_DATA_LENGTH)
>> +		return OPAL_PARAMETER;
>> +
>> +	chip = get_chip_cmd_interface(chip_id);
>> +	if (!chip)
>> +		return OPAL_PARAMETER;
>> +
>> +	if (!(PPC_BIT8(chip->occ_role) & occ_cmds[msg->cmd].role_mask))
>> +		return OPAL_PARAMETER;
>> +
>> +	if (!(PPC_BIT16(*chip->occ_state) & occ_cmds[msg->cmd].state_mask))
>> +		return OPAL_OCC_INVALID_STATE;
>> +
>> +	lock(&chip->queue_lock);
>> +	if (chip->cmd_in_progress) {
>> +		rc = OPAL_OCC_BUSY;
>> +		goto out;
>> +	}
>> +
>> +	chip->msg = msg;
>> +	chip->cmd_in_progress = true;
>> +	rc = write_occ_cmd(chip, retry);
>> +out:
>> +	unlock(&chip->queue_lock);
>> +	return rc;
>> +}
>> +
>> +static inline bool sanity_check_opal_cmd(struct opal_command_buffer *cmd,
>> +					 struct opal_occ_cmd_rsp_msg *msg)
>> +{
>> +	return ((cmd->cmd == occ_cmds[msg->cmd].value) &&
>> +		(cmd->request_id == msg->request_id) &&
>> +		(cmd->data_size == msg->cdata_size));
>> +}
>> +
>> +static inline bool check_occ_rsp(struct opal_command_buffer *cmd,
>> +				 struct occ_response_buffer *rsp)
>> +{
>> +	if (cmd->cmd != rsp->cmd) {
>> +		prlog(PR_WARNING, "OCC: Command value mismatch in OCC response"
>> +		      "rsp->cmd = %d cmd->cmd = %d\n", rsp->cmd, cmd->cmd);
>> +		return false;
>> +	}
>> +
>> +	if (cmd->request_id != rsp->request_id) {
>> +		prlog(PR_WARNING, "OCC: Request ID mismatch in OCC response"
>> +		      "rsp->request_id = %d cmd->request_id = %d\n",
>> +		      rsp->request_id, cmd->request_id);
>> +		return false;
>> +	}
>> +
>> +	return true;
>> +}
>> +
>> +static void occ_rsp_msg_consumed(void *data __unused);
>> +
>> +static inline int queue_occ_rsp_msg(int token, int rc)
>> +{
>> +	return opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, occ_rsp_msg_consumed,
>> +			      token, rc);
>> +}
>> +
>> +static void occ_rsp_msg_consumed(void *data __unused)
>> +{
>> +	struct occ_rsp_msg *item;
>> +
>> +	lock(&rsp_msg_lock);
>> +
>> +	/* Queue next message */
>> +	item = list_pop(&rsp_msg_list, struct occ_rsp_msg, link);
>> +	if (!item) {
>> +		rsp_msg_in_use = false;
>> +		goto exit;
>> +	}
>> +
>> +	if (!queue_occ_rsp_msg(item->token, item->rc)) {
>> +		rsp_msg_in_use = true;
>> +		free(item);
>> +	} else {
>> +		rsp_msg_in_use = false;
>> +		list_add(&rsp_msg_list, &item->link);
>> +	}
>> +exit:
>> +	unlock(&rsp_msg_lock);
>> +}
>> +
>> +static void send_occ_rsp_msg(u8 token, int rc)
>> +{
>> +	struct occ_rsp_msg *item;
>> +
>> +	lock(&rsp_msg_lock);
>> +	if (!rsp_msg_in_use && !queue_occ_rsp_msg(token, rc)) {
>> +		rsp_msg_in_use = true;
>> +		goto out;
>> +	}
>> +
>> +	item = malloc(sizeof(*item));
>> +	if (!item)
>> +		goto out;
>> +
>> +	item->token = token;
>> +	item->rc = rc;
>> +	item->link.next = item->link.prev = NULL;
>> +	list_add_tail(&rsp_msg_list, &item->link);
>> +out:
>> +	unlock(&rsp_msg_lock);
>> +}
>> +
>> +static void occ_cmd_timeout_handler(struct timer *t __unused, void *data,
>> +				    uint64_t now __unused)
>> +{
>> +	struct cmd_interface *chip = data;
>> +
>> +	lock(&chip->queue_lock);
>> +	if (!chip->cmd_in_progress)
>> +		goto exit;
>> +
>> +	chip->cmd_in_progress = false;
>> +	send_occ_rsp_msg(chip->msg->request_id, OPAL_OCC_CMD_TIMEOUT);
>> +exit:
>> +	unlock(&chip->queue_lock);
>> +}
>> +
>> +static void read_occ_rsp(struct occ_response_buffer *rsp,
>> +			 struct opal_occ_cmd_rsp_msg *msg)
>> +{
>> +	/* Copy response to host buffer */
>> +	msg->status = rsp->status;
>> +	msg->rdata_size = rsp->data_size;
>> +	memcpy(msg->rdata, rsp->data, rsp->data_size);
>> +
>> +	/* Clear the OCC response flag */
>> +	rsp->flag = 0;
>> +}
>> +
>> +static void handle_occ_rsp(uint32_t chip_id)
>> +{
>> +	struct cmd_interface *chip;
>> +	struct opal_command_buffer *cmd;
>> +	struct occ_response_buffer *rsp;
>> +	struct opal_occ_cmd_rsp_msg *msg;
>> +
>> +	chip = get_chip_cmd_interface(chip_id);
>> +	if (!chip)
>> +		return;
>> +
>> +	cmd = chip->cmd;
>> +	rsp = chip->rsp;
>> +	msg = chip->msg;
>> +
>> +	/*Read rsp*/
>> +	if (rsp->flag != OCC_RSP_READY)
>> +		return;
>> +	lock(&chip->queue_lock);
>> +	if (!chip->cmd_in_progress)
>> +		goto exit;
>> +
>> +	chip->cmd_in_progress = false;
>> +	cancel_timer(&chip->timeout);
>> +	if (!sanity_check_opal_cmd(cmd, chip->msg) ||
>> +	    !check_occ_rsp(cmd, rsp)) {
>> +		send_occ_rsp_msg(msg->request_id, OPAL_OCC_CMD_MISMATCH);
>> +		goto exit;
>> +	}
>> +
>> +	read_occ_rsp(chip->rsp, msg);
>> +	send_occ_rsp_msg(msg->request_id, OPAL_SUCCESS);
>> +exit:
>> +	unlock(&chip->queue_lock);
>> +}
>> +
>> +static void occ_cmd_interface_init(void)
>> +{
>> +	struct dt_node *power_mgt;
>> +	struct occ_dynamic_data *data;
>> +	struct occ_pstate_table *pdata;
>> +	struct proc_chip *chip;
>> +	int i = 0, nr_chips = 0;
>> +
>> +	chip = next_chip(NULL);
>> +	pdata = get_occ_pstate_table(chip);
>> +	if (pdata->version != 0x90)
>> +		return;
>> +
>> +	for_each_chip(chip)
>> +		nr_chips++;
>> +
>> +	chips = malloc(sizeof(*chips) * nr_chips);
>> +	assert(chips);
>> +
>> +	for_each_chip(chip) {
>> +		pdata = get_occ_pstate_table(chip);
>> +		data = get_occ_dynamic_data(chip);
>> +		chips[i].id = chip->id;
>> +		chips[i].occ_role = pdata->v9.occ_role;
>> +		chips[i].occ_state = &data->occ_state;
>> +		chips[i].cmd = &data->cmd;
>> +		chips[i].rsp = &data->rsp;
>> +		init_lock(&chips[i].queue_lock);
>> +		chips[i].cmd_in_progress = false;
>> +		init_timer(&chips[i].timeout, occ_cmd_timeout_handler,
>> +			   &chips[i]);
>> +		i++;
>> +	}
>> +
>> +	power_mgt = dt_find_by_path(dt_root, "/ibm,opal/power-mgt");
>> +	if (!power_mgt) {
>> +		prerror("OCC: dt node /ibm,opal/power-mgt not found\n");
>> +		free(chips);
>> +		return;
>> +	}
>> +
>> +	dt_add_property_string(power_mgt, "compatible",
>> +			       "ibm,opal-occ-cmd-rsp-interface");
>> +	opal_register(OPAL_OCC_COMMAND, opal_occ_command, 3);
>> +}
>> +
>>  /* CPU-OCC PState init */
>>  /* Called after OCC init on P8 and P9 */
>>  void occ_pstates_init(void)
>> @@ -909,6 +1351,9 @@ void occ_pstates_init(void)
>>  		chip->throttle = 0;
>>  	opal_add_poller(occ_throttle_poll, NULL);
>>  	occ_pstates_initialized = true;
>> +
>> +	/* Init OPAL-OCC command-response interface */
>> +	occ_cmd_interface_init();
>>  }
>>  
>>  struct occ_load_req {
>> @@ -1394,8 +1839,10 @@ void occ_p9_interrupt(uint32_t chip_id)
>>  	if (ireg & OCB_OCI_OCIMISC_IRQ_TMGT)
>>  		prd_tmgt_interrupt(chip_id);
>>  
>> -	if (ireg & OCB_OCI_OCIMISC_IRQ_SHMEM)
>> +	if (ireg & OCB_OCI_OCIMISC_IRQ_SHMEM) {
>>  		occ_throttle_poll(NULL);
>> +		handle_occ_rsp(chip_id);
>> +	}
>>  
>>  	/* We may have masked-out OCB_OCI_OCIMISC_IRQ in the previous
>>  	 * OCCMISC_AND write. Check if there are any new source bits set,
>> @@ -1417,5 +1864,3 @@ void occ_fsp_init(void)
>>  	if (fsp_present())
>>  		fsp_register_client(&fsp_occ_client, FSP_MCLASS_OCC);
>>  }
>> -
>> -
>> diff --git a/include/opal-api.h b/include/opal-api.h
>> index 80033c6..4acbd44 100644
>> --- a/include/opal-api.h
>> +++ b/include/opal-api.h
>> @@ -55,6 +55,10 @@
>>  #define OPAL_XSCOM_CTR_OFFLINED	-30
>>  #define OPAL_XIVE_PROVISIONING	-31
>>  #define OPAL_XIVE_FREE_ACTIVE	-32
>> +#define OPAL_OCC_INVALID_STATE	-33
>> +#define OPAL_OCC_BUSY		-34
>> +#define OPAL_OCC_CMD_TIMEOUT	-35
>> +#define OPAL_OCC_CMD_MISMATCH -36
>>  
>>  /* API Tokens (in r0) */
>>  #define OPAL_INVALID_CALL		       -1
>> @@ -204,7 +208,8 @@
>>  #define OPAL_NPU_INIT_CONTEXT			146
>>  #define OPAL_NPU_DESTROY_CONTEXT		147
>>  #define OPAL_NPU_MAP_LPAR			148
>> -#define OPAL_LAST				148
>> +#define OPAL_OCC_COMMAND			149
>> +#define OPAL_LAST				149
>>  
>>  /* Device tree flags */
>>  
>> @@ -1021,6 +1026,67 @@ typedef struct oppanel_line {
>>  	__be64 line_len;
>>  } oppanel_line_t;
>>  
>> +enum occ_cmd {
>> +	OCC_CMD_AMESTER_PASS_THRU = 0,
>> +	OCC_CMD_CLEAR_SENSOR_DATA,
>> +	OCC_CMD_SET_POWER_CAP,
>> +	OCC_CMD_SET_POWER_SHIFTING_RATIO,
>> +	OCC_CMD_LAST
>> +};
>> +
>> +enum occ_cmd_data_length {
>> +	OCC_CMD_DL_AMESTER_PASS_THRU		= 0, /* Variable data length */
>> +	OCC_CMD_DL_CLEAR_SENSOR_DATA		= 4,
>> +	OCC_CMD_DL_SET_POWER_CAP		= 2,
>> +	OCC_CMD_DL_SET_POWER_SHIFTING_RATIO	= 1,
>> +};
>> +
>> +enum occ_rsp_data_length {
>> +	OCC_RSP_DL_AMESTER_PASS_THRU		= 0, /* Variable data length */
>> +	OCC_RSP_DL_CLEAR_SENSOR_DATA		= 4,
>> +	OCC_RSP_DL_SET_POWER_CAP		= 2,
>> +	OCC_RSP_DL_SET_POWER_SHIFTING_RATIO	= 1,
>> +};
>> +
>> +enum occ_sensor_limit_group {
>> +	OCC_SENSOR_LIMIT_GROUP_CSM		= 0x10,
>> +	OCC_SENSOR_LIMIT_GROUP_PROFILER		= 0x20,
>> +	OCC_SENSOR_LIMIT_GROUP_JOB_SCHED	= 0x30,
>> +};
>> +
>> +#define MAX_OPAL_CMD_DATA_LENGTH	4090
>> +#define MAX_OCC_RSP_DATA_LENGTH		8698
>> +
>> +enum occ_response_status {
>> +	OCC_SUCCESS			= 0x00,
>> +	OCC_INVALID_COMMAND		= 0x11,
>> +	OCC_INVALID_CMD_DATA_LENGTH	= 0x12,
>> +	OCC_INVALID_DATA		= 0x13,
>> +	OCC_INTERNAL_ERROR		= 0x15,
>> +};
>> +
>> +struct opal_occ_cmd_rsp_msg {
>> +	u8 *cdata;
>> +	u8 *rdata;
>> +	__be16 cdata_size;
>> +	__be16 rdata_size;
>> +	u8 cmd;
>> +	u8 request_id;
>> +	u8 status;
>> +};
>> +
>> +struct opal_occ_cmd_data {
>> +	u16 size;
>> +	u8 cmd;
>> +	u8 data[];
>> +};
>> +
>> +struct opal_occ_rsp_data {
>> +	u16 size;
>> +	u8 status;
>> +	u8 data[];
>> +};
>> +
>>  enum opal_prd_msg_type {
>>  	OPAL_PRD_MSG_TYPE_INIT = 0,	/* HBRT --> OPAL */
>>  	OPAL_PRD_MSG_TYPE_FINI,		/* HBRT/kernel --> OPAL */
>
Cyril Bur June 7, 2017, 6:39 a.m. UTC | #4
On Wed, 2017-06-07 at 09:19 +0530, Shilpasri G Bhat wrote:
> Hi,
> 
> On 06/06/2017 11:11 AM, Cyril Bur wrote:
> > On Mon, 2017-06-05 at 10:10 +0530, Shilpasri G Bhat wrote:
> > > This patch adds support for a shared memory based command/response
> > > interface between OCC and OPAL. In HOMER, there is an OPAL command
> > > buffer and an OCC response buffer which is used to send inband
> > > commands to OCC.
> > > 
> > > The OPAL-OCC command/response sequence is as follows:
> > > 
> > > 1. Check if both 'OCC Progress' bit in OCC response flag and 'Cmd Ready'
> > >    bit in OPAL command flag are set to zero. If yes then proceed with
> > >    below steps to send a command to OCC.
> > > 2. Write the command value, request ID and command specific data
> > >    to the OPAL command buffer.
> > > 3. Clear the response flag and set the 'Cmd Ready' bit in OPAL command
> > >    flag to indicate command is ready.
> > > 4. OCC will poll the command flag every 4ms to check if 'Cmd Ready' bit
> > >    is set by OPAL. If the bit is set then OCC will set the 'OCC Progress'
> > >    bit.
> > > 5. OCC will process the command and write the response to the OCC response
> > >    buffer and set the 'Rsp Ready' bit in the response flag and sends an
> > >    interrupt.
> > > 8. OPAL will receive the interrupt and queue the response to the host.
> > > 
> > 
> > Hi Shilpasri,
> > 
> > Looks good! 
> > 
> > I have a question about retries, from what I can see this allows
> > write_occ_cmd() to memcpy() no matter what, which sounds good, however,
> > you've called it 'retry' does that mean theres an expectation that the
> > caller wants to retry the same command? As far as I can tell the caller
> > could 'retry' anything.
> 
> The caller retries the same command one more time on command failures due to
> timeout and command response mismatch. When retry=true OCC_PROGRESS_BIT check is
> ignored while writing the same command.
> 

Ah right, yes, sorry I neglected to realise that in this case Linux is
the caller. My badly formulated point was that in skiboot you rely on
the caller to retry the same command. I suppose my question is: Do we
trust Linux, or should there be some check to enforce that a retry is
only for the same command?

Cyril

> Thanks for the review. I will take care of the rest of the comments.
> 
> Regards,
> Shilpa
> 
> > 
> > Other comments inline.
> > 
> > > Signed-off-by: Shilpasri G Bhat <shilpa.bhat@linux.vnet.ibm.com>
> > > ---
> > >  hw/occ.c           | 453 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
> > >  include/opal-api.h |  68 +++++++-
> > >  2 files changed, 516 insertions(+), 5 deletions(-)
> > > 
> > > diff --git a/hw/occ.c b/hw/occ.c
> > > index 4be4df4..aa01bba 100644
> > > --- a/hw/occ.c
> > > +++ b/hw/occ.c
> > > @@ -119,6 +119,87 @@ struct occ_pstate_table {
> > >  } __packed;
> > >  
> > >  /**
> > > + * OPAL-OCC Command Response Interface
> > > + *
> > > + * OPAL-OCC Command Buffer
> > > + *
> > > + * ---------------------------------------------------------------------
> > > + * | OPAL  |  Cmd    | OPAL |	       | Cmd Data | Cmd Data | OPAL    |
> > > + * | Cmd   | Request | OCC  | Reserved | Length   | Length   | Cmd     |
> > > + * | Flags |   ID    | Cmd  |	       | (MSB)    | (LSB)    | Data... |
> > > + * ---------------------------------------------------------------------
> > > + * |  ….OPAL Command Data up to max of Cmd Data Length 4090 bytes      |
> > > + * |								       |
> > > + * ---------------------------------------------------------------------
> > > + *
> > > + * OPAL Command Flag
> > > + *
> > > + * -----------------------------------------------------------------
> > > + * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
> > > + * | (msb) |	   |	   |	   |	   |	   |	   | (lsb) |
> > > + * -----------------------------------------------------------------
> > > + * |Cmd    |       |       |       |       |       |       |       |
> > > + * |Ready  |	   |	   |	   |	   |	   |	   |	   |
> > > + * -----------------------------------------------------------------
> > > + *
> > > + * struct opal_command_buffer -	Defines the layout of OPAL command buffer
> > > + * @flag:			Provides general status of the command
> > > + * @request_id:			Token to identify request
> > > + * @cmd:			Command sent
> > > + * @data_size:			Command data length
> > > + * @data:			Command specific data
> > > + * @spare:			Unused byte
> > > + */
> > > +struct opal_command_buffer {
> > > +	u8 flag;
> > > +	u8 request_id;
> > > +	u8 cmd;
> > > +	u8 spare;
> > > +	u16 data_size;
> > > +	u8 data[MAX_OPAL_CMD_DATA_LENGTH];
> > > +} __packed;
> > > +
> > > +/**
> > > + * OPAL-OCC Response Buffer
> > > + *
> > > + * ---------------------------------------------------------------------
> > > + * | OCC   |  Cmd    | OPAL | Response | Rsp Data | Rsp Data | OPAL    |
> > > + * | Rsp   | Request | OCC  |  Status  | Length   | Length   | Rsp     |
> > > + * | Flags |   ID    | Cmd  |	       | (MSB)    | (LSB)    | Data... |
> > > + * ---------------------------------------------------------------------
> > > + * |  ….OPAL Response Data up to max of Rsp Data Length 8698 bytes     |
> > > + * |								       |
> > > + * ---------------------------------------------------------------------
> > > + *
> > > + * OCC Response Flag
> > > + *
> > > + * -----------------------------------------------------------------
> > > + * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
> > > + * | (msb) |	   |	   |	   |	   |	   |	   | (lsb) |
> > > + * -----------------------------------------------------------------
> > > + * |       |       |       |       |       |       |OCC in  | Rsp  |
> > > + * |       |	   |	   |	   |	   |	   |progress|Ready |
> > > + * -----------------------------------------------------------------
> > > + *
> > > + * struct occ_response_buffer -	Defines the layout of OCC response buffer
> > > + * @flag:			Provides general status of the response
> > > + * @request_id:			Token to identify request
> > > + * @cmd:			Command requested
> > > + * @status:			Indicates success/failure status of
> > > + *				the command
> > > + * @data_size:			Response data length
> > > + * @data:			Response specific data
> > > + */
> > > +struct occ_response_buffer {
> > > +	u8 flag;
> > > +	u8 request_id;
> > > +	u8 cmd;
> > > +	u8 status;
> > > +	u16 data_size;
> > > +	u8 data[MAX_OCC_RSP_DATA_LENGTH];
> > > +} __packed;
> > > +
> > > +/**
> > >   * OCC-OPAL Shared Memory Interface Dynamic Data Vx90
> > >   *
> > >   * struct occ_dynamic_data -	Contains runtime attributes
> > > @@ -135,6 +216,8 @@ struct occ_pstate_table {
> > >   * @max_pwr_cap:		Maximum allowed system power cap in Watts
> > >   * @cur_pwr_cap:		Current system power cap
> > >   * @spare/reserved:		Unused data
> > > + * @cmd:			Opal Command Buffer
> > > + * @rsp:			OCC Response Buffer
> > >   */
> > >  struct occ_dynamic_data {
> > >  	u8 occ_state;
> > > @@ -150,7 +233,9 @@ struct occ_dynamic_data {
> > >  	u16 min_pwr_cap;
> > >  	u16 max_pwr_cap;
> > >  	u16 cur_pwr_cap;
> > > -	u64 reserved;
> > > +	u8 pad[112];
> > > +	struct opal_command_buffer cmd;
> > > +	struct occ_response_buffer rsp;
> > >  } __packed;
> > >  
> > >  static bool occ_reset;
> > > @@ -842,6 +927,363 @@ done:
> > >  	unlock(&occ_lock);
> > >  }
> > >  
> > > +#define OCC_RSP_READY		0x01
> > > +#define OCC_IN_PROGRESS		0x02
> > > +#define OPAL_CMD_READY		0x80
> > > +
> > > +enum occ_state {
> > > +	OCC_STATE_NOT_RUNNING	= 0x00,
> > > +	OCC_STATE_STANDBY	= 0x01,
> > > +	OCC_STATE_OBSERVATION	= 0x02,
> > > +	OCC_STATE_ACTIVE	= 0x03,
> > > +	OCC_STATE_SAFE		= 0x04,
> > > +	OCC_STATE_CHAR		= 0x05, /* Characterization */
> > 
> > Not to be confused with the type... I can't think of a better name...
> > 
> > > +};
> > > +
> > > +enum occ_role {
> > > +	OCC_ROLE_SLAVE		= 0x0,
> > > +	OCC_ROLE_MASTER		= 0x1,
> > > +};
> > > +
> > > +enum occ_cmd_value {
> > > +	OCC_CMD_VALUE_AMESTER_PASS_THRU		= 0x41,
> > > +	OCC_CMD_VALUE_CLEAR_SENSOR_DATA		= 0xD0,
> > > +	OCC_CMD_VALUE_SET_POWER_CAP		= 0xD1,
> > > +	OCC_CMD_VALUE_SET_POWER_SHIFTING_RATIO	= 0xD2,
> > > +};
> > > +
> > > +struct opal_occ_cmd_info {
> > > +	enum	occ_cmd_value value;
> > > +	int	timeout_ms;
> > > +	u16	state_mask;
> > > +	u8	role_mask;
> > > +};
> > > +
> > > +static struct opal_occ_cmd_info occ_cmds[] = {
> > > +	{	OCC_CMD_VALUE_AMESTER_PASS_THRU,
> > > +		1000,
> > > +		PPC_BIT16(OCC_STATE_OBSERVATION) |
> > > +		PPC_BIT16(OCC_STATE_ACTIVE) |
> > > +		PPC_BIT16(OCC_STATE_CHAR),
> > > +		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
> > > +	},
> > > +	{	OCC_CMD_VALUE_CLEAR_SENSOR_DATA,
> > > +		1000,
> > > +		PPC_BIT16(OCC_STATE_OBSERVATION) |
> > > +		PPC_BIT16(OCC_STATE_ACTIVE) |
> > > +		PPC_BIT16(OCC_STATE_CHAR),
> > > +		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
> > > +	},
> > > +	{	OCC_CMD_VALUE_SET_POWER_CAP,
> > > +		1000,
> > > +		PPC_BIT16(OCC_STATE_OBSERVATION) |
> > > +		PPC_BIT16(OCC_STATE_ACTIVE) |
> > > +		PPC_BIT16(OCC_STATE_CHAR),
> > > +		PPC_BIT8(OCC_ROLE_MASTER)
> > > +	},
> > > +	{	OCC_CMD_VALUE_SET_POWER_SHIFTING_RATIO,
> > > +		1000,
> > > +		PPC_BIT16(OCC_STATE_OBSERVATION) |
> > > +		PPC_BIT16(OCC_STATE_ACTIVE) |
> > > +		PPC_BIT16(OCC_STATE_CHAR),
> > > +		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
> > > +	},
> > > +};
> > > +
> > > +static struct cmd_interface {
> > > +	struct lock queue_lock;
> > > +	struct timer timeout;
> > > +	struct opal_command_buffer *cmd;
> > > +	struct occ_response_buffer *rsp;
> > > +	struct opal_occ_cmd_rsp_msg *msg;
> > > +	u32 id;
> > > +	u8 occ_role;
> > > +	u8 *occ_state;
> > > +	bool cmd_in_progress;
> > > +} *chips;
> > > +
> > > +struct occ_rsp_msg {
> > > +	struct list_node link;
> > > +	u8 token;
> > > +	int rc;
> > > +};
> > > +
> > > +static LIST_HEAD(rsp_msg_list);
> > > +static struct lock rsp_msg_lock = LOCK_UNLOCKED;
> > > +static bool rsp_msg_in_use;
> > > +
> > > +static inline struct cmd_interface *get_chip_cmd_interface(int chip_id)
> > > +{
> > > +	struct proc_chip *chip;
> > > +	int i = 0;
> > > +
> > > +	for_each_chip(chip) {
> > > +		if (chips[i].id == chip_id)
> > > +			return &chips[i];
> > > +		i++;
> > > +	}
> > > +
> > > +	return NULL;
> > > +}
> > 
> > You seem to have abused the for_each_chip() macro to loop on your chips
> > structure. It should work, in the init your chips->id is equal to each
> > chip->id.
> > Would it not make more sense to store the size of your chips array so
> > that you can 
> > for (i = 0; i < nr_chips; i++) { 
> > instead of
> > for_each_chip(chip) {
> > 
> > 
> > 
> > 
> > Cyril
> > 
> > 
> > > +
> > > +static inline bool occ_in_progress(struct cmd_interface *chip)
> > > +{
> > > +	return (chip->rsp->flag == OCC_IN_PROGRESS);
> > > +}
> > > +
> > > +static int write_occ_cmd(struct cmd_interface *chip, bool retry)
> > > +{
> > > +	struct opal_command_buffer *cmd = chip->cmd;
> > > +	struct opal_occ_cmd_rsp_msg *msg = chip->msg;
> > > +
> > > +	if (!retry && occ_in_progress(chip)) {
> > > +		chip->cmd_in_progress = false;
> > > +		return OPAL_OCC_BUSY;
> > > +	}
> > > +
> > > +	cmd->flag = chip->rsp->flag = 0;
> > > +	cmd->cmd = occ_cmds[msg->cmd].value;
> > > +	cmd->request_id = msg->request_id;
> > > +	cmd->data_size = msg->cdata_size;
> > > +	memcpy(&cmd->data, msg->cdata, msg->cdata_size);
> > > +	cmd->flag = OPAL_CMD_READY;
> > > +
> > > +	schedule_timer(&chip->timeout,
> > > +		       msecs_to_tb(occ_cmds[msg->cmd].timeout_ms));
> > > +
> > > +	return OPAL_ASYNC_COMPLETION;
> > > +}
> > > +
> > > +static int64_t opal_occ_command(int chip_id, struct opal_occ_cmd_rsp_msg *msg,
> > > +				bool retry)
> > > +{
> > > +	struct cmd_interface *chip;
> > > +	int rc;
> > > +
> > > +	if (!msg || !opal_addr_valid(msg->cdata) ||
> > > +	    !opal_addr_valid(msg->rdata))
> > > +		return OPAL_PARAMETER;
> > > +
> > > +	if (msg->cmd >= OCC_CMD_LAST)
> > > +		return OPAL_UNSUPPORTED;
> > > +
> > > +	if (msg->cdata_size > MAX_OPAL_CMD_DATA_LENGTH)
> > > +		return OPAL_PARAMETER;
> > > +
> > > +	chip = get_chip_cmd_interface(chip_id);
> > > +	if (!chip)
> > > +		return OPAL_PARAMETER;
> > > +
> > > +	if (!(PPC_BIT8(chip->occ_role) & occ_cmds[msg->cmd].role_mask))
> > > +		return OPAL_PARAMETER;
> > > +
> > > +	if (!(PPC_BIT16(*chip->occ_state) & occ_cmds[msg->cmd].state_mask))
> > > +		return OPAL_OCC_INVALID_STATE;
> > > +
> > > +	lock(&chip->queue_lock);
> > > +	if (chip->cmd_in_progress) {
> > > +		rc = OPAL_OCC_BUSY;
> > > +		goto out;
> > > +	}
> > > +
> > > +	chip->msg = msg;
> > > +	chip->cmd_in_progress = true;
> > > +	rc = write_occ_cmd(chip, retry);
> > > +out:
> > > +	unlock(&chip->queue_lock);
> > > +	return rc;
> > > +}
> > > +
> > > +static inline bool sanity_check_opal_cmd(struct opal_command_buffer *cmd,
> > > +					 struct opal_occ_cmd_rsp_msg *msg)
> > > +{
> > > +	return ((cmd->cmd == occ_cmds[msg->cmd].value) &&
> > > +		(cmd->request_id == msg->request_id) &&
> > > +		(cmd->data_size == msg->cdata_size));
> > > +}
> > > +
> > > +static inline bool check_occ_rsp(struct opal_command_buffer *cmd,
> > > +				 struct occ_response_buffer *rsp)
> > > +{
> > > +	if (cmd->cmd != rsp->cmd) {
> > > +		prlog(PR_WARNING, "OCC: Command value mismatch in OCC response"
> > > +		      "rsp->cmd = %d cmd->cmd = %d\n", rsp->cmd, cmd->cmd);
> > > +		return false;
> > > +	}
> > > +
> > > +	if (cmd->request_id != rsp->request_id) {
> > > +		prlog(PR_WARNING, "OCC: Request ID mismatch in OCC response"
> > > +		      "rsp->request_id = %d cmd->request_id = %d\n",
> > > +		      rsp->request_id, cmd->request_id);
> > > +		return false;
> > > +	}
> > > +
> > > +	return true;
> > > +}
> > > +
> > > +static void occ_rsp_msg_consumed(void *data __unused);
> > > +
> > > +static inline int queue_occ_rsp_msg(int token, int rc)
> > > +{
> > > +	return opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, occ_rsp_msg_consumed,
> > > +			      token, rc);
> > > +}
> > > +
> > > +static void occ_rsp_msg_consumed(void *data __unused)
> > > +{
> > > +	struct occ_rsp_msg *item;
> > > +
> > > +	lock(&rsp_msg_lock);
> > > +
> > > +	/* Queue next message */
> > > +	item = list_pop(&rsp_msg_list, struct occ_rsp_msg, link);
> > > +	if (!item) {
> > > +		rsp_msg_in_use = false;
> > > +		goto exit;
> > > +	}
> > > +
> > > +	if (!queue_occ_rsp_msg(item->token, item->rc)) {
> > > +		rsp_msg_in_use = true;
> > > +		free(item);
> > > +	} else {
> > > +		rsp_msg_in_use = false;
> > > +		list_add(&rsp_msg_list, &item->link);
> > > +	}
> > > +exit:
> > > +	unlock(&rsp_msg_lock);
> > > +}
> > > +
> > > +static void send_occ_rsp_msg(u8 token, int rc)
> > > +{
> > > +	struct occ_rsp_msg *item;
> > > +
> > > +	lock(&rsp_msg_lock);
> > > +	if (!rsp_msg_in_use && !queue_occ_rsp_msg(token, rc)) {
> > > +		rsp_msg_in_use = true;
> > > +		goto out;
> > > +	}
> > > +
> > > +	item = malloc(sizeof(*item));
> > > +	if (!item)
> > > +		goto out;
> > > +
> > > +	item->token = token;
> > > +	item->rc = rc;
> > > +	item->link.next = item->link.prev = NULL;
> > > +	list_add_tail(&rsp_msg_list, &item->link);
> > > +out:
> > > +	unlock(&rsp_msg_lock);
> > > +}
> > > +
> > > +static void occ_cmd_timeout_handler(struct timer *t __unused, void *data,
> > > +				    uint64_t now __unused)
> > > +{
> > > +	struct cmd_interface *chip = data;
> > > +
> > > +	lock(&chip->queue_lock);
> > > +	if (!chip->cmd_in_progress)
> > > +		goto exit;
> > > +
> > > +	chip->cmd_in_progress = false;
> > > +	send_occ_rsp_msg(chip->msg->request_id, OPAL_OCC_CMD_TIMEOUT);
> > > +exit:
> > > +	unlock(&chip->queue_lock);
> > > +}
> > > +
> > > +static void read_occ_rsp(struct occ_response_buffer *rsp,
> > > +			 struct opal_occ_cmd_rsp_msg *msg)
> > > +{
> > > +	/* Copy response to host buffer */
> > > +	msg->status = rsp->status;
> > > +	msg->rdata_size = rsp->data_size;
> > > +	memcpy(msg->rdata, rsp->data, rsp->data_size);
> > > +
> > > +	/* Clear the OCC response flag */
> > > +	rsp->flag = 0;
> > > +}
> > > +
> > > +static void handle_occ_rsp(uint32_t chip_id)
> > > +{
> > > +	struct cmd_interface *chip;
> > > +	struct opal_command_buffer *cmd;
> > > +	struct occ_response_buffer *rsp;
> > > +	struct opal_occ_cmd_rsp_msg *msg;
> > > +
> > > +	chip = get_chip_cmd_interface(chip_id);
> > > +	if (!chip)
> > > +		return;
> > > +
> > > +	cmd = chip->cmd;
> > > +	rsp = chip->rsp;
> > > +	msg = chip->msg;
> > > +
> > > +	/*Read rsp*/
> > > +	if (rsp->flag != OCC_RSP_READY)
> > > +		return;
> > > +	lock(&chip->queue_lock);
> > > +	if (!chip->cmd_in_progress)
> > > +		goto exit;
> > > +
> > > +	chip->cmd_in_progress = false;
> > > +	cancel_timer(&chip->timeout);
> > > +	if (!sanity_check_opal_cmd(cmd, chip->msg) ||
> > > +	    !check_occ_rsp(cmd, rsp)) {
> > > +		send_occ_rsp_msg(msg->request_id, OPAL_OCC_CMD_MISMATCH);
> > > +		goto exit;
> > > +	}
> > > +
> > > +	read_occ_rsp(chip->rsp, msg);
> > > +	send_occ_rsp_msg(msg->request_id, OPAL_SUCCESS);
> > > +exit:
> > > +	unlock(&chip->queue_lock);
> > > +}
> > > +
> > > +static void occ_cmd_interface_init(void)
> > > +{
> > > +	struct dt_node *power_mgt;
> > > +	struct occ_dynamic_data *data;
> > > +	struct occ_pstate_table *pdata;
> > > +	struct proc_chip *chip;
> > > +	int i = 0, nr_chips = 0;
> > > +
> > > +	chip = next_chip(NULL);
> > > +	pdata = get_occ_pstate_table(chip);
> > > +	if (pdata->version != 0x90)
> > > +		return;
> > > +
> > > +	for_each_chip(chip)
> > > +		nr_chips++;
> > > +
> > > +	chips = malloc(sizeof(*chips) * nr_chips);
> > > +	assert(chips);
> > > +
> > > +	for_each_chip(chip) {
> > > +		pdata = get_occ_pstate_table(chip);
> > > +		data = get_occ_dynamic_data(chip);
> > > +		chips[i].id = chip->id;
> > > +		chips[i].occ_role = pdata->v9.occ_role;
> > > +		chips[i].occ_state = &data->occ_state;
> > > +		chips[i].cmd = &data->cmd;
> > > +		chips[i].rsp = &data->rsp;
> > > +		init_lock(&chips[i].queue_lock);
> > > +		chips[i].cmd_in_progress = false;
> > > +		init_timer(&chips[i].timeout, occ_cmd_timeout_handler,
> > > +			   &chips[i]);
> > > +		i++;
> > > +	}
> > > +
> > > +	power_mgt = dt_find_by_path(dt_root, "/ibm,opal/power-mgt");
> > > +	if (!power_mgt) {
> > > +		prerror("OCC: dt node /ibm,opal/power-mgt not found\n");
> > > +		free(chips);
> > > +		return;
> > > +	}
> > > +
> > > +	dt_add_property_string(power_mgt, "compatible",
> > > +			       "ibm,opal-occ-cmd-rsp-interface");
> > > +	opal_register(OPAL_OCC_COMMAND, opal_occ_command, 3);
> > > +}
> > > +
> > >  /* CPU-OCC PState init */
> > >  /* Called after OCC init on P8 and P9 */
> > >  void occ_pstates_init(void)
> > > @@ -909,6 +1351,9 @@ void occ_pstates_init(void)
> > >  		chip->throttle = 0;
> > >  	opal_add_poller(occ_throttle_poll, NULL);
> > >  	occ_pstates_initialized = true;
> > > +
> > > +	/* Init OPAL-OCC command-response interface */
> > > +	occ_cmd_interface_init();
> > >  }
> > >  
> > >  struct occ_load_req {
> > > @@ -1394,8 +1839,10 @@ void occ_p9_interrupt(uint32_t chip_id)
> > >  	if (ireg & OCB_OCI_OCIMISC_IRQ_TMGT)
> > >  		prd_tmgt_interrupt(chip_id);
> > >  
> > > -	if (ireg & OCB_OCI_OCIMISC_IRQ_SHMEM)
> > > +	if (ireg & OCB_OCI_OCIMISC_IRQ_SHMEM) {
> > >  		occ_throttle_poll(NULL);
> > > +		handle_occ_rsp(chip_id);
> > > +	}
> > >  
> > >  	/* We may have masked-out OCB_OCI_OCIMISC_IRQ in the previous
> > >  	 * OCCMISC_AND write. Check if there are any new source bits set,
> > > @@ -1417,5 +1864,3 @@ void occ_fsp_init(void)
> > >  	if (fsp_present())
> > >  		fsp_register_client(&fsp_occ_client, FSP_MCLASS_OCC);
> > >  }
> > > -
> > > -
> > > diff --git a/include/opal-api.h b/include/opal-api.h
> > > index 80033c6..4acbd44 100644
> > > --- a/include/opal-api.h
> > > +++ b/include/opal-api.h
> > > @@ -55,6 +55,10 @@
> > >  #define OPAL_XSCOM_CTR_OFFLINED	-30
> > >  #define OPAL_XIVE_PROVISIONING	-31
> > >  #define OPAL_XIVE_FREE_ACTIVE	-32
> > > +#define OPAL_OCC_INVALID_STATE	-33
> > > +#define OPAL_OCC_BUSY		-34
> > > +#define OPAL_OCC_CMD_TIMEOUT	-35
> > > +#define OPAL_OCC_CMD_MISMATCH -36
> > >  
> > >  /* API Tokens (in r0) */
> > >  #define OPAL_INVALID_CALL		       -1
> > > @@ -204,7 +208,8 @@
> > >  #define OPAL_NPU_INIT_CONTEXT			146
> > >  #define OPAL_NPU_DESTROY_CONTEXT		147
> > >  #define OPAL_NPU_MAP_LPAR			148
> > > -#define OPAL_LAST				148
> > > +#define OPAL_OCC_COMMAND			149
> > > +#define OPAL_LAST				149
> > >  
> > >  /* Device tree flags */
> > >  
> > > @@ -1021,6 +1026,67 @@ typedef struct oppanel_line {
> > >  	__be64 line_len;
> > >  } oppanel_line_t;
> > >  
> > > +enum occ_cmd {
> > > +	OCC_CMD_AMESTER_PASS_THRU = 0,
> > > +	OCC_CMD_CLEAR_SENSOR_DATA,
> > > +	OCC_CMD_SET_POWER_CAP,
> > > +	OCC_CMD_SET_POWER_SHIFTING_RATIO,
> > > +	OCC_CMD_LAST
> > > +};
> > > +
> > > +enum occ_cmd_data_length {
> > > +	OCC_CMD_DL_AMESTER_PASS_THRU		= 0, /* Variable data length */
> > > +	OCC_CMD_DL_CLEAR_SENSOR_DATA		= 4,
> > > +	OCC_CMD_DL_SET_POWER_CAP		= 2,
> > > +	OCC_CMD_DL_SET_POWER_SHIFTING_RATIO	= 1,
> > > +};
> > > +
> > > +enum occ_rsp_data_length {
> > > +	OCC_RSP_DL_AMESTER_PASS_THRU		= 0, /* Variable data length */
> > > +	OCC_RSP_DL_CLEAR_SENSOR_DATA		= 4,
> > > +	OCC_RSP_DL_SET_POWER_CAP		= 2,
> > > +	OCC_RSP_DL_SET_POWER_SHIFTING_RATIO	= 1,
> > > +};
> > > +
> > > +enum occ_sensor_limit_group {
> > > +	OCC_SENSOR_LIMIT_GROUP_CSM		= 0x10,
> > > +	OCC_SENSOR_LIMIT_GROUP_PROFILER		= 0x20,
> > > +	OCC_SENSOR_LIMIT_GROUP_JOB_SCHED	= 0x30,
> > > +};
> > > +
> > > +#define MAX_OPAL_CMD_DATA_LENGTH	4090
> > > +#define MAX_OCC_RSP_DATA_LENGTH		8698
> > > +
> > > +enum occ_response_status {
> > > +	OCC_SUCCESS			= 0x00,
> > > +	OCC_INVALID_COMMAND		= 0x11,
> > > +	OCC_INVALID_CMD_DATA_LENGTH	= 0x12,
> > > +	OCC_INVALID_DATA		= 0x13,
> > > +	OCC_INTERNAL_ERROR		= 0x15,
> > > +};
> > > +
> > > +struct opal_occ_cmd_rsp_msg {
> > > +	u8 *cdata;
> > > +	u8 *rdata;
> > > +	__be16 cdata_size;
> > > +	__be16 rdata_size;
> > > +	u8 cmd;
> > > +	u8 request_id;
> > > +	u8 status;
> > > +};
> > > +
> > > +struct opal_occ_cmd_data {
> > > +	u16 size;
> > > +	u8 cmd;
> > > +	u8 data[];
> > > +};
> > > +
> > > +struct opal_occ_rsp_data {
> > > +	u16 size;
> > > +	u8 status;
> > > +	u8 data[];
> > > +};
> > > +
> > >  enum opal_prd_msg_type {
> > >  	OPAL_PRD_MSG_TYPE_INIT = 0,	/* HBRT --> OPAL */
> > >  	OPAL_PRD_MSG_TYPE_FINI,		/* HBRT/kernel --> OPAL */
> 
>
Shilpasri G Bhat June 7, 2017, 7:02 a.m. UTC | #5
On 06/07/2017 12:09 PM, Cyril Bur wrote:
> On Wed, 2017-06-07 at 09:19 +0530, Shilpasri G Bhat wrote:
>> Hi,
>>
>> On 06/06/2017 11:11 AM, Cyril Bur wrote:
>>> On Mon, 2017-06-05 at 10:10 +0530, Shilpasri G Bhat wrote:
>>>> This patch adds support for a shared memory based command/response
>>>> interface between OCC and OPAL. In HOMER, there is an OPAL command
>>>> buffer and an OCC response buffer which is used to send inband
>>>> commands to OCC.
>>>>
>>>> The OPAL-OCC command/response sequence is as follows:
>>>>
>>>> 1. Check if both 'OCC Progress' bit in OCC response flag and 'Cmd Ready'
>>>>    bit in OPAL command flag are set to zero. If yes then proceed with
>>>>    below steps to send a command to OCC.
>>>> 2. Write the command value, request ID and command specific data
>>>>    to the OPAL command buffer.
>>>> 3. Clear the response flag and set the 'Cmd Ready' bit in OPAL command
>>>>    flag to indicate command is ready.
>>>> 4. OCC will poll the command flag every 4ms to check if 'Cmd Ready' bit
>>>>    is set by OPAL. If the bit is set then OCC will set the 'OCC Progress'
>>>>    bit.
>>>> 5. OCC will process the command and write the response to the OCC response
>>>>    buffer and set the 'Rsp Ready' bit in the response flag and sends an
>>>>    interrupt.
>>>> 6. OPAL will receive the interrupt and queue the response to the host.
>>>>
>>>
>>> Hi Shilpasri,
>>>
>>> Looks good! 
>>>
>>> I have a question about retries, from what I can see this allows
>>> write_occ_cmd() to memcpy() no matter what, which sounds good, however,
>>> you've called it 'retry' does that mean theres an expectation that the
>>> caller wants to retry the same command? As far as I can tell the caller
>>> could 'retry' anything.
>>
>> The caller retries the same command one more time on command failures due to
>> timeout and command response mismatch. When retry=true OCC_PROGRESS_BIT check is
>> ignored while writing the same command.
>>
> 
> Ah right, yes, sorry I neglected to realise that in this case Linux is
> the caller. My badly formulated point was that in skiboot you rely on
> the caller to retry the same command. I suppose my question is: Do we
> trust Linux or should there be some check to enforce that a retry is
> only for the same command.
> 
> Cyril

Hmm.
Yup, I agree. Checking for the command on retry would be the right thing to do.

Thanks and Regards,
Shilpa

> 
>> Thanks for the review. I will take care of the rest of the comments.
>>
>> Regards,
>> Shilpa
>>
>>>
>>> Other comments inline.
>>>
>>>> Signed-off-by: Shilpasri G Bhat <shilpa.bhat@linux.vnet.ibm.com>
>>>> ---
>>>>  hw/occ.c           | 453 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
>>>>  include/opal-api.h |  68 +++++++-
>>>>  2 files changed, 516 insertions(+), 5 deletions(-)
>>>>
>>>> diff --git a/hw/occ.c b/hw/occ.c
>>>> index 4be4df4..aa01bba 100644
>>>> --- a/hw/occ.c
>>>> +++ b/hw/occ.c
>>>> @@ -119,6 +119,87 @@ struct occ_pstate_table {
>>>>  } __packed;
>>>>  
>>>>  /**
>>>> + * OPAL-OCC Command Response Interface
>>>> + *
>>>> + * OPAL-OCC Command Buffer
>>>> + *
>>>> + * ---------------------------------------------------------------------
>>>> + * | OPAL  |  Cmd    | OPAL |	       | Cmd Data | Cmd Data | OPAL    |
>>>> + * | Cmd   | Request | OCC  | Reserved | Length   | Length   | Cmd     |
>>>> + * | Flags |   ID    | Cmd  |	       | (MSB)    | (LSB)    | Data... |
>>>> + * ---------------------------------------------------------------------
>>>> + * |  ….OPAL Command Data up to max of Cmd Data Length 4090 bytes      |
>>>> + * |								       |
>>>> + * ---------------------------------------------------------------------
>>>> + *
>>>> + * OPAL Command Flag
>>>> + *
>>>> + * -----------------------------------------------------------------
>>>> + * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
>>>> + * | (msb) |	   |	   |	   |	   |	   |	   | (lsb) |
>>>> + * -----------------------------------------------------------------
>>>> + * |Cmd    |       |       |       |       |       |       |       |
>>>> + * |Ready  |	   |	   |	   |	   |	   |	   |	   |
>>>> + * -----------------------------------------------------------------
>>>> + *
>>>> + * struct opal_command_buffer -	Defines the layout of OPAL command buffer
>>>> + * @flag:			Provides general status of the command
>>>> + * @request_id:			Token to identify request
>>>> + * @cmd:			Command sent
>>>> + * @data_size:			Command data length
>>>> + * @data:			Command specific data
>>>> + * @spare:			Unused byte
>>>> + */
>>>> +struct opal_command_buffer {
>>>> +	u8 flag;
>>>> +	u8 request_id;
>>>> +	u8 cmd;
>>>> +	u8 spare;
>>>> +	u16 data_size;
>>>> +	u8 data[MAX_OPAL_CMD_DATA_LENGTH];
>>>> +} __packed;
>>>> +
>>>> +/**
>>>> + * OPAL-OCC Response Buffer
>>>> + *
>>>> + * ---------------------------------------------------------------------
>>>> + * | OCC   |  Cmd    | OPAL | Response | Rsp Data | Rsp Data | OPAL    |
>>>> + * | Rsp   | Request | OCC  |  Status  | Length   | Length   | Rsp     |
>>>> + * | Flags |   ID    | Cmd  |	       | (MSB)    | (LSB)    | Data... |
>>>> + * ---------------------------------------------------------------------
>>>> + * |  ….OPAL Response Data up to max of Rsp Data Length 8698 bytes     |
>>>> + * |								       |
>>>> + * ---------------------------------------------------------------------
>>>> + *
>>>> + * OCC Response Flag
>>>> + *
>>>> + * -----------------------------------------------------------------
>>>> + * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
>>>> + * | (msb) |	   |	   |	   |	   |	   |	   | (lsb) |
>>>> + * -----------------------------------------------------------------
>>>> + * |       |       |       |       |       |       |OCC in  | Rsp  |
>>>> + * |       |	   |	   |	   |	   |	   |progress|Ready |
>>>> + * -----------------------------------------------------------------
>>>> + *
>>>> + * struct occ_response_buffer -	Defines the layout of OCC response buffer
>>>> + * @flag:			Provides general status of the response
>>>> + * @request_id:			Token to identify request
>>>> + * @cmd:			Command requested
>>>> + * @status:			Indicates success/failure status of
>>>> + *				the command
>>>> + * @data_size:			Response data length
>>>> + * @data:			Response specific data
>>>> + */
>>>> +struct occ_response_buffer {
>>>> +	u8 flag;
>>>> +	u8 request_id;
>>>> +	u8 cmd;
>>>> +	u8 status;
>>>> +	u16 data_size;
>>>> +	u8 data[MAX_OCC_RSP_DATA_LENGTH];
>>>> +} __packed;
>>>> +
>>>> +/**
>>>>   * OCC-OPAL Shared Memory Interface Dynamic Data Vx90
>>>>   *
>>>>   * struct occ_dynamic_data -	Contains runtime attributes
>>>> @@ -135,6 +216,8 @@ struct occ_pstate_table {
>>>>   * @max_pwr_cap:		Maximum allowed system power cap in Watts
>>>>   * @cur_pwr_cap:		Current system power cap
>>>>   * @spare/reserved:		Unused data
>>>> + * @cmd:			Opal Command Buffer
>>>> + * @rsp:			OCC Response Buffer
>>>>   */
>>>>  struct occ_dynamic_data {
>>>>  	u8 occ_state;
>>>> @@ -150,7 +233,9 @@ struct occ_dynamic_data {
>>>>  	u16 min_pwr_cap;
>>>>  	u16 max_pwr_cap;
>>>>  	u16 cur_pwr_cap;
>>>> -	u64 reserved;
>>>> +	u8 pad[112];
>>>> +	struct opal_command_buffer cmd;
>>>> +	struct occ_response_buffer rsp;
>>>>  } __packed;
>>>>  
>>>>  static bool occ_reset;
>>>> @@ -842,6 +927,363 @@ done:
>>>>  	unlock(&occ_lock);
>>>>  }
>>>>  
>>>> +#define OCC_RSP_READY		0x01
>>>> +#define OCC_IN_PROGRESS		0x02
>>>> +#define OPAL_CMD_READY		0x80
>>>> +
>>>> +enum occ_state {
>>>> +	OCC_STATE_NOT_RUNNING	= 0x00,
>>>> +	OCC_STATE_STANDBY	= 0x01,
>>>> +	OCC_STATE_OBSERVATION	= 0x02,
>>>> +	OCC_STATE_ACTIVE	= 0x03,
>>>> +	OCC_STATE_SAFE		= 0x04,
>>>> +	OCC_STATE_CHAR		= 0x05, /* Characterization */
>>>
>>> Not to be confused with the type... I can't think of a better name...
>>>
>>>> +};
>>>> +
>>>> +enum occ_role {
>>>> +	OCC_ROLE_SLAVE		= 0x0,
>>>> +	OCC_ROLE_MASTER		= 0x1,
>>>> +};
>>>> +
>>>> +enum occ_cmd_value {
>>>> +	OCC_CMD_VALUE_AMESTER_PASS_THRU		= 0x41,
>>>> +	OCC_CMD_VALUE_CLEAR_SENSOR_DATA		= 0xD0,
>>>> +	OCC_CMD_VALUE_SET_POWER_CAP		= 0xD1,
>>>> +	OCC_CMD_VALUE_SET_POWER_SHIFTING_RATIO	= 0xD2,
>>>> +};
>>>> +
>>>> +struct opal_occ_cmd_info {
>>>> +	enum	occ_cmd_value value;
>>>> +	int	timeout_ms;
>>>> +	u16	state_mask;
>>>> +	u8	role_mask;
>>>> +};
>>>> +
>>>> +static struct opal_occ_cmd_info occ_cmds[] = {
>>>> +	{	OCC_CMD_VALUE_AMESTER_PASS_THRU,
>>>> +		1000,
>>>> +		PPC_BIT16(OCC_STATE_OBSERVATION) |
>>>> +		PPC_BIT16(OCC_STATE_ACTIVE) |
>>>> +		PPC_BIT16(OCC_STATE_CHAR),
>>>> +		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
>>>> +	},
>>>> +	{	OCC_CMD_VALUE_CLEAR_SENSOR_DATA,
>>>> +		1000,
>>>> +		PPC_BIT16(OCC_STATE_OBSERVATION) |
>>>> +		PPC_BIT16(OCC_STATE_ACTIVE) |
>>>> +		PPC_BIT16(OCC_STATE_CHAR),
>>>> +		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
>>>> +	},
>>>> +	{	OCC_CMD_VALUE_SET_POWER_CAP,
>>>> +		1000,
>>>> +		PPC_BIT16(OCC_STATE_OBSERVATION) |
>>>> +		PPC_BIT16(OCC_STATE_ACTIVE) |
>>>> +		PPC_BIT16(OCC_STATE_CHAR),
>>>> +		PPC_BIT8(OCC_ROLE_MASTER)
>>>> +	},
>>>> +	{	OCC_CMD_VALUE_SET_POWER_SHIFTING_RATIO,
>>>> +		1000,
>>>> +		PPC_BIT16(OCC_STATE_OBSERVATION) |
>>>> +		PPC_BIT16(OCC_STATE_ACTIVE) |
>>>> +		PPC_BIT16(OCC_STATE_CHAR),
>>>> +		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
>>>> +	},
>>>> +};
>>>> +
>>>> +static struct cmd_interface {
>>>> +	struct lock queue_lock;
>>>> +	struct timer timeout;
>>>> +	struct opal_command_buffer *cmd;
>>>> +	struct occ_response_buffer *rsp;
>>>> +	struct opal_occ_cmd_rsp_msg *msg;
>>>> +	u32 id;
>>>> +	u8 occ_role;
>>>> +	u8 *occ_state;
>>>> +	bool cmd_in_progress;
>>>> +} *chips;
>>>> +
>>>> +struct occ_rsp_msg {
>>>> +	struct list_node link;
>>>> +	u8 token;
>>>> +	int rc;
>>>> +};
>>>> +
>>>> +static LIST_HEAD(rsp_msg_list);
>>>> +static struct lock rsp_msg_lock = LOCK_UNLOCKED;
>>>> +static bool rsp_msg_in_use;
>>>> +
>>>> +static inline struct cmd_interface *get_chip_cmd_interface(int chip_id)
>>>> +{
>>>> +	struct proc_chip *chip;
>>>> +	int i = 0;
>>>> +
>>>> +	for_each_chip(chip) {
>>>> +		if (chips[i].id == chip_id)
>>>> +			return &chips[i];
>>>> +		i++;
>>>> +	}
>>>> +
>>>> +	return NULL;
>>>> +}
>>>
>>> You seem to have abused the for_each_chip() macro to loop on your chips
>>> structure. It should work, in the init your chips->id is equal to each
>>> chip->id.
>>> Would it not make more sense to store the size of your chips array so
>>> that you can 
>>> for (i = 0; i < nr_chips; i++) { 
>>> instead of
>>> for_each_chip(chip) {
>>>
>>>
>>>
>>>
>>> Cyril
>>>
>>>
>>>> +
>>>> +static inline bool occ_in_progress(struct cmd_interface *chip)
>>>> +{
>>>> +	return (chip->rsp->flag == OCC_IN_PROGRESS);
>>>> +}
>>>> +
>>>> +static int write_occ_cmd(struct cmd_interface *chip, bool retry)
>>>> +{
>>>> +	struct opal_command_buffer *cmd = chip->cmd;
>>>> +	struct opal_occ_cmd_rsp_msg *msg = chip->msg;
>>>> +
>>>> +	if (!retry && occ_in_progress(chip)) {
>>>> +		chip->cmd_in_progress = false;
>>>> +		return OPAL_OCC_BUSY;
>>>> +	}
>>>> +
>>>> +	cmd->flag = chip->rsp->flag = 0;
>>>> +	cmd->cmd = occ_cmds[msg->cmd].value;
>>>> +	cmd->request_id = msg->request_id;
>>>> +	cmd->data_size = msg->cdata_size;
>>>> +	memcpy(&cmd->data, msg->cdata, msg->cdata_size);
>>>> +	cmd->flag = OPAL_CMD_READY;
>>>> +
>>>> +	schedule_timer(&chip->timeout,
>>>> +		       msecs_to_tb(occ_cmds[msg->cmd].timeout_ms));
>>>> +
>>>> +	return OPAL_ASYNC_COMPLETION;
>>>> +}
>>>> +
>>>> +static int64_t opal_occ_command(int chip_id, struct opal_occ_cmd_rsp_msg *msg,
>>>> +				bool retry)
>>>> +{
>>>> +	struct cmd_interface *chip;
>>>> +	int rc;
>>>> +
>>>> +	if (!msg || !opal_addr_valid(msg->cdata) ||
>>>> +	    !opal_addr_valid(msg->rdata))
>>>> +		return OPAL_PARAMETER;
>>>> +
>>>> +	if (msg->cmd >= OCC_CMD_LAST)
>>>> +		return OPAL_UNSUPPORTED;
>>>> +
>>>> +	if (msg->cdata_size > MAX_OPAL_CMD_DATA_LENGTH)
>>>> +		return OPAL_PARAMETER;
>>>> +
>>>> +	chip = get_chip_cmd_interface(chip_id);
>>>> +	if (!chip)
>>>> +		return OPAL_PARAMETER;
>>>> +
>>>> +	if (!(PPC_BIT8(chip->occ_role) & occ_cmds[msg->cmd].role_mask))
>>>> +		return OPAL_PARAMETER;
>>>> +
>>>> +	if (!(PPC_BIT16(*chip->occ_state) & occ_cmds[msg->cmd].state_mask))
>>>> +		return OPAL_OCC_INVALID_STATE;
>>>> +
>>>> +	lock(&chip->queue_lock);
>>>> +	if (chip->cmd_in_progress) {
>>>> +		rc = OPAL_OCC_BUSY;
>>>> +		goto out;
>>>> +	}
>>>> +
>>>> +	chip->msg = msg;
>>>> +	chip->cmd_in_progress = true;
>>>> +	rc = write_occ_cmd(chip, retry);
>>>> +out:
>>>> +	unlock(&chip->queue_lock);
>>>> +	return rc;
>>>> +}
>>>> +
>>>> +static inline bool sanity_check_opal_cmd(struct opal_command_buffer *cmd,
>>>> +					 struct opal_occ_cmd_rsp_msg *msg)
>>>> +{
>>>> +	return ((cmd->cmd == occ_cmds[msg->cmd].value) &&
>>>> +		(cmd->request_id == msg->request_id) &&
>>>> +		(cmd->data_size == msg->cdata_size));
>>>> +}
>>>> +
>>>> +static inline bool check_occ_rsp(struct opal_command_buffer *cmd,
>>>> +				 struct occ_response_buffer *rsp)
>>>> +{
>>>> +	if (cmd->cmd != rsp->cmd) {
>>>> +		prlog(PR_WARNING, "OCC: Command value mismatch in OCC response"
>>>> +		      "rsp->cmd = %d cmd->cmd = %d\n", rsp->cmd, cmd->cmd);
>>>> +		return false;
>>>> +	}
>>>> +
>>>> +	if (cmd->request_id != rsp->request_id) {
>>>> +		prlog(PR_WARNING, "OCC: Request ID mismatch in OCC response"
>>>> +		      "rsp->request_id = %d cmd->request_id = %d\n",
>>>> +		      rsp->request_id, cmd->request_id);
>>>> +		return false;
>>>> +	}
>>>> +
>>>> +	return true;
>>>> +}
>>>> +
>>>> +static void occ_rsp_msg_consumed(void *data __unused);
>>>> +
>>>> +static inline int queue_occ_rsp_msg(int token, int rc)
>>>> +{
>>>> +	return opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, occ_rsp_msg_consumed,
>>>> +			      token, rc);
>>>> +}
>>>> +
>>>> +static void occ_rsp_msg_consumed(void *data __unused)
>>>> +{
>>>> +	struct occ_rsp_msg *item;
>>>> +
>>>> +	lock(&rsp_msg_lock);
>>>> +
>>>> +	/* Queue next message */
>>>> +	item = list_pop(&rsp_msg_list, struct occ_rsp_msg, link);
>>>> +	if (!item) {
>>>> +		rsp_msg_in_use = false;
>>>> +		goto exit;
>>>> +	}
>>>> +
>>>> +	if (!queue_occ_rsp_msg(item->token, item->rc)) {
>>>> +		rsp_msg_in_use = true;
>>>> +		free(item);
>>>> +	} else {
>>>> +		rsp_msg_in_use = false;
>>>> +		list_add(&rsp_msg_list, &item->link);
>>>> +	}
>>>> +exit:
>>>> +	unlock(&rsp_msg_lock);
>>>> +}
>>>> +
>>>> +static void send_occ_rsp_msg(u8 token, int rc)
>>>> +{
>>>> +	struct occ_rsp_msg *item;
>>>> +
>>>> +	lock(&rsp_msg_lock);
>>>> +	if (!rsp_msg_in_use && !queue_occ_rsp_msg(token, rc)) {
>>>> +		rsp_msg_in_use = true;
>>>> +		goto out;
>>>> +	}
>>>> +
>>>> +	item = malloc(sizeof(*item));
>>>> +	if (!item)
>>>> +		goto out;
>>>> +
>>>> +	item->token = token;
>>>> +	item->rc = rc;
>>>> +	item->link.next = item->link.prev = NULL;
>>>> +	list_add_tail(&rsp_msg_list, &item->link);
>>>> +out:
>>>> +	unlock(&rsp_msg_lock);
>>>> +}
>>>> +
>>>> +static void occ_cmd_timeout_handler(struct timer *t __unused, void *data,
>>>> +				    uint64_t now __unused)
>>>> +{
>>>> +	struct cmd_interface *chip = data;
>>>> +
>>>> +	lock(&chip->queue_lock);
>>>> +	if (!chip->cmd_in_progress)
>>>> +		goto exit;
>>>> +
>>>> +	chip->cmd_in_progress = false;
>>>> +	send_occ_rsp_msg(chip->msg->request_id, OPAL_OCC_CMD_TIMEOUT);
>>>> +exit:
>>>> +	unlock(&chip->queue_lock);
>>>> +}
>>>> +
>>>> +static void read_occ_rsp(struct occ_response_buffer *rsp,
>>>> +			 struct opal_occ_cmd_rsp_msg *msg)
>>>> +{
>>>> +	/* Copy response to host buffer */
>>>> +	msg->status = rsp->status;
>>>> +	msg->rdata_size = rsp->data_size;
>>>> +	memcpy(msg->rdata, rsp->data, rsp->data_size);
>>>> +
>>>> +	/* Clear the OCC response flag */
>>>> +	rsp->flag = 0;
>>>> +}
>>>> +
>>>> +static void handle_occ_rsp(uint32_t chip_id)
>>>> +{
>>>> +	struct cmd_interface *chip;
>>>> +	struct opal_command_buffer *cmd;
>>>> +	struct occ_response_buffer *rsp;
>>>> +	struct opal_occ_cmd_rsp_msg *msg;
>>>> +
>>>> +	chip = get_chip_cmd_interface(chip_id);
>>>> +	if (!chip)
>>>> +		return;
>>>> +
>>>> +	cmd = chip->cmd;
>>>> +	rsp = chip->rsp;
>>>> +	msg = chip->msg;
>>>> +
>>>> +	/*Read rsp*/
>>>> +	if (rsp->flag != OCC_RSP_READY)
>>>> +		return;
>>>> +	lock(&chip->queue_lock);
>>>> +	if (!chip->cmd_in_progress)
>>>> +		goto exit;
>>>> +
>>>> +	chip->cmd_in_progress = false;
>>>> +	cancel_timer(&chip->timeout);
>>>> +	if (!sanity_check_opal_cmd(cmd, chip->msg) ||
>>>> +	    !check_occ_rsp(cmd, rsp)) {
>>>> +		send_occ_rsp_msg(msg->request_id, OPAL_OCC_CMD_MISMATCH);
>>>> +		goto exit;
>>>> +	}
>>>> +
>>>> +	read_occ_rsp(chip->rsp, msg);
>>>> +	send_occ_rsp_msg(msg->request_id, OPAL_SUCCESS);
>>>> +exit:
>>>> +	unlock(&chip->queue_lock);
>>>> +}
>>>> +
>>>> +static void occ_cmd_interface_init(void)
>>>> +{
>>>> +	struct dt_node *power_mgt;
>>>> +	struct occ_dynamic_data *data;
>>>> +	struct occ_pstate_table *pdata;
>>>> +	struct proc_chip *chip;
>>>> +	int i = 0, nr_chips = 0;
>>>> +
>>>> +	chip = next_chip(NULL);
>>>> +	pdata = get_occ_pstate_table(chip);
>>>> +	if (pdata->version != 0x90)
>>>> +		return;
>>>> +
>>>> +	for_each_chip(chip)
>>>> +		nr_chips++;
>>>> +
>>>> +	chips = malloc(sizeof(*chips) * nr_chips);
>>>> +	assert(chips);
>>>> +
>>>> +	for_each_chip(chip) {
>>>> +		pdata = get_occ_pstate_table(chip);
>>>> +		data = get_occ_dynamic_data(chip);
>>>> +		chips[i].id = chip->id;
>>>> +		chips[i].occ_role = pdata->v9.occ_role;
>>>> +		chips[i].occ_state = &data->occ_state;
>>>> +		chips[i].cmd = &data->cmd;
>>>> +		chips[i].rsp = &data->rsp;
>>>> +		init_lock(&chips[i].queue_lock);
>>>> +		chips[i].cmd_in_progress = false;
>>>> +		init_timer(&chips[i].timeout, occ_cmd_timeout_handler,
>>>> +			   &chips[i]);
>>>> +		i++;
>>>> +	}
>>>> +
>>>> +	power_mgt = dt_find_by_path(dt_root, "/ibm,opal/power-mgt");
>>>> +	if (!power_mgt) {
>>>> +		prerror("OCC: dt node /ibm,opal/power-mgt not found\n");
>>>> +		free(chips);
>>>> +		return;
>>>> +	}
>>>> +
>>>> +	dt_add_property_string(power_mgt, "compatible",
>>>> +			       "ibm,opal-occ-cmd-rsp-interface");
>>>> +	opal_register(OPAL_OCC_COMMAND, opal_occ_command, 3);
>>>> +}
>>>> +
>>>>  /* CPU-OCC PState init */
>>>>  /* Called after OCC init on P8 and P9 */
>>>>  void occ_pstates_init(void)
>>>> @@ -909,6 +1351,9 @@ void occ_pstates_init(void)
>>>>  		chip->throttle = 0;
>>>>  	opal_add_poller(occ_throttle_poll, NULL);
>>>>  	occ_pstates_initialized = true;
>>>> +
>>>> +	/* Init OPAL-OCC command-response interface */
>>>> +	occ_cmd_interface_init();
>>>>  }
>>>>  
>>>>  struct occ_load_req {
>>>> @@ -1394,8 +1839,10 @@ void occ_p9_interrupt(uint32_t chip_id)
>>>>  	if (ireg & OCB_OCI_OCIMISC_IRQ_TMGT)
>>>>  		prd_tmgt_interrupt(chip_id);
>>>>  
>>>> -	if (ireg & OCB_OCI_OCIMISC_IRQ_SHMEM)
>>>> +	if (ireg & OCB_OCI_OCIMISC_IRQ_SHMEM) {
>>>>  		occ_throttle_poll(NULL);
>>>> +		handle_occ_rsp(chip_id);
>>>> +	}
>>>>  
>>>>  	/* We may have masked-out OCB_OCI_OCIMISC_IRQ in the previous
>>>>  	 * OCCMISC_AND write. Check if there are any new source bits set,
>>>> @@ -1417,5 +1864,3 @@ void occ_fsp_init(void)
>>>>  	if (fsp_present())
>>>>  		fsp_register_client(&fsp_occ_client, FSP_MCLASS_OCC);
>>>>  }
>>>> -
>>>> -
>>>> diff --git a/include/opal-api.h b/include/opal-api.h
>>>> index 80033c6..4acbd44 100644
>>>> --- a/include/opal-api.h
>>>> +++ b/include/opal-api.h
>>>> @@ -55,6 +55,10 @@
>>>>  #define OPAL_XSCOM_CTR_OFFLINED	-30
>>>>  #define OPAL_XIVE_PROVISIONING	-31
>>>>  #define OPAL_XIVE_FREE_ACTIVE	-32
>>>> +#define OPAL_OCC_INVALID_STATE	-33
>>>> +#define OPAL_OCC_BUSY		-34
>>>> +#define OPAL_OCC_CMD_TIMEOUT	-35
>>>> +#define OPAL_OCC_CMD_MISMATCH -36
>>>>  
>>>>  /* API Tokens (in r0) */
>>>>  #define OPAL_INVALID_CALL		       -1
>>>> @@ -204,7 +208,8 @@
>>>>  #define OPAL_NPU_INIT_CONTEXT			146
>>>>  #define OPAL_NPU_DESTROY_CONTEXT		147
>>>>  #define OPAL_NPU_MAP_LPAR			148
>>>> -#define OPAL_LAST				148
>>>> +#define OPAL_OCC_COMMAND			149
>>>> +#define OPAL_LAST				149
>>>>  
>>>>  /* Device tree flags */
>>>>  
>>>> @@ -1021,6 +1026,67 @@ typedef struct oppanel_line {
>>>>  	__be64 line_len;
>>>>  } oppanel_line_t;
>>>>  
>>>> +enum occ_cmd {
>>>> +	OCC_CMD_AMESTER_PASS_THRU = 0,
>>>> +	OCC_CMD_CLEAR_SENSOR_DATA,
>>>> +	OCC_CMD_SET_POWER_CAP,
>>>> +	OCC_CMD_SET_POWER_SHIFTING_RATIO,
>>>> +	OCC_CMD_LAST
>>>> +};
>>>> +
>>>> +enum occ_cmd_data_length {
>>>> +	OCC_CMD_DL_AMESTER_PASS_THRU		= 0, /* Variable data length */
>>>> +	OCC_CMD_DL_CLEAR_SENSOR_DATA		= 4,
>>>> +	OCC_CMD_DL_SET_POWER_CAP		= 2,
>>>> +	OCC_CMD_DL_SET_POWER_SHIFTING_RATIO	= 1,
>>>> +};
>>>> +
>>>> +enum occ_rsp_data_length {
>>>> +	OCC_RSP_DL_AMESTER_PASS_THRU		= 0, /* Variable data length */
>>>> +	OCC_RSP_DL_CLEAR_SENSOR_DATA		= 4,
>>>> +	OCC_RSP_DL_SET_POWER_CAP		= 2,
>>>> +	OCC_RSP_DL_SET_POWER_SHIFTING_RATIO	= 1,
>>>> +};
>>>> +
>>>> +enum occ_sensor_limit_group {
>>>> +	OCC_SENSOR_LIMIT_GROUP_CSM		= 0x10,
>>>> +	OCC_SENSOR_LIMIT_GROUP_PROFILER		= 0x20,
>>>> +	OCC_SENSOR_LIMIT_GROUP_JOB_SCHED	= 0x30,
>>>> +};
>>>> +
>>>> +#define MAX_OPAL_CMD_DATA_LENGTH	4090
>>>> +#define MAX_OCC_RSP_DATA_LENGTH		8698
>>>> +
>>>> +enum occ_response_status {
>>>> +	OCC_SUCCESS			= 0x00,
>>>> +	OCC_INVALID_COMMAND		= 0x11,
>>>> +	OCC_INVALID_CMD_DATA_LENGTH	= 0x12,
>>>> +	OCC_INVALID_DATA		= 0x13,
>>>> +	OCC_INTERNAL_ERROR		= 0x15,
>>>> +};
>>>> +
>>>> +struct opal_occ_cmd_rsp_msg {
>>>> +	u8 *cdata;
>>>> +	u8 *rdata;
>>>> +	__be16 cdata_size;
>>>> +	__be16 rdata_size;
>>>> +	u8 cmd;
>>>> +	u8 request_id;
>>>> +	u8 status;
>>>> +};
>>>> +
>>>> +struct opal_occ_cmd_data {
>>>> +	u16 size;
>>>> +	u8 cmd;
>>>> +	u8 data[];
>>>> +};
>>>> +
>>>> +struct opal_occ_rsp_data {
>>>> +	u16 size;
>>>> +	u8 status;
>>>> +	u8 data[];
>>>> +};
>>>> +
>>>>  enum opal_prd_msg_type {
>>>>  	OPAL_PRD_MSG_TYPE_INIT = 0,	/* HBRT --> OPAL */
>>>>  	OPAL_PRD_MSG_TYPE_FINI,		/* HBRT/kernel --> OPAL */
>>
>>
>
diff mbox

Patch

diff --git a/hw/occ.c b/hw/occ.c
index 4be4df4..aa01bba 100644
--- a/hw/occ.c
+++ b/hw/occ.c
@@ -119,6 +119,87 @@  struct occ_pstate_table {
 } __packed;
 
 /**
+ * OPAL-OCC Command Response Interface
+ *
+ * OPAL-OCC Command Buffer
+ *
+ * ---------------------------------------------------------------------
+ * | OPAL  |  Cmd    | OPAL |	       | Cmd Data | Cmd Data | OPAL    |
+ * | Cmd   | Request | OCC  | Reserved | Length   | Length   | Cmd     |
+ * | Flags |   ID    | Cmd  |	       | (MSB)    | (LSB)    | Data... |
+ * ---------------------------------------------------------------------
+ * |  ….OPAL Command Data up to max of Cmd Data Length 4090 bytes      |
+ * |								       |
+ * ---------------------------------------------------------------------
+ *
+ * OPAL Command Flag
+ *
+ * -----------------------------------------------------------------
+ * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
+ * | (msb) |	   |	   |	   |	   |	   |	   | (lsb) |
+ * -----------------------------------------------------------------
+ * |Cmd    |       |       |       |       |       |       |       |
+ * |Ready  |	   |	   |	   |	   |	   |	   |	   |
+ * -----------------------------------------------------------------
+ *
+ * struct opal_command_buffer -	Defines the layout of OPAL command buffer
+ * @flag:			Provides general status of the command
+ * @request_id:			Token to identify request
+ * @cmd:			Command sent
+ * @data_size:			Command data length
+ * @data:			Command specific data
+ * @spare:			Unused byte
+ */
+struct opal_command_buffer {
+	u8 flag;
+	u8 request_id;
+	u8 cmd;
+	u8 spare;
+	u16 data_size;
+	u8 data[MAX_OPAL_CMD_DATA_LENGTH];
+} __packed;
+
+/**
+ * OPAL-OCC Response Buffer
+ *
+ * ---------------------------------------------------------------------
+ * | OCC   |  Cmd    | OPAL | Response | Rsp Data | Rsp Data | OPAL    |
+ * | Rsp   | Request | OCC  |  Status  | Length   | Length   | Rsp     |
+ * | Flags |   ID    | Cmd  |	       | (MSB)    | (LSB)    | Data... |
+ * ---------------------------------------------------------------------
+ * |  ….OPAL Response Data up to max of Rsp Data Length 8698 bytes     |
+ * |								       |
+ * ---------------------------------------------------------------------
+ *
+ * OCC Response Flag
+ *
+ * -----------------------------------------------------------------
+ * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
+ * | (msb) |	   |	   |	   |	   |	   |	   | (lsb) |
+ * -----------------------------------------------------------------
+ * |       |       |       |       |       |       |OCC in  | Rsp  |
+ * |       |	   |	   |	   |	   |	   |progress|Ready |
+ * -----------------------------------------------------------------
+ *
+ * struct occ_response_buffer -	Defines the layout of OCC response buffer
+ * @flag:			Provides general status of the response
+ * @request_id:			Token to identify request
+ * @cmd:			Command requested
+ * @status:			Indicates success/failure status of
+ *				the command
+ * @data_size:			Response data length
+ * @data:			Response specific data
+ */
+struct occ_response_buffer {
+	u8 flag;
+	u8 request_id;
+	u8 cmd;
+	u8 status;
+	u16 data_size;
+	u8 data[MAX_OCC_RSP_DATA_LENGTH];
+} __packed;
+
+/**
  * OCC-OPAL Shared Memory Interface Dynamic Data Vx90
  *
  * struct occ_dynamic_data -	Contains runtime attributes
@@ -135,6 +216,8 @@  struct occ_pstate_table {
  * @max_pwr_cap:		Maximum allowed system power cap in Watts
  * @cur_pwr_cap:		Current system power cap
  * @spare/reserved:		Unused data
+ * @cmd:			Opal Command Buffer
+ * @rsp:			OCC Response Buffer
  */
 struct occ_dynamic_data {
 	u8 occ_state;
@@ -150,7 +233,9 @@  struct occ_dynamic_data {
 	u16 min_pwr_cap;
 	u16 max_pwr_cap;
 	u16 cur_pwr_cap;
-	u64 reserved;
+	u8 pad[112];
+	struct opal_command_buffer cmd;
+	struct occ_response_buffer rsp;
 } __packed;
 
 static bool occ_reset;
@@ -842,6 +927,363 @@  done:
 	unlock(&occ_lock);
 }
 
+/* Bit values used in the OCC response flag and the OPAL command flag */
+#define OCC_RSP_READY		0x01
+#define OCC_IN_PROGRESS		0x02
+#define OPAL_CMD_READY		0x80
+
+/* OCC states as reported in the dynamic data 'occ_state' byte */
+enum occ_state {
+	OCC_STATE_NOT_RUNNING	= 0x00,
+	OCC_STATE_STANDBY	= 0x01,
+	OCC_STATE_OBSERVATION	= 0x02,
+	OCC_STATE_ACTIVE	= 0x03,
+	OCC_STATE_SAFE		= 0x04,
+	OCC_STATE_CHAR		= 0x05, /* Characterization */
+};
+
+/* OCC role as reported by the v9 pstate table */
+enum occ_role {
+	OCC_ROLE_SLAVE		= 0x0,
+	OCC_ROLE_MASTER		= 0x1,
+};
+
+/* Raw command values written to the OPAL command buffer in HOMER */
+enum occ_cmd_value {
+	OCC_CMD_VALUE_AMESTER_PASS_THRU		= 0x41,
+	OCC_CMD_VALUE_CLEAR_SENSOR_DATA		= 0xD0,
+	OCC_CMD_VALUE_SET_POWER_CAP		= 0xD1,
+	OCC_CMD_VALUE_SET_POWER_SHIFTING_RATIO	= 0xD2,
+};
+
+struct opal_occ_cmd_info {
+	enum	occ_cmd_value value;
+	int	timeout_ms;
+	u16	state_mask;
+	u8	role_mask;
+};
+
+static struct opal_occ_cmd_info occ_cmds[] = {
+	{	OCC_CMD_VALUE_AMESTER_PASS_THRU,
+		1000,
+		PPC_BIT16(OCC_STATE_OBSERVATION) |
+		PPC_BIT16(OCC_STATE_ACTIVE) |
+		PPC_BIT16(OCC_STATE_CHAR),
+		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
+	},
+	{	OCC_CMD_VALUE_CLEAR_SENSOR_DATA,
+		1000,
+		PPC_BIT16(OCC_STATE_OBSERVATION) |
+		PPC_BIT16(OCC_STATE_ACTIVE) |
+		PPC_BIT16(OCC_STATE_CHAR),
+		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
+	},
+	{	OCC_CMD_VALUE_SET_POWER_CAP,
+		1000,
+		PPC_BIT16(OCC_STATE_OBSERVATION) |
+		PPC_BIT16(OCC_STATE_ACTIVE) |
+		PPC_BIT16(OCC_STATE_CHAR),
+		PPC_BIT8(OCC_ROLE_MASTER)
+	},
+	{	OCC_CMD_VALUE_SET_POWER_SHIFTING_RATIO,
+		1000,
+		PPC_BIT16(OCC_STATE_OBSERVATION) |
+		PPC_BIT16(OCC_STATE_ACTIVE) |
+		PPC_BIT16(OCC_STATE_CHAR),
+		PPC_BIT8(OCC_ROLE_MASTER) | PPC_BIT8(OCC_ROLE_SLAVE)
+	},
+};
+
+/* Per-chip state for the OPAL-OCC command/response interface */
+static struct cmd_interface {
+	struct lock queue_lock;		/* Serializes command submission/completion */
+	struct timer timeout;		/* Fires if OCC does not respond in time */
+	struct opal_command_buffer *cmd;	/* OPAL command buffer in HOMER */
+	struct occ_response_buffer *rsp;	/* OCC response buffer in HOMER */
+	struct opal_occ_cmd_rsp_msg *msg;	/* Host message of in-flight command */
+	u32 id;				/* Chip id */
+	u8 occ_role;			/* OCC_ROLE_MASTER/OCC_ROLE_SLAVE */
+	u8 *occ_state;			/* Points into OCC dynamic data */
+	bool cmd_in_progress;		/* Command submitted, response pending */
+} *chips;
+
+/* Deferred response, parked while an earlier async message awaits the host */
+struct occ_rsp_msg {
+	struct list_node link;
+	u8 token;
+	int rc;
+};
+
+static LIST_HEAD(rsp_msg_list);
+static struct lock rsp_msg_lock = LOCK_UNLOCKED;
+static bool rsp_msg_in_use;	/* An async completion message is outstanding */
+
+/*
+ * Map a chip id to its command interface.  Returns NULL if the chip is
+ * unknown or the interface was never initialized.
+ */
+static inline struct cmd_interface *get_chip_cmd_interface(int chip_id)
+{
+	struct proc_chip *chip;
+	int i = 0;
+
+	/*
+	 * The interface is only set up for the Vx90 layout; this is also
+	 * reachable from the OCC interrupt path, so guard against an
+	 * uninitialized (or torn down) chips array.
+	 */
+	if (!chips)
+		return NULL;
+
+	for_each_chip(chip) {
+		if (chips[i].id == chip_id)
+			return &chips[i];
+		i++;
+	}
+
+	return NULL;
+}
+
+/* True while OCC has flagged the previous command as still being worked on */
+static inline bool occ_in_progress(struct cmd_interface *chip)
+{
+	u8 rsp_flag = chip->rsp->flag;
+
+	return rsp_flag == OCC_IN_PROGRESS;
+}
+
+/*
+ * Write the in-flight command (chip->msg) into the OPAL command buffer
+ * and arm the response timeout.  Caller holds chip->queue_lock.
+ * Returns OPAL_ASYNC_COMPLETION on submission, OPAL_OCC_BUSY if OCC is
+ * still processing the previous command.
+ */
+static int write_occ_cmd(struct cmd_interface *chip, bool retry)
+{
+	struct opal_command_buffer *cmd = chip->cmd;
+	struct opal_occ_cmd_rsp_msg *msg = chip->msg;
+
+	/* A new command may not preempt one OCC is working on; a retry
+	 * of the same command may proceed. */
+	if (!retry && occ_in_progress(chip)) {
+		chip->cmd_in_progress = false;
+		return OPAL_OCC_BUSY;
+	}
+
+	/* Populate the buffer first, then raise 'Cmd Ready' last so OCC
+	 * only ever sees a complete command.
+	 * NOTE(review): no explicit store barrier between the data writes
+	 * and setting OPAL_CMD_READY - confirm ordering is guaranteed for
+	 * this shared (HOMER) memory as seen by the OCC. */
+	cmd->flag = chip->rsp->flag = 0;
+	cmd->cmd = occ_cmds[msg->cmd].value;
+	cmd->request_id = msg->request_id;
+	cmd->data_size = msg->cdata_size;
+	memcpy(&cmd->data, msg->cdata, msg->cdata_size);
+	cmd->flag = OPAL_CMD_READY;
+
+	/* If OCC never responds, occ_cmd_timeout_handler() fails the command */
+	schedule_timer(&chip->timeout,
+		       msecs_to_tb(occ_cmds[msg->cmd].timeout_ms));
+
+	return OPAL_ASYNC_COMPLETION;
+}
+
+/*
+ * OPAL_OCC_COMMAND handler: validate @msg and submit an inband command
+ * to the OCC on @chip_id.  On success returns OPAL_ASYNC_COMPLETION and
+ * the result is later delivered as an async completion message keyed by
+ * msg->request_id.  @retry re-submits even while OCC reports the
+ * previous command as in progress.
+ */
+static int64_t opal_occ_command(int chip_id, struct opal_occ_cmd_rsp_msg *msg,
+				bool retry)
+{
+	struct cmd_interface *chip;
+	int rc;
+
+	if (!msg || !opal_addr_valid(msg->cdata) ||
+	    !opal_addr_valid(msg->rdata))
+		return OPAL_PARAMETER;
+
+	if (msg->cmd >= OCC_CMD_LAST)
+		return OPAL_UNSUPPORTED;
+
+	if (msg->cdata_size > MAX_OPAL_CMD_DATA_LENGTH)
+		return OPAL_PARAMETER;
+
+	chip = get_chip_cmd_interface(chip_id);
+	if (!chip)
+		return OPAL_PARAMETER;
+
+	/* The command must be allowed for this chip's OCC role... */
+	if (!(PPC_BIT8(chip->occ_role) & occ_cmds[msg->cmd].role_mask))
+		return OPAL_PARAMETER;
+
+	/* ...and for the OCC's current state.
+	 * NOTE(review): *chip->occ_state is read without queue_lock and
+	 * can change concurrently - confirm this race is acceptable. */
+	if (!(PPC_BIT16(*chip->occ_state) & occ_cmds[msg->cmd].state_mask))
+		return OPAL_OCC_INVALID_STATE;
+
+	/* Only one command may be in flight per chip */
+	lock(&chip->queue_lock);
+	if (chip->cmd_in_progress) {
+		rc = OPAL_OCC_BUSY;
+		goto out;
+	}
+
+	chip->msg = msg;
+	chip->cmd_in_progress = true;
+	rc = write_occ_cmd(chip, retry);
+out:
+	unlock(&chip->queue_lock);
+	return rc;
+}
+
+/* Verify the shared command buffer still matches the command we issued
+ * (i.e. it was not overwritten while the response was pending). */
+static inline bool sanity_check_opal_cmd(struct opal_command_buffer *cmd,
+					 struct opal_occ_cmd_rsp_msg *msg)
+{
+	if (cmd->cmd != occ_cmds[msg->cmd].value)
+		return false;
+	if (cmd->request_id != msg->request_id)
+		return false;
+
+	return cmd->data_size == msg->cdata_size;
+}
+
+/*
+ * Verify OCC's response echoes the command value and request id of the
+ * in-flight command; a mismatch means the response is stale or for a
+ * different request.
+ */
+static inline bool check_occ_rsp(struct opal_command_buffer *cmd,
+				 struct occ_response_buffer *rsp)
+{
+	/* Keep a separator between the concatenated literals so the log
+	 * message does not run the words together. */
+	if (cmd->cmd != rsp->cmd) {
+		prlog(PR_WARNING, "OCC: Command value mismatch in OCC response "
+		      "rsp->cmd = %d cmd->cmd = %d\n", rsp->cmd, cmd->cmd);
+		return false;
+	}
+
+	if (cmd->request_id != rsp->request_id) {
+		prlog(PR_WARNING, "OCC: Request ID mismatch in OCC response "
+		      "rsp->request_id = %d cmd->request_id = %d\n",
+		      rsp->request_id, cmd->request_id);
+		return false;
+	}
+
+	return true;
+}
+
+static void occ_rsp_msg_consumed(void *data __unused);
+
+/* Queue an async completion message to the host; occ_rsp_msg_consumed()
+ * is invoked once the host has consumed it.  Returns non-zero on failure. */
+static inline int queue_occ_rsp_msg(int token, int rc)
+{
+	return opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, occ_rsp_msg_consumed,
+			      token, rc);
+}
+
+/*
+ * Callback run when the host consumes the outstanding async completion:
+ * forward the next deferred response, if any.
+ */
+static void occ_rsp_msg_consumed(void *data __unused)
+{
+	struct occ_rsp_msg *item;
+
+	lock(&rsp_msg_lock);
+
+	/* Queue next message */
+	item = list_pop(&rsp_msg_list, struct occ_rsp_msg, link);
+	if (!item) {
+		rsp_msg_in_use = false;
+		goto exit;
+	}
+
+	if (!queue_occ_rsp_msg(item->token, item->rc)) {
+		rsp_msg_in_use = true;
+		free(item);
+	} else {
+		/* Queueing failed: put the item back at the head so order
+		 * is preserved for the next attempt. */
+		rsp_msg_in_use = false;
+		list_add(&rsp_msg_list, &item->link);
+	}
+exit:
+	unlock(&rsp_msg_lock);
+}
+
+/*
+ * Deliver a command completion (@rc) to the host for @token.  If an
+ * async message is already outstanding, park the response on
+ * rsp_msg_list until occ_rsp_msg_consumed() drains it.
+ */
+static void send_occ_rsp_msg(u8 token, int rc)
+{
+	struct occ_rsp_msg *item;
+
+	lock(&rsp_msg_lock);
+	/* Fast path: nothing outstanding, queue the message directly */
+	if (!rsp_msg_in_use && !queue_occ_rsp_msg(token, rc)) {
+		rsp_msg_in_use = true;
+		goto out;
+	}
+
+	item = malloc(sizeof(*item));
+	if (!item) {
+		/* Don't drop the completion silently: the host will never
+		 * see a response for this token. */
+		prerror("OCC: Failed to allocate deferred response, token %d rc %d\n",
+			token, rc);
+		goto out;
+	}
+
+	item->token = token;
+	item->rc = rc;
+	item->link.next = item->link.prev = NULL;
+	list_add_tail(&rsp_msg_list, &item->link);
+out:
+	unlock(&rsp_msg_lock);
+}
+
+/*
+ * Timer callback: OCC did not respond within the command's timeout.
+ * Fail the in-flight command back to the host with OPAL_OCC_CMD_TIMEOUT.
+ */
+static void occ_cmd_timeout_handler(struct timer *t __unused, void *data,
+				    uint64_t now __unused)
+{
+	struct cmd_interface *chip = data;
+
+	lock(&chip->queue_lock);
+	/* The response may have raced in and completed the command already */
+	if (!chip->cmd_in_progress)
+		goto exit;
+
+	chip->cmd_in_progress = false;
+	send_occ_rsp_msg(chip->msg->request_id, OPAL_OCC_CMD_TIMEOUT);
+exit:
+	unlock(&chip->queue_lock);
+}
+
+/*
+ * Copy OCC's response out of the shared buffer into the host message,
+ * then clear the response flag to hand the buffer back to OCC.  The
+ * copy must complete before the flag is cleared.
+ */
+static void read_occ_rsp(struct occ_response_buffer *rsp,
+			 struct opal_occ_cmd_rsp_msg *msg)
+{
+	/* Copy response to host buffer */
+	msg->status = rsp->status;
+	msg->rdata_size = rsp->data_size;
+	memcpy(msg->rdata, rsp->data, rsp->data_size);
+
+	/* Clear the OCC response flag */
+	rsp->flag = 0;
+}
+
+/*
+ * Called from the OCC interrupt path (shared-memory IRQ): if the OCC
+ * response for @chip_id is ready, validate it, copy it to the host and
+ * queue the async completion.
+ */
+static void handle_occ_rsp(uint32_t chip_id)
+{
+	struct cmd_interface *chip;
+	struct opal_command_buffer *cmd;
+	struct occ_response_buffer *rsp;
+	struct opal_occ_cmd_rsp_msg *msg;
+
+	chip = get_chip_cmd_interface(chip_id);
+	if (!chip)
+		return;
+
+	cmd = chip->cmd;
+	rsp = chip->rsp;
+	msg = chip->msg;
+
+	/* Nothing to do unless OCC has flagged a ready response.
+	 * NOTE(review): rsp->flag is tested before taking queue_lock -
+	 * confirm a concurrent write_occ_cmd() clearing the flag cannot
+	 * race with this check. */
+	if (rsp->flag != OCC_RSP_READY)
+		return;
+	lock(&chip->queue_lock);
+	/* Command may already have been failed by the timeout handler */
+	if (!chip->cmd_in_progress)
+		goto exit;
+
+	chip->cmd_in_progress = false;
+	cancel_timer(&chip->timeout);
+	if (!sanity_check_opal_cmd(cmd, chip->msg) ||
+	    !check_occ_rsp(cmd, rsp)) {
+		send_occ_rsp_msg(msg->request_id, OPAL_OCC_CMD_MISMATCH);
+		goto exit;
+	}
+
+	read_occ_rsp(chip->rsp, msg);
+	send_occ_rsp_msg(msg->request_id, OPAL_SUCCESS);
+exit:
+	unlock(&chip->queue_lock);
+}
+
+/*
+ * Set up the per-chip OPAL-OCC command/response interface and register
+ * the OPAL_OCC_COMMAND call.  Only the Vx90 pstate-table layout carries
+ * the command/response buffers; otherwise this is a no-op.
+ */
+static void occ_cmd_interface_init(void)
+{
+	struct dt_node *power_mgt;
+	struct occ_dynamic_data *data;
+	struct occ_pstate_table *pdata;
+	struct proc_chip *chip;
+	int i = 0, nr_chips = 0;
+
+	chip = next_chip(NULL);
+	pdata = get_occ_pstate_table(chip);
+	if (pdata->version != 0x90)
+		return;
+
+	for_each_chip(chip)
+		nr_chips++;
+
+	chips = malloc(sizeof(*chips) * nr_chips);
+	assert(chips);
+
+	for_each_chip(chip) {
+		pdata = get_occ_pstate_table(chip);
+		data = get_occ_dynamic_data(chip);
+		chips[i].id = chip->id;
+		chips[i].occ_role = pdata->v9.occ_role;
+		chips[i].occ_state = &data->occ_state;
+		chips[i].cmd = &data->cmd;
+		chips[i].rsp = &data->rsp;
+		init_lock(&chips[i].queue_lock);
+		chips[i].cmd_in_progress = false;
+		init_timer(&chips[i].timeout, occ_cmd_timeout_handler,
+			   &chips[i]);
+		i++;
+	}
+
+	power_mgt = dt_find_by_path(dt_root, "/ibm,opal/power-mgt");
+	if (!power_mgt) {
+		prerror("OCC: dt node /ibm,opal/power-mgt not found\n");
+		free(chips);
+		/* Don't leave a dangling pointer behind: the OCC interrupt
+		 * path can still reach the chips array via handle_occ_rsp() */
+		chips = NULL;
+		return;
+	}
+
+	dt_add_property_string(power_mgt, "compatible",
+			       "ibm,opal-occ-cmd-rsp-interface");
+	opal_register(OPAL_OCC_COMMAND, opal_occ_command, 3);
+}
+
 /* CPU-OCC PState init */
 /* Called after OCC init on P8 and P9 */
 void occ_pstates_init(void)
@@ -909,6 +1351,9 @@  void occ_pstates_init(void)
 		chip->throttle = 0;
 	opal_add_poller(occ_throttle_poll, NULL);
 	occ_pstates_initialized = true;
+
+	/* Init OPAL-OCC command-response interface */
+	occ_cmd_interface_init();
 }
 
 struct occ_load_req {
@@ -1394,8 +1839,10 @@  void occ_p9_interrupt(uint32_t chip_id)
 	if (ireg & OCB_OCI_OCIMISC_IRQ_TMGT)
 		prd_tmgt_interrupt(chip_id);
 
-	if (ireg & OCB_OCI_OCIMISC_IRQ_SHMEM)
+	if (ireg & OCB_OCI_OCIMISC_IRQ_SHMEM) {
 		occ_throttle_poll(NULL);
+		handle_occ_rsp(chip_id);
+	}
 
 	/* We may have masked-out OCB_OCI_OCIMISC_IRQ in the previous
 	 * OCCMISC_AND write. Check if there are any new source bits set,
@@ -1417,5 +1864,3 @@  void occ_fsp_init(void)
 	if (fsp_present())
 		fsp_register_client(&fsp_occ_client, FSP_MCLASS_OCC);
 }
-
-
diff --git a/include/opal-api.h b/include/opal-api.h
index 80033c6..4acbd44 100644
--- a/include/opal-api.h
+++ b/include/opal-api.h
@@ -55,6 +55,10 @@ 
 #define OPAL_XSCOM_CTR_OFFLINED	-30
 #define OPAL_XIVE_PROVISIONING	-31
 #define OPAL_XIVE_FREE_ACTIVE	-32
+#define OPAL_OCC_INVALID_STATE	-33
+#define OPAL_OCC_BUSY		-34
+#define OPAL_OCC_CMD_TIMEOUT	-35
+#define OPAL_OCC_CMD_MISMATCH	-36
 
 /* API Tokens (in r0) */
 #define OPAL_INVALID_CALL		       -1
@@ -204,7 +208,8 @@ 
 #define OPAL_NPU_INIT_CONTEXT			146
 #define OPAL_NPU_DESTROY_CONTEXT		147
 #define OPAL_NPU_MAP_LPAR			148
-#define OPAL_LAST				148
+#define OPAL_OCC_COMMAND			149
+#define OPAL_LAST				149
 
 /* Device tree flags */
 
@@ -1021,6 +1026,67 @@  typedef struct oppanel_line {
 	__be64 line_len;
 } oppanel_line_t;
 
+/* Inband OCC commands selectable via OPAL_OCC_COMMAND; indexes the
+ * per-command attribute tables in OPAL. */
+enum occ_cmd {
+	OCC_CMD_AMESTER_PASS_THRU = 0,
+	OCC_CMD_CLEAR_SENSOR_DATA,
+	OCC_CMD_SET_POWER_CAP,
+	OCC_CMD_SET_POWER_SHIFTING_RATIO,
+	OCC_CMD_LAST
+};
+
+/* Expected command-data length for each occ_cmd */
+enum occ_cmd_data_length {
+	OCC_CMD_DL_AMESTER_PASS_THRU		= 0, /* Variable data length */
+	OCC_CMD_DL_CLEAR_SENSOR_DATA		= 4,
+	OCC_CMD_DL_SET_POWER_CAP		= 2,
+	OCC_CMD_DL_SET_POWER_SHIFTING_RATIO	= 1,
+};
+
+/* Expected response-data length for each occ_cmd */
+enum occ_rsp_data_length {
+	OCC_RSP_DL_AMESTER_PASS_THRU		= 0, /* Variable data length */
+	OCC_RSP_DL_CLEAR_SENSOR_DATA		= 4,
+	OCC_RSP_DL_SET_POWER_CAP		= 2,
+	OCC_RSP_DL_SET_POWER_SHIFTING_RATIO	= 1,
+};
+
+/* Sensor limit groups - presumably operands of OCC_CMD_CLEAR_SENSOR_DATA;
+ * verify against the OCC interface specification. */
+enum occ_sensor_limit_group {
+	OCC_SENSOR_LIMIT_GROUP_CSM		= 0x10,
+	OCC_SENSOR_LIMIT_GROUP_PROFILER		= 0x20,
+	OCC_SENSOR_LIMIT_GROUP_JOB_SCHED	= 0x30,
+};
+
+/* Maximum payload sizes of the shared command/response buffers */
+#define MAX_OPAL_CMD_DATA_LENGTH	4090
+#define MAX_OCC_RSP_DATA_LENGTH		8698
+
+/* Status codes reported by OCC in the response buffer */
+enum occ_response_status {
+	OCC_SUCCESS			= 0x00,
+	OCC_INVALID_COMMAND		= 0x11,
+	OCC_INVALID_CMD_DATA_LENGTH	= 0x12,
+	OCC_INVALID_DATA		= 0x13,
+	OCC_INTERNAL_ERROR		= 0x15,
+};
+
+/* Host <-> OPAL message describing one OCC command and its response.
+ * NOTE(review): raw pointers in an OPAL API structure - confirm how
+ * 32-bit hosts are handled (other OPAL APIs use __be64 addresses). */
+struct opal_occ_cmd_rsp_msg {
+	u8 *cdata;		/* Command data (host buffer) */
+	u8 *rdata;		/* Response data (host buffer) */
+	__be16 cdata_size;	/* Bytes of valid command data */
+	__be16 rdata_size;	/* Bytes of response data written by OPAL */
+	u8 cmd;			/* enum occ_cmd */
+	u8 request_id;		/* Token identifying this request */
+	u8 status;		/* enum occ_response_status from OCC */
+};
+
+struct opal_occ_cmd_data {
+	u16 size;
+	u8 cmd;
+	u8 data[];
+};
+
+struct opal_occ_rsp_data {
+	u16 size;
+	u8 status;
+	u8 data[];
+};
+
 enum opal_prd_msg_type {
 	OPAL_PRD_MSG_TYPE_INIT = 0,	/* HBRT --> OPAL */
 	OPAL_PRD_MSG_TYPE_FINI,		/* HBRT/kernel --> OPAL */