target/riscv: read abstract args using batch
This eliminates the need for an extra nop between the two reads in the case of a 64-bit register.

Change-Id: I2cddc14f7f78181bbda5f931c4e2289cfb7a6674
Signed-off-by: Evgeniy Naydanov <[email protected]>
en-sc committed Apr 27, 2024
1 parent 48b681f commit 8c50d4e
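
For context, a condensed, hypothetical sketch of the new read path (the helper name read_arg0_via_batch is invented; riscv_batch_alloc(), riscv_batch_add_dm_read(), riscv_batch_get_dmi_read_data(), batch_run_timeout() and RISCV013_INFO() are declarations this commit adds or relies on):

/* Hypothetical sketch: read a 64-bit abstract argument (arg 0) with a single
 * batch. The two DM_DATA reads are queued back to back, so the result of the
 * first read is captured by the second scan and no nop is needed in between;
 * only the batch's trailing nop collects the last result. */
static int read_arg0_via_batch(struct target *target, uint64_t *value)
{
	RISCV013_INFO(info);
	struct riscv_batch *batch = riscv_batch_alloc(target, /*scans*/ 2,
			info->dmi_busy_delay);
	riscv_batch_add_dm_read(batch, DM_DATA0);      /* low 32 bits */
	riscv_batch_add_dm_read(batch, DM_DATA0 + 1);  /* high 32 bits */
	const int result = batch_run_timeout(target, batch);
	if (result == ERROR_OK)
		*value = riscv_batch_get_dmi_read_data(batch, 0)
			| ((uint64_t)riscv_batch_get_dmi_read_data(batch, 1) << 32);
	riscv_batch_free(batch);
	return result;
}
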
Showing 3 changed files with 167 additions and 41 deletions.
57 changes: 44 additions & 13 deletions src/target/riscv/batch.c
@@ -29,6 +29,7 @@ struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_
out->allocated_scans = scans;
out->idle_count = idle;
out->last_scan = RISCV_SCAN_TYPE_INVALID;
out->was_run = false;

out->data_out = NULL;
out->data_in = NULL;
@@ -89,17 +90,27 @@ bool riscv_batch_full(struct riscv_batch *batch)
return riscv_batch_available_scans(batch) == 0;
}

int riscv_batch_run(struct riscv_batch *batch, bool resets_delays,
size_t reset_delays_after)
static bool riscv_batch_was_busy(const struct riscv_batch *batch, size_t scan_i)
{
if (batch->used_scans == 0) {
LOG_TARGET_DEBUG(batch->target, "Ignoring empty batch.");
return ERROR_OK;
}
assert(batch->was_run);
assert(scan_i < batch->used_scans);
const struct scan_field *field = batch->fields + scan_i;
const uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
return get_field(in, DTM_DMI_OP) == DTM_DMI_OP_BUSY;
}

riscv_batch_add_nop(batch);
int riscv_batch_run_from(struct riscv_batch *batch, size_t start_i,
bool resets_delays, size_t reset_delays_after)
{
assert(batch->used_scans);
assert(batch->last_scan == RISCV_SCAN_TYPE_NOP);
assert(!batch->was_run || riscv_batch_was_busy(batch, start_i));
assert(start_i == 0 || !riscv_batch_was_busy(batch, start_i - 1));

for (size_t i = 0; i < batch->used_scans; ++i) {
LOG_TARGET_DEBUG(batch->target, "Running scans [%zu, %zu)",
start_i, batch->used_scans);

for (size_t i = start_i; i < batch->used_scans; ++i) {
if (bscan_tunnel_ir_width != 0)
riscv_add_bscan_tunneled_scan(batch->target, batch->fields + i, batch->bscan_ctxt + i);
else
@@ -122,19 +133,29 @@ int riscv_batch_run(struct riscv_batch *batch, bool resets_delays,

if (bscan_tunnel_ir_width != 0) {
/* need to right-shift "in" by one bit, because of clock skew between BSCAN TAP and DM TAP */
for (size_t i = 0; i < batch->used_scans; ++i) {
for (size_t i = start_i; i < batch->used_scans; ++i) {
if ((batch->fields + i)->in_value)
buffer_shr((batch->fields + i)->in_value, DMI_SCAN_BUF_SIZE, 1);
}
}

for (size_t i = 0; i < batch->used_scans; ++i)
for (size_t i = start_i; i < batch->used_scans; ++i)
riscv_decode_dmi_scan(batch->target, batch->idle_count, batch->fields + i,
/*discard_in*/ false);

batch->was_run = true;
return ERROR_OK;
}

void riscv_batch_update_idle(struct riscv_batch *batch, size_t new_idle)
{
assert(batch->was_run);
const size_t old_idle = batch->idle_count;
if (new_idle > old_idle)
jtag_add_runtest(new_idle - old_idle, TAP_IDLE);
batch->idle_count = new_idle;
}

void riscv_batch_add_dm_write(struct riscv_batch *batch, uint64_t address, uint32_t data,
bool read_back)
{
@@ -210,12 +231,22 @@ size_t riscv_batch_available_scans(struct riscv_batch *batch)

bool riscv_batch_dmi_busy_encountered(const struct riscv_batch *batch)
{
assert(batch->was_run);
if (batch->used_scans == 0)
/* Empty batch */
return false;

assert(batch->last_scan == RISCV_SCAN_TYPE_NOP);
const struct scan_field *field = batch->fields + batch->used_scans - 1;
const uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
return get_field(in, DTM_DMI_OP) == DTM_DMI_OP_BUSY;
return riscv_batch_was_busy(batch, batch->used_scans - 1);
}

size_t riscv_batch_finished_scans(const struct riscv_batch *batch)
{
if (!riscv_batch_dmi_busy_encountered(batch))
return batch->used_scans;
assert(batch->used_scans);
size_t first_busy = 0;
while (!riscv_batch_was_busy(batch, first_busy))
++first_busy;
return first_busy;
}
24 changes: 22 additions & 2 deletions src/target/riscv/batch.h
@@ -44,6 +44,9 @@ struct riscv_batch {
/* The read keys. */
size_t *read_keys;
size_t read_keys_used;

/* Flag indicating that the last run of the batch was successful. */
bool was_run;
};

/* Allocates (or frees) a new scan set. "scans" is the maximum number of JTAG
@@ -55,16 +58,33 @@ void riscv_batch_free(struct riscv_batch *batch);
/* Checks to see if this batch is full. */
bool riscv_batch_full(struct riscv_batch *batch);

/* Executes this batch of JTAG DTM DMI scans.
/* Executes this batch of JTAG DTM DMI scans, starting from the "start" scan.
* If the batch is run for the first time, "start" is expected to be zero.
* It is expected that the batch ends with a DMI NOP operation.
*
* If resets_delays is true, the algorithm will stop inserting idle cycles
* (JTAG Run-Test Idle) after "reset_delays_after" number of scans is
* performed. This is useful for stress-testing of RISC-V algorithms in
* OpenOCD that are based on batches.
*/
int riscv_batch_run(struct riscv_batch *batch, bool resets_delays,
int riscv_batch_run_from(struct riscv_batch *batch, size_t start, bool resets_delays,
size_t reset_delays_after);

/* Change the number of JTAG Run-Test Idle cycles added between DMI operations,
* adding the difference to the JTAG queue.
*/
void riscv_batch_update_idle(struct riscv_batch *batch, size_t new_idle);

/* Continue running the batch that was once run using "riscv_batch_run" and got
* a DMI busy response on the "finished_scans" scan, updating the number of idle
* cycles used.
*/
int riscv_batch_continue(struct riscv_batch *batch, size_t finished_scans,
size_t new_idle, bool resets_delays, size_t reset_delays_after);

/* Get the number of scans successfully executed from this batch. */
size_t riscv_batch_finished_scans(const struct riscv_batch *batch);

/* Adds a DM register write to this batch. */
void riscv_batch_add_dm_write(struct riscv_batch *batch, uint64_t address, uint32_t data,
bool read_back);
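
As a usage note, a hedged sketch of how the declarations above are meant to compose. It mirrors the batch_run_timeout() loop added to riscv-013.c further down; the helper name and the idle back-off step are invented, and it assumes the batch already ends with a DMI NOP (riscv_batch_add_nop()):

/* Hypothetical caller sketch, no timeout handling: re-run the batch from the
 * first scan that received a DMI "busy" response, growing the Run-Test Idle
 * delay between DMI accesses on every retry. */
static int run_batch_with_retries(struct riscv_batch *batch, size_t idle)
{
	int result = riscv_batch_run_from(batch, 0,
			/*resets_delays*/ false, /*reset_delays_after*/ 0);
	while (result == ERROR_OK && riscv_batch_dmi_busy_encountered(batch)) {
		/* Scans [0, done) completed; scan "done" answered "busy". */
		const size_t done = riscv_batch_finished_scans(batch);
		idle += idle / 10 + 1;  /* assumed back-off policy */
		riscv_batch_update_idle(batch, idle);
		result = riscv_batch_run_from(batch, done,
				/*resets_delays*/ false, /*reset_delays_after*/ 0);
	}
	return result;
}
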
127 changes: 101 additions & 26 deletions src/target/riscv/riscv-013.c
@@ -977,27 +977,51 @@ static int execute_abstract_command(struct target *target, uint32_t command,
return ERROR_OK;
}

static riscv_reg_t read_abstract_arg(struct target *target, unsigned index,
unsigned size_bits)
static void add_data_regs_read(struct riscv_batch *batch, unsigned int index,
unsigned int size_bits)
{
const unsigned int size_in_words = size_bits / 32;
assert(size_bits % 32 == 0);
const unsigned int offset = index * size_in_words;
for (unsigned int i = 0; i < size_in_words; ++i) {
const unsigned int reg_address = DM_DATA0 + offset + i;
riscv_batch_add_dm_read(batch, reg_address);
}
}

static riscv_reg_t read_data_regs(struct riscv_batch *batch,
unsigned int index, unsigned int size_bits)
{
const unsigned int size_in_words = size_bits / 32;
assert(size_bits % 32 == 0);
assert(size_in_words * sizeof(uint32_t) <= sizeof(riscv_reg_t));
riscv_reg_t value = 0;
uint32_t v;
unsigned offset = index * size_bits / 32;
switch (size_bits) {
default:
LOG_TARGET_ERROR(target, "Unsupported size: %d bits", size_bits);
return ~0;
case 64:
if (dm_read(target, &v, DM_DATA0 + offset + 1) == ERROR_OK)
value |= ((uint64_t)v) << 32;
/* falls through */
case 32:
if (dm_read(target, &v, DM_DATA0 + offset) == ERROR_OK)
value |= v;
for (unsigned int i = 0; i < size_in_words; ++i) {
const uint32_t v = riscv_batch_get_dmi_read_data(batch, i);
value |= (riscv_reg_t)v << (i * 32);
}
return value;
}

static int batch_run_timeout(struct target *target, struct riscv_batch *batch);

static int read_abstract_arg(struct target *target, uint64_t *value,
unsigned int index, unsigned int size_bits)
{
assert(value);
RISCV013_INFO(info);
const unsigned char size_in_words = size_bits / 32;
assert(size_bits % 32 == 0);
struct riscv_batch * const batch = riscv_batch_alloc(target, size_in_words,
info->dmi_busy_delay);
add_data_regs_read(batch, index, size_bits);
int result = batch_run_timeout(target, batch);
if (result == ERROR_OK)
*value = read_data_regs(batch, index, size_bits);
riscv_batch_free(batch);
return result;
}

static int write_abstract_arg(struct target *target, unsigned index,
riscv_reg_t value, unsigned size_bits)
{
@@ -1094,7 +1118,7 @@ static int register_read_abstract_with_size(struct target *target,
}

if (value)
*value = read_abstract_arg(target, 0, size);
return read_abstract_arg(target, value, 0, size);

return ERROR_OK;
}
@@ -2630,15 +2654,53 @@ static int sb_write_address(struct target *target, target_addr_t address,
static int batch_run(struct target *target, struct riscv_batch *batch)
{
RISCV_INFO(r);
const int result = riscv_batch_run(batch, /*resets_delays*/ r->reset_delays_wait >= 0,
riscv_batch_add_nop(batch);
const int result = riscv_batch_run_from(batch, 0,
/*resets_delays*/ r->reset_delays_wait >= 0,
r->reset_delays_wait);
/* TODO: `finished_scans` should be the number of scans that have
* finished, not the number of scans scheduled. */
const size_t finished_scans = batch->used_scans;
const size_t finished_scans = riscv_batch_finished_scans(batch);
decrement_reset_delays_counter(target, finished_scans);
return result;
}

static int batch_run_timeout(struct target *target, struct riscv_batch *batch)
{
RISCV013_INFO(info);
size_t finished_scans = 0;
const time_t start = time(NULL);
riscv_batch_add_nop(batch);
int result = ERROR_FAIL;
do {
RISCV_INFO(r);
result = riscv_batch_run_from(batch, finished_scans,
/*resets_delays*/ r->reset_delays_wait >= 0,
r->reset_delays_wait);
const size_t new_finished_scans = riscv_batch_finished_scans(batch);
assert(new_finished_scans >= finished_scans);
decrement_reset_delays_counter(target, new_finished_scans - finished_scans);
finished_scans = new_finished_scans;
if (result != ERROR_OK)
return result;
if (!riscv_batch_dmi_busy_encountered(batch)) {
assert(finished_scans == batch->used_scans);
return ERROR_OK;
}
increase_dmi_busy_delay(target);
riscv_batch_update_idle(batch, info->dmi_busy_delay);
} while (time(NULL) - start < riscv_command_timeout_sec);
assert(result == ERROR_OK);
assert(riscv_batch_dmi_busy_encountered(batch));
/* Reset dmi_busy_delay, so the value doesn't get too big. */
LOG_TARGET_DEBUG(target, "dmi_busy_delay is reset.");
info->dmi_busy_delay = 0;
/* TODO: Introduce a separate timeout for this. */
LOG_TARGET_ERROR(target, "DMI operation didn't complete in %d seconds. "
"The target is either really slow or broken. You could increase "
"the timeout with riscv set_command_timeout_sec.",
riscv_command_timeout_sec);
return ERROR_TIMEOUT_REACHED;
}

static int sba_supports_access(struct target *target, unsigned int size_bytes)
{
RISCV013_INFO(info);
@@ -3711,7 +3773,11 @@ static int read_memory_abstract(struct target *target, target_addr_t address,
if (info->has_aampostincrement == YNM_MAYBE) {
if (result == ERROR_OK) {
/* Safety: double-check that the address was really auto-incremented */
riscv_reg_t new_address = read_abstract_arg(target, 1, riscv_xlen(target));
riscv_reg_t new_address;
result = read_abstract_arg(target, &new_address, 1, riscv_xlen(target));
if (result != ERROR_OK)
return result;

if (new_address == address + size) {
LOG_TARGET_DEBUG(target, "aampostincrement is supported on this target.");
info->has_aampostincrement = YNM_YES;
@@ -3734,7 +3800,10 @@ static int read_memory_abstract(struct target *target, target_addr_t address,
return result;

/* Copy arg0 to buffer (rounded width up to nearest 32) */
riscv_reg_t value = read_abstract_arg(target, 0, width32);
riscv_reg_t value;
result = read_abstract_arg(target, &value, 0, width32);
if (result != ERROR_OK)
return result;
buf_set_u64(p, 0, 8 * size, value);

if (info->has_aampostincrement == YNM_YES)
@@ -3797,7 +3866,11 @@ static int write_memory_abstract(struct target *target, target_addr_t address,
if (info->has_aampostincrement == YNM_MAYBE) {
if (result == ERROR_OK) {
/* Safety: double-check that the address was really auto-incremented */
riscv_reg_t new_address = read_abstract_arg(target, 1, riscv_xlen(target));
riscv_reg_t new_address;
result = read_abstract_arg(target, &new_address, 1, riscv_xlen(target));
if (result != ERROR_OK)
return result;

if (new_address == address + size) {
LOG_TARGET_DEBUG(target, "aampostincrement is supported on this target.");
info->has_aampostincrement = YNM_YES;
Expand Down Expand Up @@ -4177,10 +4250,12 @@ static int read_word_from_dm_data_regs(struct target *target,
struct memory_access_info access, uint32_t index)
{
assert(access.element_size <= 8);
const uint64_t value = read_abstract_arg(target, /*index*/ 0,
uint64_t value;
int result = read_abstract_arg(target, &value, /*index*/ 0,
access.element_size > 4 ? 64 : 32);
set_buffer_and_log_read(access, index, value);
return ERROR_OK;
if (result == ERROR_OK)
set_buffer_and_log_read(access, index, value);
return result;
}

static int read_word_from_s1(struct target *target,
