/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 * Copyright (c) 2023 Mark Cave-Ayland
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */

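/*
 * The host IRQ line state is latched in the STAT_INT bit of the status
 * register so that the line is only raised or lowered on a real change.
 */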
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

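/*
 * DRQ mirrors the chip's DMA request line: drq_state tracks the current
 * level so repeated raise/lower calls don't glitch the line.
 */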
static void esp_raise_drq(ESPState *s)
{
    if (!(s->drq_state)) {
        qemu_irq_raise(s->drq_irq);
        trace_esp_raise_drq();
        s->drq_state = true;
    }
}

static void esp_lower_drq(ESPState *s)
{
    if (s->drq_state) {
        qemu_irq_lower(s->drq_irq);
        trace_esp_lower_drq();
        s->drq_state = false;
    }
}

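/* The current SCSI bus phase is held in the bottom 3 bits of RSTAT */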
static const char *esp_phase_names[8] = {
    "DATA OUT", "DATA IN", "COMMAND", "STATUS",
    "(reserved)", "(reserved)", "MESSAGE OUT", "MESSAGE IN"
};

static void esp_set_phase(ESPState *s, uint8_t phase)
{
    s->rregs[ESP_RSTAT] &= ~7;
    s->rregs[ESP_RSTAT] |= phase;

    trace_esp_set_phase(esp_phase_names[phase]);
}

static uint8_t esp_get_phase(ESPState *s)
{
    return s->rregs[ESP_RSTAT] & 7;
}

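/*
 * Called by the DMA controller: starting a DMA command while DMA is
 * disabled parks the handler in dma_cb, which fires once DMA is enabled.
 */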
void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
        s->async_len = 0;
    }
}

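/*
 * Recalculate the DRQ line level after any FIFO or phase change: during a
 * DMA request DRQ follows the available FIFO space (transfers to the
 * device) or the buffered FIFO data (transfers from the device), otherwise
 * it stays low.
 */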
static void esp_update_drq(ESPState *s)
{
    bool to_device;

    switch (esp_get_phase(s)) {
    case STAT_MO: case STAT_CD: case STAT_DO:
        to_device = true;
        break;
    case STAT_DI: case STAT_ST: case STAT_MI:
        to_device = false;
        break;
    default:
        return;
    }

    if (s->dma) {
        /* DMA request so update DRQ according to transfer direction */
        if (to_device) {
            if (fifo8_num_free(&s->fifo) < 2) {
                esp_lower_drq(s);
            } else {
                esp_raise_drq(s);
            }
        } else {
            if (fifo8_num_used(&s->fifo) < 2) {
                esp_lower_drq(s);
            } else {
                esp_raise_drq(s);
            }
        }
    } else {
        /* Not a DMA request */
        esp_lower_drq(s);
    }
}

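/* FIFO helpers: all of these keep the DRQ line in sync with the FIFO level */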
static void esp_fifo_push(ESPState *s, uint8_t val)
{
    if (fifo8_num_used(&s->fifo) == s->fifo.capacity) {
        trace_esp_error_fifo_overrun();
        return;
    }

    fifo8_push(&s->fifo, val);
    esp_update_drq(s);
}

static void esp_fifo_push_buf(ESPState *s, uint8_t *buf, int len)
{
    fifo8_push_all(&s->fifo, buf, len);
    esp_update_drq(s);
}

static uint8_t esp_fifo_pop(ESPState *s)
{
    uint8_t val;

    if (fifo8_is_empty(&s->fifo)) {
        return 0;
    }

    val = fifo8_pop(&s->fifo);
    esp_update_drq(s);
    return val;
}

static uint32_t esp_fifo_pop_buf(ESPState *s, uint8_t *dest, int maxlen)
{
    uint32_t len = fifo8_pop_buf(&s->fifo, dest, maxlen);

    esp_update_drq(s);
    return len;
}

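/*
 * The 24-bit transfer counter (TC) is assembled from the TCLO/TCMID/TCHI
 * registers; STAT_TC is latched when the counter reaches zero.
 */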
static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    uint32_t old_tc = esp_get_tc(s);

    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;

    if (old_tc && dmalen == 0) {
        s->rregs[ESP_RSTAT] |= STAT_TC;
    }
}

static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}

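/*
 * PDMA ("pseudo-DMA") transfers move data one byte at a time through the
 * FIFO under host CPU control, decrementing TC for each byte while DRQ
 * remains asserted.
 */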
static uint8_t esp_pdma_read(ESPState *s)
{
    uint8_t val;

    val = esp_fifo_pop(s);
    return val;
}

static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    esp_fifo_push(s, val);

    if (dmalen && s->drq_state) {
        dmalen--;
        esp_set_tc(s, dmalen);
    }
}

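/*
 * Start selection of the target addressed by the bus ID register.
 * Returns 0 on success and -1 (raising a disconnect interrupt) if the
 * target does not exist.
 */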
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->rregs[ESP_RSEQ] = SEQ_0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in esp_transfer_data() or esp_command_complete()
     */
    return 0;
}

static void esp_do_dma(ESPState *s);
static void esp_do_nodma(ESPState *s);

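/*
 * The cmdfifo accumulates message out bytes followed by the CDB;
 * cmdfifo_cdb_offset marks where the CDB itself begins.
 */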
static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    fifo8_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    s->data_ready = false;
    if (datalen != 0) {
        /*
         * Switch to DATA phase but wait until initial data xfer is
         * complete before raising the command completion interrupt
         */
        if (datalen > 0) {
            esp_set_phase(s, STAT_DI);
        } else {
            esp_set_phase(s, STAT_DO);
        }
        scsi_req_continue(s->current_req);
    }
}

static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = fifo8_is_empty(&s->cmdfifo) ? 0 :
                          fifo8_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        fifo8_drop(&s->cmdfifo, len);
        s->cmdfifo_cdb_offset = 0;
    }
}

static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}

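/*
 * Selection commands: SELATN sends a MESSAGE OUT byte then the CDB,
 * SEL sends the CDB alone, and SELATNS stops after the first message
 * byte. All of them defer to dma_cb if DMA is not yet enabled.
 */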
static void handle_satn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);

    if (esp_get_tc(s) != 0) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_CD);
    s->cmdfifo_cdb_offset = 0;

    if (esp_get_tc(s) != 0) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);
    s->cmdfifo_cdb_offset = 0;

    if (esp_get_tc(s) != 0) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_pad(ESPState *s)
{
    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static bool esp_cdb_ready(ESPState *s)
{
    int len = fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset;
    const uint8_t *pbuf;
    uint32_t n;
    int cdblen;

    if (len <= 0) {
        return false;
    }

    pbuf = fifo8_peek_bufptr(&s->cmdfifo, len, &n);
    if (n < len) {
        /*
         * In normal use the cmdfifo should never wrap, but include this check
         * to prevent a malicious guest from reading past the end of the
         * cmdfifo data buffer below
         */
        return false;
    }

    cdblen = scsi_cdb_length((uint8_t *)&pbuf[s->cmdfifo_cdb_offset]);

    return cdblen < 0 ? false : (len >= cdblen);
}

static void esp_dma_ti_check(ESPState *s)
{
    if (esp_get_tc(s) == 0 && fifo8_num_used(&s->fifo) < 2) {
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
    }
}

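/*
 * Main DMA transfer loop, dispatched on the current SCSI phase: message
 * out and command bytes accumulate in cmdfifo, DATA IN/OUT moves bytes
 * between the SCSI layer buffer and guest memory (or the FIFO when no
 * DMA memory callbacks are present), and STATUS/MESSAGE IN complete the
 * command.
 */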
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);

    switch (esp_get_phase(s)) {
    case STAT_MO:
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
        }

        fifo8_push_all(&s->cmdfifo, buf, len);
        s->cmdfifo_cdb_offset += len;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_dma(s);
                }
            }
            break;

        case CMD_SELATNS | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) == 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI | CMD_DMA:
            /* ATN remains asserted until TC == 0 */
            if (esp_get_tc(s) == 0) {
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_CMD] = 0;
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_CD:
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        }
        trace_esp_handle_ti_cmd(cmdlen);
        if (esp_get_tc(s) == 0) {
            /* Command has been received */
            do_cmd(s);
        }
        break;

    case STAT_DO:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s)) {
            /* Defer until data is available. */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_read) {
                s->dma_memory_read(s->dma_opaque, s->async_buf, len);
                esp_set_tc(s, esp_get_tc(s) - len);
            } else {
                /* Copy FIFO data to device */
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_used(&s->fifo));
                len = esp_fifo_pop_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;

        case CMD_PAD | CMD_DMA:
            /* Copy TC zero bytes into the incoming stream */
            if (!s->dma_memory_read) {
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            memset(s->async_buf, 0, len);

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_DI:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s)) {
            /* Defer until data is available. */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_write) {
                s->dma_memory_write(s->dma_opaque, s->async_buf, len);
            } else {
                /* Copy device data to FIFO */
                len = MIN(len, fifo8_num_free(&s->fifo));
                esp_fifo_push_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;

        case CMD_PAD | CMD_DMA:
            /* Drop TC bytes from the incoming stream */
            if (!s->dma_memory_write) {
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;
        }

        if (s->async_len == 0 && s->ti_size == 0 && esp_get_tc(s)) {
            /* If the guest underflows TC then terminate SCSI request */
            scsi_req_continue(s->current_req);
            return;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_ST:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = s->status;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);
                esp_set_phase(s, STAT_MI);

                if (esp_get_tc(s) > 0) {
                    /* Process any message in phase data */
                    esp_do_dma(s);
                }
            }
            break;

        default:
            /* Consume remaining data if the guest underflows TC */
            if (fifo8_num_used(&s->fifo) < 2) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_MI:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = 0;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);

                /* Raise end of command interrupt */
                s->rregs[ESP_RINTR] |= INTR_FC;
                esp_raise_irq(s);
            }
            break;
        }
        break;
    }
}

static void esp_nodma_ti_dataout(ESPState *s)
{
    int len;

    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    len = MIN(s->async_len, ESP_FIFO_SZ);
    len = MIN(len, fifo8_num_used(&s->fifo));
    esp_fifo_pop_buf(s, s->async_buf, len);
    s->async_buf += len;
    s->async_len -= len;
    s->ti_size += len;

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}

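/*
 * Non-DMA counterpart of esp_do_dma(): all data passes through the FIFO
 * a few bytes at a time, driven by guest FIFO reads and writes.
 */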
static void esp_do_nodma(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    uint32_t cmdlen;
    int len;

    switch (esp_get_phase(s)) {
    case STAT_MO:
        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_nodma(s);
                }
            }
            break;

        case CMD_SELATNS:
            /* Copy one byte from FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf,
                                   MIN(fifo8_num_used(&s->fifo), 1));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* ATN remains asserted until FIFO empty */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            esp_set_phase(s, STAT_CD);
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
        break;

    case STAT_CD:
        switch (s->rregs[ESP_CMD]) {
        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            cmdlen = fifo8_num_used(&s->cmdfifo);
            trace_esp_handle_ti_cmd(cmdlen);

            /* CDB may be transferred in one or more TI commands */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            } else {
                /*
                 * If data was transferred from the FIFO then raise bus
                 * service interrupt to indicate transfer complete. Otherwise
                 * defer until the next FIFO write.
                 */
                if (len) {
                    /* Raise interrupt to indicate transfer complete */
                    s->rregs[ESP_RINTR] |= INTR_BS;
                    esp_raise_irq(s);
                }
            }
            break;

        case CMD_SEL | CMD_DMA:
        case CMD_SELATN | CMD_DMA:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* Handle when DMA transfer is terminated by non-DMA FIFO write */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            }
            break;

        case CMD_SEL:
        case CMD_SELATN:
            /* FIFO already contains the entire CDB: copy to cmdfifo and execute */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            do_cmd(s);
            break;
        }
        break;

    case STAT_DO:
        /* Accumulate data in FIFO until non-DMA TI is executed */
        break;

    case STAT_DI:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0) {
            /* Defer until data is available. */
            return;
        }
        if (fifo8_is_empty(&s->fifo)) {
            esp_fifo_push(s, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        /* If preloading the FIFO, defer until TI command issued */
        if (s->rregs[ESP_CMD] != CMD_TI) {
            return;
        }

        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        break;

    case STAT_ST:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, s->status);
            esp_set_phase(s, STAT_MI);

            /* Process any message in phase data */
            esp_do_nodma(s);
            break;
        }
        break;

    case STAT_MI:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, 0);

            /* Raise end of command interrupt */
            s->rregs[ESP_RINTR] |= INTR_FC;
            esp_raise_irq(s);
            break;
        }
        break;
    }
}

void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = (esp_get_phase(s) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * Switch to status phase. For non-DMA transfers from the target the last
     * byte is still in the FIFO
     */
    s->ti_size = 0;

    switch (s->rregs[ESP_CMD]) {
    case CMD_SEL | CMD_DMA:
    case CMD_SEL:
    case CMD_SELATN | CMD_DMA:
    case CMD_SELATN:
        /*
         * No data phase for sequencer command so raise deferred bus service
         * and function complete interrupt
         */
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        break;

    case CMD_TI | CMD_DMA:
    case CMD_TI:
        s->rregs[ESP_CMD] = 0;
        break;
    }

    /* Raise bus service interrupt to indicate change to STATUS phase */
    esp_set_phase(s, STAT_ST);
    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    uint32_t dmalen = esp_get_tc(s);

    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!s->data_ready) {
        s->data_ready = true;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SEL | CMD_DMA:
        case CMD_SEL:
        case CMD_SELATN | CMD_DMA:
        case CMD_SELATN:
            /*
             * Initial incoming data xfer is complete for sequencer command
             * so raise deferred bus service and function complete interrupt
             */
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            break;

        case CMD_SELATNS | CMD_DMA:
        case CMD_SELATNS:
            /*
             * Initial incoming data xfer is complete so raise command
             * completion interrupt
             */
            s->rregs[ESP_RINTR] |= INTR_BS;
            s->rregs[ESP_RSEQ] = SEQ_MO;
            break;

        case CMD_TI | CMD_DMA:
        case CMD_TI:
            /*
             * Bus service interrupt raised because of initial change to
             * DATA phase
             */
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            break;
        }

        esp_raise_irq(s);
    }

    /*
     * Always perform the initial transfer upon reception of the next TI
     * command to ensure the DMA/non-DMA status of the command is correct.
     * It is not possible to use s->dma directly in the section below as
     * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
     * async data transfer is delayed then s->dma is set incorrectly.
     */

    if (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA)) {
        /* When the SCSI layer returns more data, raise deferred INTR_BS */
        esp_dma_ti_check(s);

        esp_do_dma(s);
    } else if (s->rregs[ESP_CMD] == CMD_TI) {
        esp_do_nodma(s);
    }
}

static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);

        if (esp_get_phase(s) == STAT_DO) {
            esp_nodma_ti_dataout(s);
        }
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->async_len = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->drq_irq);
    esp_hard_reset(s);
}

static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

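/*
 * Execute the command just written to ESP_CMD. An illustrative
 * (simplified) guest sequence for a DMA information transfer might be:
 *
 *   write ESP_TCLO/ESP_TCMID/ESP_TCHI    -- program the transfer count
 *   write ESP_CMD = CMD_TI | CMD_DMA     -- start the transfer
 *   (DMA engine moves the data)          -- TC counts down to zero
 *   read ESP_RINTR                       -- acknowledge and lower the IRQ
 *
 * Note that a starting transfer count (STC) of zero means 0x10000 bytes.
 */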
static void esp_run_cmd(ESPState *s)
{
    uint8_t cmd = s->rregs[ESP_CMD];

    if (cmd & CMD_DMA) {
        s->dma = 1;
        /* Reload DMA counter. */
        if (esp_get_stc(s) == 0) {
            esp_set_tc(s, 0x10000);
        } else {
            esp_set_tc(s, esp_get_stc(s));
        }
    } else {
        s->dma = 0;
    }
    switch (cmd & CMD_CMD) {
    case CMD_NOP:
        trace_esp_mem_writeb_cmd_nop(cmd);
        break;
    case CMD_FLUSH:
        trace_esp_mem_writeb_cmd_flush(cmd);
        fifo8_reset(&s->fifo);
        break;
    case CMD_RESET:
        trace_esp_mem_writeb_cmd_reset(cmd);
        esp_soft_reset(s);
        break;
    case CMD_BUSRESET:
        trace_esp_mem_writeb_cmd_bus_reset(cmd);
        esp_bus_reset(s);
        if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            s->rregs[ESP_RINTR] |= INTR_RST;
            esp_raise_irq(s);
        }
        break;
    case CMD_TI:
        trace_esp_mem_writeb_cmd_ti(cmd);
        handle_ti(s);
        break;
    case CMD_ICCS:
        trace_esp_mem_writeb_cmd_iccs(cmd);
        write_response(s);
        break;
    case CMD_MSGACC:
        trace_esp_mem_writeb_cmd_msgacc(cmd);
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = 0;
        s->rregs[ESP_RFLAGS] = 0;
        esp_raise_irq(s);
        break;
    case CMD_PAD:
        trace_esp_mem_writeb_cmd_pad(cmd);
        handle_pad(s);
        break;
    case CMD_SATN:
        trace_esp_mem_writeb_cmd_satn(cmd);
        break;
    case CMD_RSTATN:
        trace_esp_mem_writeb_cmd_rstatn(cmd);
        break;
    case CMD_SEL:
        trace_esp_mem_writeb_cmd_sel(cmd);
        handle_s_without_atn(s);
        break;
    case CMD_SELATN:
        trace_esp_mem_writeb_cmd_selatn(cmd);
        handle_satn(s);
        break;
    case CMD_SELATNS:
        trace_esp_mem_writeb_cmd_selatns(cmd);
        handle_satn_stop(s);
        break;
    case CMD_ENSEL:
        trace_esp_mem_writeb_cmd_ensel(cmd);
        s->rregs[ESP_RINTR] = 0;
        break;
    case CMD_DISSEL:
        trace_esp_mem_writeb_cmd_dissel(cmd);
        s->rregs[ESP_RINTR] = 0;
        esp_raise_irq(s);
        break;
    default:
        trace_esp_error_unhandled_command(cmd);
        break;
    }
}

uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        s->rregs[ESP_FIFO] = esp_fifo_pop(s);
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        esp_lower_irq(s);
        s->rregs[ESP_RSTAT] &= STAT_TC | 7;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}

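/*
 * Register writes are latched in wregs; side effects (TC reload, FIFO
 * push, command execution) happen before the latch below.
 */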
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (!fifo8_is_full(&s->fifo)) {
            esp_fifo_push(s, val);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        esp_run_cmd(s);
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5;
}

static bool esp_is_version_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 6;
}

static bool esp_is_between_version_5_and_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5 && version_id <= 6;
}

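/*
 * Clamping against mig_version_id (saved by the outer section) lets the
 * predicates above select the correct subset of fields when loading a
 * stream produced by an older QEMU.
 */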
int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                      OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 7,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(mig_ti_cmd, ESPState,
                           esp_is_between_version_5_and_6),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_BOOL(drq_state, ESPState),
        VMSTATE_END_OF_LIST()
    },
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    return val;
}

static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

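/* GPIO 0 is the chip reset line, GPIO 1 gates DMA enable */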
static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(s, irq, level);
        break;
    }
}

static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->drq_irq);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}

static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info_types[] = {
    {
        .name = TYPE_SYSBUS_ESP,
        .parent = TYPE_SYS_BUS_DEVICE,
        .instance_init = sysbus_esp_init,
        .instance_size = sizeof(SysBusESPState),
        .class_init = sysbus_esp_class_init,
    },
    {
        .name = TYPE_ESP,
        .parent = TYPE_DEVICE,
        .instance_init = esp_init,
        .instance_finalize = esp_finalize,
        .instance_size = sizeof(ESPState),
        .class_init = esp_class_init,
    },
};

DEFINE_TYPES(esp_info_types)