kvm-guest-drivers-windows

/*
 * Copyright (C) 2019-2020 Red Hat, Inc.
 *
 * Written By: Vadim Rozenfeld <vrozenfe@redhat.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met :
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and / or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of their contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "viogpu_queue.h"
#include "baseobj.h"
#if !DBG
#include "viogpu_queue.tmh"
#endif

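// Fills one virtio SG descriptor from a kernel virtual address. Each
// element covers at most one page, so callers walk larger buffers in
// PAGE_SIZE steps (see CtrlQueue::QueueBuffer below).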
static BOOLEAN BuildSGElement(VirtIOBufferDescriptor* sg, PVOID buf, ULONG size)
{
    if (size != 0 && MmIsAddressValid(buf))
    {
        sg->length = min(size, PAGE_SIZE);
        sg->physAddr = MmGetPhysicalAddress(buf);
        return TRUE;
    }
    return FALSE;
}

VioGpuQueue::VioGpuQueue()
{
    m_pBuf = NULL;
    m_Index = (UINT)-1;
    m_pVIODevice = NULL;
    m_pVirtQueue = NULL;
    KeInitializeSpinLock(&m_SpinLock);
}

VioGpuQueue::~VioGpuQueue()
{
    Close();
}

void VioGpuQueue::Close(void)
{
    KIRQL SavedIrql;
    Lock(&SavedIrql);
    m_pVirtQueue = NULL;
    Unlock(SavedIrql);
}

BOOLEAN VioGpuQueue::Init(
    _In_ VirtIODevice* pVIODevice,
    _In_ struct virtqueue* pVirtQueue,
    _In_ UINT index)
{
    if ((pVIODevice == NULL) ||
        (pVirtQueue == NULL)) {
        return FALSE;
    }
    m_pVIODevice = pVIODevice;
    m_pVirtQueue = pVirtQueue;
    m_Index = index;
    EnableInterrupt();
    return TRUE;
}

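// Lock/Unlock are IRQL-aware: below DISPATCH_LEVEL the spinlock is
// acquired normally (raising to DISPATCH_LEVEL), at DISPATCH_LEVEL the
// DPC-level variants are used, and any higher IRQL is treated as a bug.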
_IRQL_requires_max_(DISPATCH_LEVEL)
_IRQL_saves_global_(OldIrql, Irql)
_IRQL_raises_(DISPATCH_LEVEL)
void VioGpuQueue::Lock(KIRQL* Irql)
{
    KIRQL SavedIrql = KeGetCurrentIrql();
    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s at IRQL %d\n", __FUNCTION__, SavedIrql));

    if (SavedIrql < DISPATCH_LEVEL) {
        KeAcquireSpinLock(&m_SpinLock, &SavedIrql);
    }
    else if (SavedIrql == DISPATCH_LEVEL) {
        KeAcquireSpinLockAtDpcLevel(&m_SpinLock);
    }
    else {
        VioGpuDbgBreak();
    }
    *Irql = SavedIrql;

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
}

_IRQL_requires_(DISPATCH_LEVEL)
_IRQL_restores_global_(OldIrql, Irql)
void VioGpuQueue::Unlock(KIRQL Irql)
{
    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s at IRQL %d\n", __FUNCTION__, Irql));

    if (Irql < DISPATCH_LEVEL) {
        KeReleaseSpinLock(&m_SpinLock, Irql);
    }
    else {
        KeReleaseSpinLockFromDpcLevel(&m_SpinLock);
    }

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
}

PAGED_CODE_SEG_BEGIN

UINT VioGpuQueue::QueryAllocation()
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    USHORT NumEntries;
    ULONG RingSize, HeapSize;

    NTSTATUS status = virtio_query_queue_allocation(
        m_pVIODevice,
        m_Index,
        &NumEntries,
        &RingSize,
        &HeapSize);
    if (!NT_SUCCESS(status))
    {
        DbgPrint(TRACE_LEVEL_FATAL, ("[%s] virtio_query_queue_allocation(%d) failed with error %x\n", __FUNCTION__, m_Index, status));
        return 0;
    }

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));

    return NumEntries;
}
PAGED_CODE_SEG_END

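// CtrlQueue carries VIRTIO_GPU_CMD_* control requests. The helpers below
// build a command in a GPU_VBUFFER, queue it to the host, and either
// return immediately (fire-and-forget) or block waiting for the response.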
PAGED_CODE_SEG_BEGIN

BOOLEAN CtrlQueue::GetDisplayInfo(PGPU_VBUFFER buf, UINT id, PULONG xres, PULONG yres)
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    PGPU_RESP_DISP_INFO resp = (PGPU_RESP_DISP_INFO)buf->resp_buf;
    if (resp->hdr.type != VIRTIO_GPU_RESP_OK_DISPLAY_INFO)
    {
        DbgPrint(TRACE_LEVEL_VERBOSE, (" %s type = %x: disabled\n", __FUNCTION__, resp->hdr.type));
        return FALSE;
    }
    if (resp->pmodes[id].enabled) {
        DbgPrint(TRACE_LEVEL_VERBOSE, ("output %d: %dx%d+%d+%d\n", id,
            resp->pmodes[id].r.width,
            resp->pmodes[id].r.height,
            resp->pmodes[id].r.x,
            resp->pmodes[id].r.y));
        *xres = resp->pmodes[id].r.width;
        *yres = resp->pmodes[id].r.height;
    }
    else {
        DbgPrint(TRACE_LEVEL_VERBOSE, ("output %d: disabled\n", id));
        return FALSE;
    }

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));

    return TRUE;
}

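// AskDisplayInfo and AskEdidInfo share one pattern: allocate a command
// with an out-of-line response buffer, queue it, then wait up to one
// second on an event that is presumably signaled by the response path
// when the buffer is dequeued.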
BOOLEAN CtrlQueue::AskDisplayInfo(PGPU_VBUFFER* buf)
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    PGPU_CTRL_HDR cmd;
    PGPU_VBUFFER vbuf;
    PGPU_RESP_DISP_INFO resp_buf;
    KEVENT   event;
    NTSTATUS status;

    resp_buf = reinterpret_cast<PGPU_RESP_DISP_INFO>
        (new (NonPagedPoolNx) BYTE[sizeof(GPU_RESP_DISP_INFO)]);

    if (!resp_buf)
    {
        DbgPrint(TRACE_LEVEL_ERROR, ("---> %s failed to allocate %d bytes\n", __FUNCTION__, sizeof(GPU_RESP_DISP_INFO)));
        return FALSE;
    }

    cmd = (PGPU_CTRL_HDR)AllocCmdResp(&vbuf, sizeof(GPU_CTRL_HDR), resp_buf, sizeof(GPU_RESP_DISP_INFO));
    if (!cmd)
    {
        delete[] reinterpret_cast<PBYTE>(resp_buf);
        return FALSE;
    }
    RtlZeroMemory(cmd, sizeof(GPU_CTRL_HDR));

    cmd->type = VIRTIO_GPU_CMD_GET_DISPLAY_INFO;

    KeInitializeEvent(&event, NotificationEvent, FALSE);
    vbuf->event = &event;

    LARGE_INTEGER timeout = { 0 };
    timeout.QuadPart = Int32x32To64(1000, -10000);

    QueueBuffer(vbuf);
    status = KeWaitForSingleObject(&event,
        Executive,
        KernelMode,
        FALSE,
        &timeout);

    if (status == STATUS_TIMEOUT) {
        DbgPrint(TRACE_LEVEL_FATAL, ("---> Failed to ask display info\n"));
        VioGpuDbgBreak();
    }
    *buf = vbuf;

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));

    return TRUE;
}

BOOLEAN CtrlQueue::AskEdidInfo(PGPU_VBUFFER* buf, UINT id)
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    PGPU_CMD_GET_EDID cmd;
    PGPU_VBUFFER vbuf;
    PGPU_RESP_EDID resp_buf;
    KEVENT   event;
    NTSTATUS status;

    resp_buf = reinterpret_cast<PGPU_RESP_EDID>
        (new (NonPagedPoolNx) BYTE[sizeof(GPU_RESP_EDID)]);

    if (!resp_buf)
    {
        DbgPrint(TRACE_LEVEL_ERROR, ("---> %s failed to allocate %d bytes\n", __FUNCTION__, sizeof(GPU_RESP_EDID)));
        return FALSE;
    }
    cmd = (PGPU_CMD_GET_EDID)AllocCmdResp(&vbuf, sizeof(GPU_CMD_GET_EDID), resp_buf, sizeof(GPU_RESP_EDID));
    if (!cmd)
    {
        delete[] reinterpret_cast<PBYTE>(resp_buf);
        return FALSE;
    }
    RtlZeroMemory(cmd, sizeof(GPU_CMD_GET_EDID));

    cmd->hdr.type = VIRTIO_GPU_CMD_GET_EDID;
    cmd->scanout = id;

    KeInitializeEvent(&event, NotificationEvent, FALSE);
    vbuf->event = &event;

    LARGE_INTEGER timeout = { 0 };
    timeout.QuadPart = Int32x32To64(1000, -10000);

    QueueBuffer(vbuf);

    status = KeWaitForSingleObject(&event,
        Executive,
        KernelMode,
        FALSE,
        &timeout
    );

    if (status == STATUS_TIMEOUT) {
        DbgPrint(TRACE_LEVEL_FATAL, ("---> Failed to get edid info\n"));
        VioGpuDbgBreak();
    }

    *buf = vbuf;

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));

    return TRUE;
}

BOOLEAN CtrlQueue::GetEdidInfo(PGPU_VBUFFER buf, UINT id, PBYTE edid)
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
    PGPU_CMD_GET_EDID cmd = (PGPU_CMD_GET_EDID)buf->buf;
    PGPU_RESP_EDID resp = (PGPU_RESP_EDID)buf->resp_buf;
    PUCHAR resp_edid = (PUCHAR)(resp->edid + (ULONGLONG)id * EDID_V1_BLOCK_SIZE);
    if (resp->hdr.type != VIRTIO_GPU_RESP_OK_EDID)
    {
        DbgPrint(TRACE_LEVEL_VERBOSE, (" %s type = %x: disabled\n", __FUNCTION__, resp->hdr.type));
        return FALSE;
    }
    if (cmd->scanout != id)
    {
        DbgPrint(TRACE_LEVEL_VERBOSE, (" %s invalid scanout = %x\n", __FUNCTION__, cmd->scanout));
        return FALSE;
    }

    RtlCopyMemory(edid, resp_edid, EDID_RAW_BLOCK_SIZE);
    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));

    return TRUE;
}

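// The 2D command helpers below are fire-and-forget: they build the
// request, queue it, and return without waiting for a response.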
void CtrlQueue::CreateResource(UINT res_id, UINT format, UINT width, UINT height)
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    PGPU_RES_CREATE_2D cmd;
    PGPU_VBUFFER vbuf;
    cmd = (PGPU_RES_CREATE_2D)AllocCmd(&vbuf, sizeof(*cmd));
    RtlZeroMemory(cmd, sizeof(*cmd));

    cmd->hdr.type = VIRTIO_GPU_CMD_RESOURCE_CREATE_2D;
    cmd->resource_id = res_id;
    cmd->format = format;
    cmd->width = width;
    cmd->height = height;

    // FIXME: check the return value of QueueBuffer
    QueueBuffer(vbuf);

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
}

void CtrlQueue::ResFlush(UINT res_id, UINT width, UINT height, UINT x, UINT y)
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
    PGPU_RES_FLUSH cmd;
    PGPU_VBUFFER vbuf;
    cmd = (PGPU_RES_FLUSH)AllocCmd(&vbuf, sizeof(*cmd));
    RtlZeroMemory(cmd, sizeof(*cmd));

    cmd->hdr.type = VIRTIO_GPU_CMD_RESOURCE_FLUSH;
    cmd->resource_id = res_id;
    cmd->r.width = width;
    cmd->r.height = height;
    cmd->r.x = x;
    cmd->r.y = y;

    QueueBuffer(vbuf);

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
}

void CtrlQueue::TransferToHost2D(UINT res_id, ULONG offset, UINT width, UINT height, UINT x, UINT y, PUINT fence_id)
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));
    PGPU_RES_TRANSF_TO_HOST_2D cmd;
    PGPU_VBUFFER vbuf;
    cmd = (PGPU_RES_TRANSF_TO_HOST_2D)AllocCmd(&vbuf, sizeof(*cmd));
    RtlZeroMemory(cmd, sizeof(*cmd));

    cmd->hdr.type = VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D;
    cmd->resource_id = res_id;
    cmd->offset = offset;
    cmd->r.width = width;
    cmd->r.height = height;
    cmd->r.x = x;
    cmd->r.y = y;

    if (fence_id) {
        cmd->hdr.flags |= VIRTIO_GPU_FLAG_FENCE;
        cmd->hdr.fence_id = *fence_id;
    }

    QueueBuffer(vbuf);

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
}

void CtrlQueue::AttachBacking(UINT res_id, PGPU_MEM_ENTRY ents, UINT nents)
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    PGPU_RES_ATTACH_BACKING cmd;
    PGPU_VBUFFER vbuf;
    cmd = (PGPU_RES_ATTACH_BACKING)AllocCmd(&vbuf, sizeof(*cmd));
    RtlZeroMemory(cmd, sizeof(*cmd));

    cmd->hdr.type = VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING;
    cmd->resource_id = res_id;
    cmd->nr_entries = nents;

    vbuf->data_buf = ents;
    vbuf->data_size = sizeof(*ents) * nents;

    QueueBuffer(vbuf);

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
}

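/*
 * Usage sketch (hypothetical caller, for illustration only): a 2D
 * resource is typically created, backed, scanned out, and presented
 * roughly like this:
 *
 *   ctrlQueue.CreateResource(res_id, format, width, height);
 *   ctrlQueue.AttachBacking(res_id, ents, nents); // ents from an SG list
 *   ctrlQueue.SetScanout(scan_id, res_id, width, height, 0, 0);
 *   ctrlQueue.TransferToHost2D(res_id, 0, width, height, 0, 0, NULL);
 *   ctrlQueue.ResFlush(res_id, width, height, 0, 0);
 */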
PAGED_CODE_SEG_END

void CtrlQueue::UnrefResource(UINT res_id)
{
    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    PGPU_RES_UNREF cmd;
    PGPU_VBUFFER vbuf;
    cmd = (PGPU_RES_UNREF)AllocCmd(&vbuf, sizeof(*cmd));
    RtlZeroMemory(cmd, sizeof(*cmd));

    cmd->hdr.type = VIRTIO_GPU_CMD_RESOURCE_UNREF;
    cmd->resource_id = res_id;

    QueueBuffer(vbuf);

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
}

void CtrlQueue::InvalBacking(UINT res_id)
{
    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    PGPU_RES_DETACH_BACKING cmd;
    PGPU_VBUFFER vbuf;
    cmd = (PGPU_RES_DETACH_BACKING)AllocCmd(&vbuf, sizeof(*cmd));
    RtlZeroMemory(cmd, sizeof(*cmd));

    cmd->hdr.type = VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING;
    cmd->resource_id = res_id;

    QueueBuffer(vbuf);

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
}

PVOID CtrlQueue::AllocCmdResp(PGPU_VBUFFER* buf, int cmd_sz, PVOID resp_buf, int resp_sz)
{
    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    PGPU_VBUFFER vbuf;
    vbuf = m_pBuf->GetBuf(cmd_sz, resp_sz, resp_buf);
    ASSERT(vbuf);
    *buf = vbuf;

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));

    return vbuf ? vbuf->buf : NULL;
}

PVOID CtrlQueue::AllocCmd(PGPU_VBUFFER* buf, int sz)
{
    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    if (buf == NULL || sz == 0) {
        return NULL;
    }

    PGPU_VBUFFER vbuf = m_pBuf->GetBuf(sz, sizeof(GPU_CTRL_HDR), NULL);
    ASSERT(vbuf);
    *buf = vbuf;

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s  vbuf = %p\n", __FUNCTION__, vbuf));

    return vbuf ? vbuf->buf : NULL;
}

void CtrlQueue::SetScanout(UINT scan_id, UINT res_id, UINT width, UINT height, UINT x, UINT y)
{
    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    PGPU_SET_SCANOUT cmd;
    PGPU_VBUFFER vbuf;
    cmd = (PGPU_SET_SCANOUT)AllocCmd(&vbuf, sizeof(*cmd));
    RtlZeroMemory(cmd, sizeof(*cmd));

    cmd->hdr.type = VIRTIO_GPU_CMD_SET_SCANOUT;
    cmd->resource_id = res_id;
    cmd->scanout_id = scan_id;
    cmd->r.width = width;
    cmd->r.height = height;
    cmd->r.x = x;
    cmd->r.y = y;

    // FIXME: check the return value of QueueBuffer
    QueueBuffer(vbuf);

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
}

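// QueueBuffer maps one GPU_VBUFFER onto the virtqueue: the command and
// any data pages become device-readable (out) descriptors, and the
// response buffer, if present, becomes a device-writable (in) descriptor.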
#define SGLIST_SIZE 64
UINT CtrlQueue::QueueBuffer(PGPU_VBUFFER buf)
{
    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    VirtIOBufferDescriptor  sg[SGLIST_SIZE];
    UINT sgleft = SGLIST_SIZE;
    UINT outcnt = 0, incnt = 0;
    UINT ret = 0;
    KIRQL SavedIrql;

    if (buf->size > PAGE_SIZE) {
        DbgPrint(TRACE_LEVEL_ERROR, ("<--> %s size is too big %d\n", __FUNCTION__, buf->size));
        return 0;
    }

    if (BuildSGElement(&sg[outcnt + incnt], (PVOID)buf->buf, buf->size))
    {
        outcnt++;
        sgleft--;
    }

    if (buf->data_size)
    {
        ULONG data_size = buf->data_size;
        PVOID data_buf = (PVOID)buf->data_buf;
        while (data_size)
        {
            if (BuildSGElement(&sg[outcnt + incnt], data_buf, data_size))
            {
                data_buf = (PVOID)((LONG_PTR)(data_buf) + PAGE_SIZE);
                data_size -= min(data_size, PAGE_SIZE);
                outcnt++;
                sgleft--;
                if (sgleft == 0) {
                    DbgPrint(TRACE_LEVEL_ERROR, ("<--> %s no more sg element slots left %d\n", __FUNCTION__, outcnt));
                    return 0;
                }
            }
        }
    }

    if (buf->resp_size > PAGE_SIZE) {
        DbgPrint(TRACE_LEVEL_ERROR, ("<--> %s resp_size is too big %d\n", __FUNCTION__, buf->resp_size));
        return 0;
    }

    if (buf->resp_size && (sgleft > 0))
    {
        if (BuildSGElement(&sg[outcnt + incnt], (PVOID)buf->resp_buf, buf->resp_size))
        {
            incnt++;
            sgleft--;
        }
    }

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--> %s sgleft %d\n", __FUNCTION__, sgleft));

    Lock(&SavedIrql);
    ret = AddBuf(&sg[0], outcnt, incnt, buf, NULL, 0);
    Kick();
    Unlock(SavedIrql);

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s ret = %d\n", __FUNCTION__, ret));

    return ret;
}

PGPU_VBUFFER CtrlQueue::DequeueBuffer(_Out_ UINT* len)
{
    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    PGPU_VBUFFER buf = NULL;
    KIRQL SavedIrql;
    Lock(&SavedIrql);
    buf = (PGPU_VBUFFER)GetBuf(len);
    Unlock(SavedIrql);
    if (buf == NULL)
    {
        *len = 0;
    }
    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));

    return buf;
}

void VioGpuQueue::ReleaseBuffer(PGPU_VBUFFER buf)
{
    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    m_pBuf->FreeBuf(buf);

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
}

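// VioGpuBuf is a fixed pool of VBUFFER_SIZE chunks kept on two
// spinlock-protected lists (m_FreeBufs / m_InUseBufs). Commands and small
// responses live inline in the chunk; larger responses use a
// caller-supplied out-of-line buffer.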
BOOLEAN VioGpuBuf::Init(_In_ UINT cnt)
{
    KIRQL                   OldIrql;

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    for (UINT i = 0; i < cnt; ++i) {
        PGPU_VBUFFER pvbuf = reinterpret_cast<PGPU_VBUFFER>
            (new (NonPagedPoolNx) BYTE[VBUFFER_SIZE]);
        if (pvbuf)
        {
            RtlZeroMemory(pvbuf, VBUFFER_SIZE);
            KeAcquireSpinLock(&m_SpinLock, &OldIrql);
            InsertTailList(&m_FreeBufs, &pvbuf->list_entry);
            ++m_uCount;
            KeReleaseSpinLock(&m_SpinLock, OldIrql);
        }
    }
    ASSERT(m_uCount == cnt);

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));

    return (m_uCount > 0);
}

void VioGpuBuf::Close(void)
{
    KIRQL                   OldIrql;

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    KeAcquireSpinLock(&m_SpinLock, &OldIrql);
    while (!IsListEmpty(&m_InUseBufs))
    {
        LIST_ENTRY* pListItem = RemoveHeadList(&m_InUseBufs);
        if (pListItem)
        {
            PGPU_VBUFFER pvbuf = CONTAINING_RECORD(pListItem, GPU_VBUFFER, list_entry);
            ASSERT(pvbuf);
            ASSERT(pvbuf->resp_size <= MAX_INLINE_RESP_SIZE);

            delete[] reinterpret_cast<PBYTE>(pvbuf);
            --m_uCount;
        }
    }

    while (!IsListEmpty(&m_FreeBufs))
    {
        LIST_ENTRY* pListItem = RemoveHeadList(&m_FreeBufs);
        if (pListItem)
        {
            PGPU_VBUFFER pbuf = CONTAINING_RECORD(pListItem, GPU_VBUFFER, list_entry);
            ASSERT(pbuf);

            if (pbuf->resp_buf && pbuf->resp_size > MAX_INLINE_RESP_SIZE)
            {
                delete[] reinterpret_cast<PBYTE>(pbuf->resp_buf);
                pbuf->resp_buf = NULL;
                pbuf->resp_size = 0;
            }

            if (pbuf->data_buf && pbuf->data_size)
            {
                delete[] reinterpret_cast<PBYTE>(pbuf->data_buf);
                pbuf->data_buf = NULL;
                pbuf->data_size = 0;
            }

            delete[] reinterpret_cast<PBYTE>(pbuf);
            --m_uCount;
        }
    }
    KeReleaseSpinLock(&m_SpinLock, OldIrql);

    ASSERT(m_uCount == 0);

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
}

PGPU_VBUFFER VioGpuBuf::GetBuf(
    _In_ int size,
    _In_ int resp_size,
    _In_opt_ void *resp_buf)
{
    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    PGPU_VBUFFER pbuf = NULL;
    PLIST_ENTRY pListItem = NULL;
    KIRQL SavedIrql = KeGetCurrentIrql();

    if (SavedIrql < DISPATCH_LEVEL) {
        KeAcquireSpinLock(&m_SpinLock, &SavedIrql);
    }
    else if (SavedIrql == DISPATCH_LEVEL) {
        KeAcquireSpinLockAtDpcLevel(&m_SpinLock);
    }
    else {
        VioGpuDbgBreak();
    }

    if (!IsListEmpty(&m_FreeBufs))
    {
        pListItem = RemoveHeadList(&m_FreeBufs);
        pbuf = CONTAINING_RECORD(pListItem, GPU_VBUFFER, list_entry);
        ASSERT(pbuf);
        memset(pbuf, 0, VBUFFER_SIZE);
        ASSERT(size <= MAX_INLINE_CMD_SIZE);

        pbuf->buf = (char *)((ULONG_PTR)pbuf + sizeof(*pbuf));
        pbuf->size = size;

        pbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
        {
            pbuf->resp_buf = (char *)((ULONG_PTR)pbuf->buf + size);
        }
        else
        {
            pbuf->resp_buf = (char *)resp_buf;
        }
        ASSERT(pbuf->resp_buf);
        InsertTailList(&m_InUseBufs, &pbuf->list_entry);
    }
    else
    {
        DbgPrint(TRACE_LEVEL_FATAL, ("<--- %s Cannot allocate buffer\n", __FUNCTION__));
        VioGpuDbgBreak();
    }
    if (SavedIrql < DISPATCH_LEVEL) {
        KeReleaseSpinLock(&m_SpinLock, SavedIrql);
    }
    else if (SavedIrql == DISPATCH_LEVEL) {
        KeReleaseSpinLockFromDpcLevel(&m_SpinLock);
    }
    else {
        VioGpuDbgBreak();
    }

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s buf = %p\n", __FUNCTION__, pbuf));

    return pbuf;
}

void VioGpuBuf::FreeBuf(
    _In_ PGPU_VBUFFER pbuf)
{
    KIRQL                   OldIrql;

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s buf = %p\n", __FUNCTION__, pbuf));
    KeAcquireSpinLock(&m_SpinLock, &OldIrql);

    if (!IsListEmpty(&m_InUseBufs))
    {
        PLIST_ENTRY leCurrent = m_InUseBufs.Flink;
        PGPU_VBUFFER pvbuf = CONTAINING_RECORD(leCurrent, GPU_VBUFFER, list_entry);
        while (leCurrent && pvbuf)
        {
            if (pvbuf == pbuf)
            {
                RemoveEntryList(leCurrent);
                pvbuf = NULL;
                break;
            }

            leCurrent = leCurrent->Flink;
            if (leCurrent) {
                pvbuf = CONTAINING_RECORD(leCurrent, GPU_VBUFFER, list_entry);
            }
        }
    }
    if (pbuf->resp_buf && pbuf->resp_size > MAX_INLINE_RESP_SIZE)
    {
        delete[] reinterpret_cast<PBYTE>(pbuf->resp_buf);
        pbuf->resp_buf = NULL;
        pbuf->resp_size = 0;
    }

    if (pbuf->data_buf && pbuf->data_size)
    {
        delete[] reinterpret_cast<PBYTE>(pbuf->data_buf);
        pbuf->data_buf = NULL;
        pbuf->data_size = 0;
    }

    InsertTailList(&m_FreeBufs, &pbuf->list_entry);
    KeReleaseSpinLock(&m_SpinLock, OldIrql);

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
}

PAGED_CODE_SEG_BEGIN
VioGpuBuf::VioGpuBuf()
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    InitializeListHead(&m_FreeBufs);
    InitializeListHead(&m_InUseBufs);
    KeInitializeSpinLock(&m_SpinLock);
    m_uCount = 0;

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
}

VioGpuBuf::~VioGpuBuf()
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_FATAL, ("---> %s 0x%p\n", __FUNCTION__, this));

    Close();

    DbgPrint(TRACE_LEVEL_FATAL, ("<--- %s\n", __FUNCTION__));
}

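// VioGpuMemSegment wraps a locked-down, page-aligned memory range: either
// freshly allocated system memory or a mapped frame buffer aperture. Init
// builds a SCATTER_GATHER_LIST with one PAGE_SIZE element per page, which
// a caller can turn into GPU_MEM_ENTRY records for AttachBacking.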
VioGpuMemSegment::VioGpuMemSegment(void)
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    m_pSGList = NULL;
    m_pVAddr = NULL;
    m_pMdl = NULL;
    m_bSystemMemory = FALSE;
    m_bMapped = FALSE;
    m_Size = 0;
    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
}

VioGpuMemSegment::~VioGpuMemSegment(void)
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    Close();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
}

BOOLEAN VioGpuMemSegment::Init(_In_ UINT size, _In_opt_ PPHYSICAL_ADDRESS pPAddr)
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    ASSERT(size);
    PVOID buf = NULL;
    UINT pages = BYTES_TO_PAGES(size);
    UINT sglsize = sizeof(SCATTER_GATHER_LIST) + (sizeof(SCATTER_GATHER_ELEMENT) * pages);
    size = pages * PAGE_SIZE;

    if ((pPAddr == NULL) ||
        pPAddr->QuadPart == 0LL) {
        m_pVAddr = new (NonPagedPoolNx) BYTE[size];

        if (!m_pVAddr)
        {
            DbgPrint(TRACE_LEVEL_FATAL, ("%s insufficient resources to allocate %x bytes\n", __FUNCTION__, size));
            return FALSE;
        }
        RtlZeroMemory(m_pVAddr, size);
        m_bSystemMemory = TRUE;
    }
    else {
        NTSTATUS Status = MapFrameBuffer(*pPAddr, size, &m_pVAddr);
        if (!NT_SUCCESS(Status)) {
            DbgPrint(TRACE_LEVEL_FATAL, ("<--- %s MapFrameBuffer failed with Status: 0x%X\n", __FUNCTION__, Status));
            return FALSE;
        }
        m_bMapped = TRUE;
    }

    m_pMdl = IoAllocateMdl(m_pVAddr, size, FALSE, FALSE, NULL);
    if (!m_pMdl)
    {
        DbgPrint(TRACE_LEVEL_FATAL, ("%s insufficient resources to allocate MDLs\n", __FUNCTION__));
        return FALSE;
    }
    if (m_bSystemMemory == TRUE) {
        __try
        {
            MmProbeAndLockPages(m_pMdl, KernelMode, IoWriteAccess);
        }
#pragma prefast(suppress: __WARNING_EXCEPTIONEXECUTEHANDLER, "try/except is only able to protect against user-mode errors and these are the only errors we try to catch here");
        __except (EXCEPTION_EXECUTE_HANDLER)
        {
            DbgPrint(TRACE_LEVEL_FATAL, ("%s Failed to lock pages with error %x\n", __FUNCTION__, GetExceptionCode()));
            IoFreeMdl(m_pMdl);
            return FALSE;
        }
    }
    m_pSGList = reinterpret_cast<PSCATTER_GATHER_LIST>(new (NonPagedPoolNx) BYTE[sglsize]);
    if (!m_pSGList)
    {
        DbgPrint(TRACE_LEVEL_FATAL, ("%s insufficient resources to allocate the SG list\n", __FUNCTION__));
        return FALSE;
    }
    //       m_pSAddr = reinterpret_cast<BYTE*>
    //    (MmGetSystemAddressForMdlSafe(m_pMdl, NormalPagePriority | MdlMappingNoExecute));

    RtlZeroMemory(m_pSGList, sglsize);
    buf = PAGE_ALIGN(m_pVAddr);

    for (UINT i = 0; i < pages; ++i)
    {
        PHYSICAL_ADDRESS pa = { 0 };
        ASSERT(MmIsAddressValid(buf));
        pa = MmGetPhysicalAddress(buf);
        if (pa.QuadPart == 0LL)
        {
            DbgPrint(TRACE_LEVEL_FATAL, ("%s Invalid PA buf = %p element %d\n", __FUNCTION__, buf, i));
            break;
        }
        m_pSGList->Elements[i].Address = pa;
        m_pSGList->Elements[i].Length = PAGE_SIZE;
        buf = (PVOID)((LONG_PTR)(buf) + PAGE_SIZE);
        m_pSGList->NumberOfElements++;
    }
    m_Size = size;
    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));

    return TRUE;
}

PHYSICAL_ADDRESS VioGpuMemSegment::GetPhysicalAddress(void)
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    PHYSICAL_ADDRESS pa = { 0 };
    if (m_pVAddr && MmIsAddressValid(m_pVAddr))
    {
        pa = MmGetPhysicalAddress(m_pVAddr);
    }

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));

    return pa;
}

void VioGpuMemSegment::Close(void)
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    if (m_pMdl)
    {
        if (m_bSystemMemory) {
            MmUnlockPages(m_pMdl);
        }
        IoFreeMdl(m_pMdl);
        m_pMdl = NULL;
    }

    if (m_bSystemMemory) {
        delete[] m_pVAddr;
    }
    else {
        UnmapFrameBuffer(m_pVAddr, (ULONG)m_Size);
        m_bMapped = FALSE;
    }
    m_pVAddr = NULL;

    delete[] reinterpret_cast<PBYTE>(m_pSGList);
    m_pSGList = NULL;

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
}

VioGpuObj::VioGpuObj(void)
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    m_uiHwRes = 0;
    m_pSegment = NULL;
    m_Size = 0;

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
}

VioGpuObj::~VioGpuObj(void)
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s\n", __FUNCTION__));
}

BOOLEAN VioGpuObj::Init(_In_ UINT size, VioGpuMemSegment *pSegment)
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s requested size = %d\n", __FUNCTION__, size));

    ASSERT(size);
    ASSERT(pSegment);
    UINT pages = BYTES_TO_PAGES(size);
    size = pages * PAGE_SIZE;
    if (size > pSegment->GetSize())
    {
        DbgPrint(TRACE_LEVEL_FATAL, ("<--- %s segment size too small = %Iu (%u)\n", __FUNCTION__, pSegment->GetSize(), size));
        return FALSE;
    }
    m_pSegment = pSegment;
    m_Size = size;
    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s size = %Iu\n", __FUNCTION__, m_Size));
    return TRUE;
}

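// CrsrQueue drives the virtio-gpu cursor queue: cursor commands carry no
// response buffer and fit in a single SG element.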
PVOID CrsrQueue::AllocCursor(PGPU_VBUFFER* buf)
{
    PAGED_CODE();

    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    PGPU_VBUFFER vbuf;
    vbuf = m_pBuf->GetBuf(sizeof(GPU_UPDATE_CURSOR), 0, NULL);
    ASSERT(vbuf);
    *buf = vbuf;

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s  vbuf = %p\n", __FUNCTION__, vbuf));

    return vbuf ? vbuf->buf : NULL;
}

PAGED_CODE_SEG_END

UINT CrsrQueue::QueueCursor(PGPU_VBUFFER buf)
{
    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    KIRQL SavedIrql;

    VirtIOBufferDescriptor  sg[1];
    int outcnt = 0;
    UINT ret = 0;

    ASSERT(buf->size <= PAGE_SIZE);
    if (BuildSGElement(&sg[outcnt], (PVOID)buf->buf, buf->size))
    {
        outcnt++;
    }

    ASSERT(outcnt);
    Lock(&SavedIrql);
    ret = AddBuf(&sg[0], outcnt, 0, buf, NULL, 0);
    Kick();
    Unlock(SavedIrql);

    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s vbuf = %p outcnt = %d, ret = %d\n", __FUNCTION__, buf, outcnt, ret));
    return ret;
}

PGPU_VBUFFER CrsrQueue::DequeueCursor(_Out_ UINT* len)
{
    DbgPrint(TRACE_LEVEL_VERBOSE, ("---> %s\n", __FUNCTION__));

    PGPU_VBUFFER buf = NULL;
    KIRQL SavedIrql;
    Lock(&SavedIrql);
    buf = (PGPU_VBUFFER)GetBuf(len);
    Unlock(SavedIrql);
    if (buf == NULL)
    {
        *len = 0;
    }
    DbgPrint(TRACE_LEVEL_VERBOSE, ("<--- %s buf %p len = %u\n", __FUNCTION__, buf, *len));
    return buf;
}