diff --git a/system/dev/bus/virtio/block.cpp b/system/dev/bus/virtio/block.cpp index 39f1cd01c10167484aaba5ae38c8aab1d5a1e56c..d3de376092505a088319e62b8e693b1c18936674 100644 --- a/system/dev/bus/virtio/block.cpp +++ b/system/dev/bus/virtio/block.cpp @@ -21,7 +21,7 @@ #define LOCAL_TRACE 0 -// 1MB max transfer (unless further restricted by ring size +// 1MB max transfer (unless further restricted by ring size). #define MAX_SCATTER 257 #define MAX_MAX_XFER ((MAX_SCATTER - 1) * PAGE_SIZE) @@ -39,8 +39,8 @@ void BlockDevice::txn_complete(block_txn_t* txn, zx_status_t status) { // DDK level ops -// optional: return the size (in bytes) of the readable/writable space -// of the device. Will default to 0 (non-seekable) if this is unimplemented +// Optional: return the size (in bytes) of the readable/writable space of the device. Will default +// to 0 (non-seekable) if this is unimplemented. zx_off_t BlockDevice::virtio_block_get_size(void* ctx) { LTRACEF("ctx %p\n", ctx); @@ -55,7 +55,7 @@ void BlockDevice::GetInfo(block_info_t* info) { info->block_count = GetSize() / GetBlockSize(); info->max_transfer_size = (uint32_t)(PAGE_SIZE * (ring_size - 2)); - // limit max transfer to our worst case scatter list size + // Limit max transfer to our worst case scatter list size. 
if (info->max_transfer_size > MAX_MAX_XFER) { info->max_transfer_size = MAX_MAX_XFER; } @@ -70,12 +70,12 @@ void BlockDevice::virtio_block_query(void* ctx, block_info_t* info, size_t* bops void BlockDevice::virtio_block_queue(void* ctx, block_op_t* bop, block_impl_queue_callback completion_cb, void* cookie) { BlockDevice* bd = static_cast<BlockDevice*>(ctx); - block_txn_t* txn = static_cast<block_txn_t*>((void*) bop); + block_txn_t* txn = static_cast<block_txn_t*>((void*)bop); txn->pmt = ZX_HANDLE_INVALID; txn->completion_cb = completion_cb; txn->cookie = cookie; - switch(txn->op.command & BLOCK_OP_MASK) { + switch (txn->op.command & BLOCK_OP_MASK) { case BLOCK_OP_READ: bd->QueueReadWriteTxn(txn, false); break; @@ -83,18 +83,17 @@ void BlockDevice::virtio_block_queue(void* ctx, block_op_t* bop, bd->QueueReadWriteTxn(txn, true); break; case BLOCK_OP_FLUSH: - //TODO: this should complete after any in-flight IO and before - // any later IO begins + // TODO: this should complete after any in-flight IO and before any later IO begins. bd->txn_complete(txn, ZX_OK); break; default: bd->txn_complete(txn, ZX_ERR_NOT_SUPPORTED); } - } -zx_status_t BlockDevice::virtio_block_ioctl(void* ctx, uint32_t op, const void* in_buf, size_t in_len, - void* reply, size_t max, size_t* out_actual) { +zx_status_t BlockDevice::virtio_block_ioctl(void* ctx, uint32_t op, const void* in_buf, + size_t in_len, void* reply, size_t max, + size_t* out_actual) { LTRACEF("ctx %p, op %u\n", ctx, op); BlockDevice* bd = static_cast<BlockDevice*>(ctx); @@ -129,11 +128,9 @@ BlockDevice::~BlockDevice() { zx_status_t BlockDevice::Init() { LTRACE_ENTRY; - // reset the device DeviceReset(); - - // read our configuration CopyDeviceConfig(&config_, sizeof(config_)); + // TODO(cja): The blk_size provided in the device configuration is only // populated if a specific feature bit has been negotiated during // initialization, otherwise it is 0, at least in Virtio 0.9.5. 
Use 512 @@ -147,23 +144,22 @@ zx_status_t BlockDevice::Init() { LTRACEF("seg_max %#x\n", config_.seg_max); LTRACEF("blk_size %#x\n", config_.blk_size); - // ack and set the driver status bit DriverStatusAck(); - // XXX check features bits and ack/nak them + // TODO: Check feature bits and ack/nak them. - // allocate the main vring + // Allocate the main vring. auto err = vring_.Init(0, ring_size); if (err < 0) { zxlogf(ERROR, "failed to allocate vring\n"); return err; } - // allocate a queue of block requests + // Allocate a queue of block requests. size_t size = sizeof(virtio_blk_req_t) * blk_req_count + sizeof(uint8_t) * blk_req_count; - zx_status_t status = io_buffer_init(&blk_req_buf_, bti_.get(), size, - IO_BUFFER_RW | IO_BUFFER_CONTIG); + zx_status_t status = + io_buffer_init(&blk_req_buf_, bti_.get(), size, IO_BUFFER_RW | IO_BUFFER_CONTIG); if (status != ZX_OK) { zxlogf(ERROR, "cannot alloc blk_req buffers %d\n", status); return status; @@ -173,20 +169,17 @@ zx_status_t BlockDevice::Init() { LTRACEF("allocated blk request at %p, physical address %#" PRIxPTR "\n", blk_req_, io_buffer_phys(&blk_req_buf_)); - // responses are 32 words at the end of the allocated block + // Responses are 32 words at the end of the allocated block. blk_res_pa_ = io_buffer_phys(&blk_req_buf_) + sizeof(virtio_blk_req_t) * blk_req_count; blk_res_ = (uint8_t*)((uintptr_t)blk_req_ + sizeof(virtio_blk_req_t) * blk_req_count); - LTRACEF("allocated blk responses at %p, physical address %#" PRIxPTR "\n", blk_res_, blk_res_pa_); + LTRACEF("allocated blk responses at %p, physical address %#" PRIxPTR "\n", blk_res_, + blk_res_pa_); - // start the interrupt thread StartIrqThread(); - - // set DRIVER_OK DriverStatusOk(); - // initialize the zx_device and publish us - // point the ctx of our DDK device at ourself + // Initialize and publish the zx_device. 
device_ops_.get_size = &virtio_block_get_size; device_ops_.ioctl = &virtio_block_ioctl; @@ -213,11 +206,11 @@ zx_status_t BlockDevice::Init() { void BlockDevice::IrqRingUpdate() { LTRACE_ENTRY; - // parse our descriptor chain, add back to the free queue + // Parse our descriptor chain and add back to the free queue. auto free_chain = [this](vring_used_elem* used_elem) { uint32_t i = (uint16_t)used_elem->id; struct vring_desc* desc = vring_.DescFromIndex((uint16_t)i); - auto head_desc = desc; // save the first element + auto head_desc = desc; // Save the first element. { fbl::AutoLock lock(&ring_lock_); for (;;) { @@ -226,7 +219,7 @@ void BlockDevice::IrqRingUpdate() { if (desc->flags & VRING_DESC_F_NEXT) { next = desc->next; } else { - /* end of chain */ + // End of chain. next = -1; } @@ -245,19 +238,17 @@ void BlockDevice::IrqRingUpdate() { { fbl::AutoLock lock(&txn_lock_); - // search our pending txn list to see if this completes it - + // Search our pending txn list to see if this completes it. list_for_every_entry (&txn_list_, txn, block_txn_t, node) { if (txn->desc == head_desc) { LTRACEF("completes txn %p\n", txn); free_blk_req((unsigned int)txn->index); list_delete(&txn->node); - // we will do this outside of the lock + // We will do this outside of the lock. need_complete = true; - // check to see if QueueTxn is waiting on - // resources becoming available + // Check to see if QueueTxn is waiting on resources becoming available. if ((need_signal = txn_wait_)) { txn_wait_ = false; } @@ -274,7 +265,7 @@ void BlockDevice::IrqRingUpdate() { } }; - // tell the ring to find free chains and hand it back to our lambda + // Tell the ring to find free chains and hand it back to our lambda. 
vring_.IrqRingUpdate(free_chain); } @@ -282,8 +273,8 @@ void BlockDevice::IrqConfigChange() { LTRACE_ENTRY; } -zx_status_t BlockDevice::QueueTxn(block_txn_t* txn, bool write, size_t bytes, - uint64_t* pages, size_t pagecount, uint16_t* idx) { +zx_status_t BlockDevice::QueueTxn(block_txn_t* txn, bool write, size_t bytes, uint64_t* pages, + size_t pagecount, uint16_t* idx) { size_t index; { @@ -299,25 +290,17 @@ zx_status_t BlockDevice::QueueTxn(block_txn_t* txn, bool write, size_t bytes, req->type = write ? VIRTIO_BLK_T_OUT : VIRTIO_BLK_T_IN; req->ioprio = 0; req->sector = txn->op.rw.offset_dev; - LTRACEF("blk_req type %u ioprio %u sector %" PRIu64 "\n", - req->type, req->ioprio, req->sector); + LTRACEF("blk_req type %u ioprio %u sector %" PRIu64 "\n", req->type, req->ioprio, req->sector); - // save the req index into the txn->extra[1] slot so we can free it when we complete the transfer + // Save the request index so we can free it when we complete the transfer. txn->index = index; -#if LOCAL_TRACE - LTRACEF("phys %p, phys_count %#lx\n", txn->phys, txn->phys_count); - for (uint64_t i = 0; i < txn->phys_count; i++) { - LTRACEF("phys %lu: %#lx\n", i, txn->phys[i]); - } -#endif - LTRACEF("page count %lu\n", pagecount); assert(pagecount > 0); - /* put together a transfer */ + // Put together a transfer. uint16_t i; - vring_desc *desc; + vring_desc* desc; { fbl::AutoLock lock(&ring_lock_); desc = vring_.AllocDescChain((uint16_t)(2u + pagecount), &i); @@ -331,10 +314,10 @@ zx_status_t BlockDevice::QueueTxn(block_txn_t* txn, bool write, size_t bytes, LTRACEF("after alloc chain desc %p, i %u\n", desc, i); - /* point the txn at this head descriptor */ + // Point the txn at this head descriptor. txn->desc = desc; - /* set up the descriptor pointing to the head */ + // Set up the descriptor pointing to the head. 
desc->addr = io_buffer_phys(&blk_req_buf_) + index * sizeof(virtio_blk_req_t); desc->len = sizeof(virtio_blk_req_t); desc->flags = VRING_DESC_F_NEXT; @@ -343,32 +326,34 @@ zx_status_t BlockDevice::QueueTxn(block_txn_t* txn, bool write, size_t bytes, for (size_t n = 0; n < pagecount; n++) { desc = vring_.DescFromIndex(desc->next); desc->addr = pages[n]; - desc->len = (uint32_t) ((bytes > PAGE_SIZE) ? PAGE_SIZE : bytes); + desc->len = (uint32_t)((bytes > PAGE_SIZE) ? PAGE_SIZE : bytes); if (n == 0) { - // first entry may not be page aligned + // First entry may not be page aligned. size_t page0_offset = txn->op.rw.offset_vmo & PAGE_MASK; - // adjust starting address + // Adjust starting address. desc->addr += page0_offset; - // trim length if necessary + // Trim length if necessary. size_t max = PAGE_SIZE - page0_offset; if (desc->len > max) { - desc->len = (uint32_t) max; + desc->len = (uint32_t)max; } } desc->flags = VRING_DESC_F_NEXT; LTRACEF("pa %#lx, len %#x\n", desc->addr, desc->len); - if (!write) - desc->flags |= VRING_DESC_F_WRITE; /* mark buffer as write-only if its a block read */ + // Mark buffer as write-only if it's a block read. + if (!write) { + desc->flags |= VRING_DESC_F_WRITE; + } bytes -= desc->len; } LTRACE_DO(virtio_dump_desc(desc)); assert(bytes == 0); - /* set up the descriptor pointing to the response */ + // Set up the descriptor pointing to the response. desc = vring_.DescFromIndex(desc->next); desc->addr = blk_res_pa_ + index; desc->len = 1; @@ -386,7 +371,7 @@ void BlockDevice::QueueReadWriteTxn(block_txn_t* txn, bool write) { txn->op.rw.offset_vmo *= config_.blk_size; - // transaction must fit within device + // Transaction must fit within device. 
if ((txn->op.rw.offset_dev >= config_.capacity) || (config_.capacity - txn->op.rw.offset_dev < txn->op.rw.length)) { LTRACEF("request beyond the end of the device!\n"); @@ -414,8 +399,8 @@ void BlockDevice::QueueReadWriteTxn(block_txn_t* txn, bool write) { zx_handle_t vmo = txn->op.rw.vmo; uint64_t pages[MAX_SCATTER]; zx_status_t r; - if ((r = zx_bti_pin(bti_.get(), ZX_BTI_PERM_READ | ZX_BTI_PERM_WRITE, vmo, - aligned_offset, pin_size, pages, num_pages, &txn->pmt)) != ZX_OK) { + if ((r = zx_bti_pin(bti_.get(), ZX_BTI_PERM_READ | ZX_BTI_PERM_WRITE, vmo, aligned_offset, + pin_size, pages, num_pages, &txn->pmt)) != ZX_OK) { TRACEF("virtio: could not pin pages\n"); txn_complete(txn, ZX_ERR_INTERNAL); return; @@ -428,24 +413,17 @@ void BlockDevice::QueueReadWriteTxn(block_txn_t* txn, bool write) { for (;;) { uint16_t idx; - // attempt to setup hw txn + // Attempt to setup hw txn. zx_status_t status = QueueTxn(txn, write, bytes, pages, num_pages, &idx); if (status == ZX_OK) { fbl::AutoLock lock(&txn_lock_); - - // save the txn in a list list_add_tail(&txn_list_, &txn->node); - - /* submit the transfer */ vring_.SubmitChain(idx); - - /* kick it off */ vring_.Kick(); - return; } else { if (cannot_fail) { - printf("virtio-block: failed to queue txn to hw: %d\n", status); + TRACEF("virtio-block: failed to queue txn to hw: %d\n", status); txn_complete(txn, status); return; } @@ -453,12 +431,12 @@ void BlockDevice::QueueReadWriteTxn(block_txn_t* txn, bool write) { fbl::AutoLock lock(&txn_lock_); if (list_is_empty(&txn_list_)) { - // we hold the queue lock and the list is empty - // if we fail this time around, no point in trying again + // We hold the queue lock and the list is empty; if we fail this time around there's + // no point in trying again. cannot_fail = true; continue; } else { - // let the completer know we need to wake up + // Let the completer know we need to wake up. 
txn_wait_ = true; } } diff --git a/system/dev/bus/virtio/block.h b/system/dev/bus/virtio/block.h index 4a34982606c3a8d6556cd8bb2827b29fb663da57..3143ccb99d5deb7fc068f9e35a7c93606f53946c 100644 --- a/system/dev/bus/virtio/block.h +++ b/system/dev/bus/virtio/block.h @@ -1,7 +1,8 @@ // Copyright 2016 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#pragma once +#ifndef ZIRCON_SYSTEM_DEV_BUS_VIRTIO_BLOCK_H_ +#define ZIRCON_SYSTEM_DEV_BUS_VIRTIO_BLOCK_H_ #include "device.h" #include "ring.h" @@ -10,9 +11,9 @@ #include <zircon/compiler.h> #include "backends/backend.h" +#include <ddk/protocol/block.h> #include <virtio/block.h> #include <zircon/device/block.h> -#include <ddk/protocol/block.h> #include <lib/sync/completion.h> @@ -57,26 +58,26 @@ private: void GetInfo(block_info_t* info); - zx_status_t QueueTxn(block_txn_t* txn, bool write, size_t bytes, - uint64_t* pages, size_t pagecount, uint16_t* idx); + zx_status_t QueueTxn(block_txn_t* txn, bool write, size_t bytes, uint64_t* pages, + size_t pagecount, uint16_t* idx); void QueueReadWriteTxn(block_txn_t* txn, bool write); void txn_complete(block_txn_t* txn, zx_status_t status); - // the main virtio ring + // The main virtio ring. Ring vring_ = {this}; - // lock to be used around Ring::AllocDescChain and FreeDesc - // TODO: move this into Ring class once it's certain that other - // users of the class are okay with it. + // Lock to be used around Ring::AllocDescChain and FreeDesc. + // TODO: Move this into Ring class once it's certain that other users of the class are okay with + // it. fbl::Mutex ring_lock_; - static const uint16_t ring_size = 128; // 128 matches legacy pci + static const uint16_t ring_size = 128; // 128 matches legacy pci. - // saved block device configuration out of the pci config BAR + // Saved block device configuration out of the pci config BAR. 
virtio_blk_config_t config_ = {}; - // a queue of block request/responses + // A queue of block request/responses. static const size_t blk_req_count = 32; io_buffer_t blk_req_buf_; @@ -96,11 +97,9 @@ private: return i; } - void free_blk_req(size_t i) { - blk_req_bitmap_ &= ~(1 << i); - } + void free_blk_req(size_t i) { blk_req_bitmap_ &= ~(1 << i); } - // pending iotxns and waiter state + // Pending iotxns and waiter state. fbl::Mutex txn_lock_; list_node txn_list_ = LIST_INITIAL_VALUE(txn_list_); bool txn_wait_ = false; @@ -110,3 +109,5 @@ private: }; } // namespace virtio + +#endif // ZIRCON_SYSTEM_DEV_BUS_VIRTIO_BLOCK_H_