// Forked from 根本写不队 / OSKernel2023-Rift-OS (authored by Rift, commit 0d962064)
use super::*;
use crate::header::VirtIOHeader;
use crate::queue::VirtQueue;
use _core::sync::atomic::{fence, Ordering};
use bitflags::*;
use core::hint::spin_loop;
use log::*;
use volatile::Volatile;
/// The virtio block device is a simple virtual block device (ie. disk).
///
/// Read and write requests (and other exotic requests) are placed in the queue,
/// and serviced (probably out of order) by the device except where noted.
pub struct VirtIOBlk<'a, H: Hal> {
// MMIO register block of the device; `'static` because the mapping outlives the driver
header: &'static mut VirtIOHeader,
// the single request/response virtqueue (queue index 0, sized in `new`)
queue: VirtQueue<'a, H>,
// device capacity read from the config space at init; counted in 512-byte sectors
capacity: usize,
}
impl<H: Hal> VirtIOBlk<'_, H> {
    /// Create a new VirtIO-Blk driver.
    ///
    /// Negotiates device features (currently none are accepted), reads the
    /// device capacity from the configuration space, allocates a 16-entry
    /// virtqueue at queue index 0, and finishes device initialization.
    pub fn new(header: &'static mut VirtIOHeader) -> Result<Self> {
        header.begin_init(|features| {
            let features = BlkFeature::from_bits_truncate(features);
            info!("device features: {:?}", features);
            // negotiate these flags only
            let supported_features = BlkFeature::empty();
            (features & supported_features).bits()
        });

        // read configuration space
        let config = unsafe { &mut *(header.config_space() as *mut BlkConfig) };
        info!("config: {:?}", config);
        // `capacity` counts 512-byte sectors, so two sectors make one KiB
        info!(
            "found a block device of size {}KB",
            config.capacity.read() / 2
        );

        let queue = VirtQueue::new(header, 0, 16)?;
        header.finish_init();

        Ok(VirtIOBlk {
            header,
            queue,
            capacity: config.capacity.read() as usize,
        })
    }

    /// Acknowledge interrupt.
    pub fn ack_interrupt(&mut self) -> bool {
        self.header.ack_interrupt()
    }

    /// Read a block, spinning until the device reports completion.
    ///
    /// # Panics
    ///
    /// Panics if `buf.len() != BLK_SIZE`.
    pub fn read_block(&mut self, block_id: usize, buf: &mut [u8]) -> Result {
        assert_eq!(buf.len(), BLK_SIZE);
        let req = BlkReq {
            type_: ReqType::In,
            reserved: 0,
            sector: block_id as u64,
        };
        let mut resp = BlkResp::default();
        self.queue.add(&[req.as_buf()], &[buf, resp.as_buf_mut()])?;
        self.header.notify(0);
        while !self.queue.can_pop() {
            spin_loop();
        }
        self.queue.pop_used()?;
        // Make sure the device's write to `resp` is visible before we read it.
        fence(Ordering::SeqCst);
        match resp.status {
            RespStatus::Ok => Ok(()),
            _ => Err(Error::IoError),
        }
    }

    /// Read a block in a non-blocking way which means that it returns immediately.
    ///
    /// # Arguments
    ///
    /// * `block_id` - The identifier of the block to read.
    /// * `buf` - The buffer in the memory which the block is read into.
    /// * `resp` - A mutable reference to a variable provided by the caller
    ///   which contains the status of the requests. The caller can safely
    ///   read the variable only after the request is ready.
    ///
    /// # Usage
    ///
    /// It will submit request to the virtio block device and return a token identifying
    /// the position of the first Descriptor in the chain. If there are not enough
    /// Descriptors to allocate, then it returns [Error::BufferTooSmall].
    ///
    /// After the request is ready, `resp` will be updated and the caller can get the
    /// status of the request(e.g. succeed or failed) through it. However, the caller
    /// **must not** spin on `resp` to wait for it to change. A safe way is to read it
    /// after the same token as this method returns is fetched through [VirtIOBlk::pop_used()],
    /// which means that the request has been ready.
    ///
    /// # Safety
    ///
    /// `buf` is still borrowed by the underlying virtio block device even if this
    /// method returns. Thus, it is the caller's responsibility to guarantee that
    /// `buf` is not accessed before the request is completed in order to avoid
    /// data races.
    pub unsafe fn read_block_nb(
        &mut self,
        block_id: usize,
        buf: &mut [u8],
        resp: &mut BlkResp,
    ) -> Result<u16> {
        assert_eq!(buf.len(), BLK_SIZE);
        let req = BlkReq {
            type_: ReqType::In,
            reserved: 0,
            sector: block_id as u64,
        };
        let token = self.queue.add(&[req.as_buf()], &[buf, resp.as_buf_mut()])?;
        self.header.notify(0);
        Ok(token)
    }

    /// Write a block, spinning until the device reports completion.
    ///
    /// # Panics
    ///
    /// Panics if `buf.len() != BLK_SIZE`.
    pub fn write_block(&mut self, block_id: usize, buf: &[u8]) -> Result {
        assert_eq!(buf.len(), BLK_SIZE);
        let req = BlkReq {
            type_: ReqType::Out,
            reserved: 0,
            sector: block_id as u64,
        };
        let mut resp = BlkResp::default();
        self.queue.add(&[req.as_buf(), buf], &[resp.as_buf_mut()])?;
        self.header.notify(0);
        while !self.queue.can_pop() {
            spin_loop();
        }
        self.queue.pop_used()?;
        // Mirror `read_block`: ensure the device's status write is visible
        // before inspecting `resp.status`.
        fence(Ordering::SeqCst);
        match resp.status {
            RespStatus::Ok => Ok(()),
            _ => Err(Error::IoError),
        }
    }

    /// Write a block in a non-blocking way which means that it returns immediately.
    ///
    /// # Arguments
    ///
    /// * `block_id` - The identifier of the block to write.
    /// * `buf` - The buffer in the memory containing the data to write to the block.
    /// * `resp` - A mutable reference to a variable provided by the caller
    ///   which contains the status of the requests. The caller can safely
    ///   read the variable only after the request is ready.
    ///
    /// # Usage
    ///
    /// See also [VirtIOBlk::read_block_nb()].
    ///
    /// # Safety
    ///
    /// See also [VirtIOBlk::read_block_nb()].
    pub unsafe fn write_block_nb(
        &mut self,
        block_id: usize,
        buf: &[u8],
        resp: &mut BlkResp,
    ) -> Result<u16> {
        assert_eq!(buf.len(), BLK_SIZE);
        let req = BlkReq {
            type_: ReqType::Out,
            reserved: 0,
            sector: block_id as u64,
        };
        let token = self.queue.add(&[req.as_buf(), buf], &[resp.as_buf_mut()])?;
        self.header.notify(0);
        Ok(token)
    }

    /// During an interrupt, it fetches a token of a completed request from the used
    /// ring and return it. If all completed requests have already been fetched, return
    /// Err(Error::NotReady).
    pub fn pop_used(&mut self) -> Result<u16> {
        self.queue.pop_used().map(|p| p.0)
    }

    /// Return size of its VirtQueue.
    /// It can be used to tell the caller how many channels he should monitor on.
    pub fn virt_queue_size(&self) -> u16 {
        self.queue.size()
    }
}
/// Device configuration space of a virtio block device.
///
/// Only `capacity` is read by this driver; the remaining fields are declared
/// so the struct matches the device's register layout.
/// NOTE(review): field order/sizes assumed to follow the virtio-blk config
/// layout — confirm against the virtio specification.
#[repr(C)]
#[derive(Debug)]
struct BlkConfig {
/// Number of 512 Bytes sectors
capacity: Volatile<u64>,
// fields below are mapped but never read in this file
size_max: Volatile<u32>,
seg_max: Volatile<u32>,
cylinders: Volatile<u16>,
heads: Volatile<u8>,
sectors: Volatile<u8>,
blk_size: Volatile<u32>,
physical_block_exp: Volatile<u8>,
alignment_offset: Volatile<u8>,
min_io_size: Volatile<u16>,
opt_io_size: Volatile<u32>,
// ... ignored
}
#[repr(C)]
#[derive(Debug)]
struct BlkReq {