```rust
pub trait Device: WasmNotSendSync {
type A: Api;
// Required methods
unsafe fn exit(self, queue: <Self::A as Api>::Queue);
unsafe fn create_buffer(
&self,
desc: &BufferDescriptor<'_>
) -> Result<<Self::A as Api>::Buffer, DeviceError>;
unsafe fn destroy_buffer(&self, buffer: <Self::A as Api>::Buffer);
unsafe fn map_buffer(
&self,
buffer: &<Self::A as Api>::Buffer,
range: Range<u64>
) -> Result<BufferMapping, DeviceError>;
unsafe fn unmap_buffer(&self, buffer: &<Self::A as Api>::Buffer);
unsafe fn flush_mapped_ranges<I>(
&self,
buffer: &<Self::A as Api>::Buffer,
ranges: I
)
where I: Iterator<Item = Range<u64>>;
unsafe fn invalidate_mapped_ranges<I>(
&self,
buffer: &<Self::A as Api>::Buffer,
ranges: I
)
where I: Iterator<Item = Range<u64>>;
unsafe fn create_texture(
&self,
desc: &TextureDescriptor<'_>
) -> Result<<Self::A as Api>::Texture, DeviceError>;
unsafe fn destroy_texture(&self, texture: <Self::A as Api>::Texture);
unsafe fn create_texture_view(
&self,
texture: &<Self::A as Api>::Texture,
desc: &TextureViewDescriptor<'_>
) -> Result<<Self::A as Api>::TextureView, DeviceError>;
unsafe fn destroy_texture_view(&self, view: <Self::A as Api>::TextureView);
unsafe fn create_sampler(
&self,
desc: &SamplerDescriptor<'_>
) -> Result<<Self::A as Api>::Sampler, DeviceError>;
unsafe fn destroy_sampler(&self, sampler: <Self::A as Api>::Sampler);
unsafe fn create_command_encoder(
&self,
desc: &CommandEncoderDescriptor<'_, Self::A>
) -> Result<<Self::A as Api>::CommandEncoder, DeviceError>;
unsafe fn destroy_command_encoder(
&self,
pool: <Self::A as Api>::CommandEncoder
);
unsafe fn create_bind_group_layout(
&self,
desc: &BindGroupLayoutDescriptor<'_>
) -> Result<<Self::A as Api>::BindGroupLayout, DeviceError>;
unsafe fn destroy_bind_group_layout(
&self,
bg_layout: <Self::A as Api>::BindGroupLayout
);
unsafe fn create_pipeline_layout(
&self,
desc: &PipelineLayoutDescriptor<'_, Self::A>
) -> Result<<Self::A as Api>::PipelineLayout, DeviceError>;
unsafe fn destroy_pipeline_layout(
&self,
pipeline_layout: <Self::A as Api>::PipelineLayout
);
unsafe fn create_bind_group(
&self,
desc: &BindGroupDescriptor<'_, Self::A>
) -> Result<<Self::A as Api>::BindGroup, DeviceError>;
unsafe fn destroy_bind_group(&self, group: <Self::A as Api>::BindGroup);
unsafe fn create_shader_module(
&self,
desc: &ShaderModuleDescriptor<'_>,
shader: ShaderInput<'_>
) -> Result<<Self::A as Api>::ShaderModule, ShaderError>;
unsafe fn destroy_shader_module(
&self,
module: <Self::A as Api>::ShaderModule
);
unsafe fn create_render_pipeline(
&self,
desc: &RenderPipelineDescriptor<'_, Self::A>
) -> Result<<Self::A as Api>::RenderPipeline, PipelineError>;
unsafe fn destroy_render_pipeline(
&self,
pipeline: <Self::A as Api>::RenderPipeline
);
unsafe fn create_compute_pipeline(
&self,
desc: &ComputePipelineDescriptor<'_, Self::A>
) -> Result<<Self::A as Api>::ComputePipeline, PipelineError>;
unsafe fn destroy_compute_pipeline(
&self,
pipeline: <Self::A as Api>::ComputePipeline
);
unsafe fn create_pipeline_cache(
&self,
desc: &PipelineCacheDescriptor<'_>
) -> Result<<Self::A as Api>::PipelineCache, PipelineCacheError>;
unsafe fn destroy_pipeline_cache(
&self,
cache: <Self::A as Api>::PipelineCache
);
unsafe fn create_query_set(
&self,
desc: &QuerySetDescriptor<Option<&str>>
) -> Result<<Self::A as Api>::QuerySet, DeviceError>;
unsafe fn destroy_query_set(&self, set: <Self::A as Api>::QuerySet);
unsafe fn create_fence(
&self
) -> Result<<Self::A as Api>::Fence, DeviceError>;
unsafe fn destroy_fence(&self, fence: <Self::A as Api>::Fence);
unsafe fn get_fence_value(
&self,
fence: &<Self::A as Api>::Fence
) -> Result<u64, DeviceError>;
unsafe fn wait(
&self,
fence: &<Self::A as Api>::Fence,
value: u64,
timeout_ms: u32
) -> Result<bool, DeviceError>;
unsafe fn start_capture(&self) -> bool;
unsafe fn stop_capture(&self);
unsafe fn create_acceleration_structure(
&self,
desc: &AccelerationStructureDescriptor<'_>
) -> Result<<Self::A as Api>::AccelerationStructure, DeviceError>;
unsafe fn get_acceleration_structure_build_sizes(
&self,
desc: &GetAccelerationStructureBuildSizesDescriptor<'_, Self::A>
) -> AccelerationStructureBuildSizes;
unsafe fn get_acceleration_structure_device_address(
&self,
acceleration_structure: &<Self::A as Api>::AccelerationStructure
) -> u64;
unsafe fn destroy_acceleration_structure(
&self,
acceleration_structure: <Self::A as Api>::AccelerationStructure
);
fn get_internal_counters(&self) -> HalCounters;
// Provided methods
fn pipeline_cache_validation_key(&self) -> Option<[u8; 16]> { ... }
unsafe fn pipeline_cache_get_data(
&self,
cache: &<Self::A as Api>::PipelineCache
) -> Option<Vec<u8>> { ... }
}
```
A connection to a GPU and a pool of resources to use with it.

A wgpu-hal `Device` represents an open connection to a specific graphics
processor, controlled via the backend `Device::A`. A `Device` is mostly
used for creating resources. Each `Device` has an associated `Queue` used
for command submission.

On Vulkan a `Device` corresponds to a logical device (`VkDevice`). Other
backends don’t have an exact analog: for example, `ID3D12Device`s and
`MTLDevice`s are owned by the backends’ `wgpu_hal::Adapter`
implementations, and shared by all `wgpu_hal::Device`s created from that
`Adapter`.

A `Device`’s life cycle is generally:
1. Obtain a `Device` and its associated `Queue` by calling `Adapter::open`.

   Alternatively, the backend-specific types that implement `Adapter` often
   have methods for creating a wgpu-hal `Device` from a platform-specific
   handle. For example, `vulkan::Adapter::device_from_raw` can create a
   `vulkan::Device` from an `ash::Device`.

2. Create resources to use on the device by calling methods like
   `Device::create_texture` or `Device::create_shader_module`.

3. Call `Device::create_command_encoder` to obtain a `CommandEncoder`,
   which you can use to build `CommandBuffer`s holding commands to be
   executed on the GPU.

4. Call `Queue::submit` on the `Device`’s associated `Queue` to submit
   `CommandBuffer`s for execution on the GPU. If needed, call
   `Device::wait` to wait for them to finish execution.

5. Free resources with methods like `Device::destroy_texture` or
   `Device::destroy_shader_module`.

6. Shut down the device by calling `Device::exit`.
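A minimal sketch of that sequence, written only against this trait’s methods and generic over any `D: Device`. The descriptor contents, the exact `Queue::submit` call, and the fence value are assumptions elided behind comments; this is an illustration, not a prescription for any particular backend.

```rust
// Sketch of the Device life cycle above; not a complete program.
unsafe fn lifecycle_sketch<D: Device>(
    device: D,
    queue: <D::A as Api>::Queue,
    texture_desc: &TextureDescriptor<'_>,
) -> Result<(), DeviceError> {
    unsafe {
        // 2. Create resources.
        let texture = device.create_texture(texture_desc)?;

        // 3. Obtain a CommandEncoder and record CommandBuffers with it.
        // let encoder = device.create_command_encoder(&encoder_desc)?;

        // 4. Submit on the associated Queue, telling it to signal `fence`
        //    with some value (1 here), then wait for that value. The wait
        //    is only meaningful because the (commented-out) submission
        //    above would actually store 1 in `fence`.
        let fence = device.create_fence()?;
        // queue.submit(/* command buffers, signal (fence, 1) */)?;
        let _reached = device.wait(&fence, 1, 1_000)?;

        // 5. Free resources.
        device.destroy_fence(fence);
        device.destroy_texture(texture);

        // 6. Shut down the device.
        device.exit(queue);
    }
    Ok(())
}
```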
§Safety

As with other wgpu-hal APIs, validation is the caller’s responsibility.
Here are the general requirements for all `Device` methods:

- Any resource passed to a `Device` method must have been created by that
  `Device`. For example, a `Texture` passed to `Device::destroy_texture`
  must have been created with the `Device` passed as `self`.
- Resources may not be destroyed if they are used by any submitted command
  buffers that have not yet finished execution.
§Required Associated Types

§Required Methods
unsafe fn create_buffer( &self, desc: &BufferDescriptor<'_> ) -> Result<<Self::A as Api>::Buffer, DeviceError>

Creates a new buffer.

The initial usage is `BufferUses::empty()`.
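For orientation, a hedged example of filling out a descriptor and creating a buffer. The `BufferDescriptor` field names (`label`, `size`, `usage`, `memory_flags`) and the `MemoryFlags` type are assumptions about the surrounding crate, not something this trait itself guarantees.

```rust
// Hypothetical: create a small buffer the CPU can later write through a mapping.
// Field names on BufferDescriptor are assumed; adjust to the actual struct.
let desc = BufferDescriptor {
    label: Some("staging"),
    size: 1024,
    usage: BufferUses::MAP_WRITE | BufferUses::COPY_SRC,
    memory_flags: MemoryFlags::empty(),
};
// Safety: the caller is responsible for validating `desc` against device limits.
let buffer = unsafe { device.create_buffer(&desc)? };
```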
unsafe fn destroy_buffer(&self, buffer: <Self::A as Api>::Buffer)

Free `buffer` and any GPU resources it owns.

Note that backends are allowed to allocate GPU memory for buffers from
allocation pools, and this call is permitted to simply return `buffer`’s
storage to that pool, without making it available to other applications.

§Safety

- The given `buffer` must not currently be mapped.
unsafe fn map_buffer( &self, buffer: &<Self::A as Api>::Buffer, range: Range<u64> ) -> Result<BufferMapping, DeviceError>

Return a pointer to CPU memory mapping the contents of `buffer`.

Buffer mappings are persistent: the buffer may remain mapped on the CPU
while the GPU reads or writes to it. (Note that `wgpu_core` does not use
this feature: when a `wgpu_core::Buffer` is unmapped, the underlying
`wgpu_hal` buffer is also unmapped.)

If this function returns `Ok(mapping)`, then:

- `mapping.ptr` is the CPU address of the start of the mapped memory.
- If `mapping.is_coherent` is `true`, then CPU writes to the mapped memory
  are immediately visible on the GPU, and vice versa.

§Safety

- The given `buffer` must have been created with the `MAP_READ` or
  `MAP_WRITE` flags set in `BufferDescriptor::usage`.
- The given `range` must fall within the size of `buffer`.
- The caller must avoid data races between the CPU and the GPU. A data race
  is any pair of accesses to a particular byte, one of which is a write,
  that are not ordered with respect to each other by some sort of
  synchronization operation.
- If this function returns `Ok(mapping)` and `mapping.is_coherent` is
  `false`, then:
  - Every CPU write to a mapped byte followed by a GPU read of that byte
    must have at least one call to `Device::flush_mapped_ranges` covering
    that byte that occurs between those two accesses.
  - Every GPU write to a mapped byte followed by a CPU read of that byte
    must have at least one call to `Device::invalidate_mapped_ranges`
    covering that byte that occurs between those two accesses.

  Note that the data race rule above requires that all such access pairs be
  ordered, so it is meaningful to talk about what must occur “between” them.
- Zero-sized mappings are not allowed.
- The returned `BufferMapping::ptr` must not be used after a call to
  `Device::unmap_buffer`.
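A hedged sketch of the protocol those rules describe: map a range, write from the CPU, flush when the mapping is not coherent, then unmap. It assumes `BufferMapping` exposes `ptr` as a `NonNull<u8>` alongside `is_coherent`, and that `buffer` was created with `MAP_WRITE`.

```rust
// Sketch only: upload `data` into the first `data.len()` bytes of `buffer`.
unsafe fn write_bytes<D: Device>(
    device: &D,
    buffer: &<D::A as Api>::Buffer,
    data: &[u8], // must be non-empty: zero-sized mappings are not allowed
) -> Result<(), DeviceError> {
    let len = data.len() as u64;
    unsafe {
        let mapping = device.map_buffer(buffer, 0..len)?;
        // CPU write into the mapped range.
        std::ptr::copy_nonoverlapping(data.as_ptr(), mapping.ptr.as_ptr(), data.len());
        if !mapping.is_coherent {
            // Make the CPU writes visible to later GPU reads of these bytes.
            device.flush_mapped_ranges(buffer, std::iter::once(0..len));
        }
        // The mapping's pointer must not be used after this call.
        device.unmap_buffer(buffer);
    }
    Ok(())
}
```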
unsafe fn unmap_buffer(&self, buffer: &<Self::A as Api>::Buffer)

Remove the mapping established by the last call to `Device::map_buffer`.

§Safety

- The given `buffer` must be currently mapped.
unsafe fn flush_mapped_ranges<I>( &self, buffer: &<Self::A as Api>::Buffer, ranges: I ) where I: Iterator<Item = Range<u64>>

Indicate that CPU writes to mapped buffer memory should be made visible to
the GPU.

§Safety

- The given `buffer` must be currently mapped.
- All ranges produced by `ranges` must fall within `buffer`’s size.
unsafe fn invalidate_mapped_ranges<I>( &self, buffer: &<Self::A as Api>::Buffer, ranges: I ) where I: Iterator<Item = Range<u64>>

Indicate that GPU writes to mapped buffer memory should be made visible to
the CPU.

§Safety

- The given `buffer` must be currently mapped.
- All ranges produced by `ranges` must fall within `buffer`’s size.
unsafe fn create_texture( &self, desc: &TextureDescriptor<'_> ) -> Result<<Self::A as Api>::Texture, DeviceError>

Creates a new texture.

The initial usage for all subresources is `TextureUses::UNINITIALIZED`.
unsafe fn destroy_texture(&self, texture: <Self::A as Api>::Texture)
unsafe fn create_texture_view( &self, texture: &<Self::A as Api>::Texture, desc: &TextureViewDescriptor<'_> ) -> Result<<Self::A as Api>::TextureView, DeviceError>
unsafe fn destroy_texture_view(&self, view: <Self::A as Api>::TextureView)
unsafe fn create_sampler( &self, desc: &SamplerDescriptor<'_> ) -> Result<<Self::A as Api>::Sampler, DeviceError>
unsafe fn destroy_sampler(&self, sampler: <Self::A as Api>::Sampler)
unsafe fn create_command_encoder( &self, desc: &CommandEncoderDescriptor<'_, Self::A> ) -> Result<<Self::A as Api>::CommandEncoder, DeviceError>

Create a fresh `CommandEncoder`.

The new `CommandEncoder` is in the “closed” state.
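Since the encoder starts out closed, it has to be opened before recording and closed again to obtain a `CommandBuffer`. A hedged sketch, assuming the `CommandEncoder` trait’s `begin_encoding`/`end_encoding` methods (documented on that trait, not here):

```rust
// Hypothetical use of a freshly created encoder; the recording method names
// (begin_encoding, end_encoding) are assumptions about the CommandEncoder trait.
let mut encoder = unsafe { device.create_command_encoder(&encoder_desc)? };
unsafe {
    encoder.begin_encoding(Some("frame"))?; // open the encoder
    // ... record barriers, passes, and copies here ...
    let _cmd_buf = encoder.end_encoding()?; // close it, yielding a CommandBuffer
    // `_cmd_buf` would then be passed to Queue::submit.
}
```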
unsafe fn destroy_command_encoder(&self, pool: <Self::A as Api>::CommandEncoder)
unsafe fn create_bind_group_layout( &self, desc: &BindGroupLayoutDescriptor<'_> ) -> Result<<Self::A as Api>::BindGroupLayout, DeviceError>
Creates a bind group layout.
unsafe fn destroy_bind_group_layout( &self, bg_layout: <Self::A as Api>::BindGroupLayout )
unsafe fn create_pipeline_layout( &self, desc: &PipelineLayoutDescriptor<'_, Self::A> ) -> Result<<Self::A as Api>::PipelineLayout, DeviceError>
unsafe fn destroy_pipeline_layout( &self, pipeline_layout: <Self::A as Api>::PipelineLayout )
unsafe fn create_bind_group( &self, desc: &BindGroupDescriptor<'_, Self::A> ) -> Result<<Self::A as Api>::BindGroup, DeviceError>
unsafe fn destroy_bind_group(&self, group: <Self::A as Api>::BindGroup)
unsafe fn create_shader_module( &self, desc: &ShaderModuleDescriptor<'_>, shader: ShaderInput<'_> ) -> Result<<Self::A as Api>::ShaderModule, ShaderError>
unsafe fn destroy_shader_module(&self, module: <Self::A as Api>::ShaderModule)
unsafe fn create_render_pipeline( &self, desc: &RenderPipelineDescriptor<'_, Self::A> ) -> Result<<Self::A as Api>::RenderPipeline, PipelineError>
unsafe fn destroy_render_pipeline( &self, pipeline: <Self::A as Api>::RenderPipeline )
unsafe fn create_compute_pipeline( &self, desc: &ComputePipelineDescriptor<'_, Self::A> ) -> Result<<Self::A as Api>::ComputePipeline, PipelineError>
unsafe fn destroy_compute_pipeline( &self, pipeline: <Self::A as Api>::ComputePipeline )
unsafe fn create_pipeline_cache( &self, desc: &PipelineCacheDescriptor<'_> ) -> Result<<Self::A as Api>::PipelineCache, PipelineCacheError>
unsafe fn destroy_pipeline_cache(&self, cache: <Self::A as Api>::PipelineCache)
unsafe fn create_query_set( &self, desc: &QuerySetDescriptor<Option<&str>> ) -> Result<<Self::A as Api>::QuerySet, DeviceError>
unsafe fn destroy_query_set(&self, set: <Self::A as Api>::QuerySet)
unsafe fn create_fence(&self) -> Result<<Self::A as Api>::Fence, DeviceError>
unsafe fn destroy_fence(&self, fence: <Self::A as Api>::Fence)
unsafe fn get_fence_value( &self, fence: &<Self::A as Api>::Fence ) -> Result<u64, DeviceError>
unsafe fn wait( &self, fence: &<Self::A as Api>::Fence, value: u64, timeout_ms: u32 ) -> Result<bool, DeviceError>

Wait for `fence` to reach `value`.

Operations like `Queue::submit` can accept a `Fence` and a `FenceValue` to
store in it, so you can use this `wait` function to wait for a given queue
submission to finish execution.

The `value` argument must be a value that some actual operation you have
already presented to the device is going to store in `fence`. You cannot
wait for values yet to be submitted. (This restriction accommodates
implementations like the `vulkan` backend’s `FencePool` that must allocate
a distinct synchronization object for each fence value one is able to wait
for.)

Calling `wait` with a lower `FenceValue` than `fence`’s current value
returns immediately.
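To make that concrete, a hedged sketch of blocking on a submission that has already been told to signal `fence` with `target`. The meaning of the returned bool (assumed here: `true` when the fence reached `value`, `false` on timeout) is an assumption this excerpt does not state.

```rust
// Hypothetical: poll until an already-scheduled signal of `target` lands in `fence`.
unsafe fn wait_for_submission<D: Device>(
    device: &D,
    fence: &<D::A as Api>::Fence,
    target: u64, // must already be scheduled to be stored in `fence`
) -> Result<(), DeviceError> {
    loop {
        // Wait up to 100 ms at a time; assumed: Ok(true) = reached, Ok(false) = timed out.
        if unsafe { device.wait(fence, target, 100)? } {
            return Ok(());
        }
        // Timed out; `device.get_fence_value(fence)?` could be logged here to
        // observe progress before retrying.
    }
}
```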