Struct bevy::render::render_resource::Buffer
pub struct Buffer { /* private fields */ }
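A Buffer is obtained from a RenderDevice rather than constructed directly. A minimal sketch of creating a CPU-readable buffer follows; the helper name, label, and 256-byte size are illustrative assumptions, not part of this API:

use bevy::render::render_resource::{Buffer, BufferDescriptor, BufferUsages};
use bevy::render::renderer::RenderDevice;

// Hypothetical helper: creates a buffer that a GPU pass can copy into
// and that the CPU can later map for reading.
fn create_readback_buffer(render_device: &RenderDevice) -> Buffer {
    render_device.create_buffer(&BufferDescriptor {
        label: Some("readback_buffer"), // illustrative label
        size: 256,                      // illustrative size in bytes
        usage: BufferUsages::COPY_DST | BufferUsages::MAP_READ,
        mapped_at_creation: false,
    })
}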
Implementations
impl Buffer
pub fn id(&self) -> BufferId
pub fn slice(&self, bounds: impl RangeBounds<u64>) -> BufferSlice<'_>
Examples found in repository
fn render<'w>(
    item: &P,
    _view: (),
    instance_buffer: Option<&'w InstanceBuffer>,
    (meshes, render_mesh_instances): SystemParamItem<'w, '_, Self::Param>,
    pass: &mut TrackedRenderPass<'w>,
) -> RenderCommandResult {
    // Look up the mesh instance and its GPU mesh; bail out of this render
    // command if any of the required data is missing.
    let Some(mesh_instance) = render_mesh_instances.render_mesh_queue_data(item.entity())
    else {
        return RenderCommandResult::Failure;
    };
    let Some(gpu_mesh) = meshes.into_inner().get(mesh_instance.mesh_asset_id) else {
        return RenderCommandResult::Failure;
    };
    let Some(instance_buffer) = instance_buffer else {
        return RenderCommandResult::Failure;
    };

    // Bind the mesh vertex data at slot 0 and the per-instance data at slot 1.
    pass.set_vertex_buffer(0, gpu_mesh.vertex_buffer.slice(..));
    pass.set_vertex_buffer(1, instance_buffer.buffer.slice(..));

    match &gpu_mesh.buffer_info {
        GpuBufferInfo::Indexed {
            buffer,
            index_format,
            count,
        } => {
            pass.set_index_buffer(buffer.slice(..), 0, *index_format);
            pass.draw_indexed(0..*count, 0, 0..instance_buffer.length as u32);
        }
        GpuBufferInfo::NonIndexed => {
            pass.draw(0..gpu_mesh.vertex_count, 0..instance_buffer.length as u32);
        }
    }
    RenderCommandResult::Success
}
More examples
fn map_and_read_buffer(
    render_device: Res<RenderDevice>,
    buffers: Res<Buffers>,
    sender: Res<RenderWorldSender>,
) {
    // Finally time to get our data back from the GPU.
    // First we get a buffer slice, which represents a chunk of the buffer (which we
    // can't access yet). We want the whole thing, so we use an unbounded range.
    let buffer_slice = buffers.cpu_buffer.slice(..);

    // Now things get complicated. WebGPU, for safety reasons, only allows either the GPU
    // or the CPU to access a buffer's contents at a time. We need to "map" the buffer,
    // which means flipping ownership of the buffer over to the CPU and making access
    // legal. We do this with `BufferSlice::map_async`.
    //
    // The problem is that `map_async` is not an async function, so we can't await it.
    // What we do instead is pass in a closure that will be executed when the slice is
    // either mapped or the mapping has failed.
    //
    // The problem with this is that we don't have a reliable way to wait in the main
    // code for the buffer to be mapped; even worse, calling `get_mapped_range` or
    // `get_mapped_range_mut` prematurely will cause a panic, not return an error.
    //
    // Using channels solves this: awaiting the receipt of a message from the passed
    // closure forces the outside code to wait. It also doesn't hurt if the closure
    // finishes before the outside code catches up, as the message is buffered and
    // receiving will just pick it up.
    //
    // It may also be worth noting that although asynchronous channels are wholly
    // unnecessary on native, for the sake of portability to WASM we'll use async
    // channels that work on both native and WASM.
    let (s, r) = crossbeam_channel::unbounded::<()>();

    // Maps the buffer so it can be read on the CPU.
    buffer_slice.map_async(MapMode::Read, move |r| match r {
        // This will execute once the GPU is ready, so after the call to `poll()`.
        Ok(_) => s.send(()).expect("Failed to send map update"),
        Err(err) => panic!("Failed to map buffer {err}"),
    });

    // In order for the mapping to be completed, one of three things must happen.
    // One of those is calling `Device::poll`. This isn't necessary on the web, as
    // devices are polled automatically, but natively we need to make sure this
    // happens manually. `Maintain::Wait` will cause the thread to wait on native,
    // but not on WebGPU.

    // This blocks until the GPU is done executing everything.
    render_device.poll(Maintain::wait()).panic_on_timeout();

    // This blocks until the buffer is mapped.
    r.recv().expect("Failed to receive the map_async message");

    {
        let buffer_view = buffer_slice.get_mapped_range();
        let data = buffer_view
            .chunks(std::mem::size_of::<u32>())
            .map(|chunk| u32::from_ne_bytes(chunk.try_into().expect("should be a u32")))
            .collect::<Vec<u32>>();
        sender
            .send(data)
            .expect("Failed to send data to main world");
    }

    // We need to make sure all `BufferView`s are dropped before we do what we're
    // about to do: unmap, so that we can copy to the staging buffer in the next
    // iteration.
    buffers.cpu_buffer.unmap();
}
pub fn unmap(&self)
Examples found in repository
See the map_and_read_buffer example under slice above, which ends with a call to unmap once all BufferViews have been dropped.
Methods from Deref<Target = Buffer>
pub fn as_entire_binding(&self) -> BindingResource<'_>
Return the binding view of the entire buffer.
Examples found in repository
fn prepare_bind_group(
    mut commands: Commands,
    pipeline: Res<ComputePipeline>,
    render_device: Res<RenderDevice>,
    buffers: Res<Buffers>,
) {
    let bind_group = render_device.create_bind_group(
        None,
        &pipeline.layout,
        &BindGroupEntries::single(buffers.gpu_buffer.as_entire_binding()),
    );
    commands.insert_resource(GpuBufferBindGroup(bind_group));
}
pub fn as_entire_buffer_binding(&self) -> BufferBinding<'_>
Return the binding view of the entire buffer.
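Unlike as_entire_binding, this returns the raw BufferBinding, which can be wrapped in a BindingResource by hand. A minimal sketch, assuming a hypothetical my_buffer of type Buffer:

// Equivalent to `my_buffer.as_entire_binding()`:
let binding = BindingResource::Buffer(my_buffer.as_entire_buffer_binding());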
pub fn slice<S>(&self, bounds: S) -> BufferSlice<'_>
where
    S: RangeBounds<u64>,
Use only a portion of this Buffer for a given operation. Choosing a range with no end will use the rest of the buffer. Using a totally unbounded range will use the entire buffer.
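For example, a minimal sketch of the three kinds of ranges, assuming a hypothetical my_buffer of at least 512 bytes:

let first_half = my_buffer.slice(0..256); // bytes 0..256
let tail = my_buffer.slice(256..);        // byte 256 through the end
let whole = my_buffer.slice(..);          // the entire buffer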
pub fn unmap(&self)
Flushes any pending write operations and unmaps the buffer from host memory.
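A minimal sketch of the write-then-unmap lifecycle, assuming a hypothetical my_buffer created with mapped_at_creation: true:

{
    // Write the native-endian bytes of a u32 into the first 4 bytes.
    let mut view = my_buffer.slice(..).get_mapped_range_mut();
    view[..4].copy_from_slice(&42u32.to_ne_bytes());
} // the BufferViewMut must be dropped before unmapping
my_buffer.unmap(); // flushes the write and hands the buffer back to the GPU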
pub fn destroy(&self)
Destroy the associated native resources as soon as possible.
pub fn size(&self) -> u64
Returns the length of the buffer allocation in bytes.
This is always equal to the size that was specified when creating the buffer.
pub fn usage(&self) -> BufferUsages
Returns the allowed usages for this Buffer.
This is always equal to the usage that was specified when creating the buffer.
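BufferUsages is a bitflags type, so individual capabilities can be tested with contains. A minimal sketch, assuming a hypothetical my_buffer:

if my_buffer.usage().contains(BufferUsages::MAP_READ) {
    // The buffer may be mapped for reading on the CPU.
}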
pub fn global_id(&self) -> Id<Buffer>
Returns a globally-unique identifier for this Buffer.
Calling this method multiple times on the same object will always return the same value.
The returned value is guaranteed to be different for all resources created from the same Instance.
Trait Implementations
Auto Trait Implementations
impl Freeze for Buffer
impl RefUnwindSafe for Buffer
impl Send for Buffer
impl Sync for Buffer
impl Unpin for Buffer
impl UnwindSafe for Buffer
Blanket Implementations
impl<T, U> AsBindGroupShaderType<U> for T
fn as_bind_group_shader_type(&self, _images: &RenderAssets<GpuImage>) -> U
Return the T ShaderType for self. When used in AsBindGroup derives, it is safe to assume that all images in self exist.
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> Downcast for T
where
    T: Any,
fn into_any(self: Box<T>) -> Box<dyn Any>
Convert Box<dyn Trait> (where Trait: Downcast) to Box<dyn Any>. Box<dyn Any> can then be further downcast into Box<ConcreteType> where ConcreteType implements Trait.
fn into_any_rc(self: Rc<T>) -> Rc<dyn Any>
Convert Rc<Trait> (where Trait: Downcast) to Rc<Any>. Rc<Any> can then be further downcast into Rc<ConcreteType> where ConcreteType implements Trait.
fn as_any(&self) -> &(dyn Any + 'static)
Convert &Trait (where Trait: Downcast) to &Any. This is needed since Rust cannot generate &Any's vtable from &Trait's.
fn as_any_mut(&mut self) -> &mut (dyn Any + 'static)
Convert &mut Trait (where Trait: Downcast) to &mut Any. This is needed since Rust cannot generate &mut Any's vtable from &mut Trait's.
impl<T> DowncastSync for T
impl<S> FromSample<S> for S
fn from_sample_(s: S) -> S
impl<T> Instrument for T
fn instrument(self, span: Span) -> Instrumented<Self>
Instruments this type with the provided Span, returning an Instrumented wrapper.
fn in_current_span(self) -> Instrumented<Self>
Instruments this type with the current Span, returning an Instrumented wrapper.
impl<T> IntoEither for T
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise.
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise.