Struct bevy::render::render_resource::PipelineCache
pub struct PipelineCache { /* private fields */ }
Expand description
Cache for render and compute pipelines.
The cache stores existing render and compute pipelines allocated on the GPU, as well as
pipelines pending creation. Pipelines inserted into the cache are identified by a unique ID, which
can be used to retrieve the actual GPU object once it’s ready. The creation of the GPU
pipeline object is deferred to the RenderSet::Render
step, just before the render
graph starts being processed, as this requires access to the GPU.
Note that the cache does not perform automatic deduplication of identical pipelines. It is up to the user not to insert the same pipeline twice to avoid wasting GPU resources.
Implementations§
§impl PipelineCache
impl PipelineCache
pub fn pipelines(&self) -> impl Iterator<Item = &CachedPipeline>
pub fn pipelines(&self) -> impl Iterator<Item = &CachedPipeline>
Returns an iterator over the pipelines in the pipeline cache.
pub fn waiting_pipelines(&self) -> impl Iterator<Item = usize>
pub fn waiting_pipelines(&self) -> impl Iterator<Item = usize>
Returns an iterator over the IDs of all currently waiting pipelines.
pub fn new(
device: RenderDevice,
render_adapter: RenderAdapter,
synchronous_pipeline_compilation: bool
) -> PipelineCache
pub fn new( device: RenderDevice, render_adapter: RenderAdapter, synchronous_pipeline_compilation: bool ) -> PipelineCache
Create a new pipeline cache associated with the given render device.
pub fn get_render_pipeline_state(
&self,
id: CachedRenderPipelineId
) -> &CachedPipelineState
pub fn get_render_pipeline_state( &self, id: CachedRenderPipelineId ) -> &CachedPipelineState
Get the state of a cached render pipeline.
pub fn get_compute_pipeline_state(
&self,
id: CachedComputePipelineId
) -> &CachedPipelineState
pub fn get_compute_pipeline_state( &self, id: CachedComputePipelineId ) -> &CachedPipelineState
Get the state of a cached compute pipeline.
Examples found in repository?
218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246
fn update(&mut self, world: &mut World) {
    // Drive the simulation's state machine forward: each compute pipeline
    // is compiled asynchronously, so we only advance once the pipeline
    // cache reports the relevant pipeline as ready.
    let pipeline = world.resource::<GameOfLifePipeline>();
    let cache = world.resource::<PipelineCache>();

    // True once the given compute pipeline has finished compiling.
    let ready = |id| matches!(cache.get_compute_pipeline_state(id), CachedPipelineState::Ok(_));

    match self.state {
        GameOfLifeState::Loading if ready(pipeline.init_pipeline) => {
            self.state = GameOfLifeState::Init;
        }
        GameOfLifeState::Loading => {}
        GameOfLifeState::Init if ready(pipeline.update_pipeline) => {
            self.state = GameOfLifeState::Update(1);
        }
        GameOfLifeState::Init => {}
        // Ping-pong between the two update phases (they swap which texture
        // is read and which is written).
        GameOfLifeState::Update(0) => {
            self.state = GameOfLifeState::Update(1);
        }
        GameOfLifeState::Update(1) => {
            self.state = GameOfLifeState::Update(0);
        }
        GameOfLifeState::Update(_) => unreachable!(),
    }
}
pub fn get_render_pipeline_descriptor(
&self,
id: CachedRenderPipelineId
) -> &RenderPipelineDescriptor
pub fn get_render_pipeline_descriptor( &self, id: CachedRenderPipelineId ) -> &RenderPipelineDescriptor
Get the render pipeline descriptor a cached render pipeline was inserted from.
pub fn get_compute_pipeline_descriptor(
&self,
id: CachedComputePipelineId
) -> &ComputePipelineDescriptor
pub fn get_compute_pipeline_descriptor( &self, id: CachedComputePipelineId ) -> &ComputePipelineDescriptor
Get the compute pipeline descriptor a cached compute pipeline was inserted from.
pub fn get_render_pipeline(
&self,
id: CachedRenderPipelineId
) -> Option<&RenderPipeline>
pub fn get_render_pipeline( &self, id: CachedRenderPipelineId ) -> Option<&RenderPipeline>
Try to retrieve a render pipeline GPU object from a cached ID.
§Returns
This method returns a successfully created render pipeline if any, or None
if the pipeline
was not created yet or if there was an error during creation. You can check the actual creation
state with PipelineCache::get_render_pipeline_state()
.
Examples found in repository?
136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216
/// Executes the post-process node: samples the view's current main texture
/// through the cached fullscreen pipeline and writes the result back into
/// the view target's destination texture.
fn run(
    &self,
    _graph: &mut RenderGraphContext,
    render_context: &mut RenderContext,
    (view_target, _post_process_settings): QueryItem<Self::ViewQuery>,
    world: &World,
) -> Result<(), NodeRunError> {
    // Get the pipeline resource that contains the global data we need
    // to create the render pipeline (layout, sampler, cached pipeline id).
    let post_process_pipeline = world.resource::<PostProcessPipeline>();

    // The pipeline cache is a cache of all previously created pipelines.
    // It is required to avoid creating a new pipeline each frame,
    // which is expensive due to shader compilation.
    let pipeline_cache = world.resource::<PipelineCache>();

    // Get the pipeline from the cache. If it hasn't finished compiling yet
    // (or compilation failed) we silently skip this frame's pass.
    let Some(pipeline) = pipeline_cache.get_render_pipeline(post_process_pipeline.pipeline_id)
    else {
        return Ok(());
    };

    // Get the settings uniform binding; skip the pass if the uniform
    // buffer has not been prepared yet.
    let settings_uniforms = world.resource::<ComponentUniforms<PostProcessSettings>>();
    let Some(settings_binding) = settings_uniforms.uniforms().binding() else {
        return Ok(());
    };

    // This will start a new "post process write", obtaining two texture
    // views from the view target - a `source` and a `destination`.
    // `source` is the "current" main texture and you _must_ write into
    // `destination` because calling `post_process_write()` on the
    // [`ViewTarget`] will internally flip the [`ViewTarget`]'s main
    // texture to the `destination` texture. Failing to do so will cause
    // the current main texture information to be lost.
    let post_process = view_target.post_process_write();

    // The bind_group gets created each frame.
    //
    // Normally, you would create a bind_group in the Queue set,
    // but this doesn't work with the post_process_write().
    // The reason it doesn't work is because each post_process_write will alternate the source/destination.
    // The only way to have the correct source/destination for the bind_group
    // is to make sure you get it during the node execution.
    let bind_group = render_context.render_device().create_bind_group(
        "post_process_bind_group",
        &post_process_pipeline.layout,
        // It's important for this to match the BindGroupLayout defined in the PostProcessPipeline
        &BindGroupEntries::sequential((
            // Make sure to use the source view
            post_process.source,
            // Use the sampler created for the pipeline
            &post_process_pipeline.sampler,
            // Set the settings binding
            settings_binding.clone(),
        )),
    );

    // Begin the render pass targeting the destination texture.
    let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
        label: Some("post_process_pass"),
        color_attachments: &[Some(RenderPassColorAttachment {
            // We need to specify the post process destination view here
            // to make sure we write to the appropriate texture.
            view: post_process.destination,
            resolve_target: None,
            ops: Operations::default(),
        })],
        depth_stencil_attachment: None,
        timestamp_writes: None,
        occlusion_query_set: None,
    });

    // This is mostly just wgpu boilerplate for drawing a fullscreen triangle,
    // using the pipeline/bind_group created above.
    render_pass.set_render_pipeline(pipeline);
    render_pass.set_bind_group(0, &bind_group, &[]);
    // Three vertices, one instance: the fullscreen-triangle trick.
    render_pass.draw(0..3, 0..1);

    Ok(())
}
pub fn block_on_render_pipeline(&mut self, id: CachedRenderPipelineId)
pub fn block_on_render_pipeline(&mut self, id: CachedRenderPipelineId)
Wait for a render pipeline to finish compiling.
pub fn get_compute_pipeline(
&self,
id: CachedComputePipelineId
) -> Option<&ComputePipeline>
pub fn get_compute_pipeline( &self, id: CachedComputePipelineId ) -> Option<&ComputePipeline>
Try to retrieve a compute pipeline GPU object from a cached ID.
§Returns
This method returns a successfully created compute pipeline if any, or None
if the pipeline
was not created yet or if there was an error during creation. You can check the actual creation
state with PipelineCache::get_compute_pipeline_state()
.
Examples found in repository?
261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296
/// Dispatches the readback compute shader (once its pipeline has compiled)
/// and then copies the GPU-side buffer into the CPU-mappable staging buffer.
fn run(
    &self,
    _graph: &mut render_graph::RenderGraphContext,
    render_context: &mut RenderContext,
    world: &World,
) -> Result<(), render_graph::NodeRunError> {
    let cache = world.resource::<PipelineCache>();
    let compute = world.resource::<ComputePipeline>();
    let bind_group = world.resource::<GpuBufferBindGroup>();

    // Only dispatch if the pipeline has finished compiling; otherwise we
    // still perform the buffer copy below (it will just read stale data).
    if let Some(gpu_pipeline) = cache.get_compute_pipeline(compute.pipeline) {
        let descriptor = ComputePassDescriptor {
            label: Some("GPU readback compute pass"),
            ..default()
        };
        let mut pass = render_context.command_encoder().begin_compute_pass(&descriptor);
        pass.set_bind_group(0, &bind_group.0, &[]);
        pass.set_pipeline(gpu_pipeline);
        pass.dispatch_workgroups(BUFFER_LEN as u32, 1, 1);
    }

    // Copy the gpu accessible buffer to the cpu accessible buffer.
    let buffers = world.resource::<Buffers>();
    let byte_len = (BUFFER_LEN * std::mem::size_of::<u32>()) as u64;
    render_context.command_encoder().copy_buffer_to_buffer(
        &buffers.gpu_buffer,
        0,
        &buffers.cpu_buffer,
        0,
        byte_len,
    );

    Ok(())
}
More examples
248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284
/// Runs the Game of Life compute node, dispatching either the init or the
/// update pipeline depending on the current state of the state machine.
fn run(
    &self,
    _graph: &mut render_graph::RenderGraphContext,
    render_context: &mut RenderContext,
    world: &World,
) -> Result<(), render_graph::NodeRunError> {
    let bind_groups = &world.resource::<GameOfLifeImageBindGroups>().0;
    let pipeline_cache = world.resource::<PipelineCache>();
    let pipeline = world.resource::<GameOfLifePipeline>();

    let mut pass = render_context
        .command_encoder()
        .begin_compute_pass(&ComputePassDescriptor::default());

    // Select the pipeline based on the current state. The node's `update`
    // only advances past `Loading` once the pipeline cache reports the
    // corresponding pipeline as compiled, so the lookups below failing
    // would be a bug — state the invariant via `expect` instead of a bare
    // `unwrap` so a violation produces an actionable panic message.
    match self.state {
        GameOfLifeState::Loading => {}
        GameOfLifeState::Init => {
            let init_pipeline = pipeline_cache
                .get_compute_pipeline(pipeline.init_pipeline)
                .expect("init pipeline must be compiled before entering GameOfLifeState::Init");
            pass.set_bind_group(0, &bind_groups[0], &[]);
            pass.set_pipeline(init_pipeline);
            pass.dispatch_workgroups(SIZE.0 / WORKGROUP_SIZE, SIZE.1 / WORKGROUP_SIZE, 1);
        }
        GameOfLifeState::Update(index) => {
            let update_pipeline = pipeline_cache
                .get_compute_pipeline(pipeline.update_pipeline)
                .expect("update pipeline must be compiled before entering GameOfLifeState::Update");
            // `index` selects which of the two ping-pong bind groups to use.
            pass.set_bind_group(0, &bind_groups[index], &[]);
            pass.set_pipeline(update_pipeline);
            pass.dispatch_workgroups(SIZE.0 / WORKGROUP_SIZE, SIZE.1 / WORKGROUP_SIZE, 1);
        }
    }

    Ok(())
}
pub fn queue_render_pipeline(
&self,
descriptor: RenderPipelineDescriptor
) -> CachedRenderPipelineId
pub fn queue_render_pipeline( &self, descriptor: RenderPipelineDescriptor ) -> CachedRenderPipelineId
Insert a render pipeline into the cache, and queue its creation.
The pipeline is always inserted and queued for creation. There is no attempt to deduplicate it with an already cached pipeline.
§Returns
This method returns the unique render shader ID of the cached pipeline, which can be used to query
the caching state with get_render_pipeline_state()
and to retrieve the created GPU pipeline once
it’s ready with get_render_pipeline()
.
Examples found in repository?
228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287
/// Builds the post-process pipeline resource: bind group layout, sampler,
/// and the cached ID of the queued fullscreen render pipeline.
fn from_world(world: &mut World) -> Self {
    let device = world.resource::<RenderDevice>();

    // Bind group layout: screen texture, sampler, and the settings uniform,
    // all visible only from the fragment stage.
    let layout = device.create_bind_group_layout(
        "post_process_bind_group_layout",
        &BindGroupLayoutEntries::sequential(
            ShaderStages::FRAGMENT,
            (
                // The screen texture
                texture_2d(TextureSampleType::Float { filterable: true }),
                // The sampler that will be used to sample the screen texture
                sampler(SamplerBindingType::Filtering),
                // The settings uniform that will control the effect
                uniform_buffer::<PostProcessSettings>(false),
            ),
        ),
    );

    // The sampler is view-independent and never changes at runtime, so it
    // can be created once here.
    let sampler = device.create_sampler(&SamplerDescriptor::default());

    // Load the post-processing shader.
    let shader = world.load_asset("shaders/post_processing.wgsl");

    // Describe the fullscreen pipeline. Fields that do not matter for this
    // effect keep their default values; the struct has no Default impl
    // because not every field can have one.
    let descriptor = RenderPipelineDescriptor {
        label: Some("post_process_pipeline".into()),
        layout: vec![layout.clone()],
        // Sets up a fullscreen triangle for the vertex state.
        vertex: fullscreen_shader_vertex_state(),
        fragment: Some(FragmentState {
            shader,
            shader_defs: vec![],
            // Must match the entry point name in the shader source.
            entry_point: "fragment".into(),
            targets: vec![Some(ColorTargetState {
                format: TextureFormat::bevy_default(),
                blend: None,
                write_mask: ColorWrites::ALL,
            })],
        }),
        primitive: PrimitiveState::default(),
        depth_stencil: None,
        multisample: MultisampleState::default(),
        push_constant_ranges: vec![],
    };

    // Add the pipeline to the cache and queue its (deferred) creation.
    let pipeline_id = world
        .resource_mut::<PipelineCache>()
        .queue_render_pipeline(descriptor);

    Self {
        layout,
        sampler,
        pipeline_id,
    }
}
pub fn queue_compute_pipeline(
&self,
descriptor: ComputePipelineDescriptor
) -> CachedComputePipelineId
pub fn queue_compute_pipeline( &self, descriptor: ComputePipelineDescriptor ) -> CachedComputePipelineId
Insert a compute pipeline into the cache, and queue its creation.
The pipeline is always inserted and queued for creation. There is no attempt to deduplicate it with an already cached pipeline.
§Returns
This method returns the unique compute shader ID of the cached pipeline, which can be used to query
the caching state with get_compute_pipeline_state()
and to retrieve the created GPU pipeline once
it’s ready with get_compute_pipeline()
.
Examples found in repository?
160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180
/// Builds the readback compute pipeline resource: a single storage-buffer
/// bind group layout plus the cached ID of the queued compute pipeline.
fn from_world(world: &mut World) -> Self {
    // One storage buffer binding, visible to the compute stage only.
    let layout = world.resource::<RenderDevice>().create_bind_group_layout(
        None,
        &BindGroupLayoutEntries::single(
            ShaderStages::COMPUTE,
            storage_buffer::<Vec<u32>>(false),
        ),
    );

    let shader = world.load_asset("shaders/gpu_readback.wgsl");

    // Queue the pipeline; actual GPU creation is deferred until the
    // pipeline cache processes its queue.
    let descriptor = ComputePipelineDescriptor {
        label: Some("GPU readback compute shader".into()),
        layout: vec![layout.clone()],
        push_constant_ranges: Vec::new(),
        shader: shader.clone(),
        shader_defs: Vec::new(),
        entry_point: "main".into(),
    };
    let pipeline = world
        .resource::<PipelineCache>()
        .queue_compute_pipeline(descriptor);

    ComputePipeline { layout, pipeline }
}
More examples
160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196
/// Builds the Game of Life pipeline resource: the shared texture bind group
/// layout and the cached IDs of the init and update compute pipelines.
fn from_world(world: &mut World) -> Self {
    let render_device = world.resource::<RenderDevice>();

    // Two storage textures: the previous generation (read-only) and the
    // next generation (write-only), both compute-stage visible.
    let texture_bind_group_layout = render_device.create_bind_group_layout(
        "GameOfLifeImages",
        &BindGroupLayoutEntries::sequential(
            ShaderStages::COMPUTE,
            (
                texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::ReadOnly),
                texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly),
            ),
        ),
    );

    let shader = world.load_asset("shaders/game_of_life.wgsl");
    let pipeline_cache = world.resource::<PipelineCache>();

    // Both pipelines share the same layout and shader module; they differ
    // only in their entry point, so build the descriptors with one closure.
    let make_descriptor = |entry: &'static str, shader| ComputePipelineDescriptor {
        label: None,
        layout: vec![texture_bind_group_layout.clone()],
        push_constant_ranges: Vec::new(),
        shader,
        shader_defs: vec![],
        entry_point: Cow::from(entry),
    };

    let init_pipeline =
        pipeline_cache.queue_compute_pipeline(make_descriptor("init", shader.clone()));
    let update_pipeline =
        pipeline_cache.queue_compute_pipeline(make_descriptor("update", shader));

    GameOfLifePipeline {
        texture_bind_group_layout,
        init_pipeline,
        update_pipeline,
    }
}
pub fn process_queue(&mut self)
pub fn process_queue(&mut self)
Process the pipeline queue and create all pending pipelines if possible.
This is generally called automatically during the RenderSet::Render
step, but can
be called manually to force creation at a different time.
Trait Implementations§
impl Resource for PipelineCache
Auto Trait Implementations§
impl !Freeze for PipelineCache
impl !RefUnwindSafe for PipelineCache
impl Send for PipelineCache
impl Sync for PipelineCache
impl Unpin for PipelineCache
impl !UnwindSafe for PipelineCache
Blanket Implementations§
§impl<T, U> AsBindGroupShaderType<U> for T
impl<T, U> AsBindGroupShaderType<U> for T
§fn as_bind_group_shader_type(&self, _images: &RenderAssets<GpuImage>) -> U
fn as_bind_group_shader_type(&self, _images: &RenderAssets<GpuImage>) -> U
T
ShaderType
for self
. When used in AsBindGroup
derives, it is safe to assume that all images in self
exist.source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
§impl<T> Downcast for Twhere
T: Any,
impl<T> Downcast for Twhere
T: Any,
§fn into_any(self: Box<T>) -> Box<dyn Any>
fn into_any(self: Box<T>) -> Box<dyn Any>
Box<dyn Trait>
(where Trait: Downcast
) to Box<dyn Any>
. Box<dyn Any>
can
then be further downcast
into Box<ConcreteType>
where ConcreteType
implements Trait
.§fn into_any_rc(self: Rc<T>) -> Rc<dyn Any>
fn into_any_rc(self: Rc<T>) -> Rc<dyn Any>
Rc<Trait>
(where Trait: Downcast
) to Rc<Any>
. Rc<Any>
can then be
further downcast
into Rc<ConcreteType>
where ConcreteType
implements Trait
.§fn as_any(&self) -> &(dyn Any + 'static)
fn as_any(&self) -> &(dyn Any + 'static)
&Trait
(where Trait: Downcast
) to &Any
. This is needed since Rust cannot
generate &Any
’s vtable from &Trait
’s.§fn as_any_mut(&mut self) -> &mut (dyn Any + 'static)
fn as_any_mut(&mut self) -> &mut (dyn Any + 'static)
&mut Trait
(where Trait: Downcast
) to &Any
. This is needed since Rust cannot
generate &mut Any
’s vtable from &mut Trait
’s.§impl<T> DowncastSync for T
impl<T> DowncastSync for T
§impl<S> FromSample<S> for S
impl<S> FromSample<S> for S
fn from_sample_(s: S) -> S
§impl<T> Instrument for T
impl<T> Instrument for T
§fn instrument(self, span: Span) -> Instrumented<Self> ⓘ
fn instrument(self, span: Span) -> Instrumented<Self> ⓘ
§fn in_current_span(self) -> Instrumented<Self> ⓘ
fn in_current_span(self) -> Instrumented<Self> ⓘ
source§impl<T> IntoEither for T
impl<T> IntoEither for T
source§fn into_either(self, into_left: bool) -> Either<Self, Self> ⓘ
fn into_either(self, into_left: bool) -> Either<Self, Self> ⓘ
self
into a Left
variant of Either<Self, Self>
if into_left
is true
.
Converts self
into a Right
variant of Either<Self, Self>
otherwise. Read moresource§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self> ⓘ
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self> ⓘ
self
into a Left
variant of Either<Self, Self>
if into_left(&self)
returns true
.
Converts self
into a Right
variant of Either<Self, Self>
otherwise. Read more