[Bf-blender-cvs] [aac6bf99386] temp-gpu-texture-partial-updates: Merge branch 'master' into temp-gpu-texture-partial-updates
Jeroen Bakker
noreply at git.blender.org
Fri Nov 26 13:29:43 CET 2021
Commit: aac6bf993863432d31b216869c8383e77caf9699
Author: Jeroen Bakker
Date: Fri Nov 26 08:28:52 2021 +0100
Branches: temp-gpu-texture-partial-updates
https://developer.blender.org/rBaac6bf993863432d31b216869c8383e77caf9699
Merge branch 'master' into temp-gpu-texture-partial-updates
===================================================================
===================================================================
diff --cc source/blender/blenkernel/CMakeLists.txt
index 19901b251d8,16e00fe12fa..cc107f16942
--- a/source/blender/blenkernel/CMakeLists.txt
+++ b/source/blender/blenkernel/CMakeLists.txt
@@@ -164,9 -164,8 +164,9 @@@ set(SR
intern/idprop_utils.c
intern/idtype.c
intern/image.c
+ intern/image_partial_update.cc
intern/image_gen.c
- intern/image_gpu.c
+ intern/image_gpu.cc
intern/image_save.c
intern/ipo.c
intern/kelvinlet.c
diff --cc source/blender/blenkernel/intern/image_gpu.cc
index 749ce99554d,fa2c17826d5..68393489150
--- a/source/blender/blenkernel/intern/image_gpu.cc
+++ b/source/blender/blenkernel/intern/image_gpu.cc
@@@ -329,65 -337,6 +329,65 @@@ static void image_update_reusable_textu
}
}
+static void image_gpu_texture_partial_update_changes_available(Image *image, ImageUser *iuser)
+{
+ PartialUpdateRegion changed_region;
+ int last_tile_number = -1;
- ImBuf *tile_buffer = NULL;
- ImageTile *tile = NULL;
- ImageUser tile_user = {0};
++ ImBuf *tile_buffer = nullptr;
++ ImageTile *tile = nullptr;
++ ImageUser tile_user = {nullptr};
+ if (iuser) {
+ tile_user = *iuser;
+ }
+
+ while (BKE_image_partial_update_get_next_change(image->runtime.partial_update_user,
+ &changed_region) ==
+ PARTIAL_UPDATE_ITER_CHANGE_AVAILABLE) {
+ if (last_tile_number != changed_region.tile_number) {
+ if (tile_buffer) {
- BKE_image_release_ibuf(image, tile_buffer, NULL);
- tile_buffer = NULL;
++ BKE_image_release_ibuf(image, tile_buffer, nullptr);
++ tile_buffer = nullptr;
+ }
+ tile_user.tile = changed_region.tile_number;
+ tile = BKE_image_get_tile(image, changed_region.tile_number);
- tile_buffer = BKE_image_acquire_ibuf(image, &tile_user, NULL);
++ tile_buffer = BKE_image_acquire_ibuf(image, &tile_user, nullptr);
+ last_tile_number = changed_region.tile_number;
+ }
+
+ const int tile_offset_x = changed_region.region.xmin;
+ const int tile_offset_y = changed_region.region.ymin;
+ const int tile_width = min_ii(tile_buffer->x, BLI_rcti_size_x(&changed_region.region));
+ const int tile_height = min_ii(tile_buffer->y, BLI_rcti_size_y(&changed_region.region));
+ image_update_gputexture_ex(
+ image, tile, tile_buffer, tile_offset_x, tile_offset_y, tile_width, tile_height);
+ }
+
+ if (tile_buffer) {
- BKE_image_release_ibuf(image, tile_buffer, NULL);
++ BKE_image_release_ibuf(image, tile_buffer, nullptr);
+ }
+}
+
+static void image_gpu_texture_try_partial_update(Image *image, ImageUser *iuser)
+{
+
+ switch (BKE_image_partial_update_collect_changes(image, image->runtime.partial_update_user)) {
+ case PARTIAL_UPDATE_NEED_FULL_UPDATE: {
+ image_free_gpu(image, true);
+ break;
+ }
+
+ case PARTIAL_UPDATE_CHANGES_AVAILABLE: {
+ image_gpu_texture_partial_update_changes_available(image, iuser);
+ break;
+ }
+
+ case PARTIAL_UPDATE_NO_CHANGES: {
+ /* GPUTextures are up to date. */
+ break;
+ }
+ }
+}
+
static GPUTexture *image_get_gpu_texture(Image *ima,
ImageUser *iuser,
ImBuf *ibuf,
@@@ -421,20 -370,31 +421,20 @@@
}
#undef GPU_FLAGS_TO_CHECK
- /* Check if image has been updated and tagged to be updated (full or partial). */
- ImageTile *tile = BKE_image_get_tile(ima, 0);
- if (((ima->gpuflag & IMA_GPU_REFRESH) != 0) ||
- ((ibuf == nullptr || tile == nullptr) && ((ima->gpuflag & IMA_GPU_PARTIAL_REFRESH) != 0))) {
- image_free_gpu(ima, true);
- BLI_freelistN(&ima->gpu_refresh_areas);
- ima->gpuflag &= ~(IMA_GPU_REFRESH | IMA_GPU_PARTIAL_REFRESH);
- }
- else if (ima->gpuflag & IMA_GPU_PARTIAL_REFRESH) {
- BLI_assert(ibuf);
- BLI_assert(tile);
- ImagePartialRefresh *refresh_area;
- while ((
- refresh_area = static_cast<ImagePartialRefresh *>(BLI_pophead(&ima->gpu_refresh_areas)))) {
- const int tile_offset_x = refresh_area->tile_x * IMA_PARTIAL_REFRESH_TILE_SIZE;
- const int tile_offset_y = refresh_area->tile_y * IMA_PARTIAL_REFRESH_TILE_SIZE;
- const int tile_width = MIN2(IMA_PARTIAL_REFRESH_TILE_SIZE, ibuf->x - tile_offset_x);
- const int tile_height = MIN2(IMA_PARTIAL_REFRESH_TILE_SIZE, ibuf->y - tile_offset_y);
- image_update_gputexture_ex(
- ima, tile, ibuf, tile_offset_x, tile_offset_y, tile_width, tile_height);
- MEM_freeN(refresh_area);
- }
- ima->gpuflag &= ~IMA_GPU_PARTIAL_REFRESH;
+ /* TODO(jbakker): We should replace the IMA_GPU_REFRESH flag with a call to
+ * BKE_image_partial_update_mark_full_update. Although the flag is quicker it leads to double
+ * administration. */
+ if ((ima->gpuflag & IMA_GPU_REFRESH) != 0) {
+ BKE_image_partial_update_mark_full_update(ima);
+ ima->gpuflag &= ~IMA_GPU_REFRESH;
}
- if (ima->runtime.partial_update_user == NULL) {
++ if (ima->runtime.partial_update_user == nullptr) {
+ ima->runtime.partial_update_user = BKE_image_partial_update_create(ima);
+ }
+
+ image_gpu_texture_try_partial_update(ima, iuser);
+
/* Tag as in active use for garbage collector. */
BKE_image_tag_time(ima);
@@@ -457,18 -417,18 +457,18 @@@
/* Check if we have a valid image. If not, we return a dummy
* texture with zero bind-code so we don't keep trying. */
+ ImageTile *tile = BKE_image_get_tile(ima, 0);
- if (tile == NULL) {
+ if (tile == nullptr) {
*tex = image_gpu_texture_error_create(textarget);
return *tex;
}
/* check if we have a valid image buffer */
ImBuf *ibuf_intern = ibuf;
- if (ibuf_intern == NULL) {
- ibuf_intern = BKE_image_acquire_ibuf(ima, iuser, NULL);
- if (ibuf_intern == NULL) {
+ if (ibuf_intern == nullptr) {
+ ibuf_intern = BKE_image_acquire_ibuf(ima, iuser, nullptr);
+ if (ibuf_intern == nullptr) {
- *tex = image_gpu_texture_error_create(textarget);
- return *tex;
+ return image_gpu_texture_error_create(textarget);
}
}
@@@ -521,10 -486,6 +521,10 @@@
GPU_texture_orig_size_set(*tex, ibuf_intern->x, ibuf_intern->y);
}
+ if (ibuf != ibuf_intern) {
- BKE_image_release_ibuf(ima, ibuf_intern, NULL);
++ BKE_image_release_ibuf(ima, ibuf_intern, nullptr);
+ }
+
return *tex;
}
@@@ -944,33 -905,91 +944,33 @@@ static void image_update_gputexture_ex
* quicker than fully updating the texture for high resolution images. */
void BKE_image_update_gputexture(Image *ima, ImageUser *iuser, int x, int y, int w, int h)
{
+ ImageTile *image_tile = BKE_image_get_tile_from_iuser(ima, iuser);
- ImBuf *ibuf = BKE_image_acquire_ibuf(ima, iuser, NULL);
+ ImBuf *ibuf = BKE_image_acquire_ibuf(ima, iuser, nullptr);
- ImageTile *tile = BKE_image_get_tile_from_iuser(ima, iuser);
-
- if ((ibuf == nullptr) || (w == 0) || (h == 0)) {
- /* Full reload of texture. */
- BKE_image_free_gputextures(ima);
- }
- image_update_gputexture_ex(ima, tile, ibuf, x, y, w, h);
+ BKE_image_update_gputexture_delayed(ima, image_tile, ibuf, x, y, w, h);
- BKE_image_release_ibuf(ima, ibuf, NULL);
+ BKE_image_release_ibuf(ima, ibuf, nullptr);
}
/* Mark areas on the GPUTexture that need to be updated. The areas are marked in chunks.
* The next time the GPUTexture is used these tiles will be refreshed. This saves time
* when writing to the same place multiple times. This happens during foreground
* rendering. */
-void BKE_image_update_gputexture_delayed(
- struct Image *ima, struct ImBuf *ibuf, int x, int y, int w, int h)
+void BKE_image_update_gputexture_delayed(struct Image *ima,
+ struct ImageTile *image_tile,
+ struct ImBuf *ibuf,
+ int x,
+ int y,
+ int w,
+ int h)
{
/* Check for full refresh. */
- if (ibuf != NULL && ima->source != IMA_SRC_TILED && x == 0 && y == 0 && w == ibuf->x &&
- if (ibuf && x == 0 && y == 0 && w == ibuf->x && h == ibuf->y) {
- ima->gpuflag |= IMA_GPU_REFRESH;
- }
- /* Check if we can promote partial refresh to a full refresh. */
- if ((ima->gpuflag & (IMA_GPU_REFRESH | IMA_GPU_PARTIAL_REFRESH)) ==
- (IMA_GPU_REFRESH | IMA_GPU_PARTIAL_REFRESH)) {
- ima->gpuflag &= ~IMA_GPU_PARTIAL_REFRESH;
- BLI_freelistN(&ima->gpu_refresh_areas);
- }
- /* Image is already marked for complete refresh. */
- if (ima->gpuflag & IMA_GPU_REFRESH) {
- return;
- }
-
- /* Schedule the tiles that covers the requested area. */
- const int start_tile_x = x / IMA_PARTIAL_REFRESH_TILE_SIZE;
- const int start_tile_y = y / IMA_PARTIAL_REFRESH_TILE_SIZE;
- const int end_tile_x = (x + w) / IMA_PARTIAL_REFRESH_TILE_SIZE;
- const int end_tile_y = (y + h) / IMA_PARTIAL_REFRESH_TILE_SIZE;
- const int num_tiles_x = (end_tile_x + 1) - (start_tile_x);
- const int num_tiles_y = (end_tile_y + 1) - (start_tile_y);
- const int num_tiles = num_tiles_x * num_tiles_y;
- const bool allocate_on_heap = BLI_BITMAP_SIZE(num_tiles) > 16;
- BLI_bitmap *requested_tiles = nullptr;
- if (allocate_on_heap) {
- requested_tiles = BLI_BITMAP_NEW(num_tiles, __func__);
++ if (ibuf != nullptr && ima->source != IMA_SRC_TILED && x == 0 && y == 0 && w == ibuf->x &&
+ h == ibuf->y) {
+ BKE_image_partial_update_mark_full_update(ima);
}
else {
- requested_tiles = BLI_BITMAP_NEW_ALLOCA(num_tiles);
- }
-
- /* Mark the tiles that have already been requested. They don't need to be requested again. */
- int num_tiles_not_scheduled = num_tiles;
- LISTBASE_FOREACH (ImagePartialRefresh *, area, &ima->gpu_refresh_areas) {
- if (area->tile_x < start_tile_x || area->tile_x > end_tile_x || area->tile_y < start_tile_y ||
- area->tile_y > end_tile_y) {
- continue;
- }
- int requested_tile_index = (area->tile_x - start_tile_x) +
-
@@ Diff output truncated at 10240 characters. @@
More information about the Bf-blender-cvs
mailing list