[Bf-blender-cvs] [4263484bd4d] asset-engine: Split Amber into pieces!
Bastien Montagne
noreply at git.blender.org
Thu Jul 20 15:47:54 CEST 2017
Commit: 4263484bd4dda788e9dd124d7cac269fe988d62c
Author: Bastien Montagne
Date: Thu Jul 20 15:47:03 2017 +0200
Branches: asset-engine
https://developer.blender.org/rB4263484bd4dda788e9dd124d7cac269fe988d62c
Split Amber into pieces!
===================================================================
M release/scripts/startup/bl_operators/amber/__init__.py
A release/scripts/startup/bl_operators/amber/engine.py
A release/scripts/startup/bl_operators/amber/operators.py
A release/scripts/startup/bl_operators/amber/ui.py
A release/scripts/startup/bl_operators/amber/utils.py
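
The new __init__.py is reduced to a thin package entry point that just pulls in the split modules (see the added import in the diff below). Purely as an illustration of the pattern, and not necessarily what this commit's file contains beyond that import, such a startup-module split typically ends up wiring registration like this (the per-module register()/unregister() helpers are an assumption):

    from . import (engine, operators, ui)

    def register():
        # Hypothetical wiring: delegate registration to the split modules.
        engine.register()
        operators.register()
        ui.register()

    def unregister():
        ui.unregister()
        operators.unregister()
        engine.unregister()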
===================================================================
diff --git a/release/scripts/startup/bl_operators/amber/__init__.py b/release/scripts/startup/bl_operators/amber/__init__.py
index 4d791a45423..5c2f91a3a9b 100644
--- a/release/scripts/startup/bl_operators/amber/__init__.py
+++ b/release/scripts/startup/bl_operators/amber/__init__.py
@@ -21,779 +21,11 @@
# Note: This will be a simple addon later, but until it gets to master, it's simpler to have it
# as a startup module!
-import bpy
-from bpy.types import (
- AssetEngine,
- Panel,
- PropertyGroup,
- UIList,
- )
-from bpy.props import (
- StringProperty,
- BoolProperty,
- IntProperty,
- FloatProperty,
- EnumProperty,
- CollectionProperty,
- )
-import binascii
-import concurrent.futures as futures
-import hashlib
-import json
-import os
-import stat
-import struct
-import time
-import random
+from . import (engine, operators, ui)
-AMBER_DB_NAME = "__amber_db.json"
-AMBER_DBK_VERSION = "version"
-
-
-##########
-# Helpers.
-
-# Notes about UUIDs:
-# * UUID of an asset/variant/revision is computed once at its creation! Later changes to data do not affect it.
-# * Collisions, however unlikely, may happen across different repositories...
-# Doubt this will be a practical issue though.
-# * We keep the first eight bytes of the 'clear' identifier, to (try to) keep the uuid somewhat readable.
-
-def _uuid_gen_single(used_uuids, uuid_root, h, str_arg):
- h.update(str_arg.encode())
- uuid = uuid_root + h.digest()
- uuid = uuid[:23].replace(b'\0', b'\1') # No null chars, RNA 'bytes' treats them as terminators, as in regular C strings... :/
- if uuid not in used_uuids: # *Very* likely, but...
- used_uuids.add(uuid)
- return uuid
- return None
-
-
-def _uuid_gen(used_uuids, uuid_root, bytes_seed, *str_args):
- h = hashlib.md5(bytes_seed)
- for arg in str_args:
- uuid = _uuid_gen_single(used_uuids, uuid_root, h, arg)
- if uuid is not None:
- return uuid
- # This is a fallback in case we'd get a collision... Should never be needed in real life!
- for i in range(100000):
- uuid = _uuid_gen_single(used_uuids, uuid_root, h, str(i))
- if uuid is not None:
- return uuid
- return None # If this happens...
-
-
-def uuid_asset_gen(used_uuids, path_db, name, tags):
- uuid_root = name.encode()[:8] + b'|'
- return _uuid_gen(used_uuids, uuid_root, path_db.encode(), name, *tags)
-
-
-def uuid_variant_gen(used_uuids, asset_uuid, name):
- uuid_root = name.encode()[:8] + b'|'
- return _uuid_gen(used_uuids, uuid_root, asset_uuid, name)
-
-
-def uuid_revision_gen(used_uuids, variant_uuid, number, size, timestamp):
- uuid_root = str(number).encode() + b'|'
- return _uuid_gen(used_uuids, uuid_root, variant_uuid, str(number), str(size), str(timestamp))
-
-
-def uuid_unpack_bytes(uuid_bytes):
- return struct.unpack("!iiii", uuid_bytes.ljust(16, b'\0'))
-
-
-def uuid_unpack(uuid_hexstr):
- return uuid_unpack_bytes(binascii.unhexlify(uuid_hexstr))
-
-
-def uuid_unpack_asset(uuid_repo_hexstr, uuid_asset_hexstr):
- return uuid_unpack_bytes(binascii.unhexlify(uuid_repo_hexstr).ljust(8, b'\0') +
- binascii.unhexlify(uuid_asset_hexstr).ljust(8, b'\0'))
-
-
-def uuid_pack(uuid_iv4):
- print(uuid_iv4)
- return binascii.hexlify(struct.pack("!iiii", *uuid_iv4))
-
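A quick usage sketch of the UUID helpers above (illustrative only; the db path, names, tags and hex value are made up, and the functions are assumed to be in scope):

    used_uuids = set()
    # 23-byte asset uuid, starting with a readable prefix of the asset name.
    asset_uuid = uuid_asset_gen(used_uuids, "/repo/__amber_db.json", "Suzanne", ["mesh", "monkey"])
    variant_uuid = uuid_variant_gen(used_uuids, asset_uuid, "default")

    # Round trip between the hex form stored in the JSON db and the int[4] form used at runtime.
    iv4 = uuid_unpack("00112233445566778899aabbccddeeff")
    assert uuid_pack(iv4) == b"00112233445566778899aabbccddeeff"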
-
-# XXX Hack: once this becomes a real addon we'll just use the addons' config system, for now store this in our own config file.
-amber_repos_path = os.path.join(bpy.utils.user_resource('CONFIG', create=True), "amber_repos.json")
-amber_repos = None
-if not os.path.exists(amber_repos_path):
- with open(amber_repos_path, 'w') as ar_f:
- json.dump({}, ar_f)
-with open(amber_repos_path, 'r') as ar_f:
- amber_repos = {uuid_unpack(uuid): path for uuid, path in json.load(ar_f).items()}
-assert amber_repos is not None
-
-
-def save_amber_repos():
- ar = {uuid_pack(uuid).decode(): path for uuid, path in amber_repos.items()}
- with open(amber_repos_path, 'w') as ar_f:
- json.dump(ar, ar_f)
-
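For reference, the amber_repos.json file written above simply maps packed (hex) repository UUIDs to directory paths; a hypothetical example of its content (values made up):

    {"00112233445566770000000000000000": "/home/user/amber_assets/"}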
-
-#############
-# Amber Jobs.
-class AmberJob:
- def __init__(self, executor, job_id):
- self.executor = executor
- self.job_id = job_id
- self.status = {'VALID'}
- self.progress = 0.0
-
-
-class AmberJobList(AmberJob):
- @staticmethod
- def ls_repo(db_path):
- repo = None
- with open(db_path, 'r') as db_f:
- repo = json.load(db_f)
- if isinstance(repo, dict):
- repo_ver = repo.get(AMBER_DBK_VERSION, "")
- if repo_ver != "1.0.1":
- # Unsupported...
- print("WARNING: unsupported Amber repository version '%s'." % repo_ver)
- repo = None
- else:
- repo = None
- if repo is not None:
- # Convert hex string to an array of four 32-bit ints...
- # XXX will have to check endianness mess here, for now always use the same one ('network' order).
- repo_uuid = repo["uuid"]
- repo["uuid"] = uuid_unpack(repo_uuid)
- new_entries = {}
- for euuid, e in repo["entries"].items():
- new_variants = {}
- for vuuid, v in e["variants"].items():
- new_revisions = {}
- for ruuid, r in v["revisions"].items():
- new_revisions[uuid_unpack(ruuid)] = r
- new_variants[uuid_unpack(vuuid)] = v
- v["revisions"] = new_revisions
- ruuid = v["revision_default"]
- v["revision_default"] = uuid_unpack(ruuid)
- new_entries[uuid_unpack_asset(repo_uuid, euuid)] = e
- e["variants"] = new_variants
- vuuid = e["variant_default"]
- e["variant_default"] = uuid_unpack(vuuid)
- repo["entries"] = new_entries
- #~ print(repo)
- return repo
-
- @staticmethod
- def ls(path):
- repo = None
- ret = [".."]
- tmp = os.listdir(path)
- if AMBER_DB_NAME in tmp:
- # That dir is an Amber repo, we only list content defined by our Amber 'db'.
- repo = AmberJobList.ls_repo(os.path.join(path, AMBER_DB_NAME))
- if repo is None:
- ret += tmp
- #~ time.sleep(0.1) # 100% Artificial Lag (c)
- return ret, repo
-
- @staticmethod
- def stat(root, path):
- st = os.lstat(root + path)
- #~ time.sleep(0.1) # 100% Artificial Lag (c)
- return path, (stat.S_ISDIR(st.st_mode), st.st_size, st.st_mtime)
-
- def start(self):
- self.nbr = 0
- self.tot = 0
- self.ls_task = self.executor.submit(self.ls, self.root)
- self.status = {'VALID', 'RUNNING'}
-
- def update(self, repository, dirs):
- self.status = {'VALID', 'RUNNING'}
- if self.ls_task is not None:
- if not self.ls_task.done():
- return
- paths, repo = self.ls_task.result()
- self.ls_task = None
- self.tot = len(paths)
- repository.clear()
- dirs.clear()
- if repo is not None:
- repository.update(repo)
- for p in paths:
- self.stat_tasks.add(self.executor.submit(self.stat, self.root, p))
-
- done = set()
- for tsk in self.stat_tasks:
- if tsk.done():
- path, (is_dir, size, timestamp) = tsk.result()
- self.nbr += 1
- if is_dir:
- # We only list dirs from real file system.
- uuid = uuid_unpack_bytes((path.encode()[:8] + b"|" + self.nbr.to_bytes(4, 'little')))
- dirs.append((path, size, timestamp, uuid))
- done.add(tsk)
- self.stat_tasks -= done
-
- self.progress = self.nbr / self.tot
- if not self.stat_tasks and self.ls_task is None:
- self.status = {'VALID'}
-
- def __init__(self, executor, job_id, root):
- super().__init__(executor, job_id)
- self.root = root
-
- self.ls_task = None
- self.stat_tasks = set()
-
- self.start()
-
- def __del__(self):
- # Avoid useless work!
- if self.ls_task is not None:
- self.ls_task.cancel()
- for tsk in self.stat_tasks:
- tsk.cancel()
-
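These job classes follow a simple poll-driven pattern: constructing a job submits work to a shared futures executor, and the caller then invokes update() on each refresh until 'RUNNING' drops out of job.status. A minimal standalone sketch of such a driving loop (illustrative only; the real driver lives in the asset engine / file browser integration, and the root path is made up):

    import concurrent.futures as futures
    import time

    executor = futures.ThreadPoolExecutor(max_workers=4)
    repository = {}  # filled from the Amber JSON db if the directory is a repo
    dirs = []        # (path, size, timestamp, uuid) tuples for plain sub-directories
    job = AmberJobList(executor, job_id=1, root="/home/user/assets/")
    while 'RUNNING' in job.status:
        job.update(repository, dirs)
        time.sleep(0.05)  # in Blender this polling is driven by refresh callbacks
    print(job.progress, len(dirs))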
-
-class AmberJobPreviews(AmberJob):
- @staticmethod
- def preview(uuid):
- time.sleep(0.1) # 100% Artificial Lag (c)
- w = random.randint(2, 8)
- h = random.randint(2, 8)
- return [w, h, [random.getrandbits(32) for i in range(w * h)]]
-
- def start(self, uuids):
- self.nbr = 0
- self.preview_tasks = {uuid.uuid_asset[:]: self.executor.submit(self.preview, uuid.uuid_asset[:]) for uuid in uuids.uuids}
- self.tot = len(self.preview_tasks)
- self.status = {'VALID', 'RUNNING'}
-
- def update(self, uuids):
- self.status = {'VALID', 'RUNNING'}
-
- uuids = {uuid.uuid_asset[:]: uuid for uuid in uuids.uuids}
-
- new_uuids = set(uuids)
- old_uuids = set(self.preview_tasks)
- del_uuids = old_uuids - new_uuids
- new_uuids -= old_uuids
-
- for uuid in del_uuids:
- self.preview_tasks[uuid].cancel()
- del self.preview_tasks[uuid]
-
- for uuid in new_uuids:
- self.preview_tasks[uuid] = self.executor.submit(self.preview, uuid)
-
- self.tot = len(self.preview_tasks)
- self.nbr = 0
-
- done_uuids = set()
- for uuid, tsk in self.preview_tasks.items():
- if tsk.done():
- w, h, pixels = tsk.result()
- uuids[uuid].preview_size = (w, h)
- uuids[uuid].preview_pixels = pixels
- self.nbr += 1
- done_uuids.add(uuid)
-
- for uuid in done_uuids:
- del self.preview_tasks[uuid]
-
- self.progress = self.nbr / self.tot
- if not self.preview_tasks:
- self.status = {'VALID'}
-
- def __init__(self, executor, job_id, uuids):
- super().__init__(executor, job_i
@@ Diff output truncated at 10240 characters. @@
More information about the Bf-blender-cvs
mailing list