Commit dbb6927
Parent: 4a885d5

Update render method

Changed files:
- app.py (+2 -2)
- assets/videos/example0.mp4 (+0 -0)
- assets/videos/example2.mp4 (+0 -0)
- assets/videos/example4.mp4 (+0 -0)
- assets/videos/example5.mp4 (+0 -0)
- assets/videos/example6.mp4 (+0 -0)
- assets/videos/example7.mp4 (+0 -0)
- assets/videos/example8.mp4 (+0 -0)
- mGPT/render/pyrender/smpl_render.py (+54 -106)
app.py

@@ -125,8 +125,8 @@ def render_motion(data, feats, method='fast'):
     r = RRR.from_rotvec(np.array([np.pi, 0.0, 0.0]))
     pose[:, 0] = np.matmul(r.as_matrix().reshape(1, 3, 3), pose[:, 0])
     vid = []
-    aroot = data[
-    aroot[:, 1] = -aroot[:, 1]
+    aroot = data[:, 0]
+    aroot[:, 1:] = -aroot[:, 1:]
     params = dict(pred_shape=np.zeros([1, 10]),
                   pred_root=aroot,
                   pred_pose=pose)
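A minimal sketch of what the revised hunk now computes, assuming `data` is a `(frames, joints, 3)` array of joint positions with the root joint at index 0 (an assumption about app.py's data layout, not stated in this diff):

    import numpy as np

    # Dummy inputs purely for illustration: 4 frames, 22 joints, xyz (assumed layout).
    data = np.zeros((4, 22, 3), dtype=np.float32)
    pose = np.zeros((4, 72), dtype=np.float32)   # placeholder axis-angle SMPL poses

    aroot = data[:, 0]            # per-frame root translation, shape (frames, 3); a view into data
    aroot[:, 1:] = -aroot[:, 1:]  # new behaviour: negate Y and Z (the old hunk negated only Y)

    params = dict(pred_shape=np.zeros([1, 10]),  # neutral SMPL shape
                  pred_root=aroot,
                  pred_pose=pose)

The keys of this `params` dict match the `smpl_param` fields consumed by SMPLRender.init_renderer in the file below.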
Binary files a/assets/videos/example0.mp4 and b/assets/videos/example0.mp4 differ
Binary files a/assets/videos/example2.mp4 and b/assets/videos/example2.mp4 differ
Binary files a/assets/videos/example4.mp4 and b/assets/videos/example4.mp4 differ
Binary files a/assets/videos/example5.mp4 and b/assets/videos/example5.mp4 differ
Binary files a/assets/videos/example6.mp4 and b/assets/videos/example6.mp4 differ
Binary files a/assets/videos/example7.mp4 and b/assets/videos/example7.mp4 differ
Binary files a/assets/videos/example8.mp4 and b/assets/videos/example8.mp4 differ
mGPT/render/pyrender/smpl_render.py

@@ -1,6 +1,4 @@
 import os
-
-os.environ['PYOPENGL_PLATFORM'] = 'egl'
 import torch
 import numpy as np
 import cv2
@@ -10,94 +8,61 @@ import glob
 import pickle
 import pyrender
 import trimesh
+import smplx
+from pathlib import Path
 from shapely import geometry
 from smplx import SMPL as _SMPL
 from smplx.utils import SMPLOutput as ModelOutput
 from scipy.spatial.transform.rotation import Rotation as RRR
 
-
-class SMPL(_SMPL):
-    """ Extension of the official SMPL implementation to support more joints """
-
-    def __init__(self, *args, **kwargs):
-        super(SMPL, self).__init__(*args, **kwargs)
-        # joints = [constants.JOINT_MAP[i] for i in constants.JOINT_NAMES]
-        # J_regressor_extra = np.load(config.JOINT_REGRESSOR_TRAIN_EXTRA)
-        # self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32))
-        # self.joint_map = torch.tensor(joints, dtype=torch.long)
-
-    def forward(self, *args, **kwargs):
-        kwargs['get_skin'] = True
-        smpl_output = super(SMPL, self).forward(*args, **kwargs)
-        # extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices) #Additional 9 joints #Check doc/J_regressor_extra.png
-        # joints = torch.cat([smpl_output.joints, extra_joints], dim=1) #[N, 24 + 21, 3] + [N, 9, 3]
-        # joints = joints[:, self.joint_map, :]
-        joints = smpl_output.joints
-        output = ModelOutput(vertices=smpl_output.vertices,
-                             global_orient=smpl_output.global_orient,
-                             body_pose=smpl_output.body_pose,
-                             joints=joints,
-                             betas=smpl_output.betas,
-                             full_pose=smpl_output.full_pose)
-        return output
-
-
 class Renderer:
     """
     Renderer used for visualizing the SMPL model
     Code adapted from https://github.com/vchoutas/smplify-x
     """
-
-    def __init__(self,
-                 vertices,
-                 focal_length=5000,
-                 img_res=(224, 224),
-                 faces=None):
+    def __init__(self, vertices, focal_length=5000, img_res=(224, 224), faces=None):
         self.renderer = pyrender.OffscreenRenderer(viewport_width=img_res[0],
+                                                   viewport_height=img_res[1],
+                                                   point_size=2.0)
+
         self.focal_length = focal_length
         self.camera_center = [img_res[0] // 2, img_res[1] // 2]
         self.faces = faces
-
+
         if torch.cuda.is_available():
             self.device = torch.device("cuda")
         else:
             self.device = torch.device("cpu")
 
-        self.rot = trimesh.transformations.rotation_matrix(
+        self.rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0])
+
         minx, miny, minz = vertices.min(axis=(0, 1))
         maxx, maxy, maxz = vertices.max(axis=(0, 1))
         minx = minx - 0.5
         maxx = maxx + 0.5
         minz = minz - 0.5
         maxz = maxz + 0.5
-
-        floor = geometry.Polygon([[minx, minz], [minx, maxz], [maxx, maxz],
-                                  [maxx, minz]])
+
+        floor = geometry.Polygon([[minx, minz], [minx, maxz], [maxx, maxz], [maxx, minz]])
         self.floor = trimesh.creation.extrude_polygon(floor, 1e-5)
         self.floor.visual.face_colors = [0, 0, 0, 0.2]
         self.floor.apply_transform(self.rot)
-        self.floor_pose =
+        self.floor_pose = np.array([[1, 0, 0, 0],
+                                    [0, np.cos(np.pi / 2), -np.sin(np.pi / 2), miny],
+                                    [0, np.sin(np.pi / 2), np.cos(np.pi / 2), 0],
+                                    [0, 0, 0, 1]])
+
         c = -np.pi / 6
-        self.camera_pose = [[1, 0, 0, (minx
-                            max(4, minz + (1.5 - miny) * 2, (maxx - minx))
-                            ], [0, 0, 0, 1]]
-
+        self.camera_pose = [[1, 0, 0, (minx + maxx) / 2],
+                            [0, np.cos(c), -np.sin(c), 1.5],
+                            [0, np.sin(c), np.cos(c), max(4, minz + (1.5 - miny) * 2, (maxx - minx))],
+                            [0, 0, 0, 1]]
+
     def __call__(self, vertices, camera_translation):
 
         floor_render = pyrender.Mesh.from_trimesh(self.floor, smooth=False)
-
+
         material = pyrender.MetallicRoughnessMaterial(
             metallicFactor=0.1,
             alphaMode='OPAQUE',
@@ -105,21 +70,18 @@ class Renderer:
         mesh = trimesh.Trimesh(vertices, self.faces)
         mesh.apply_transform(self.rot)
         mesh = pyrender.Mesh.from_trimesh(mesh, material=material)
-
-        camera = pyrender.PerspectiveCamera(yfov=(np.pi / 3.0)
-        light = pyrender.DirectionalLight(color=[1,
-        spot_l = pyrender.SpotLight(color=np.ones(3),
-                                    innerConeAngle=np.pi / 16,
-                                    outerConeAngle=np.pi / 6)
+
+        camera = pyrender.PerspectiveCamera(yfov=(np.pi / 3.0))
+
+        light = pyrender.DirectionalLight(color=[1, 1, 1], intensity=350)
+        spot_l = pyrender.SpotLight(color=np.ones(3), intensity=300.0,
+                                    innerConeAngle=np.pi / 16, outerConeAngle=np.pi / 6)
         point_l = pyrender.PointLight(color=np.ones(3), intensity=300.0)
-
-        scene = pyrender.Scene(bg_color=(1.,
-                               ambient_light=(0.4, 0.4, 0.4))
+
+        scene = pyrender.Scene(bg_color=(1., 1., 1., 0.8), ambient_light=(0.4, 0.4, 0.4))
         scene.add(floor_render, pose=self.floor_pose)
         scene.add(mesh, 'mesh')
-
+
         light_pose = np.eye(4)
         light_pose[:3, 3] = np.array([0, -1, 1])
         scene.add(light, pose=light_pose)
@@ -129,68 +91,54 @@ class Renderer:
 
         light_pose[:3, 3] = np.array([1, 1, 2])
         scene.add(light, pose=light_pose)
-
+
         scene.add(camera, pose=self.camera_pose)
-
+
         flags = pyrender.RenderFlags.RGBA | pyrender.RenderFlags.SHADOWS_DIRECTIONAL
         color, rend_depth = self.renderer.render(scene, flags=flags)
-
+
         return color
 
-
 class SMPLRender():
-
     def __init__(self, SMPL_MODEL_DIR):
         if torch.cuda.is_available():
             self.device = torch.device("cuda")
         else:
             self.device = torch.device("cpu")
-        self.smpl = SMPL(SMPL_MODEL_DIR, batch_size=1,
+        # self.smpl = SMPL(SMPL_MODEL_DIR, batch_size=1, create_transl=False).to(self.device)
+        self.smpl = smplx.create(Path(SMPL_MODEL_DIR).parent, model_type="smpl", gender="neutral", ext="npz", batch_size=1).to(self.device)
 
         self.pred_camera_t = []
         self.focal_length = 110
-
+
    def init_renderer(self, res, smpl_param, is_headroot=False):
        poses = smpl_param['pred_pose']
        pred_rotmats = []
        for pose in poses:
-            if pose.size
-                pose = pose.reshape(-1,
+            if pose.size == 72:
+                pose = pose.reshape(-1, 3)
                 pose = RRR.from_rotvec(pose).as_matrix()
-                pose = pose.reshape(1,
-            pred_rotmats.append(
-                torch.from_numpy(pose.astype(np.float32)[None]).to(
-                    self.device))
-
+                pose = pose.reshape(1, 24, 3, 3)
+            pred_rotmats.append(torch.from_numpy(pose.astype(np.float32)[None]).to(self.device))
        pred_rotmat = torch.cat(pred_rotmats, dim=0)
 
-        pred_betas = torch.from_numpy(smpl_param['pred_shape'].reshape(
-        smpl_output = self.smpl(betas=pred_betas,
-                                body_pose=pred_rotmat[:, 1:],
-                                global_orient=pred_rotmat[:, 0].unsqueeze(1),
-                                pose2rot=False)
-
+        pred_betas = torch.from_numpy(smpl_param['pred_shape'].reshape(1, 10).astype(np.float32)).to(self.device)
+        pred_root = torch.tensor(smpl_param['pred_root'].reshape(-1, 3).astype(np.float32), device=self.device)
+        smpl_output = self.smpl(betas=pred_betas, body_pose=pred_rotmat[:, 1:], transl=pred_root, global_orient=pred_rotmat[:, :1], pose2rot=False)
+
        self.vertices = smpl_output.vertices.detach().cpu().numpy()
 
-
+        pred_root = pred_root[0]
 
        if is_headroot:
-            0, 12].detach().cpu().numpy()
-
-            self.pred_camera_t.append(pred_camera_t)
+            pred_root = pred_root - smpl_output.joints[0, 12].detach().cpu().numpy()
 
-        self.
+        self.pred_camera_t.append(pred_root)
+
+        self.renderer = Renderer(vertices=self.vertices, focal_length=self.focal_length,
+                                 img_res=(res[1], res[0]), faces=self.smpl.faces)
+
    def render(self, index):
-        renderImg = self.renderer(self.vertices[index, ...],
-                                  self.pred_camera_t)
+        renderImg = self.renderer(self.vertices[index, ...], self.pred_camera_t)
        return renderImg
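For orientation, a rough usage sketch of the updated pipeline, assuming an SMPL model file reachable via `SMPL_MODEL_DIR` and a `params` dict shaped like the one built in app.py above; the path, frame count, and `res` tuple below are placeholder assumptions, not values from this commit:

    import numpy as np
    from mGPT.render.pyrender.smpl_render import SMPLRender

    SMPL_MODEL_DIR = 'deps/smpl_models/smpl/SMPL_NEUTRAL.pkl'   # hypothetical path; smplx.create() receives its parent dir
    nframes = 60                                                # hypothetical motion length

    params = dict(pred_shape=np.zeros([1, 10]),                          # neutral betas
                  pred_root=np.zeros([nframes, 3], dtype=np.float32),    # per-frame root translation
                  pred_pose=np.zeros([nframes, 72], dtype=np.float32))   # axis-angle poses, 24 joints x 3

    render = SMPLRender(SMPL_MODEL_DIR)
    render.init_renderer(res=(720, 480, 3), smpl_param=params)   # res assumed (H, W, C); the class uses img_res=(res[1], res[0])
    frames = [render.render(i) for i in range(nframes)]          # one RGBA image per frame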