jhj0517 committed
Commit · 6a40fe0
1 Parent(s): 99ea33b
Add type hint
modules/live_portrait/live_portrait_inferencer.py
CHANGED
@@ -135,26 +135,26 @@ class LivePortraitInferencer:
 
     def edit_expression(self,
                         model_type: str = ModelType.HUMAN.value,
-                        rotate_pitch=0,
-                        rotate_yaw=0,
-                        rotate_roll=0,
-                        blink=0,
-                        eyebrow=0,
-                        wink=0,
-                        pupil_x=0,
-                        pupil_y=0,
-                        aaa=0,
-                        eee=0,
-                        woo=0,
-                        smile=0,
-                        src_ratio=1,
-                        sample_ratio=1,
-                        sample_parts=SamplePart.ALL.value,
-                        crop_factor=1.5,
-                        src_image=None,
-                        sample_image=None,
-                        motion_link=None,
-                        add_exp=None):
+                        rotate_pitch: float = 0,
+                        rotate_yaw: float = 0,
+                        rotate_roll: float = 0,
+                        blink: float = 0,
+                        eyebrow: float = 0,
+                        wink: float = 0,
+                        pupil_x: float = 0,
+                        pupil_y: float = 0,
+                        aaa: float = 0,
+                        eee: float = 0,
+                        woo: float = 0,
+                        smile: float = 0,
+                        src_ratio: float = 1,
+                        sample_ratio: float = 1,
+                        sample_parts: str = SamplePart.ALL.value,
+                        crop_factor: float = 1.5,
+                        src_image: Optional[str] = None,
+                        sample_image: Optional[str] = None,
+                        motion_link: Optional[str] = None,
+                        add_exp: Optional['ExpressionSet'] = None) -> None:
         if isinstance(model_type, ModelType):
             model_type = model_type.value
         if model_type not in [mode.value for mode in ModelType]:
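For reference, a minimal usage sketch of the newly annotated signature. Only the keyword names and their new float/Optional[str] hints come from this hunk; the constructor call and the image path are illustrative assumptions, not part of the commit.

# Sketch only: constructor arguments and the image path are assumptions;
# the keyword names and types are taken from the diff above.
from modules.live_portrait.live_portrait_inferencer import LivePortraitInferencer

inferencer = LivePortraitInferencer()
inferencer.edit_expression(
    rotate_pitch=5.0,                     # expression controls are now typed as float
    blink=1.0,
    smile=0.5,
    src_image="assets/portrait.png",      # hypothetical path; src_image is Optional[str]
)

Per the new -> None annotation, the method is called for its side effects and returns nothing.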
@@ -249,6 +249,7 @@ class LivePortraitInferencer:
                 animate_without_vid: bool = False,
                 crop_factor: float = 1.5,
                 src_image_list: Optional[List[np.ndarray]] = None,
+                driving_vid_path: Optional[str] = None,
                 driving_images: Optional[List[np.ndarray]] = None,
                 progress: gr.Progress = gr.Progress()
                 ):
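The second hunk adds an optional driving_vid_path parameter alongside the existing driving_images list. The enclosing method's name is not visible in this hunk, so render_video below is a placeholder; a hedged sketch of how a caller might now supply driving motion either by path or as pre-decoded frames:

# Placeholder method name -- the enclosing method is not named in this hunk.
# Parameter names and types come from the diff; the path is hypothetical.
inferencer.render_video(                  # placeholder name, not from the diff
    crop_factor=1.5,
    driving_vid_path="driving/clip.mp4",  # new: pass the driving video by path...
    driving_images=None,                  # ...or pass frames as List[np.ndarray] instead
)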
|