|
|
|
@@ -27,9 +27,25 @@ class ImagePrompts: |
|
|
|
|
|
|
|
def _get_image_prompts(self, img, borders, vae, crop_first): |
|
|
|
if crop_first: |
|
|
|
assert borders['right'] + borders['left'] + borders['down'] == 0 |
|
|
|
up_border = borders['up'] * 8 |
|
|
|
_, _, [_, _, vqg_img] = vae.model.encode(img[:, :, :up_border, :]) |
|
|
|
bs, _, img_w, img_h = img.shape |
|
|
|
vqg_img_w, vqg_img_h = img_w // 8, img_h // 8 |
|
|
|
vqg_img = torch.zeros((bs, vqg_img_w, vqg_img_h), dtype=torch.int32, device=img.device) |
|
|
|
if borders['down'] != 0: |
|
|
|
down_border = borders['down'] * 8 |
|
|
|
_, _, [_, _, down_vqg_img] = vae.model.encode(img[:, :, -down_border:, :]) |
|
|
|
vqg_img[:, -borders['down']:, :] = down_vqg_img |
|
|
|
if borders['right'] != 0: |
|
|
|
right_border = borders['right'] * 8 |
|
|
|
_, _, [_, _, right_vqg_img] = vae.model.encode(img[:, :, :, :right_border]) |
|
|
|
vqg_img[:, :, :borders['right']] = right_vqg_img |
|
|
|
if borders['left'] != 0: |
|
|
|
left_border = borders['left'] * 8 |
|
|
|
_, _, [_, _, left_vqg_img] = vae.model.encode(img[:, :, :, -left_border:]) |
|
|
|
vqg_img[:, :, -borders['left']:] = left_vqg_img |
|
|
|
if borders['up'] != 0: |
|
|
|
up_border = borders['up'] * 8 |
|
|
|
_, _, [_, _, up_vqg_img] = vae.model.encode(img[:, :, :up_border, :]) |
|
|
|
vqg_img[:, :borders['up'], :] = up_vqg_img |
|
|
|
else: |
|
|
|
_, _, [_, _, vqg_img] = vae.model.encode(img) |
|
|
|
|
|
|
|
|