Kaggle / docker-python

Kaggle Python docker image

GLIBCXX_3.4.26 not found Error #1091

Closed · qo4on closed this 1 year ago

qo4on commented 2 years ago

Trying to run StyleGAN3 projection with lpips on a Kaggle kernel, I get a `GLIBCXX_3.4.26' not found` error. The same code works in Colab without any problem. Is it possible to fix this?

Traceback (most recent call last):
  File "projector.py", line 15, in <module>
    import lpips
  File "/content/stylegan3/lpips/__init__.py", line 7, in <module>
    from skimage.measure import compare_ssim
  File "/opt/conda/lib/python3.7/site-packages/skimage/measure/__init__.py", line 7, in <module>
    from ._polygon import approximate_polygon, subdivide_polygon
  File "/opt/conda/lib/python3.7/site-packages/skimage/measure/_polygon.py", line 2, in <module>
    from scipy import signal
  File "/opt/conda/lib/python3.7/site-packages/scipy/signal/__init__.py", line 294, in <module>
    from ._max_len_seq import max_len_seq
  File "/opt/conda/lib/python3.7/site-packages/scipy/signal/_max_len_seq.py", line 8, in <module>
    from ._max_len_seq_inner import _max_len_seq_inner
ImportError: /usr/lib/x86_64-linux-gnu/libstdc++.so.6: version `GLIBCXX_3.4.26' not found (required by /opt/conda/lib/python3.7/site-packages/scipy/signal/_max_len_seq_inner.cpython-37m-x86_64-linux-gnu.so)
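
For reference, the mismatch can be confirmed by listing which `GLIBCXX` symbol versions each `libstdc++` on the image exports. A diagnostic sketch follows; the extension path is taken from the traceback above, and the presence of a conda-bundled copy at `/opt/conda/lib/libstdc++.so.6` is an assumption:

```
# Which libstdc++ does the failing scipy extension resolve to?
!ldd /opt/conda/lib/python3.7/site-packages/scipy/signal/_max_len_seq_inner.cpython-37m-x86_64-linux-gnu.so | grep stdc++
# What symbol versions does the system copy provide? (GLIBCXX_3.4.26 must appear)
!strings /usr/lib/x86_64-linux-gnu/libstdc++.so.6 | grep GLIBCXX | tail -n 5
# And conda's bundled copy, if it exists on this image
!strings /opt/conda/lib/libstdc++.so.6 | grep GLIBCXX | tail -n 5
```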
Philmod commented 2 years ago

Hi! Could you share a reproducible code sample?

qo4on commented 2 years ago

Hi! Thanks for your help! I managed to fix the `GLIBCXX_3.4.26` error by upgrading the system packages:

!apt-get install sudo > /dev/null
!sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test > /dev/null
!sudo apt-get -y dist-upgrade > /dev/null
!strings /usr/lib/x86_64-linux-gnu/libstdc++.so.6 | grep GLIBCXX
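
For context: `GLIBCXX_3.4.26` is the symbol version introduced with GCC 9's libstdc++, so the `ubuntu-toolchain-r` PPA upgrade works by replacing the image's older system copy. A lighter-weight alternative, sketched below and not verified on every image version, is to reuse conda's bundled libstdc++ if it is new enough:

```
# Sketch of an alternative fix (assumes conda ships its own, newer libstdc++):
# point the system path at conda's copy instead of dist-upgrading the whole image.
!strings /opt/conda/lib/libstdc++.so.6 | grep -q GLIBCXX_3.4.26 && echo "conda copy is new enough"
!sudo ln -sf /opt/conda/lib/libstdc++.so.6 /usr/lib/x86_64-linux-gnu/libstdc++.so.6
```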

Then I hit another scikit-image error, which I fixed with:

!pip install scikit-image==0.16.2 > /dev/null
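
The pin works because `compare_ssim` was deprecated in scikit-image 0.16 and removed in 0.18, while the lpips copy bundled with stylegan2-pytorch still imports it from `skimage.measure`. As an alternative to pinning, a sketch of a compatibility shim for the import in `lpips/__init__.py`:

```
# Alternative to pinning scikit-image: make the import work with both old and
# new versions. structural_similarity is the renamed API in skimage.metrics.
try:
    from skimage.measure import compare_ssim
except ImportError:
    from skimage.metrics import structural_similarity as compare_ssim
```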

Unfortunately I got a Ninja error in the end:

subprocess.CalledProcessError: Command '['ninja', '-v']' returned non-zero exit status 1.
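
StyleGAN3 JIT-compiles its custom CUDA ops with `torch.utils.cpp_extension`, so a bare `ninja -v` exit status usually hides the real compiler error underneath. A quick environment check (a diagnostic sketch, not a fix):

```
# Check that a CUDA toolkit and a compatible compiler are visible, since a
# ninja failure here typically means a compiler/CUDA-toolkit mismatch.
import torch
from torch.utils.cpp_extension import CUDA_HOME
print(torch.__version__, torch.version.cuda, CUDA_HOME)
!ninja --version
!nvcc --version || echo "nvcc not on PATH"
!gcc --version | head -n 1
```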
Full code:

```
!apt-get install sudo > /dev/null
!sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test > /dev/null
!sudo apt-get -y dist-upgrade > /dev/null
!pip install scikit-image==0.16.2 > /dev/null
!pip install ninja > /dev/null

#@title Inputs
try:
    from natsort import natsorted
except ModuleNotFoundError:
    !pip install natsort > /dev/null
    from natsort import natsorted
try:
    from gdown import download as gdownload
except ModuleNotFoundError:
    !pip install gdown > /dev/null
    from gdown import download as gdownload

import sys, time, glob
from pathlib import Path

#@title Download Face Detector and Shape Predictor
def download(file_path, min_size=100000, tries=10):
    file_path = Path(file_path)
    if not file_path.exists() or file_path.stat().st_size < min_size:
        zip_filename = file_path.name + '.bz2'
        url = f"http://dlib.net/files/{zip_filename}"
        for i in range(tries):
            !wget -q {url} -P {file_path.parent} > /dev/null
            temp_path = str(file_path) + '.bz2'
            !bzip2 -dk {temp_path}
            Path(temp_path).unlink()
            if file_path.exists() and file_path.stat().st_size >= min_size:
                return
        sys.exit(f"Can't download {file_path}")

pretrained_path = Path('/content', 'pretrained_models')
pretrained_path.mkdir(parents=True, exist_ok=True)
shape_predictor = "shape_predictor_68_face_landmarks.dat"
face_detector = "mmod_human_face_detector.dat"
shape_predictor = pretrained_path / shape_predictor
face_detector = pretrained_path / face_detector
download(shape_predictor, min_size=99693937)
download(face_detector, min_size=729940)

# Download Image
inp_dir = Path('/content', 'inp')
inp_dir.mkdir(parents=True, exist_ok=True)
aligned_dir = inp_dir / 'aligned'
aligned_dir.mkdir(parents=True, exist_ok=True)
test_img = inp_dir / 'test.jpg'
!wget https://free-images.com/lg/b828/girl_woman_female_young_2.jpg -O {test_img}
```

```
%%writefile /content/align.py
#@title Image Alignment
import argparse
import numpy as np
import dlib, glob
from scipy import ndimage
from PIL import Image
from natsort import natsorted
from pathlib import Path


def get_landmark(filepath, detector, predictor):
    """get landmark with dlib
    :return: np.array shape=(68, 2)
    """
    detector = dlib.cnn_face_detection_model_v1(detector)
    predictor = dlib.shape_predictor(predictor)
    img = dlib.load_rgb_image(str(filepath))
    dets = detector(img, 1)
    for k, d in enumerate(dets):
        shape = predictor(img, d.rect)
    t = list(shape.parts())
    a = []
    for tt in t:
        a.append([tt.x, tt.y])
    lm = np.array(a)
    return lm


def align_face(filepath, detector, predictor, output_size=1024, transform_size=1024):
    """
    :param filepath: str
    :return: PIL Image
    """
    lm = get_landmark(filepath, detector, predictor)
    img = Image.open(filepath).convert("RGB")

    # import cv2
    # import matplotlib.pyplot as plt
    # img = cv2.imread(Path(address, filename))
    # for lmk in lm:
    #     cv2.circle(
    #         img,
    #         (lmk[0], lmk[1]),
    #         # (landmarks.part(landmarkNumber).x, landmarks.part(landmarkNumber).y),
    #         2,
    #         (255, 0, 0),
    #         -1
    #     )
    # plt.imshow(img)

    lm_chin = lm[0: 17]            # left-right
    lm_eyebrow_left = lm[17: 22]   # left-right
    lm_eyebrow_right = lm[22: 27]  # left-right
    lm_nose = lm[27: 31]           # top-down
    lm_nostrils = lm[31: 36]       # top-down
    lm_eye_left = lm[36: 42]       # left-clockwise
    lm_eye_right = lm[42: 48]      # left-clockwise
    lm_mouth_outer = lm[48: 60]    # left-clockwise
    lm_mouth_inner = lm[60: 68]    # left-clockwise

    # Calculate auxiliary vectors.
    eye_left = np.mean(lm_eye_left, axis=0)
    eye_right = np.mean(lm_eye_right, axis=0)
    eye_avg = (eye_left + eye_right) * 0.5
    eye_to_eye = eye_right - eye_left
    mouth_left = lm_mouth_outer[0]
    mouth_right = lm_mouth_outer[6]
    mouth_avg = (mouth_left + mouth_right) * 0.5
    eye_to_mouth = mouth_avg - eye_avg

    # Choose oriented crop rectangle.
    x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
    x /= np.hypot(*x)
    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
    y = np.flipud(x) * [-1, 1]
    c = eye_avg + eye_to_mouth * 0.1
    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
    qsize = np.hypot(*x) * 2

    # read image
    img = Image.open(filepath).convert("RGB")

    enable_padding = True

    # Shrink.
    shrink = int(np.floor(qsize / output_size * 0.5))
    if shrink > 1:
        rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
        img = img.resize(rsize, Image.ANTIALIAS)
        quad /= shrink
        qsize /= shrink

    # Crop.
    border = max(int(np.rint(qsize * 0.1)), 3)
    crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))),
            int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1]))))
    crop = (max(crop[0] - border, 0), max(crop[1] - border, 0),
            min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
    if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
        img = img.crop(crop)
        quad -= crop[0:2]

    # Pad.
    pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))),
           int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1]))))
    pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0),
           max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
    if enable_padding and max(pad) > border - 4:
        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
        h, w, _ = img.shape
        y, x, _ = np.ogrid[:h, :w, :1]
        mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
                          1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
        blur = qsize * 0.02
        img += (ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
        img = Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
        quad += pad[:2]

    # Transform.
    img = img.transform((transform_size, transform_size), Image.QUAD, (quad + 0.5).flatten(), Image.BILINEAR)
    if output_size < transform_size:
        img = img.resize((output_size, output_size), Image.ANTIALIAS)

    return img


def run_alignment(inp_dir, out_dir, detector, predictor):
    img_list = glob.glob1(inp_dir, '*.jpg') + glob.glob1(inp_dir, '*.png')
    img_list = natsorted(img_list)
    for image_path in img_list:
        aligned_image = align_face(Path(inp_dir, image_path), detector, predictor)
        aligned_image.save(f"{Path(out_dir, Path(image_path).stem + '.png')}")


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--inp_dir')
    parser.add_argument('--out_dir')
    parser.add_argument('--detector')
    parser.add_argument('--predictor')
    args = parser.parse_args()
    assert Path(args.detector).exists()
    assert Path(args.predictor).exists()
    run_alignment(**vars(args))


if __name__ == "__main__":
    main()
```

```
!python /content/align.py --inp_dir={inp_dir} --out_dir={aligned_dir} \
    --detector={face_detector} --predictor={shape_predictor}
```

```
save_folder = Path('/content', 'out')
save_folder.mkdir(parents=True, exist_ok=True)
img_list = natsorted(glob.glob(f"{aligned_dir}/*.png"))
%cd /content
!git clone https://github.com/rosinality/stylegan2-pytorch.git &> /dev/null
!git clone https://github.com/NVlabs/stylegan3.git &> /dev/null
!cp -R /content/stylegan2-pytorch/lpips /content/stylegan3
%cd /content/stylegan3
```

```
%%writefile /content/stylegan3/projector.py
##@title projector.py
import copy
import os
from time import perf_counter
from tqdm import tqdm

import click
import imageio
import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
from torchvision import transforms
import lpips

import dnnlib
import legacy


def project(
    G,
    target: torch.Tensor,  # [C,H,W] and dynamic range [0,255], W & H must match G output resolution
    *,
    num_steps = 1000,
    w_avg_samples = 10000,
    initial_learning_rate = 0.1,
    initial_noise_factor = 0.05,
    lr_rampdown_length = 0.25,
    lr_rampup_length = 0.05,
    noise_ramp_length = 0.75,
    regularize_noise_weight = 1e5,
    verbose = False,
    device: torch.device,
    label: torch.Tensor,
    noise_mode = 'const'
):
    assert target.shape == (G.img_channels, G.img_resolution, G.img_resolution)

    def logprint(*args):
        if verbose:
            print(*args)

    G = copy.deepcopy(G).eval().requires_grad_(False).to(device)  # type: ignore

    # Compute w stats.
    logprint(f'Computing W midpoint and stddev using {w_avg_samples} samples...')
    z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim)
    w_samples = G.mapping(torch.from_numpy(z_samples).to(device), None)  # [N, L, C]
    w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32)     # [N, 1, C]
    w_avg = np.mean(w_samples, axis=0, keepdims=True)                    # [1, 1, C]
    w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5

    # Load lpips
    percept = lpips.PerceptualLoss(model='net-lin', net='vgg', use_gpu=True)

    # Features for target image.
    target_images = []
    target_images.append(target)
    target_images = torch.stack(target_images, dim=0).cuda()

    w_opt = torch.tensor(w_avg, dtype=torch.float32, device=device, requires_grad=True)  # pylint: disable=not-callable
    w_out = torch.zeros([num_steps] + list(w_opt.shape[1:]), dtype=torch.float32, device=device)
    optimizer = torch.optim.Adam([w_opt], betas=(0.9, 0.999), lr=initial_learning_rate)

    for step in tqdm(range(num_steps), desc='Project steps'):
        # Learning rate schedule.
        t = step / num_steps
        lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)
        lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)
        lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)
        lr = initial_learning_rate * lr_ramp
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        # Synth images from opt_w.
        synth_images = G(w_opt[0], label, noise_mode=noise_mode)

        # Features for synth images.
        dist = percept(synth_images, target_images).sum()
        loss = dist

        # Step
        optimizer.zero_grad(set_to_none=True)
        loss.backward()
        optimizer.step()
        # logprint(f'step {step+1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')

        # Save projected W for each optimization step.
        w_out[step] = w_opt.detach()[0]

    return w_out

#----------------------------------------------------------------------------

@click.command()
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--target', 'target_fname', help='Target image file to project to', required=True, metavar='FILE')
@click.option('--num-steps', help='Number of optimization steps', type=int, default=1000, show_default=True)
@click.option('--seed', help='Random seed', type=int, default=303, show_default=True)
@click.option('--save-video', help='Save an mp4 video of optimization progress', type=bool, default=True, show_default=True)
@click.option('--outdir', help='Where to save the output images', required=True, metavar='DIR')
def run_projection(
    network_pkl: str,
    target_fname: str,
    outdir: str,
    save_video: bool,
    seed: int,
    num_steps: int
):
    """Project given image to the latent space of pretrained network pickle.

    Examples:

    \b
    python projector.py --outdir=out --target=~/mytargetimg.png \\
        --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/ffhq.pkl
    """
    np.random.seed(seed)
    torch.manual_seed(seed)

    # Load networks.
    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as fp:
        G = legacy.load_network_pkl(fp)['G_ema'].to(device)  # type: ignore

    # Load target image.
    target_pil = PIL.Image.open(target_fname).convert('RGB')
    w, h = target_pil.size
    s = min(w, h)
    target_pil = target_pil.crop(((w - s) // 2, (h - s) // 2, (w + s) // 2, (h + s) // 2))
    target_pil = target_pil.resize((G.img_resolution, G.img_resolution), PIL.Image.LANCZOS)  # 1024
    target_uint8 = np.array(target_pil, dtype=np.uint8)

    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
    target_img = transform(target_pil)

    # label
    label = torch.zeros([1, G.c_dim], device=device)

    # noise_mode: "random" "none" "const"
    noise_mode = 'const'

    # Optimize projection.
    start_time = perf_counter()
    projected_w_steps = project(
        G,
        target=target_img,
        num_steps=num_steps,
        device=device,
        verbose=True,
        label=label,
        noise_mode=noise_mode
    )
    print(f'Elapsed: {(perf_counter() - start_time):.1f} s')

    # Render debug output: optional video and projected image and W vector.
    outdir = os.path.join(outdir, os.path.basename(os.path.normpath(target_fname[:-4])))
    pngdir = os.path.join(outdir, 'png')
    os.makedirs(pngdir, exist_ok=True)
    if save_video:
        # video = imageio.get_writer(f'{outdir}/proj.mp4', mode='I', fps=10, codec='libx264', bitrate='16M')
        print(f'Saving optimization progress video "{outdir}"')
        cnt = 0
        for projected_w in tqdm(projected_w_steps, desc='Synthesize images'):
            synth_image = G(projected_w, label, truncation_psi=1, noise_mode=noise_mode)
            synth_image = (synth_image + 1) * (255 / 2)
            synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()
            # video.append_data(np.concatenate([target_uint8, synth_image], axis=1))
            PIL.Image.fromarray(synth_image, 'RGB').save(f'{pngdir}/{cnt:04d}.png')
            np.savez(f'{outdir}/{cnt:04d}.npz', w=projected_w.unsqueeze(0).cpu().numpy())
            cnt += 1
        # video.close()

#----------------------------------------------------------------------------

if __name__ == "__main__":
    run_projection()  # pylint: disable=no-value-for-parameter
```

```
model = "https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-ffhq-1024x1024.pkl"
for image in img_list:
    !python projector.py --network={model} --target={image} --seed=4 --outdir={save_folder}
```

bilzard commented 1 year ago

FYI: I had the same issue with an old version of the machine image (2023-02-02). However, the problem was resolved by switching to the latest machine image.

You can change this behavior in the right-side menu of your notebook:

[Screenshot: the Environment setting in the notebook's right-side menu]