diff --git a/hpvm/projects/torch2hpvm/torch2hpvm/compile.py b/hpvm/projects/torch2hpvm/torch2hpvm/compile.py
index bb13d3474771258f3a4187c48d7253c1bf74e2e4..06c9a1c297f3bdfd69a2871387a71b91f37aae0e 100644
--- a/hpvm/projects/torch2hpvm/torch2hpvm/compile.py
+++ b/hpvm/projects/torch2hpvm/torch2hpvm/compile.py
@@ -38,7 +38,7 @@ class ModelExporter:
         self.dfg = DFG(onnx_model.graph)
         self.output_dir = output_dir = Path(output_dir).absolute()
-        os.makedirs(output_dir, exist_ok=False)  # Will throw if already existss
+        os.makedirs(output_dir, exist_ok=True)
         self.weight_dir = output_dir / self.weight_dir_name
         self.weight_dir.mkdir(exist_ok=True)
         self.codefile = output_dir / self.source_file_name
@@ -59,15 +59,19 @@ class ModelExporter:
         self.dfg.dump_weights(self.weight_dir, to_fp16=True)
         return self
 
-    def export_datasets(self):
+    def export_datasets(self, n_images: Optional[int]):
         from PIL import Image
+        from math import log10, ceil
 
         labels = []
-        for i in range(self.dataset_size):
+        n_images = self.dataset_size if n_images is None else n_images
+        n_digits = int(ceil(log10(n_images)))
+        for i in range(n_images):
             image, label = self.dataset[i]
             image = (image - image.min()) / (image.max() - image.min()) * 255
             image = image.transpose((1, 2, 0)).astype(np.uint8)
-            Image.fromarray(image).save(self.dataset_dir / f"{i}.jpg")
+            name = str(i).zfill(n_digits)
+            Image.fromarray(image).save(self.dataset_dir / f"{name}.jpg")
             labels.append(label)
         np.array(labels).tofile(self.output_dir / self.label_name)
         return self
@@ -88,14 +92,14 @@ class ModelExporter:
         return self
 
     def generate(
-        self, output_code_file: PathLike = None, batch_size: Optional[int] = None
+        self, output_code_file: PathLike = None, n_images: Optional[int] = None
     ):
         self.codefile = (
             self.codefile if output_code_file is None else Path(output_code_file)
         )
-        self.export_source_code(self.codefile, batch_size)
+        self.export_source_code(self.codefile)
         self.export_weights()
-        self.export_datasets()
+        self.export_datasets(n_images)
         return self
 
     @staticmethod
diff --git a/hpvm/test/epoch_dnn/main.py b/hpvm/test/epoch_dnn/main.py
index 8ce1aad0d69fe0fd4bc046bd327a53e699b2f4db..fb02a15f40177d01e134fd4afbd8fec77290e167 100644
--- a/hpvm/test/epoch_dnn/main.py
+++ b/hpvm/test/epoch_dnn/main.py
@@ -1,6 +1,7 @@
 import site
 from pathlib import Path
 from os import makedirs
+import numpy as np
 import torch
 from torch2hpvm import BinDataset, ModelExporter
 
@@ -10,10 +11,66 @@
 site.addsitedir(self_folder.as_posix())
 from torch_dnn import MiniERA, quantize
 
-SCP_TARGET = ""
+
+# Consts (don't change)
+BUFFER_NAME = "hpvm-mod.nvdla"
+
+
+def split_and_scp(
+    local_srcs: list, host: str, remote_dst: str, password: str, options: str
+):
+    import pexpect
+
+    print(f"Copying files to remote host {host}...")
+    args = options.split(" ")
+    local_srcs = [str(s) for s in local_srcs]
+    args += ["-r", *local_srcs, f"{host}:{remote_dst}"]
+    child = pexpect.spawn("scp", args)
+    child.expect(r"password:")
+    child.sendline(password)
+    # A rough approach to at least print something when scp is alive
+    for line in child:
+        print(line.decode())
+
+
+def run_test_over_ssh(host: str, password: str, working_dir: str, image_dir: Path, options: str):
+    import pexpect
+
+    print(f"Running test on remote host {host}...")
+    args = options.split(" ") + [host]
+    child = pexpect.spawn("ssh", args)
+    child.expect(r"password:")
+    child.sendline(password)
+    child.expect("# ")  # The bash prompt
+    child.sendline(f"cd {working_dir}")
+    child.expect("# ")
+    child.delimiter = "# "
+    for image in image_dir.glob("*"):
+        remote_path = f"{image_dir.name}/{image.name}"
+        print(f"Sending {image.name} to run")
+        child.sendline(f"./nvdla_runtime --loadable {BUFFER_NAME} --image {remote_path} --rawdump")
+        child.expect("# ")
+        child.sendline("cat output.dimg")
+        child.expect("# ")
+        result_lines = child.before.decode().splitlines()
+        # Should have 2 lines. First line is the command we keyed in.
+        output = [int(s) for s in result_lines[1].strip().split()]
+        yield image, output
+
+
+# Local configs
 ASSET_DIR = self_folder / "assets/miniera"
 QUANT_STRAT = "NONE"  # Quantization method
 WORKING_DIR = Path("/tmp/miniera")
+N_IMAGES = 100
+# Remote configs
+SCP_OPTS = "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -P 5506"
+SSH_OPTS = "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 5506"
+SCP_HOST = "root@espgate.cs.columbia.edu"
+SCP_PWD = "openesp"
+SCP_DST = "~/NV_NVDLA"
+
+
 makedirs(WORKING_DIR, exist_ok=True)
 
 # Calculate quantization scales
@@ -23,12 +80,21 @@
 model.load_state_dict(torch.load(ckpt))
 scale_output = quantize(model, ASSET_DIR, QUANT_STRAT, WORKING_DIR)
 # Code generation (into /tmp/miniera/hpvm-mod.nvdla)
+nvdla_buffer = WORKING_DIR / BUFFER_NAME
+print(f"Generating NVDLA buffer into {nvdla_buffer}")
 bin_dataset = BinDataset(
     ASSET_DIR / "input.bin", ASSET_DIR / "labels.bin", (5000, 3, 32, 32)
 )
 exporter = ModelExporter(model, bin_dataset, WORKING_DIR, scale_output)
-exporter.generate().compile(WORKING_DIR / "miniera", WORKING_DIR)
+exporter.generate(n_images=N_IMAGES).compile(WORKING_DIR / "miniera", WORKING_DIR)
+
 # SCP essential files to remote device
-nvdla_buffer = WORKING_DIR / "hpvm-mod.nvdla"
 input_images = exporter.dataset_dir
-labels = WORKING_DIR / exporter.label_name
+split_and_scp([nvdla_buffer, input_images], SCP_HOST, SCP_DST, SCP_PWD, SCP_OPTS)
+
+# SSH to run test remotely
+labels_file = WORKING_DIR / exporter.label_name
+labels = np.fromfile(labels_file, dtype=np.int32)
+for image_path, output in run_test_over_ssh(SCP_HOST, SCP_PWD, SCP_DST, input_images, SSH_OPTS):
+    idx = int(image_path.stem)
+    print(idx, np.array(output).argmax(), labels[idx])
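
A quick sanity check of the new zero-padded naming scheme in `export_datasets` (illustration only, not part of the patch; the `n_images = 100` value mirrors `N_IMAGES` in `main.py`): padding each index to `ceil(log10(n_images))` digits keeps the dumped images in index order when listed lexicographically, while `int(image_path.stem)` on the consumer side still recovers the original index for the label lookup.

```python
# Illustration only (not part of the patch): zero-padded names sort in numeric
# order, and int() on the stem round-trips back to the original index.
from math import ceil, log10

n_images = 100                         # assumed; same as N_IMAGES in main.py
n_digits = int(ceil(log10(n_images)))  # 2 digits -> "00" .. "99"
names = [str(i).zfill(n_digits) for i in range(n_images)]

assert names == sorted(names)          # lexicographic order == numeric order
assert all(int(name) == i for i, name in enumerate(names))
```

Note that `run_test_over_ssh` iterates `image_dir.glob("*")`, whose order is not guaranteed; the label comparison in `main.py` is robust to that because it keys on the parsed index (`int(image_path.stem)`) rather than on iteration order.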