From c77c6008c2df01a0159e92c5d98d61212aa865e6 Mon Sep 17 00:00:00 2001 From: victoryang00 Date: Fri, 8 Mar 2024 20:54:02 -0800 Subject: [PATCH] get optimistic computing monitoring done --- artifact/bench_comparison.py | 18 +++--- artifact/bench_policy.py | 6 +- artifact/common_util.py | 24 ++++---- artifact/result/comparison.csv | 22 ++++--- artifact/run_with_cpu_monitoring_mac.sh | 27 +++++++++ artifact/run_with_energy_monitoring.sh | 24 ++++++++ artifact/usecase_burst_computing.py | 69 ++++++++++++++++++---- artifact/usecase_optimistic_computing.py | 75 ++++++++++++++++++++++-- include/wamr_export.h | 1 + include/wamr_read_write.h | 44 ++++++++++---- src/checkpoint.cpp | 26 +------- src/restore.cpp | 2 +- src/wamr_export.cpp | 50 ++++++++++++---- 13 files changed, 291 insertions(+), 97 deletions(-) create mode 100644 artifact/run_with_cpu_monitoring_mac.sh create mode 100644 artifact/run_with_energy_monitoring.sh diff --git a/artifact/bench_comparison.py b/artifact/bench_comparison.py index 4333e6c..0445a82 100644 --- a/artifact/bench_comparison.py +++ b/artifact/bench_comparison.py @@ -34,7 +34,7 @@ arg = [ [], ["stories110M.bin", "-z", "tokenizer.bin", "-t", "0.0"], - ["./ORBvoc.txt,", "./TUM3.yaml", "./", "./associations/fr1_xyz.txt"], + ["./ORBvoc.txt", "./TUM3.yaml", "./", "./associations/fr1_xyz.txt"], [], [], [], @@ -46,14 +46,14 @@ ] envs = [ "a=b", - "OMP_NUM_THREADS=4", + "OMP_NUM_THREADS=1", "a=b", - "OMP_NUM_THREADS=4", - "OMP_NUM_THREADS=4", - "OMP_NUM_THREADS=4", - "OMP_NUM_THREADS=4", - "OMP_NUM_THREADS=4", - "OMP_NUM_THREADS=4", + "OMP_NUM_THREADS=1", + "OMP_NUM_THREADS=1", + "OMP_NUM_THREADS=1", + "OMP_NUM_THREADS=1", + "OMP_NUM_THREADS=1", + "OMP_NUM_THREADS=1", "a=b", "a=b", ] @@ -289,7 +289,7 @@ def plot(results): .replace("-vn300", "") .replace("maze-6404.txt", "") .replace("stories110M.bin", "") - .replace("-z tokenizer.bin -t 0.0", "") + .replace("-z tokenizer.bin -t 0.0", "").replace("a=b", "") .strip() ].append(( hcontainer_values, mvvm_values, qemu_x86_64_values,qemu_aarch64_values,native_values)) diff --git a/artifact/bench_policy.py b/artifact/bench_policy.py index 4eff8c1..ac61742 100644 --- a/artifact/bench_policy.py +++ b/artifact/bench_policy.py @@ -291,7 +291,7 @@ def plot(results): plt.savefig("performance_singlethread.pdf") if __name__ == "__main__": - mvvm_results = run_mvvm() - write_to_csv("policy.csv") +# mvvm_results = run_mvvm() +# write_to_csv("policy.csv") mvvm_results = read_from_csv("policy.csv") - plot(mvvm_results) \ No newline at end of file + plot(mvvm_results) diff --git a/artifact/common_util.py b/artifact/common_util.py index 50b09ed..b8e84ce 100644 --- a/artifact/common_util.py +++ b/artifact/common_util.py @@ -4,7 +4,7 @@ import time pwd = "/mnt/MVVM" - +slowtier = "epyc" def get_func_index(func, file): cmd = ["wasm2wat", "--enable-all", file] @@ -280,7 +280,7 @@ def run_qemu_checkpoint( return (exec, output) -def run(aot_file: str, arg: list[str], env: str, extra:str="") -> tuple[str, str]: +def run(aot_file: str, arg: list[str], env: str, extra: str = "") -> tuple[str, str]: cmd = f"./MVVM_checkpoint -t ../build/bench/{aot_file} {' '.join(['-a ' + str(x) for x in arg])} -e {env} {extra}" print(cmd) cmd = cmd.split() @@ -294,25 +294,29 @@ def run(aot_file: str, arg: list[str], env: str, extra:str="") -> tuple[str, str # print(output) return (exec, output) + def run_checkpoint_restore_slowtier( - aot_file: str, folder, arg: list[str], env: str, extra:str="" -) -> tuple[str, str, str, str]: + aot_file: str, folder, arg: list[str], env: 
str, extra1: str = "", extra2 :str= "" +): # Execute run_checkpoint and capture its result res = [] - for _ in range(trial): - checkpoint_result = run(aot_file, folder, arg, env,extra) + for i in range(trial): + os.system(f"./run_with_cpu_monitoring.sh ./MVVM_checkpoint -t ./bench/{aot_file} {' '.join(['-a ' + str(x) for x in arg])} -e {env} {extra1} &") # Execute run_restore with the same arguments (or modify as needed) - restore_result = run_criu_restore(aot_file, arg, env) + os.system(f"ssh {slowtier} ./run_with_cpu_monitoring.sh ./MVVM_restore -t ./bench/{aot_file} {' '.join(['-a ' + str(x) for x in arg])} -e {env} {extra1} &") + # print(checkpoint_result, restore_result) # Return a combined result or just the checkpoint result as needed - res.append(checkpoint_result[1] + restore_result[1]) + res.append(f"./bench/{aot_file}{i}.log") return (checkpoint_result[0], res) -def run_slowtier(aot_file: str, arg: list[str], env: str, extra:str="") -> tuple[str, str]: - cmd = f"ssh epyc {pwd}/MVVM_checkpoint -t {pwd}/build/bench/{aot_file} {' '.join(['-a ' + str(x) for x in arg])} -e {env} {extra}" +def run_slowtier( + aot_file: str, arg: list[str], env: str, extra: str = "" +) -> tuple[str, str]: + cmd = f"ssh {slowtier} {pwd}/MVVM_checkpoint -t {pwd}/build/bench/{aot_file} {' '.join(['-a ' + str(x) for x in arg])} -e {env} {extra}" print(cmd) cmd = cmd.split() result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) diff --git a/artifact/result/comparison.csv b/artifact/result/comparison.csv index 7d34e06..ea5bd02 100644 --- a/artifact/result/comparison.csv +++ b/artifact/result/comparison.csv @@ -1,13 +1,11 @@ name,mvvm,hcontainer,qemu_x86_64,qemu_aach64,native -a=b linpack.aot,33.597456,21.068,36.012,0.051,24.056 -OMP_NUM_THREADS=4 llama.aot stories110M.bin -z tokenizer.bin -t 0.0,10.116758,29.039,1187.058,0.051,7.054 -"a=b rgbd_tum.aot ./ORBvoc.txt, ./TUM3.yaml ./ ./associations/fr1_xyz.txt",939.389244,0.001,0.045,0.051,0.006 -OMP_NUM_THREADS=4 tc.aot,0.00049,0.012,0.029,0.048,0.003 -OMP_NUM_THREADS=4 bt.aot,69.410827,131.075,1869.096,0.051,0.005 -OMP_NUM_THREADS=4 cg.aot,29.317632,20.012,161.058,0.052,7.027 -OMP_NUM_THREADS=4 ft.aot,15.010672,52.047,1550.071,0.051,0.005 -OMP_NUM_THREADS=4 lu.aot,0.060571,0.047,1.067,0.051,0.006 -OMP_NUM_THREADS=4 mg.aot,27.543796,13.053,764.02,0.051,0.005 -OMP_NUM_THREADS=4 sp.aot,42.649225,1.08,55.073,0.05,2.062 -a=b redis.aot,51.861305,124.087,368.022,0.055,0.004 -a=b hdastar.aot maze-6404.txt 8,11.936914,0.012,11.067,0.049,0.002 +a=b linpack.aot,24.743766,21.013,27.082,20.083,22.085 +OMP_NUM_THREADS=1 llama.aot stories110M.bin -z tokenizer.bin -t 0.0,27.760763,24.099,27.082,146.043,29.091 +OMP_NUM_THREADS=1 bt.aot,72.971184,133.0,0.005,1205.071,37.062 +OMP_NUM_THREADS=1 cg.aot,27.552785,19.046,486.078,271.0,16.082 +OMP_NUM_THREADS=1 ft.aot,32.585113,54.008,486.078,871.079,21.024 +OMP_NUM_THREADS=1 lu.aot,0.056297,0.01,2.024,0.096,0.005 +OMP_NUM_THREADS=1 mg.aot,26.387951,12.093,2464.017,279.036,9.065 +OMP_NUM_THREADS=1 sp.aot,13.955374,1.056,116.063,16.001,1.043 +a=b redis.aot,43.189769,127.083,328.08,428.015,18.09 +a=b hdastar.aot maze-6404.txt 8,9.070406,0.0,11.03,0.004,4.082 diff --git a/artifact/run_with_cpu_monitoring_mac.sh b/artifact/run_with_cpu_monitoring_mac.sh new file mode 100644 index 0000000..9b2d7fc --- /dev/null +++ b/artifact/run_with_cpu_monitoring_mac.sh @@ -0,0 +1,27 @@ +#!/bin/bash +echo "$@" +"$@" &> $1.out & +pid1=$! 
+# echo $pid1 > /sys/fs/cgroup/memory/my_cgroup/cgroup.procs
+# echo $(($first_arg * 1024 * 1024 * 1024)) > /sys/fs/cgroup/memory/my_cgroup/memory.limit_in_bytes
+sudo asitop --show_cores &> $1.cpu.out &
+pid2=$!
+while true; do
+    line=$(ps auxh -q $pid1)
+    if [ "$line" == "" ]; then
+        break
+    fi
+    echo $line >>$1.ps.out
+    for child in $(pgrep -P $pid1); do
+        line=$(ps auxh -q $child)
+        if [ "$line" == "" ]; then
+            continue
+        fi
+        echo $line >>$1.ps.out
+    done
+    sleep 0.005
+    if ! ps -p $pid1 >/dev/null; then
+        sleep 0.5
+        sudo kill -9 $pid2
+    fi
+done
\ No newline at end of file
diff --git a/artifact/run_with_energy_monitoring.sh b/artifact/run_with_energy_monitoring.sh
new file mode 100644
index 0000000..aa0cb43
--- /dev/null
+++ b/artifact/run_with_energy_monitoring.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+echo "$@"
+"$@" &> $1.out &
+pid1=$!
+# echo $pid1 > /sys/fs/cgroup/memory/my_cgroup/cgroup.procs
+# echo $(($first_arg * 1024 * 1024 * 1024)) > /sys/fs/cgroup/memory/my_cgroup/memory.limit_in_bytes
+while true; do
+    line=$(ps auxh -q $pid1)
+    if [ "$line" == "" ]; then
+        break
+    fi
+    echo $line >>$1.energy.out
+    for child in $(pgrep -P $pid1); do
+        line=$(ps auxh -q $child)
+        if [ "$line" == "" ]; then
+            continue
+        fi
+        echo $line >>$1.energy.out
+    done
+    sleep 0.005
+    if ! ps -p $pid1 >/dev/null; then
+        sleep 0.5
+    fi
+done
\ No newline at end of file
diff --git a/artifact/usecase_burst_computing.py b/artifact/usecase_burst_computing.py
index 20654db..b8d241f 100644
--- a/artifact/usecase_burst_computing.py
+++ b/artifact/usecase_burst_computing.py
@@ -5,14 +5,16 @@
 import numpy as np
 from collections import defaultdict
 
-
+ip = ["128.114.53.32", "192.168.0.1"]
+port = 1234
+new_port = 1235
 cmd = [
-    "redis", # low priority task
-    "rgbd_tum", # high priority task
+    "redis",  # low priority task
+    "rgbd_tum",  # high priority task
 ]
 folder = [
     "redis",
-    "ORB_SLAM2", # networkbound?
+    "ORB_SLAM2",  # networkbound?
] arg = [ [], @@ -25,6 +27,7 @@ pool = Pool(processes=20) + def get_fasttier_result(): results = [] results1 = [] @@ -42,13 +45,25 @@ def get_fasttier_result(): print(exec, exec_time) results.append((exec, exec_time)) # discover 4 aot_variant + def get_slowtier_result(): results = [] results1 = [] for _ in range(common_util.trial): for i in range(len(cmd)): aot = cmd[i] + ".aot" - results1.append(pool.apply_async(common_util.run_slowtier, (aot, arg[i], envs[i]))) + results1.append( + pool.apply_async( + common_util.run_slowtier, + ( + aot, + arg[i], + envs[i], + f"-i {ip[0]} -e {port}", + f"-o {ip[1]} -s {port}", + ), + ) + ) # print the results results1 = [x.get() for x in results1] for exec, output in results1: @@ -59,14 +74,22 @@ def get_slowtier_result(): print(exec, exec_time) results.append((exec, exec_time)) # discover 4 aot_variant + def get_snapshot_overhead(): results = [] results1 = [] for _ in range(common_util.trial): for i in range(len(cmd)): aot = cmd[i] + ".aot" - results1.append(pool.apply_async(common_util.run_checkpoint_restore_slowtier, (aot, arg[i], envs[i]))) - # print the results + results1.append( + pool.apply_async( + common_util.run_checkpoint_restore_slowtier, + (aot, arg[i], envs[i]), + f"-o {ip[0]} -s {port}", + f"-i {ip[1]} -e {port} -o {ip[0]} -s {new_port}", + f"-i {ip[1]} -e {new_port}", + ) + ) results1 = [x.get() for x in results1] for exec, output in results1: lines = output.split("\n") @@ -76,13 +99,18 @@ def get_snapshot_overhead(): print(exec, exec_time) results.append((exec, exec_time)) # discover 4 aot_variant + def get_burst_compute(): results = [] results1 = [] for _ in range(common_util.trial): for i in range(len(cmd)): aot = cmd[i] + ".aot" - results1.append(pool.apply_async(common_util.run_checkpoint_restore_slowtier, (aot, arg[i], envs[i]))) + results1.append( + pool.apply_async( + common_util.run_checkpoint_restore_slowtier, (aot, arg[i], envs[i]) + ) + ) # print the results results1 = [x.get() for x in results1] for exec, output in results1: @@ -94,13 +122,32 @@ def get_burst_compute(): results.append((exec, exec_time)) # discover 4 aot_variant +def write_to_csv(filename): + # 'data' is a list of tuples, e.g., [(checkpoint_result_0, checkpoint_result_1, restore_result_2), ...] 
+ with open(filename, "a+", newline="") as csvfile: + writer = csv.writer(csvfile) + # Optionally write headers + writer.writerow( + [ + "name", + "fasttier", + "slowtier", + "snapshot Time", + ] + ) + + # Write the data + for idx, row in enumerate(fasttier): + writer.writerow([row[0], row[1], slowtier[1], snapshot[1]]) + + def plot(): fasttier = get_fasttier_result() slowtier = get_slowtier_result() snapshot = get_snapshot_overhead() reu = get_burst_compute() - # plot skew + # plot skew write_to_csv("burst_computing.csv") - + results = read_from_csv("burst_computing.csv") - plot(results) \ No newline at end of file + plot(results) diff --git a/artifact/usecase_optimistic_computing.py b/artifact/usecase_optimistic_computing.py index 0c8238a..dbd9847 100644 --- a/artifact/usecase_optimistic_computing.py +++ b/artifact/usecase_optimistic_computing.py @@ -5,6 +5,9 @@ import numpy as np from collections import defaultdict +ip = ["128.114.53.32", "128.114.53.24"] +port = 1234 +new_port = 1235 cmd = [ "bc", # low priority task @@ -15,17 +18,18 @@ "gapbs", ] arg = [ - ["-g20", "-n100"], - ["-g20", "-n1000"], + ["-g10", "-n1000"], + ["-g10", "-n10000"], ] envs = [ "OMP_NUM_THREADS=1", "OMP_NUM_THREADS=1", ] -pool = Pool(processes=20) +pool = Pool(processes=1) -def get_avx512_result(): + +def get_fasttier_result(): results = [] results1 = [] for _ in range(common_util.trial): @@ -42,13 +46,25 @@ def get_avx512_result(): print(exec, exec_time) results.append((exec, exec_time)) # discover 4 aot_variant -def get_arm_result(): + +def get_slowtier_result(): results = [] results1 = [] for _ in range(common_util.trial): for i in range(len(cmd)): aot = cmd[i] + ".aot" - results1.append(pool.apply_async(common_util.run_slowtier, (aot, arg[i], envs[i]))) + results1.append( + pool.apply_async( + common_util.run_slowtier, + ( + aot, + arg[i], + envs[i], + f"-i {ip[0]} -e {port}", + f"-o {ip[1]} -s {port}", + ), + ) + ) # print the results results1 = [x.get() for x in results1] for exec, output in results1: @@ -59,6 +75,31 @@ def get_arm_result(): print(exec, exec_time) results.append((exec, exec_time)) # discover 4 aot_variant + +def get_snapshot_overhead(): + results = [] + results1 = [] + for _ in range(common_util.trial): + for i in range(len(cmd)): + aot = cmd[i] + ".aot" + results1.append( + pool.apply_async( + common_util.run_checkpoint_restore_slowtier, + (aot, arg[i], envs[i]), + f"-o {ip[0]} -s {port}", + f"-i {ip[1]} -e {port}", + ) + ) + results1 = [x.get() for x in results1] + for exec, output in results1: + lines = output.split("\n") + for line in lines: + if line.__contains__("Snapshot time:"): + exec_time = line.split(" ")[-2] + print(exec, exec_time) + results.append((exec, exec_time)) # discover 4 aot_variant + + def get_snapshot_overhead(): results = [] results1 = [] @@ -92,8 +133,30 @@ def optimistic_snapshot_overhead(): # monitoring CPU exec_time = line.split(" ")[-2] print(exec, exec_time) results.append((exec, exec_time)) # discover 4 aot_variant + +def write_to_csv(filename): + # 'data' is a list of tuples, e.g., [(checkpoint_result_0, checkpoint_result_1, restore_result_2), ...] 
+ with open(filename, "a+", newline="") as csvfile: + writer = csv.writer(csvfile) + # Optionally write headers + writer.writerow( + [ + "name", + "fasttier", + "slowtier", + "snapshot Time", + ] + ) + + # Write the data + for idx, row in enumerate(fasttier): + writer.writerow([row[0], row[1], slowtier[1], snapshot[1]]) + def plot(): + with open("MVVM_checkpoint.out","r"): + +if __name__ == "__main__": avx512 = get_avx512_result() arm = get_arm_result() snapshot = get_snapshot_overhead() diff --git a/include/wamr_export.h b/include/wamr_export.h index 045e5fd..eba2ec1 100644 --- a/include/wamr_export.h +++ b/include/wamr_export.h @@ -94,6 +94,7 @@ void lightweight_uncheckpoint(WASMExecEnv *); void wamr_wait(wasm_exec_env_t); void sigint_handler(int sig); void register_sigtrap(); +void register_sigint(); void sigtrap_handler(int sig); extern size_t snapshot_threshold; extern int stop_func_threshold; diff --git a/include/wamr_read_write.h b/include/wamr_read_write.h index 24ce898..e9a7af0 100644 --- a/include/wamr_read_write.h +++ b/include/wamr_read_write.h @@ -21,12 +21,12 @@ #include #endif struct WriteStream { - virtual bool write(const char *data, std::size_t sz) const {}; + virtual bool write(const char *data, std::size_t sz) const {}; }; struct ReadStream { - virtual bool read(char *data, std::size_t sz) const {}; - virtual bool ignore(std::size_t sz) const {}; - virtual std::size_t tellg() const {}; + virtual bool read(char *data, std::size_t sz) const {}; + virtual bool ignore(std::size_t sz) const {}; + virtual std::size_t tellg() const {}; }; struct FwriteStream : public WriteStream { FILE *file; @@ -198,15 +198,16 @@ class RDMAReadStream { qp_init_attr.send_cq = cq; qp_init_attr.recv_cq = cq; qp_init_attr.qp_type = IBV_QPT_RC; // Reliable Connection - qp_init_attr.cap.max_send_wr = 1; // Max Work Requests - qp_init_attr.cap.max_recv_wr = 1; + qp_init_attr.cap.max_send_wr = 10; // Max Work Requests + qp_init_attr.cap.max_recv_wr = 10; qp_init_attr.cap.max_send_sge = 1; // Max Scatter/Gather Elements qp_init_attr.cap.max_recv_sge = 1; qp = ibv_create_qp(pd, &qp_init_attr); // Allocate and register memory region buffer = std::malloc(buffer_size); - mr = ibv_reg_mr(pd, buffer, buffer_size, IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE); + mr = ibv_reg_mr(pd, buffer, buffer_size, + IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE); // Connection setup, exchange QP info with the peer, etc., are omitted for simplicity } @@ -270,9 +271,7 @@ class RDMAReadStream { return true; } - std::size_t tellg() const { - return position; - } + std::size_t tellg() const { return position; } ~RDMAReadStream() { ibv_dereg_mr(mr); @@ -299,7 +298,32 @@ class RDMAWriteStream { public: explicit RDMAWriteStream(const char *device_name, std::size_t buffer_size) : buffer_size(buffer_size) { // Initialization (device, PD, CQ, QP, buffer, and MR) is similar to RDMAReadStream + // Initialize RDMA device + struct ibv_device **dev_list = ibv_get_device_list(NULL); + context = ibv_open_device(*dev_list); + + // Allocate Protection Domain + pd = ibv_alloc_pd(context); + + // Create Completion Queue + cq = ibv_create_cq(context, 1, NULL, NULL, 0); + // Initialize QP + struct ibv_qp_init_attr qp_init_attr; + memset(&qp_init_attr, 0, sizeof(qp_init_attr)); + qp_init_attr.send_cq = cq; + qp_init_attr.recv_cq = cq; + qp_init_attr.qp_type = IBV_QPT_RC; // Reliable Connection + qp_init_attr.cap.max_send_wr = 10; // Max Work Requests + qp_init_attr.cap.max_recv_wr = 10; + 
qp_init_attr.cap.max_send_sge = 1; // Max Scatter/Gather Elements + qp_init_attr.cap.max_recv_sge = 1; + qp = ibv_create_qp(pd, &qp_init_attr); + + // Allocate and register memory region + buffer = std::malloc(buffer_size); + mr = ibv_reg_mr(pd, buffer, buffer_size, + IBV_ACCESS_LOCAL_READ | IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ); // Assume remote_address and remote_key are set up through some initialization method } diff --git a/src/checkpoint.cpp b/src/checkpoint.cpp index c551946..7a4a02e 100644 --- a/src/checkpoint.cpp +++ b/src/checkpoint.cpp @@ -26,7 +26,7 @@ WAMRInstance *wamr = nullptr; std::ostringstream re{}; -SocketWriteStream *writer; +WriteStream *writer; std::vector> as; std::mutex as_mtx; long snapshot_memory = 0; @@ -89,6 +89,7 @@ int main(int argc, char *argv[]) { SPDLOG_DEBUG("arg {}", e); } register_sigtrap(); + register_sigint(); if (offload_addr.empty()) writer = new FwriteStream((removeExtension(target) + ".bin").c_str()); #ifndef _WIN32 @@ -101,29 +102,6 @@ int main(int argc, char *argv[]) { wamr->get_int3_addr(); wamr->replace_int3_with_nop(); - // freopen("output.txt", "w", stdout); -#if defined(_WIN32) - // Define the sigaction structure - signal(SIGINT, sigint_handler); -#else - // Define the sigaction structure - struct sigaction sa {}; - - // Clear the structure - sigemptyset(&sa.sa_mask); - - // Set the signal handler function - sa.sa_handler = sigint_handler; - - // Set the flags - sa.sa_flags = SA_RESTART; - - // Register the signal handler for SIGINT - if (sigaction(SIGINT, &sa, nullptr) == -1) { - SPDLOG_ERROR("Error: cannot handle SIGINT"); - return 1; - } -#endif // get current time auto start = std::chrono::high_resolution_clock::now(); diff --git a/src/restore.cpp b/src/restore.cpp index 1ce7c7b..022a69c 100644 --- a/src/restore.cpp +++ b/src/restore.cpp @@ -57,7 +57,7 @@ int main(int argc, char **argv) { snapshot_threshold = count; register_sigtrap(); - + register_sigint(); wamr = new WAMRInstance(target.c_str(), false); wamr->instantiate(); diff --git a/src/wamr_export.cpp b/src/wamr_export.cpp index 13c80b9..c9c9ae2 100644 --- a/src/wamr_export.cpp +++ b/src/wamr_export.cpp @@ -13,6 +13,7 @@ #include "wamr.h" #include "wamr_wasi_context.h" #include +#include extern WAMRInstance *wamr; size_t snapshot_threshold; size_t call_count = 0; @@ -486,9 +487,12 @@ void print_memory(WASMExecEnv *exec_env) { void segfault_handler(int sig) { // auto end = std::chrono::high_resolution_clock::now(); // auto dur = std::chronro::duration_cast(end - wamr->time); + auto exec_env = wamr->get_exec_env(); + print_exec_env_debug_info(exec_env); + print_memory(exec_env); // printf("Execution time: %f s\n", dur.count() / 1000000.0); - exit(0); + exit(EXIT_FAILURE); } void sigtrap_handler(int sig) { // fprintf(stderr, "Caught signal %d, performing custom logic...\n", sig); @@ -521,10 +525,10 @@ void register_sigtrap() { sa.sa_handler = sigtrap_handler; sa.sa_flags = SA_RESTART; - // struct sigaction sb {}; - // sigemptyset(&sb.sa_mask); - // sb.sa_handler = segfault_handler; - // sb.sa_flags = SA_RESTART; + struct sigaction sb {}; + sigemptyset(&sb.sa_mask); + sb.sa_handler = segfault_handler; + sb.sa_flags = SA_RESTART; // Register the signal handler for SIGTRAP if (sigaction(SIGTRAP, &sa, nullptr) == -1) { @@ -535,12 +539,12 @@ void register_sigtrap() { SPDLOG_ERROR("Error: cannot handle SIGSYS"); exit(-1); } else { - if (sigaction(SIGSEGV, &sa, nullptr) == -1) { - SPDLOG_ERROR("Error: cannot handle SIGSEGV"); - exit(-1); - } else { - SPDLOG_DEBUG( "SIGSEGV 
registered"); - } + // if (sigaction(SIGSEGV, &sb, nullptr) == -1) { + // SPDLOG_ERROR("Error: cannot handle SIGSEGV"); + // exit(-1); + // } else { + // SPDLOG_DEBUG( "SIGSEGV registered"); + // } SPDLOG_DEBUG("SIGSYS registered"); } SPDLOG_DEBUG("SIGTRAP registered"); @@ -574,3 +578,27 @@ void sigint_handler(int sig) { } #endif } +void register_sigint() { +#if defined(_WIN32) + // Define the sigaction structure + signal(SIGINT, sigint_handler); +#else + // Define the sigaction structure + struct sigaction sa {}; + + // Clear the structure + sigemptyset(&sa.sa_mask); + + // Set the signal handler function + sa.sa_handler = sigint_handler; + + // Set the flags + sa.sa_flags = SA_RESTART; + + // Register the signal handler for SIGINT + if (sigaction(SIGINT, &sa, nullptr) == -1) { + SPDLOG_ERROR("Error: cannot handle SIGINT"); + exit(EXIT_FAILURE); + } +#endif +} \ No newline at end of file